Nope, CNN cannot be done on this image; the image features are too poor and my PC can't handle the higher-quality images. Switching to using info from the image.
vigneshhari committed Jul 28, 2018
1 parent e5b2d97 commit e50db77
Showing 10 changed files with 733 additions and 8 deletions.
291 changes: 291 additions & 0 deletions CNN_with_image/.ipynb_checkpoints/Testing-checkpoint.ipynb

Large diffs are not rendered by default.

291 changes: 291 additions & 0 deletions CNN_with_image/Testing.ipynb

Large diffs are not rendered by default.

130 changes: 130 additions & 0 deletions CNN_with_image/model.py
@@ -0,0 +1,130 @@
import tensorflow as tf
import csv
from PIL import Image
import numpy as np
import PIL.ImageOps
import cv2
from selenium import webdriver
from selenium.webdriver.common.keys import Keys



files = []
action = []


with open("1/out.csv", "r") as f:
    data = csv.reader(f, delimiter=',')
    for i in data:
        # csv.reader yields strings; the original compared i[0] to the
        # int 1, which is never equal, so every frame got action 1.
        if i[0] == "1":
            action.append(0)
        else:
            action.append(1)
        files.append(i[1])


def parse_function(filename, basewidth):
    img = Image.open(filename).convert('L')
    #img = PIL.ImageOps.invert(img)
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    return img
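
# Quick sanity check (a sketch; assumes 1/out.csv is non-empty and its
# screenshots share the training aspect ratio): basewidth=270 yields a
# 270x75 image, i.e. 270 * 75 = 20250 values, matching x_inp below.
_sample = parse_function(files[0], 270)
assert len(np.array(_sample).flatten()) == 20250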


x_v = [np.array(parse_function(i, 270)).flatten() for i in files]
y_v = action


print "Loaded all Images"

print '''Starting Training CNN ( Tensorflow ) '''

tf.set_random_seed(777)



x_inp = tf.placeholder(tf.float32, [None, 20250])
y = tf.placeholder(tf.float32, [None])

x = tf.reshape(x_inp, [-1, 75, 270, 1])

# Small random init: zero-initialised filters all receive identical
# gradients and never break symmetry, so the net could not learn.
weight_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
weight_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
weight_fc = tf.Variable(tf.truncated_normal([5 * 18 * 64, 800], stddev=0.1))
weight_out = tf.Variable(tf.truncated_normal([800, 1], stddev=0.1))

bias_conv1 = tf.Variable(tf.random_normal([32]))
bias_conv2 = tf.Variable(tf.random_normal([64]))
bias_fc = tf.Variable(tf.random_normal([800]))
bias_out = tf.Variable(tf.random_normal([1]))

conv1 = tf.add(tf.nn.conv2d(x , weight_conv1 , strides=[1,1,1,1] , padding="SAME") , bias_conv1)
max_pool1 = tf.nn.max_pool(conv1 , ksize=[1,5,5,1] , strides=[1,5,5,1] , padding="SAME")

conv2 = tf.add(tf.nn.conv2d(max_pool1 , weight_conv2 , strides=[1,1,1,1] , padding="SAME") , bias_conv2)
max_pool2 = tf.nn.max_pool(conv2 , ksize=[1,3,3,1] , strides=[1,3,3,1] , padding="SAME")

fc = tf.reshape(max_pool2 , [-1 , 5 * 18 * 64 ])
fc = tf.nn.relu( tf.add(tf.matmul(fc , weight_fc) , bias_fc) )

out = tf.matmul(fc , weight_out) + bias_out
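
# Shape sketch: with SAME padding each max-pool divides height/width by
# its stride (rounding up), which is where the 5 * 18 * 64 flatten size
# above comes from.
import math
_h, _w = 75, 270
_h, _w = int(math.ceil(_h / 5.0)), int(math.ceil(_w / 5.0))  # pool1 -> 15 x 54
_h, _w = int(math.ceil(_h / 3.0)), int(math.ceil(_w / 3.0))  # pool2 -> 5 x 18
assert (_h, _w) == (5, 18)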

# Sigmoid cross-entropy on the single logit; the original hand-rolled loss
# took the log of an unbounded logit and dropped the (1 - y) term.
labels = tf.reshape(y, [-1, 1])
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=out))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

# argmax over a single output column is always 0; threshold at 0.5 instead.
pred = tf.nn.sigmoid(out)
correct_prediction = tf.equal(tf.cast(pred > 0.5, tf.float32), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

hm_epochs = 2
batch_size = 50



for epoch in range(hm_epochs):
    epoch_loss = 0
    for i in range(int(len(files) // batch_size)):
        epoch_x, epoch_y = x_v[i * batch_size : (i + 1) * batch_size], y_v[i * batch_size : (i + 1) * batch_size]
        #print sess.run(max_pool1, feed_dict={x_inp: epoch_x, y: epoch_y}).shape
        _, c = sess.run([optimizer, cost], feed_dict={x_inp: epoch_x, y: epoch_y})
        epoch_loss += c
        print c
    print 'Completed Epoch# : ', epoch, ' : Epochs Left : ', hm_epochs - epoch - 1, ' : loss : ', epoch_loss


#print 'Accuracy For Training is ', sess.run(accuracy, feed_dict={x_inp: np.array(x_v[0:500]), y: y_v[0:500]}), "%"
# Stop before the live-play loop below; per the commit message, this
# CNN approach is being abandoned.
exit()

import math

driver = webdriver.Chrome()

driver.get("chrome://dino")
elem = driver.find_element_by_id("t")

variable = 1
while True:
    if elem.get_attribute("class") == "offline":
        continue
    if driver.execute_script("return Runner.instance_.playing;") == False:
        elem.send_keys(Keys.SPACE)
        continue

    driver.save_screenshot("temp/screenshot-{}.png".format(variable))
    img = cv2.imread("temp/screenshot-{}.png".format(variable))

    img = img[168:400, 105:]

    ret, thresh_img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    cv2.imwrite("temp/train-1.png", thresh_img)

    # Resize with the same basewidth as training (270, not 200); otherwise
    # the flattened image cannot match the 20250-wide x_inp placeholder.
    value = parse_function("temp/train-1.png", 270)

    # out is a raw logit, so run the sigmoid node for a probability.
    value = sess.run(pred, feed_dict={x_inp: [np.array(value).flatten()]})

    print value

    if round(float(value[0][0])) == 0:
        elem.send_keys(Keys.SPACE)
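
# Where the commit message is headed ("switching to use info from image"):
# one option is to skip pixels entirely and read the game state that the
# loop above already queries through Runner.instance_. A sketch only; it
# assumes the dino game exposes horizon.obstacles with an xPos field,
# which is not something this commit relies on.
def nearest_obstacle_x(driver):
    # x position of the closest obstacle, or -1 when none is on screen
    return driver.execute_script(
        "var o = Runner.instance_.horizon.obstacles;"
        "return o.length ? o[0].xPos : -1;")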
Binary file added CNN_with_image/temp/screenshot-1.png
Binary file added CNN_with_image/temp/train-1.png
.gitignore
@@ -1,4 +1,5 @@
 # Don't track content of these folders
 screens/
-1/
 Logistic_Regression_with_image/1/
+CNN_with_image/1/
+temp/
File renamed without changes.
File renamed without changes.
File renamed without changes.
@@ -35,7 +35,7 @@ def parse_function(filename,basewidth):

print "Loaded all Images"

print "Starting Training on NN ( Tensorflow ) "
print "Starting Training NN ( Tensorflow ) "

'''
Using a Neural Network with No Hidden Layers with 55*200 input layers. total of 55 * 200 Values.
@@ -50,9 +50,10 @@ def parse_function(filename,basewidth):

 # Parameters
 #learning_rate = .00000000001
-learning_rate = .0000000001
+#learning_rate = .0000000000001
+learning_rate = .00000000000000001
 training_epochs = 100
-batch_size = 100
+batch_size = 1
 display_step = 1

tf.set_random_seed(777)
Expand All @@ -66,7 +67,7 @@ def parse_function(filename,basewidth):


 #cost = tf.reduce_mean(-1 * (tf.reduce_sum(y*tf.log(pred) + tf.reduce_sum( (y-1)*tf.log(pred -1) ) )))
-cost = tf.reduce_mean(-1 * (tf.reduce_sum(y*tf.log(pred + 1e-30 )) ) )
+cost = tf.reduce_mean(-1 * (tf.reduce_sum(y*tf.log(pred )) ) )
 
 optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

@@ -80,11 +81,18 @@ def parse_function(filename,basewidth):
 sess.run(init)
 
 for epoch in range(training_epochs):
+    epoch_loss = 0
+    print sess.run(W, feed_dict={x: x_v,y: y_v})
+    for i in range(int(len(files)//batch_size)):
+        epoch_x, epoch_y = x_v[ (i * batch_size) : (i +1 ) * batch_size ] , y_v[ (i * batch_size) : (i +1 ) * batch_size ]
+        #print sess.run(max_pool1, feed_dict={x_inp: epoch_x, y: epoch_y}).shape
+        _, c , w = sess.run([optimizer, cost, W], feed_dict={x: x_v,y: y_v})
+        print sum(w) , c
+        epoch_loss += c
 
-    _, c , w = sess.run([optimizer, cost, W], feed_dict={x: x_v,y: y_v})
-    print sum(w)
     if (epoch+1) % display_step == 0:
-        print("Epoch:", '%04d' % (epoch+1), "cost=", "{}".format(c))
+        print("Epoch:", '%04d' % (epoch+1), "cost=", "{}".format(epoch_loss))
 
 print(sess.run([accuracy , pred] ,feed_dict={x: x_v,y: y_v }))

@@ -115,5 +123,9 @@ def parse_function(filename,basewidth):

 value = parse_function("temp/train-1.png",200)
 
-if(round(sess.run(pred , feed_dict={x : [np.array(value).flatten()]})) == 0) :
+value = sess.run(pred , feed_dict={x : [np.array(value).flatten()]})
+
+print value
+
+if(round(value) == 0) :
     elem.send_keys(Keys.SPACE)
