diff --git a/model.py b/model.py index 0bc4576..48e36eb 100644 --- a/model.py +++ b/model.py @@ -57,14 +57,31 @@ def conv2d(x, W, stride): keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) +#FCL12 +W_fc12 = weight_variable([1164, 800]) +b_fc12 = bias_variable([800]) + +h_fc12 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc12) + b_fc12) + +h_fc12_drop = tf.nn.dropout(h_fc12, keep_prob) + #FCL 2 -W_fc2 = weight_variable([1164, 100]) +W_fc2 = weight_variable([800, 100]) b_fc2 = bias_variable([100]) -h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) +h_fc2 = tf.nn.relu(tf.matmul(h_fc12_drop, W_fc2) + b_fc2) h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob) + +#FCL 2 +#W_fc2 = weight_variable([1164, 100]) +#b_fc2 = bias_variable([100]) + +#h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) + +#h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob) + #FCL 3 W_fc3 = weight_variable([100, 50]) b_fc3 = bias_variable([50]) @@ -85,4 +102,4 @@ def conv2d(x, W, stride): W_fc5 = weight_variable([10, 1]) b_fc5 = bias_variable([1]) -y = tf.mul(tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5), 2) #scale the atan output +y = tf.multiply(tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5), 2) #scale the atan output diff --git a/run.py b/run.py index fa94c3f..537a2ea 100644 --- a/run.py +++ b/run.py @@ -18,7 +18,7 @@ ret, frame = cap.read() image = scipy.misc.imresize(frame, [66, 200]) / 255.0 degrees = model.y.eval(feed_dict={model.x: [image], model.keep_prob: 1.0})[0][0] * 180 / scipy.pi - call("clear") + #call("clear") print("Predicted steering angle: " + str(degrees) + " degrees") cv2.imshow('frame', frame) #make smooth angle transitions by turning the steering wheel based on the difference of the current angle diff --git a/run_dataset.py b/run_dataset.py index 1c1a87d..86e01a1 100644 --- a/run_dataset.py +++ b/run_dataset.py @@ -9,6 +9,7 @@ saver.restore(sess, "save/model.ckpt") img = cv2.imread('steering_wheel_image.jpg',0) +#img = cv2.imread('me0.jpg',0) 
rows,cols = img.shape smoothed_angle = 0 @@ -18,7 +19,7 @@ full_image = scipy.misc.imread("driving_dataset/" + str(i) + ".jpg", mode="RGB") image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0 degrees = model.y.eval(feed_dict={model.x: [image], model.keep_prob: 1.0})[0][0] * 180.0 / scipy.pi - call("clear") + #call("clear") print("Predicted steering angle: " + str(degrees) + " degrees") cv2.imshow("frame", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR)) #make smooth angle transitions by turning the steering wheel based on the difference of the current angle diff --git a/save/model.ckpt.meta b/save/model.ckpt.meta index 8afe34b..9d902bd 100644 Binary files a/save/model.ckpt.meta and b/save/model.ckpt.meta differ diff --git a/train.py b/train.py index 4d6a6a2..de52a17 100644 --- a/train.py +++ b/train.py @@ -11,23 +11,26 @@ train_vars = tf.trainable_variables() -loss = tf.reduce_mean(tf.square(tf.sub(model.y_, model.y))) + tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst +loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) + tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst train_step = tf.train.AdamOptimizer(1e-4).minimize(loss) -sess.run(tf.initialize_all_variables()) +#sess.run(tf.initialize_all_variables()) +sess.run(tf.global_variables_initializer()) # create a summary to monitor cost tensor -tf.scalar_summary("loss", loss) +tf.summary.scalar("loss", loss) # merge all summaries into a single op -merged_summary_op = tf.merge_all_summaries() +#merged_summary_op = tf.merge_all_summaries() +merged_summary_op = tf.summary.merge_all() saver = tf.train.Saver() # op to write logs to Tensorboard logs_path = './logs' -summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph()) +#summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph()) +summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph()) -epochs = 30 -batch_size = 100 +epochs = 3 +batch_size = 10 # train 
over the dataset about 30 times for epoch in range(epochs):