diff --git a/Tensorflow/tf.logs/run-20180827151946/events.out.tfevents.1535384221.LordSomen b/Tensorflow/tf.logs/run-20180827151946/events.out.tfevents.1535384221.LordSomen
index caf809b..a7db969 100644
Binary files a/Tensorflow/tf.logs/run-20180827151946/events.out.tfevents.1535384221.LordSomen and b/Tensorflow/tf.logs/run-20180827151946/events.out.tfevents.1535384221.LordSomen differ
diff --git a/Tensorflow/tf.py b/Tensorflow/tf.py
index 30b47d3..8b3fb12 100644
--- a/Tensorflow/tf.py
+++ b/Tensorflow/tf.py
@@ -116,22 +116,28 @@ def reset_graph(seed=42):
 from sklearn.preprocessing import StandardScaler
 scaler = StandardScaler()
 scaled_housing_data = scaler.fit_transform(housing.data)
-scaled_housing_data_with_bias = np.c_[np.ones((m,1)),scaled_housing_data]
+scaled_housing_data_with_bias = np.c_[np.ones((m,1)),
+                                      scaled_housing_data]
 print(scaled_housing_data_with_bias)
 
 #%%
 n_epochs = 1000
 learning_rate = 0.01
-X = tf.constant(scaled_housing_data_with_bias, dtype=tf.float32, name="X")
-y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
-theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name="theta")
+X = tf.constant(scaled_housing_data_with_bias, dtype=tf.float32,
+                name="X")
+y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32,
+                name="y")
+theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0),
+                    name="theta")
 y_pred = tf.matmul(X, theta, name="predictions")
 error = y_pred - y
 mse = tf.reduce_mean(tf.square(error), name="mse")
 # gradients = 2/m * tf.matmul(tf.transpose(X), error)
 # gradients = tf.gradients(mse, [theta])[0]
-# training_op = tf.assign(theta, theta - learning_rate * gradients)
-optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+# training_op = tf.assign(theta,
+#                         theta - learning_rate * gradients)
+optimizer = tf.train.GradientDescentOptimizer(
+    learning_rate=learning_rate)
 training_op = optimizer.minimize(mse)
 init = tf.global_variables_initializer()
 with tf.Session() as sess:
@@ -181,8 +187,10 @@ def fetch_batch(epoch, batch_index, batch_size):
     sess.run(init)
     for epoch in range(n_epochs):
         for batch_index in range(n_batches):
-            X_batch, Y_batch = fetch_batch(epoch, batch_index, batch_size)
-            sess.run(training_op, feed_dict={X: X_batch, Y: Y_batch})
+            X_batch, Y_batch = fetch_batch(epoch, batch_index,
+                                           batch_size)
+            sess.run(training_op, feed_dict={X: X_batch,
+                                             Y: Y_batch})
 
     best_theta = theta.eval()
 
@@ -239,7 +247,8 @@ def fetch_batch(epoch, batch_index, batch_size):
 #%%
 reset_graph()
 # this loads the graph structure
-saver = tf.train.import_meta_graph("/tmp/my_model_final.ckpt.meta")
+saver = tf.train.import_meta_graph(
+    "/tmp/my_model_final.ckpt.meta")
 theta = tf.get_default_graph().get_tensor_by_name("theta:0")
 with tf.Session() as sess:
     saver.restore(sess, "/tmp/my_model_final.ckpt")
@@ -250,6 +259,7 @@ def fetch_batch(epoch, batch_index, batch_size):
 
 #%%
 ''' implementing tensorboard'''
+
 reset_graph()
 
 from datetime import datetime
@@ -303,3 +313,66 @@ def fetch_batch(epoch, batch_index, batch_size):
 #%%
 file_writer.close()
 print(best_theta)
+
+#%%
+''' Name scopes using tensorflow '''
+
+reset_graph()
+now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
+root_logdir = "Tensorflow/tf_logs"
+logdir = "{}/run-{}/".format(root_logdir, now)
+
+n_epochs = 1000
+learning_rate = 0.01
+X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X")
+y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
+theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0,
+                                      seed=42), name="theta")
+y_pred = tf.matmul(X, theta, name="predictions")
+
+#%%
+with tf.name_scope("loss") as scope:
+    error = y_pred - y
+    mse = tf.reduce_mean(tf.square(error), name="mse")
+
+#%%
+optimizer = tf.train.GradientDescentOptimizer(
+    learning_rate=learning_rate)
+training_op = optimizer.minimize(mse)
+
+init = tf.global_variables_initializer()
+
+mse_summary = tf.summary.scalar('MSE', mse)
+file_writer = tf.summary.FileWriter(logdir,
+                                    tf.get_default_graph())
+
+#%%
+n_epochs = 10
+batch_size = 100
+n_batches = int(np.ceil(m / batch_size))
+
+with tf.Session() as sess:
+    sess.run(init)
+
+    for epoch in range(n_epochs):
+        for batch_index in range(n_batches):
+            X_batch, y_batch = fetch_batch(epoch, batch_index,
+                                           batch_size)
+            if batch_index % 10 == 0:
+                summary_str = mse_summary.eval(
+                    feed_dict={X: X_batch, y: y_batch})
+                step = epoch * n_batches + batch_index
+                file_writer.add_summary(summary_str, step)
+            sess.run(training_op, feed_dict={X: X_batch,
+                                             y: y_batch})
+
+    best_theta = theta.eval()
+
+file_writer.flush()
+file_writer.close()
+print("Best theta:")
+print(best_theta)
+
+#%%
+print(error.op.name)
+print(mse.op.name)
\ No newline at end of file
diff --git a/Tensorflow/tf_logs/run-20180828080015/events.out.tfevents.1535443627.LordSomen b/Tensorflow/tf_logs/run-20180828080015/events.out.tfevents.1535443627.LordSomen
new file mode 100644
index 0000000..bd6baf3
Binary files /dev/null and b/Tensorflow/tf_logs/run-20180828080015/events.out.tfevents.1535443627.LordSomen differ
diff --git a/Tensorflow/tf_logs/run-20180828081337/events.out.tfevents.1535444023.LordSomen b/Tensorflow/tf_logs/run-20180828081337/events.out.tfevents.1535444023.LordSomen
new file mode 100644
index 0000000..3d2d2c2
Binary files /dev/null and b/Tensorflow/tf_logs/run-20180828081337/events.out.tfevents.1535444023.LordSomen differ
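
Note on fetch_batch: its definition is not part of this diff (it only appears
as hunk context), so the call sites above assume a helper that draws a random
mini-batch of the housing data. A minimal sketch of such a helper, under that
assumption -- the body below is hypothetical, not taken from the commit, and
it relies on the module-level m, n_batches, scaled_housing_data_with_bias and
housing already defined in tf.py:

import numpy as np

def fetch_batch(epoch, batch_index, batch_size):
    # Hypothetical implementation: seed per (epoch, batch) so repeated
    # runs draw the same sequence of mini-batches.
    np.random.seed(epoch * n_batches + batch_index)
    indices = np.random.randint(m, size=batch_size)  # m = number of rows
    X_batch = scaled_housing_data_with_bias[indices]
    y_batch = housing.target.reshape(-1, 1)[indices]
    return X_batch, y_batch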
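
Note on the name-scope block: the two prints added at the end of tf.py
(error.op.name and mse.op.name) should show the "loss/" prefix that
tf.name_scope applies to every op created inside it. A standalone sketch of
the same check, assuming the TensorFlow 1.x API used throughout this file:

import tensorflow as tf

tf.reset_default_graph()
a = tf.placeholder(tf.float32, shape=(None, 1), name="a")
b = tf.placeholder(tf.float32, shape=(None, 1), name="b")
with tf.name_scope("loss"):
    error = a - b  # the subtraction op is named "sub" inside the scope
    mse = tf.reduce_mean(tf.square(error), name="mse")

print(error.op.name)  # -> loss/sub
print(mse.op.name)    # -> loss/mse

The event files committed under Tensorflow/tf_logs can then be inspected with
tensorboard --logdir Tensorflow/tf_logs, where the scoped ops appear grouped
under a single "loss" node in the graph view.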