
implemented namescope
LordSomen committed Aug 28, 2018
1 parent 6e99da6 commit 270d5c8
Showing 4 changed files with 82 additions and 9 deletions.
91 changes: 82 additions & 9 deletions Tensorflow/tf.py
@@ -116,22 +116,28 @@ def reset_graph(seed=42):
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
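# Gradient descent converges poorly on unscaled features, hence the
# StandardScaler pass before the bias column is added below.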
scaled_housing_data_with_bias = np.c_[np.ones((m, 1)), scaled_housing_data]
print(scaled_housing_data_with_bias)

#%%
n_epochs = 1000
learning_rate = 0.01
X = tf.constant(scaled_housing_data_with_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
# gradients = 2/m * tf.matmul(tf.transpose(X), error)
# gradients = tf.gradients(mse, [theta])[0]
# training_op = tf.assign(theta, theta - learning_rate * gradients)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
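# The optimizer can be swapped for any other tf.train optimizer without
# touching the rest of the graph. A hedged example (not part of this
# commit), using momentum, which typically converges faster here:
# optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
#                                        momentum=0.9)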
init = tf.global_variables_initializer()
with tf.Session() as sess:
@@ -181,8 +187,10 @@ def fetch_batch(epoch, batch_index, batch_size):
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, Y_batch = fetch_batch(epoch, batch_index, batch_size)
            sess.run(training_op, feed_dict={X: X_batch, Y: Y_batch})
    best_theta = theta.eval()
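# fetch_batch is defined earlier in the file (outside this hunk). For
# reference, a minimal sketch of what such a helper typically looks like
# (an assumption, not the exact code from this repository):
#
# def fetch_batch(epoch, batch_index, batch_size):
#     np.random.seed(epoch * n_batches + batch_index)  # reproducible draws
#     indices = np.random.randint(m, size=batch_size)  # random row indices
#     X_batch = scaled_housing_data_with_bias[indices]
#     Y_batch = housing.target.reshape(-1, 1)[indices]
#     return X_batch, Y_batch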


@@ -239,7 +247,8 @@ def fetch_batch(epoch, batch_index, batch_size):
#%%
reset_graph()
# this loads the graph structure
saver = tf.train.import_meta_graph("/tmp/my_model_final.ckpt.meta")
theta = tf.get_default_graph().get_tensor_by_name("theta:0")
with tf.Session() as sess:
    saver.restore(sess, "/tmp/my_model_final.ckpt")
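    # The checkpoint restored here was presumably produced earlier with a
    # matching save call (not shown in this hunk), along the lines of:
    #     saver = tf.train.Saver()
    #     saver.save(sess, "/tmp/my_model_final.ckpt")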
@@ -250,6 +259,7 @@ def fetch_batch(epoch, batch_index, batch_size):

#%%
''' Implementing TensorBoard '''

reset_graph()

from datetime import datetime
@@ -303,3 +313,66 @@ def fetch_batch(epoch, batch_index, batch_size):
#%%
file_writer.close()
print(best_theta)

#%%
''' Name scopes using TensorFlow '''

reset_graph()
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "Tensorflow/tf_logs"
logdir = "{}/run-{}/".format(root_logdir, now)
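# Each run gets its own timestamped subdirectory, so TensorBoard can
# display multiple runs side by side without overwriting event files.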

n_epochs = 1000
learning_rate = 0.01
X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42),
                    name="theta")
y_pred = tf.matmul(X, theta, name="predictions")

#%%
with tf.name_scope("loss") as scope:
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
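# Ops created inside the name_scope get the "loss/" prefix, so
# TensorBoard's graph view collapses them into a single "loss" node.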

#%%
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

init = tf.global_variables_initializer()

mse_summary = tf.summary.scalar('MSE', mse)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
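# Once a run has written event files, point TensorBoard at the log root
# to inspect the graph and the MSE curve:
#   tensorboard --logdir Tensorflow/tf_logs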

#%%
n_epochs = 10
batch_size = 100
n_batches = int(np.ceil(m / batch_size))

with tf.Session() as sess:
    sess.run(init)

    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            if batch_index % 10 == 0:
                summary_str = mse_summary.eval(
                    feed_dict={X: X_batch, y: y_batch})
                step = epoch * n_batches + batch_index
                file_writer.add_summary(summary_str, step)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})

    best_theta = theta.eval()

file_writer.flush()
file_writer.close()
print("Best theta:")
print(best_theta)

#%%
print(error.op.name)
print(mse.op.name)
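# These should print the scoped op names, since both ops were created
# inside the "loss" block:
#   loss/sub
#   loss/mse
# A second tf.name_scope("loss") block in the same graph would be
# uniquified to "loss_1/" so op names stay distinct.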