I am trying to implement multivariate linear regression in Python using TensorFlow, but I have run into some logical and implementation issues. My code throws the following error:

Attempting to use uninitialized value Variable
Caused by op u'Variable/read'

Ideally the learned weights should come out as [2, 3].
import tensorflow as tf

def hypothesis_function(input_2d_matrix_trainingexamples,
                        output_matrix_of_trainingexamples,
                        initial_parameters_of_hypothesis_function,
                        learning_rate, num_steps):
    # calculate num attributes and num examples
    number_of_attributes = len(input_2d_matrix_trainingexamples[0])
    number_of_trainingexamples = len(input_2d_matrix_trainingexamples)

    # Graph inputs
    x = []
    for i in range(0, number_of_attributes, 1):
        x.append(tf.placeholder("float"))
    y_input = tf.placeholder("float")

    # Create model and set model weights
    parameters = []
    for i in range(0, number_of_attributes, 1):
        parameters.append(
            tf.Variable(initial_parameters_of_hypothesis_function[i]))

    # Construct linear model
    y = tf.Variable(parameters[0], "float")
    for i in range(1, number_of_attributes, 1):
        y = tf.add(y, tf.multiply(x[i], parameters[i]))

    # Minimize the mean squared errors
    loss = tf.reduce_mean(tf.square(y - y_input))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train = optimizer.minimize(loss)

    # Initialize the variables
    init = tf.initialize_all_variables()

    # Launch the graph
    session = tf.Session()
    session.run(init)
    for step in range(1, num_steps + 1, 1):
        for i in range(0, number_of_trainingexamples, 1):
            feed = {}
            for j in range(0, number_of_attributes, 1):
                array = [input_2d_matrix_trainingexamples[i][j]]
                feed[j] = array
            array1 = [output_matrix_of_trainingexamples[i]]
            feed[number_of_attributes] = array1
            session.run(train, feed_dict=feed)
    for i in range(0, number_of_attributes - 1, 1):
        print(session.run(parameters[i]))


array = [[0.0, 1.0, 2.0], [0.0, 2.0, 3.0], [0.0, 4.0, 5.0]]
hypothesis_function(array, [8.0, 13.0, 23.0], [1.0, 1.0, 1.0], 0.01, 200)
This question relates to: python, machine-learning, linear-regression, tensorflow
Run this:
init = tf.global_variables_initializer()
sess.run(init)
Or (depending on the version of TF that you have):
init = tf.initialize_all_variables()
sess.run(init)
Or run both initializers:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
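Taken together, a minimal sketch of where the initializer sits relative to the rest of the graph (TF 1.x graph mode assumed; the variable w is purely illustrative):

import tensorflow as tf

# Define every tf.Variable first, then build the initializer op,
# and run it once before any op that reads those variables.
w = tf.Variable([1.0, 1.0, 1.0], name="w")

init = tf.global_variables_initializer()  # only covers variables created above

with tf.Session() as sess:
    sess.run(init)        # initialize before training/evaluating
    print(sess.run(w))    # safe now: prints [1. 1. 1.]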
There is another cause of this error, related to the order in which the global variables are initialized. I had a code sample that produced a similar error:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value W
import numpy as np
import tensorflow as tf

def linear(X, n_input, n_output, activation=None):
    W = tf.Variable(tf.random_normal([n_input, n_output], stddev=0.1), name='W')
    b = tf.Variable(tf.constant(0, dtype=tf.float32, shape=[n_output]), name='b')
    if activation is not None:
        h = tf.nn.tanh(tf.add(tf.matmul(X, W), b), name='h')
    else:
        h = tf.add(tf.matmul(X, W), b, name='h')
    return h

from tensorflow.python.framework import ops
ops.reset_default_graph()
g = tf.get_default_graph()
print([op.name for op in g.get_operations()])

with tf.Session() as sess:
    # RUN INIT
    sess.run(tf.global_variables_initializer())
    # But W is not in the graph yet, so the initializer does not know about it.
    # EVAL then raises the error.
    print(linear(np.array([[1.0, 2.0, 3.0]]).astype(np.float32), 3, 3).eval())
You should change it to the following:
from tensorflow.python.framework import ops
ops.reset_default_graph()
g = tf.get_default_graph()
print([op.name for op in g.get_operations()])

with tf.Session() as sess:
    # NOT RUNNING YET, JUST ADDING THE OPS TO THE GRAPH
    l = linear(np.array([[1.0, 2.0, 3.0]]).astype(np.float32), 3, 3)
    # RUN INIT (W and b now exist in the graph)
    sess.run(tf.global_variables_initializer())
    print([op.name for op in g.get_operations()])
    # ONLY EVAL AFTER INIT
    print(l.eval(session=sess))
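As an aside, if you are unsure which variables the initializer has missed, TF 1.x provides tf.report_uninitialized_variables(); a minimal sketch (the variable v is purely illustrative):

import tensorflow as tf

v = tf.Variable(0.0, name='v')  # illustrative variable

with tf.Session() as sess:
    # Before the initializer runs, v is reported as uninitialized.
    print(sess.run(tf.report_uninitialized_variables()))  # e.g. [b'v']
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.report_uninitialized_variables()))  # []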
I want to share my resolution: it worked when I replaced the line session = tf.Session()
with sess = tf.InteractiveSession(). I hope this is useful to others.
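A brief sketch of why that substitution helps, assuming TF 1.x: tf.InteractiveSession registers itself as the default session, so initializer ops and .eval() can run without passing a session around explicitly (the variable w is just for illustration):

import tensorflow as tf

sess = tf.InteractiveSession()           # becomes the default session

w = tf.Variable([2.0, 3.0], name='w')
tf.global_variables_initializer().run()  # runs in the default session

print(w.eval())                          # also uses the default session
sess.close()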
Normally there are two ways of initializing variables: 1) using sess.run(tf.global_variables_initializer()),
as the previous answers noted; or 2) loading the graph from a checkpoint.
You can do it like this:
sess = tf.Session(config=config)
saver = tf.train.Saver(max_to_keep=3)
try:
    saver.restore(sess, tf.train.latest_checkpoint(FLAGS.model_dir))
    # start from the latest checkpoint; the sess will be initialized
    # by the variables in the latest checkpoint
except ValueError:
    # train from scratch
    init = tf.global_variables_initializer()
    sess.run(init)
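Note that the restore branch only succeeds if checkpoints were written earlier for the same variables; a minimal sketch of the saving side (the /tmp/model path and the variable w are just examples):

import os
import tensorflow as tf

w = tf.Variable(0.0, name='w')           # illustrative variable
saver = tf.train.Saver(max_to_keep=3)
os.makedirs('/tmp/model', exist_ok=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(3):
        # ... a real training step would go here ...
        # Write a checkpoint so tf.train.latest_checkpoint('/tmp/model')
        # has something to restore in a later run.
        saver.save(sess, '/tmp/model/ckpt', global_step=step)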
A third method is to use tf.train.Supervisor. The session will:

Create a session on 'master', recovering or initializing the model as needed, or wait for a session to be ready.

sv = tf.train.Supervisor([parameters])
sess = sv.prepare_or_wait_for_session()
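A minimal sketch of that pattern, assuming TF 1.x and an illustrative log directory of /tmp/sv_logs:

import tensorflow as tf

w = tf.Variable([2.0, 3.0], name='w')    # illustrative variable

# The Supervisor builds the init op and a Saver itself; it restores from
# logdir if a checkpoint exists there, otherwise it runs the initializer.
sv = tf.train.Supervisor(logdir='/tmp/sv_logs')
sess = sv.prepare_or_wait_for_session()

print(sess.run(w))                       # variables are ready to use
sess.close()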