Tensorflow会话返回nan

时间:2017-08-19 10:47:09

标签: tensorflow

import numpy as np
import tensorflow as tf

# Training data: 100 points on y = 4x + 6 over the interval [0, 20].
x_input = np.linspace(0, 20, 100)
y_input = 4 * x_input + 6

# Model parameters to be learned (true values: W=4, b=6).
W = tf.Variable(0.0, name="weight")
b = tf.Variable(0.0, name="bias")

# Scalar feeds: one (x, y) sample per SGD step.
X = tf.placeholder(tf.float32, name='InputX')
Y = tf.placeholder(tf.float32, name='InputY')

# Linear model and mean-squared-error loss.
Y_pred = X * W + b
loss = tf.reduce_mean(tf.square(Y_pred - Y))

# BUG FIX: learning_rate=0.01 diverges here. With x as large as 20 the
# per-sample gradient is ~2*x*error, so the updates overshoot, grow without
# bound, and W/b become NaN. 0.001 keeps per-sample SGD stable and the
# script converges to roughly W=4.05, b=4.99 after 50 epochs.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(50):
        # Accumulate the epoch loss so it is actually reported
        # (it was initialized but never used in the original).
        total_loss = 0.0
        for x, y in zip(x_input, y_input):
            _, sample_loss = sess.run([optimizer, loss],
                                      feed_dict={X: x, Y: y})
            total_loss += sample_loss
        print("epoch", step, "total loss", total_loss)
    # Fresh names so the tf.Variable `b` is not shadowed by its float value.
    w_val, b_val = sess.run([W, b])

    print("Model parameters: ", w_val, b_val)
    # With lr=0.001 this prints finite values near W=4, b=6 instead of nan.

1 个答案:

答案 0 :(得分:0)

对于这个简单的问题，你需要更低的 learning_rate。看起来是因为对每个样本都做一次更新，再加上较高的学习率，模型参数在训练中发散成了 NaN。

# Fix: lowering learning_rate from 0.01 to 0.001 keeps the per-sample SGD
# updates stable, so the parameters stay finite instead of becoming NaN.
optimizer =  tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

...
print(w, b)
# Prints approximately 4.05073 4.98799 (true values are W=4, b=6).