I'm doing multivariate linear regression with sklearn's Boston Housing dataset, which is a 506x13 matrix. I plan to train on all of the data, then plug in a "random" sample such as boston_dataset.data[39] and look at the loss. But when I print the result, all I get is NaN. Here is my code.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
np.set_printoptions(suppress=True)
boston = load_boston()
m = boston.data.shape[0] - 1
bt_unfixed = np.transpose(boston.data)
bt = np.insert(bt_unfixed, 0, 1)
Y = tf.placeholder(tf.float64, name='Y___')
X = tf.placeholder(tf.float64, [1, 13], name='X_____')
#print X.shape
W = tf.Variable(tf.zeros([13, 1]), name='weights')
b = tf.Variable(0.5, name='bias')
hypothesis = tf.add(tf.matmul(X, tf.cast(W, tf.float64)), tf.cast(b, tf.float64))
loss = tf.reduce_sum(tf.square(hypothesis - Y)) / (2 * m)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = optimizer.minimize(loss)
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for i in range(0, 500):
        for (x, y) in zip(boston.data, boston.target):
            sess.run(train_op, feed_dict={X: x.reshape(1, 13), Y: y})
        if (i + 1) % 50 == 0:
            print "Ran " + str(i) + "times\nW=" + str(sess.run(W)) + "\nb=" + str(sess.run(b))
    print "Done!\n"
    print "Running test...\n"
    t = sess.run(cost, feed_dict={X: boston.data[504], Y: boston.target.data[504]})
    print "loss =" + str(t) + "Real value" + str(boston.target.data[504]) + "Pred " + str(sess.run(hypothesis, feed_dict={X: boston.data[504]}))
Thanks! Also, feel free to add any suggestions.
Answer 0 (score: 2)
It looks like you are not doing any preprocessing of the Boston data, which makes the loss and hypothesis values blow up to inf and then NaN. So I normalized the data and it works. Here is my code; a StandardScaler-based variant of the normalization step is sketched after it.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
boston = load_boston()
data = boston.data
label = boston.target
# standardize the data: zero mean and unit variance for each of the 13 features
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
M = boston.data.shape[0]
Y = tf.placeholder(tf.float32, name='Y')
X = tf.placeholder(tf.float32, [1, 13], name='X')
W = tf.Variable(tf.random_normal([13, 1]), name='weights')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.add(tf.matmul(X, W), b)
loss = tf.reduce_sum(tf.square(hypothesis - Y)) / (2. * (M - 1))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = optimizer.minimize(loss)
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for i in range(0, 500):
        for l in xrange(M):
            _, loss_val, hypo = sess.run(
                [train_op, loss, hypothesis],
                feed_dict={X: data[l, :].reshape([1, 13]),
                           Y: label[l]})
        if (i + 1) % 50 == 0:
            print "Ran " + str(i) + "times\nW=" + \
                str(sess.run(W)) + "\nb=" + str(sess.run(b))

    print "Done!\n"
    print "Running test...\n"
    t = sess.run(
        loss, feed_dict={X: data[50].reshape([1, 13]),
                         Y: label[50]})
    print "loss =" + str(t)
    print "Real value Y: " + str(label[50])
    print "Pred Y: " + str(sess.run(hypothesis,
                                    feed_dict={X: data[50].reshape([1, 13])}))
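One more suggestion, since the question asked for any: the same per-feature standardization can also be done with sklearn's StandardScaler. The advantage is that the fitted scaler can later apply the identical transform to any new, raw sample before it is fed to the X placeholder. This is only a sketch of an alternative, not part of the fix above, and the test index is just an example:

from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_boston

boston = load_boston()
scaler = StandardScaler()
# fit on the training features and standardize them in one step
data = scaler.fit_transform(boston.data)
label = boston.target

# any raw sample must go through the same transform before being fed to X,
# e.g. the sample the question wanted to test with
sample = scaler.transform(boston.data[504].reshape(1, 13))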