TensorFlow linear regression does not converge to the correct cost

Date: 2018-06-07 02:40:03

Tags: python tensorflow

I am trying to implement multivariate linear regression in TensorFlow (using the Boston Housing dataset), but my cost function seems to converge to the wrong value (around 24000 in my case). I tried scaling the features, but it still doesn't work. Any idea what I am doing wrong? Here is the code:

from sklearn.datasets import load_boston
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split  # sklearn.cross_validation is deprecated/removed
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from sklearn.preprocessing import MinMaxScaler

rate = 0.000000011
epochs = 100
errors = []

def load_data():
    boston = load_boston()

    bos = pd.DataFrame(boston.data)

    output = pd.DataFrame(boston.target)

    return [bos, output]

xS, yS = load_data()

m = len(yS)

x_train, x_test, y_train, y_test = train_test_split(xS, yS, test_size=0.2)

scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

theta = tf.Variable(tf.zeros([len(xS.columns), 1]))
X = tf.placeholder(tf.float32, shape=[m, len(xS.columns)])
y = tf.placeholder(tf.float32, shape=[m, 1])
b = tf.Variable(tf.zeros([m, 1]))

model = tf.matmul(tf.transpose(theta), tf.transpose(X)) + b

cost = tf.reduce_sum(tf.square(y-model))/(2*m)

optimizer = tf.train.GradientDescentOptimizer(rate).minimize(cost)

init = [tf.global_variables_initializer(), tf.local_variables_initializer()]

with tf.Session() as sess:
    sess.run(init)
    for e in range(epochs):
        sess.run(optimizer, feed_dict={X:xS, y:yS})
        loss = sess.run(cost, feed_dict={X:xS, y:yS})
        print("cost at step", e, loss)
        errors.append(loss)
        if errors[len(errors)-1] > errors[len(errors)-2]:
            break

    theta_temp = np.array(sess.run(theta))
    b_temp = np.array(sess.run(b))

plt.plot(list(range(len(errors))), errors)
plt.show()
h = np.transpose(np.add(np.matmul(np.transpose(theta_temp), np.transpose(xS)), np.transpose(b_temp)))
print(r2_score(h, yS))

1 answer:

Answer 0 (score: 0):

You are doing most of the things right. I would suggest the following change to your code:

model = tf.matmul(X, theta) + b

Try it with a learning rate of 0.001 and 1000 epochs, and report back the results.

Whereas in your current version,

model = tf.matmul(tf.transpose(theta), tf.transpose(X)) + b

you have made a mistake: the first term on the right-hand side has shape (1, m) while the second has shape (m, 1). Broadcasting then produces a result you did not expect (an (m, m) matrix instead of a vector of predictions). That is why you see very poor results even with learning rates of 0.01 or 0.1.
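
To see the broadcasting concretely, here is a minimal NumPy sketch of the shape mismatch (the sizes and zero-filled arrays are made up purely for illustration):

import numpy as np

m = 4                      # pretend we have 4 examples and 3 features
pred = np.zeros((1, m))    # shape of tf.matmul(tf.transpose(theta), tf.transpose(X))
b = np.zeros((m, 1))       # shape of the original bias variable

print((pred + b).shape)    # (4, 4) -- broadcasting blows this up to an m-by-m matrix

X = np.zeros((m, 3))
theta = np.zeros((3, 1))
b_scalar = np.zeros([1])
print((X @ theta + b_scalar).shape)  # (4, 1) -- the intended column of predictions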

My second suggestion is to remove the break criterion:

if errors[len(errors)-1] > errors[len(errors)-2]: break

Stochastic gradients are noisy. There is no guarantee that a step in the negative gradient direction always lowers the cost (it might hold for this convex problem, but I would have to think about it).
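
If you do want a stopping rule, a more robust pattern is to stop only after the cost has failed to improve for several consecutive epochs, rather than at the first uptick. A minimal sketch (the patience value is illustrative, and it reuses sess, optimizer, cost, and the feed tensors from the code below):

patience = 10              # illustrative: epochs to wait without improvement
best_loss = float("inf")
stale = 0
for e in range(epochs):
    sess.run(optimizer, feed_dict={X: x_train, y: y_train})
    loss = sess.run(cost, feed_dict={X: x_train, y: y_train})
    errors.append(loss)
    if loss < best_loss:
        best_loss, stale = loss, 0
    else:
        stale += 1
        if stale >= patience:
            break

Here is the full corrected code: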

from sklearn.datasets import load_boston
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split  # sklearn.cross_validation is deprecated/removed
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from sklearn.preprocessing import MinMaxScaler

rate = 0.1
epochs = 100
errors = []

def load_data():
    boston = load_boston()

    bos = pd.DataFrame(boston.data)

    output = pd.DataFrame(boston.target)

    return [bos, output]

xS, yS = load_data()


x_train, x_test, y_train, y_test = train_test_split(xS, yS, test_size=0.2)
m = len(y_train)

scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

theta = tf.Variable(tf.zeros([len(xS.columns), 1]))
X = tf.placeholder(tf.float32, shape=[m, len(xS.columns)])
y = tf.placeholder(tf.float32, shape=[m, 1])
b = tf.Variable(tf.zeros([1]))  # scalar bias instead of an (m, 1) vector

model = tf.matmul(X, theta) + b  # (m, 1) predictions; no accidental broadcasting

cost = tf.reduce_sum(tf.square(y-model))/(2*m)

optimizer = tf.train.GradientDescentOptimizer(rate).minimize(cost)

init = [tf.global_variables_initializer(), tf.local_variables_initializer()]

with tf.Session() as sess:
    sess.run(init)
    for e in range(epochs):
        sess.run(optimizer, feed_dict={X:x_train, y:y_train})
        loss = sess.run(cost, feed_dict={X:x_train, y:y_train})
        print("cost at step", e, loss)
        errors.append(loss)
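
If you also want the R² score on the held-out test set, note that the placeholders above are pinned to the training-set size m. A sketch of the change, assuming the rest of the graph stays the same; note also that sklearn's r2_score expects the true values as the first argument:

X = tf.placeholder(tf.float32, shape=[None, len(xS.columns)])  # None: accept any batch size
y = tf.placeholder(tf.float32, shape=[None, 1])

# ... build model/cost/optimizer and train as above, then, inside the session:
preds = sess.run(model, feed_dict={X: x_test})
print(r2_score(y_test, preds))  # r2_score(y_true, y_pred), not the other way around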
