以下是我用于梯度下降的代码,当将x初始化为第一行时,该代码工作正常,并且我得到了正确的thetas,但是当使用第二行中定义的x时,所有代码均相同在每次迭代(正向或负向)时theta都不断增大,我无法思考为什么会这样。
x = 2 * np.random.rand(100, 1) #first initialization
x = np.arange(100).reshape(100,1) #second initialization
#i am initializing one at a time in the main code here i wrote both just for the question
delta = np.random.uniform(-5,5,size=(100,)).reshape(100,1)
y = 2*x + 3 + delta
# y = 3.5*x + 6
eta = 0.01
iterations = 10
#in main code i am using iteraton=1000 and not printing in the below loop, but just as i was getting nan's when using second x, so i made it run for 10 iteration and printed out to check what is happening
x_c = np.c_[np.ones((100, 1)), x]
theta = np.random.randn(2,1)
print(theta)
for i in range(iterations):
grad = 2/100 * x_c.T.dot(x_c.dot(theta) - y)
theta = theta - eta * grad
print(theta)
print(theta)
plt.scatter(x,y)
y=theta[1][0] + theta[0][0]*x
plt.plot(x, y)
plt.show()