I'm trying to fit a straight line to a few points using gradient descent. I'm not an expert at this and tried to write down the math of the algorithm in Python. It runs for a few iterations, but my prediction seems to explode at some point. Here is the code:
import numpy as np
import matplotlib.pyplot as plt

def mean_squared_error(n, A, b, m, c):
    e = 0
    for i in range(n):
        e += (b[i] - (m*A[i] + c)) ** 2
    return e/n

def der_wrt_m(n,A,b,m,c):
    d = 0
    for i in range(n):
        d += (2 * (b[i] - (m*A[i] + c)) * (-A[i]))
    return d/n

def der_wrt_c(n,A,b,m,c):
    d = 0
    for i in range(n):
        d += (2 * (b[i] - (m*A[i] + c)))
    return d/n

def update(n,A,b,m,c,descent_rate):
    return descent_rate * (der_wrt_m(n,A,b,m,c)), descent_rate * (der_wrt_c(n,A,b,m,c))

A = np.array(((0,1),
              (1,1),
              (2,1),
              (3,1)))
x = A.T[0]
b = np.array((1,2,0,3), ndmin=2 ).T
y = b.reshape(4)

def descent(x,y):
    m = 0
    c = 0
    descent_rate = 0.00001
    iterations = 100
    n = len(x)
    plt.scatter(x, y)
    u = np.linspace(0,3,100)
    prediction = 0
    for itr in range(iterations):
        print(m,c)
        prediction = prediction + m * x + c
        m,c = update(n,x,y,m,c,descent_rate)
    plt.plot(u, u * m + c, '-')

descent(x,y)
This is my output:
0 0
19.25 -10.5
-71335.1953125 24625.9453125
5593771382944640.0 -2166081169939480.2
-2.542705027685638e+48 9.692684648057364e+47
2.40856742196228e+146 -9.202614421953049e+145
-inf inf
nan nan
nan nan
nan nan
nan nan
nan nan
nan nan
etc...
Update: the values no longer explode, but they still aren't converging in a nice way:
# We could also solve it using gradient descent
import numpy as np
import matplotlib.pyplot as plt

def mean_squared_error(n, A, b, m, c):
    e = 0
    for i in range(n):
        e += ((b[i] - (m * A[i] + c)) ** 2)
    #print("mse:",e/n)
    return e/n

def der_wrt_m(n,A,b,m,c):
    d = 0
    for i in range(n):
        # d += (2 * (b[i] - (m*A[i] + c)) * (-A[i]))
        d += (A[i] * (b[i] - (m*A[i] + c)))
    #print("Dm",-2 * d/n)
    return (-2 * d/n)

def der_wrt_c(n,A,b,m,c):
    d = 0
    for i in range(n):
        d += (2 * (b[i] - (m*A[i] + c)))
    #print("Dc",d/n)
    return d/n

def update(n,A,b,m,c, descent_rate):
    return (m - descent_rate * der_wrt_m(n,A,b,m,c)),(c - descent_rate * der_wrt_c(n,A,b,m,c))

A = np.array(((0,1),
              (1,1),
              (2,1),
              (3,1)))
x = A.T[0]
b = np.array((1,2,0,3), ndmin=2 ).T
y = b.reshape(4)

def descent(x,y):
    m = 0
    c = 0
    descent_rate = 0.0001
    iterations = 10000
    n = len(x)
    plt.scatter(x, y)
    u = np.linspace(0,3,100)
    prediction = 0
    for itr in range(iterations):
        prediction = prediction + m * x + c
        m,c = update(n,x,y,m,c,descent_rate)

    loss = mean_squared_error(n, A, b, m, c)
    print(loss)
    print(m,c)
    plt.plot(u, u * m + c, '-')

descent(x,y)
Now, after about 10000 iterations with a learning rate of 0.0001, the plot and the printed values look like this:
[4.10833186 5.21468937]
1.503547594304175 -1.9947003678083184
The least-squares fit, for comparison, looks like this:
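(For reference, this snippet is my addition and not part of the original post: the closed-form least-squares line for these four points can be computed directly with np.linalg.lstsq, reusing the A matrix and b vector already defined above.)

import numpy as np

A = np.array(((0, 1),
              (1, 1),
              (2, 1),
              (3, 1)), dtype=float)
b = np.array((1, 2, 0, 3), dtype=float)

# Solve A @ [m, c] ~= b in the least-squares sense
(m, c), *_ = np.linalg.lstsq(A, b, rcond=None)
print(m, c)  # about 0.4 and 0.9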
Answer (score: 3):
In your update function, you should subtract the computed gradients from the current m and c:
def update(n,A,b,m,c,descent_rate):
    return m - (descent_rate * der_wrt_m(n,A,b,m,c)), c - (descent_rate * der_wrt_c(n,A,b,m,c))
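In other words (my restatement, not part of the original answer), gradient descent steps each parameter against its gradient:

m \leftarrow m - \eta \,\frac{\partial E}{\partial m}, \qquad c \leftarrow c - \eta \,\frac{\partial E}{\partial c}

where \eta is descent_rate and E is the mean squared error. Returning only descent_rate * der_wrt_m(...) replaces m with the scaled gradient instead of taking a step from its current value.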
Update: here is a working version. I got rid of the A matrix after obtaining x and y, because it was confusing me =). For example, in the gradient computation you have the expression d += (A[i] * (b[i] - (m*A[i] + c))), but it should be d += (x[i] * (b[i] - (m*x[i] + c))), because x[i] gives you a single element whereas A[i] gives you a list (a whole row).
You also forgot a minus sign when computing the derivative with respect to c. If your expression is (y - (m*x + c))^2, then the derivative with respect to c should be 2 * (-1) * (y - (m*x + c)), because of the minus sign in front of c.
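To spell the derivatives out (my addition, following the answer's formulas): with the mean squared error

E(m,c) = \frac{1}{n}\sum_{i=1}^{n}\bigl(y_i - (m x_i + c)\bigr)^2

the chain rule gives

\frac{\partial E}{\partial m} = -\frac{2}{n}\sum_{i=1}^{n} x_i\bigl(y_i - (m x_i + c)\bigr), \qquad \frac{\partial E}{\partial c} = -\frac{2}{n}\sum_{i=1}^{n}\bigl(y_i - (m x_i + c)\bigr)

which is exactly what der_wrt_m and der_wrt_c compute in the working version below.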
# We could also solve it using gradient descent
import numpy as np
import matplotlib.pyplot as plt

def mean_squared_error(n, x, y, m, c):
    e = 0
    for i in range(n):
        e += (m*x[i]+c - y[i])**2
    return e/n

def der_wrt_m(n, x, y, m, c):
    d = 0
    for i in range(n):
        d += x[i] * (y[i] - (m*x[i] + c))
    d = -2 * d/n
    return d

def der_wrt_c(n, x, y, m, c):
    d = 0
    for i in range(n):
        d += (y[i] - (m*x[i] + c))
    d = -2 * d/n
    return d

def update(n,x,y,m,c, descent_rate):
    return (m - descent_rate * der_wrt_m(n,x,y,m,c)),(c - descent_rate * der_wrt_c(n,x,y,m,c))

A = np.array(((0,1),
              (1,1),
              (2,1),
              (3,1)))
x = A.T[0]
b = np.array((1,2,0,3), ndmin=2 ).T
y = b.reshape(4)
print(x)
print(y)

def descent(x,y):
    m = 0.0
    c = 0.0
    descent_rate = 0.01
    iterations = 10000
    n = len(x)
    plt.scatter(x, y)
    u = np.linspace(0,3,100)
    prediction = 0
    for itr in range(iterations):
        prediction = prediction + m * x + c
        m,c = update(n,x,y,m,c,descent_rate)
        loss = mean_squared_error(n, x, y, m, c)
        print(loss)

    print(loss)
    print(m,c)
    plt.plot(u, u * m + c, '-')
    plt.show()

descent(x,y)
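As a side note (this sketch is my addition, not part of the original answer), the same gradient-descent loop can be written without the explicit Python loops by vectorizing over all points with NumPy:

import numpy as np

x = np.array([0., 1., 2., 3.])
y = np.array([1., 2., 0., 3.])

m, c = 0.0, 0.0
descent_rate = 0.01
for _ in range(10000):
    residual = y - (m * x + c)          # y_i - (m*x_i + c) for all points at once
    dm = -2.0 * np.mean(x * residual)   # dE/dm
    dc = -2.0 * np.mean(residual)       # dE/dc
    m -= descent_rate * dm
    c -= descent_rate * dc

print(m, c)  # should approach the least-squares solution, about 0.4 and 0.9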