I am attempting to implement a logistic classifier in Python. The goal is to train the algorithm to recognize the digits 0-9 using the MNIST handwritten digit dataset. However, fmin_cg seems to be changing the dimensions of my input parameter. I have tried reshaping the arguments within cost() and gradient() with no luck; it just produced more errors.
from scipy.io import loadmat
from numpy import shape, zeros, ones, dot, hstack, vstack, log, transpose, kron
from scipy.special import expit as sigmoid
import scipy.optimize
def cost(theta, X, y):
    h = sigmoid( X.dot(theta) )
    pos_class = y.T.dot( log(h) )
    neg_class = (1.0-y).T.dot( log(1.0-h) )
    cost = ((-1.0/m)*(pos_class+neg_class))
    return cost

def gradient(theta, X, y):
    h = sigmoid( X.dot(theta) )
    grad = (1.0/m)*(X.T.dot((h-y)))
    return grad

def one_vs_all(X, y, theta):
    # add x1 feature, x1 = 1, to each example set
    X = hstack( (ones((m,1)), X) )
    # train the classifier for digit 9.0
    temp_y = (y == 9.0)+0
    result = scipy.optimize.fmin_cg( cost, fprime=gradient, x0=theta,
             args=(X, temp_y), maxiter=50, disp=False, full_output=True )
    print result[1]

# Load data from Matlab file
data = loadmat('data.mat')
X,y = data['X'], data['y']
m,n = shape(X)
theta = zeros((n+1, 1))
one_vs_all(X, y, theta)
The error I am receiving:
Traceback (most recent call last):
File "/Users/jkarimi91/Documents/Digit Recognizer/Digit_Recognizer.py", line 36, in <module>
one_vs_all(X, y, theta)
File "/Users/jkarimi91/Documents/Digit Recognizer/Digit_Recognizer.py", line 26, in one_vs_all
args=(X, temp_y), maxiter=50, disp=False, full_output=True )
File "/anaconda/lib/python2.7/site-packages/scipy/optimize/optimize.py", line 1092, in fmin_cg
res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
File "/anaconda/lib/python2.7/site-packages/scipy/optimize/optimize.py", line 1156, in _minimize_cg
deltak = numpy.dot(gfk, gfk)
ValueError: shapes (401,5000) and (401,5000) not aligned: 5000 (dim 1) != 401 (dim 0)
[Finished in 1.0s with exit code 1]
Answer 0 (score: 0)
With the current code, the cost & gradient functions each return a 2-D array. For fmin_cg to run properly, these functions must each return a 1-D array (and, ideally, a plain scalar in the cost function's case), as noted in the documentation.
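Concretely, a minimal sketch of that fix applied to the question's two functions (it assumes the module-level `m` from the question; it also flattens `y`, because fmin_cg hands `theta` over as a 1-D array, so `h` has shape `(m,)` and subtracting a `(m, 1)` column from it would broadcast to an `(m, m)` matrix):

def cost(theta, X, y):
    h = sigmoid(X.dot(theta))  # theta arrives 1-D, so h has shape (m,)
    yf = y.ravel()
    # both terms are dot products of 1-D arrays, so the result is a scalar
    return (-1.0/m) * (yf.dot(log(h)) + (1.0-yf).dot(log(1.0-h)))

def gradient(theta, X, y):
    h = sigmoid(X.dot(theta))
    # flattening y keeps h - y at shape (m,) instead of broadcasting
    return (1.0/m) * X.T.dot(h - y.ravel())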
Answer 1 (score: 0)
I know this may be a bit late, but it should work. I was getting several memory errors in your gradient function, so I changed the code slightly and added regularization; check it out:
from numpy import reshape  # in addition to the question's imports

def gradients(theta, X, y, Lambda):
    m,n = shape(X)
    # fmin_cg passes theta in as a flat array; restore the column shape
    theta = reshape(theta, (n,1))
    h = sigmoid(X.dot(theta))
    h = h-y
    # zero out the bias weight so it is excluded from regularization
    theta[0,0] = 0
    grad = ((X.T.dot(h)) / m) + (Lambda / m * theta)
    # flatten back to 1-D, as fmin_cg expects from fprime
    return grad.ravel()
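This answer only shows the gradient; to call fmin_cg you also need a cost function with the same (theta, X, y, Lambda) signature. A sketch of one possibility (the name `cost_reg` and the exact penalty term are my assumptions, not part of the original answer):

def cost_reg(theta, X, y, Lambda):
    m,n = shape(X)
    theta = reshape(theta, (n,1))
    h = sigmoid(X.dot(theta))
    unreg = (-1.0/m) * (y.T.dot(log(h)) + (1.0-y).T.dot(log(1.0-h)))
    # penalize every weight except the bias, mirroring gradients() above
    penalty = (Lambda / (2.0*m)) * (theta[1:]**2).sum()
    return (unreg + penalty).item()

With that in place, the optimizer call from the question becomes, e.g., `scipy.optimize.fmin_cg(cost_reg, fprime=gradients, x0=theta.ravel(), args=(X, temp_y, 1.0), maxiter=50, full_output=True)`, where `1.0` is an arbitrary regularization strength.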