Python scipy.optimize.fmin_l_bfgs_b error

Asked: 2015-10-28 08:38:57

Tags: python numpy scipy mathematical-optimization

My code implements an active learning algorithm, optimized with L-BFGS. I want to optimize four sets of parameters: alpha, beta, W, and gamma.

However, when I run the code below, I get the following error:

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0 = x0, args = (X,Y,Z), fprime = func_grad)                                           
  File "C:\Python27\lib\site-packages\scipy\optimize\lbfgsb.py", line 188, in fmin_l_bfgs_b
    **opts)
  File "C:\Python27\lib\site-packages\scipy\optimize\lbfgsb.py", line 311, in _minimize_lbfgsb
    isave, dsave)
_lbfgsb.error: failed in converting 7th argument `g' of _lbfgsb.setulb to C/Fortran array
0-th dimension must be fixed to 22 but got 4

My code is:

# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import scipy.stats as sps

num_labeler = 3
num_instance = 5

X = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4],[5,5,5,5]])
Z = np.array([1,0,1,0,1])
Y = np.array([[1,0,1],[0,1,0],[0,0,0],[1,1,1],[1,0,0]])

W = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3]])
gamma = np.array([1,1,1,1,1])
alpha = np.array([1,1,1,1])
beta = 1
para = np.array([1,1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3,1,1,1,1,1])

def get_params(para):
    # extract parameters from 1D parameter vector
    assert len(para) == 22
    alpha = para[0:4]
    beta = para[4]
    W = para[5:17].reshape(3, 4)
    gamma = para[17:]
    return alpha, beta, gamma, W

def log_p_y_xz(yit,zi,sigmati): #log P(y_it|x_i,z_i)
    return np.log(sps.norm(zi,sigmati).pdf(yit)) #tested

def log_p_z_x(alpha,beta,xi): #log P(z_i=1|x_i)
    return -np.log(1+np.exp(-np.dot(alpha,xi)-beta)) #tested

def sigma_eta_ti(xi, w_t, gamma_t): #sigma_ti = (1+exp(-w_t.x_i-gamma_t))^-1
    return 1/(1+np.exp(-np.dot(xi,w_t)-gamma_t)) #tested

def df_alpha(X,Y,Z,W,alpha,beta,gamma): #df/dalpha
    return np.sum((2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)*np.exp(-np.dot(alpha,X[i])-beta)*X[i]/(1+np.exp(-np.dot(alpha,X[i])-beta))**2 for i in range(num_instance))
    #tested
def df_beta(X,Y,Z,W,alpha,beta,gamma): #df/dbeta
    return np.sum((2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)*np.exp(-np.dot(alpha,X[i])-beta)/(1+np.exp(-np.dot(alpha,X[i])-beta))**2 for i in range(num_instance))

def df_w(X,Y,Z,W,alpha,beta,gamma): #df/dsigma * dsigma/dw
    return np.sum(np.sum((-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))*(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i]+(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i] for t in range(num_labeler)) for i in range(num_instance))

def df_gamma(X,Y,Z,W,alpha,beta,gamma): #df/dsigma * dsigma/dgamma
    return np.sum(np.sum((-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))*(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))+(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))) for t in range(num_labeler)) for i in range(num_instance))

def func(para, *args):
    alpha, beta, gamma, W = get_params(para)
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    return np.sum(np.sum(log_p_y_xz(Y[i][t], Z[i], sigma_eta_ti(X[i],W[t],gamma[t]))+log_p_z_x(alpha, beta, X[i]) for t in range(num_labeler)) for i in range(num_instance))
    #tested

def func_grad(para, *args):
    alpha, beta, gamma, W = get_params(para)
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    # gradients
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)
    return np.array([d_f_a, d_f_b, d_f_w, d_f_g])

x0 = np.concatenate([np.ravel(alpha), np.ravel(beta), np.ravel(W), np.ravel(gamma)])

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0 = x0, args = (X,Y,Z), fprime = func_grad)  

I'm not sure what the problem is. Maybe func_grad is causing it? Could someone take a look? Thanks.

1 Answer:

Answer 0 (score: 6)

You need to take the derivative of func with respect to every element of the concatenated parameter array alpha, beta, W, gamma, so func_grad should return a single 1D array of the same length as x0 (i.e. 22). Instead, it returns a jumbled mixture of two arrays and two scalar floats nested inside an object array:

In [1]: func_grad(x0, X, Y, Z)
Out[1]: 
array([array([ 0.00681272,  0.00681272,  0.00681272,  0.00681272]),
       0.006684719133999417,
       array([-0.01351227, -0.01351227, -0.01351227, -0.01351227]),
       -0.013639910534587798], dtype=object)

Part of the problem is that np.array([d_f_a, d_f_b, d_f_w, d_f_g]) does not concatenate these objects into a single 1D array, since some of them are numpy arrays and some are Python floats. That part is easily solved by using np.hstack([d_f_a, d_f_b, d_f_w, d_f_g]) instead.
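
For example, here is a minimal sketch of func_grad with just the hstack change applied (the df_* calls are exactly as in the question):

def func_grad(para, *args):
    alpha, beta, gamma, W = get_params(para)
    X = args[0]
    Y = args[1]
    Z = args[2]
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)  # (4,) array
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)   # Python float
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)      # (4,) array
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)  # Python float
    # hstack treats the scalars as 1-element arrays and concatenates
    # everything into a single 1D float array (here of length 10)
    return np.hstack([d_f_a, d_f_b, d_f_w, d_f_g])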

However, the combined size of these objects is still only 10, whereas the output of func_grad needs to be a vector of length 22. You will need to take another look at your df_* functions. In particular, W is a (3, 4) array, but df_w only returns a (4,) vector, and gamma is a (5,) vector whereas df_gamma only returns a scalar.
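
A quick way to catch shape mismatches like this is to compare func_grad against a finite-difference approximation of func. This is only a sketch, using scipy.optimize.approx_fprime with a conventional step size:

from scipy.optimize import approx_fprime

eps = np.sqrt(np.finfo(float).eps)  # standard finite-difference step
num_grad = approx_fprime(x0, func, eps, X, Y, Z)  # numerical gradient of func at x0
print(num_grad.shape)                # (22,) -- the shape func_grad must produce
print(func_grad(x0, X, Y, Z).shape)  # shorter than (22,) until df_w and df_gamma are fixed

Once the shapes agree, np.allclose(num_grad, func_grad(x0, X, Y, Z)) is also a handy check that the analytic derivatives themselves are correct.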