
Asked: 2017-03-14 11:03:18

Tags: python machine-learning logistic-regression

I am implementing logistic regression in Python, using a regularized loss function of the form

    L(w) = sum_i log(1 + exp(-y_i * x_i^T w)) + l2 * w^T w + l1 * ||w||_1

But gradient descent performs terribly. Please read the bold text at the end first! The code below can be pasted cell by cell.

import numpy as np, scipy as sp, sklearn as sl
from scipy import special as ss
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.datasets import make_classification
import theano
import theano.tensor as T

Here is the loss function (scipy's log1p/expm1 keep the logarithm's argument accurate near 1):

def lossf(w, X, y, l1, l2):
    w.resize((w.shape[0], 1))   # in-place reshape to a column vector
    y.resize((y.shape[0], 1))

    # log1p(1 + expm1(z)) == log(1 + exp(z)): the logistic data term
    lossf1 = np.sum(ss.log1p(1 + ss.expm1(np.multiply(-y, np.dot(X, w)))))
    lossf2 = l2 * (np.dot(np.transpose(w), w))   # L2 penalty, l2 * w'w
    lossf3 = l1 * sum(abs(w))                    # L1 penalty, l1 * ||w||_1
    lossf = float(lossf1 + lossf2 + lossf3)
    return lossf
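
A quick numeric confirmation of the identity lossf1 relies on (my own illustration, using the imports above and arbitrary z values): log1p(1 + expm1(z)) = log(2 + exp(z) - 1) = log(1 + exp(z)).

z = np.linspace(-10.0, 10.0, 5)
print(np.allclose(ss.log1p(1 + ss.expm1(z)), np.log(1 + np.exp(z))))  # True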

Here is the gradient function (?? is the problem here? - see the end):

def gradf(w, X, y, l1, l2):
    w.resize((w.shape[0], 1))   # in-place reshape to a column vector
    y.resize((y.shape[0], 1))

    gradw1 = l2 * 2 * w                  # gradient of the L2 penalty
    gradw2 = l1 * np.sign(w)             # (sub)gradient of the L1 penalty
    gradw3 = np.multiply(-y, (2 + ss.expm1(np.multiply(-y, np.dot(X, w)))))
    gradw3 = gradw3 / (2 + (ss.expm1((np.multiply(-y, np.dot(X, w))))))
    gradw3 = np.sum(np.multiply(gradw3, X), axis=0)
    gradw3.resize(gradw3.shape[0], 1)
    gradw = gradw1 + gradw2 + gradw3
    gradw.resize(gradw.shape[0],)
    return np.transpose(gradw)
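
A quick way to localize the problem is a central finite-difference check of gradf against lossf. This is a sketch of mine, not part of the original post: the small synthetic problem, the names Xc/yc/w0, and the step eps are arbitrary choices. Since gradf is suspect, it should print a clearly non-negligible gap here.

np.random.seed(0)
Xc, yc = make_classification(n_features=5, n_samples=20, random_state=0)
yc = 2.0 * (yc - 0.5)                 # labels in {-1, +1}
w0 = np.random.randn(5)
eps = 1e-6
num = np.array([(lossf(w0 + eps * e, Xc, yc, 0.1, 0.1)
                 - lossf(w0 - eps * e, Xc, yc, 0.1, 0.1)) / (2 * eps)
                for e in np.eye(5)])
print(np.abs(num - gradf(w0.copy(), Xc, yc, 0.1, 0.1)).max())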

Here is my LR class:

class LR(ClassifierMixin, BaseEstimator):
    def __init__(self, lr=0.0001, l1=0.1, l2=0.1, num_iter=100, verbose=0):
        self.l1 = l1
        self.l2 = l2
        self.w = None
        self.lr = lr
        self.verbose = verbose
        self.num_iter = num_iter

    def fit(self, X, y):
        n, d = X.shape
        self.w = np.zeros(shape=(d,))
        for i in range(self.num_iter):
            g = gradf(self.w, X, y, self.l1, self.l2)
            g.resize((g.shape[0], 1))
            self.w = self.w - g          # note: self.lr is not applied here
            print("Loss: ", lossf(self.w, X, y, self.l1, self.l2))
        return self

    def predict_proba(self, X):
        # a stable sigmoid; see the identity check right after this class
        probs = 1 / (2 + ss.expm1(np.dot(-X, self.w)))
        return probs

    def predict(self, X):
        probs = self.predict_proba(X)
        probs = np.sign(2 * probs - 1)   # threshold at p = 0.5 -> labels in {-1, +1}
        probs.resize((probs.shape[0],))
        return probs
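
predict_proba is a plain sigmoid in disguise: 2 + expm1(-z) = 1 + exp(-z), so 1/(2 + expm1(-z)) = expit(z). A one-line check (my own, arbitrary z values):

z = np.linspace(-5.0, 5.0, 11)
print(np.allclose(1 / (2 + ss.expm1(-z)), ss.expit(z)))  # True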

Here is the test:

X, y = make_classification(n_features=100, n_samples=100)
y = 2 * (y - 0.5)                     # relabel classes from {0, 1} to {-1, +1}
clf = LR(lr=0.000001, l1=0.1, l2=0.1, num_iter=10, verbose=0)
clf = clf.fit(X, y)
yp = clf.predict(X)
yp.resize((100, 1))                   # y was resized to a column in place by fit
accuracy = int(sum(y == yp)) / len(y)
print("Accuracy: ", accuracy)

Ouch! This does not converge. But if I replace my gradw3 with the theano version:

gradw3 = get_gradw3(w,X,y)

where:
w, X, y = T.matrices("wXy")   # three symbolic matrices named w, X, y
logloss = T.sum(T.log1p(1 + T.expm1(-y * T.dot(X, w))))
get_gradw3 = theano.function([w, X, y], T.grad(logloss, w).reshape(w.shape))

it converges to 100% accuracy. That means my gradw3 implementation is wrong, but I cannot find the mistake. Desperately seeking help!

1 Answer:

Answer 0 (score: 0)

Actually, I finally got it to work. I do not know exactly which change was the crucial one, but here is a digest of what I changed (the comparison after the list narrows it down):

  • Replaced all np.multiply with *

  • Lowered the learning rate and the regularizers

  • Applied np.nan_to_num to the exponential term
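
Comparing the two gradw3 versions suggests which change actually mattered: the numerator went from 2 + expm1(z) (which equals 1 + exp(z), so the fraction reduced to the constant 1 and gradw3 did not depend on w at all) to 1 + expm1(z) (which equals exp(z), giving the correct sigmoid factor exp(z) / (1 + exp(z))). A minimal check of that reading, using numpy as above:

z = np.random.randn(5)                        # stands in for -y * X.dot(w)
old = (2 + np.expm1(z)) / (2 + np.expm1(z))   # identically 1
new = (1 + np.expm1(z)) / (2 + np.expm1(z))   # exp(z) / (1 + exp(z))
print(old)                                    # [1. 1. 1. 1. 1.]
print(np.allclose(new, np.exp(z) / (1 + np.exp(z))))  # True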

So here is the final code:

def lossf(w, X, y, l1, l2):
    w.resize((w.shape[0], 1))
    y.resize((y.shape[0], 1))

    # data term, with nan_to_num guarding the exponential against overflow
    lossf1 = np.sum(ss.log1p(1 + np.nan_to_num(ss.expm1(-y * np.dot(X, w)))))
    lossf2 = l2 * (np.dot(np.transpose(w), w))
    lossf3 = l1 * sum(abs(w))
    lossf = float(lossf1 + lossf2 + lossf3)
    return lossf

def gradf(w, X, y, l1, l2):
    w.resize((w.shape[0], 1))
    y.resize((y.shape[0], 1))

    gradw1 = l2 * 2 * w                  # gradient of the L2 penalty
    gradw2 = l1 * np.sign(w)             # (sub)gradient of the L1 penalty
    # data term: the numerator is now 1 + expm1(z) = exp(z), not 2 + expm1(z)
    gradw3 = -y * (1 + np.nan_to_num(ss.expm1(-y * np.dot(X, w))))
    gradw3 = gradw3 / (2 + np.nan_to_num(ss.expm1(-y * np.dot(X, w))))
    gradw3 = np.sum(gradw3 * X, axis=0)
    gradw3.resize(gradw3.shape[0], 1)
    gradw = gradw1 + gradw2 + gradw3
    gradw.resize(gradw.shape[0],)
    return np.transpose(gradw)

class LR(ClassifierMixin, BaseEstimator):
    def __init__(self, lr=0.000001, l1=0.1, l2=0.1, num_iter=100, verbose=0):
        self.l1 = l1
        self.l2 = l2
        self.w = None
        self.lr = lr
        self.verbose = verbose
        self.num_iter = num_iter

    def fit(self, X, y):
        n, d = X.shape
        self.w = np.zeros(shape=(d,))
        for i in range(self.num_iter):
            print("\n", "Iteration ", i)
            g = gradf(self.w, X, y, self.l1, self.l2)
            g.resize((g.shape[0], 1))
            self.w = self.w - g
            print("Loss: ", lossf(self.w, X, y, self.l1, self.l2))
        return self

    def predict_proba(self, X):
        probs = 1/(2 + ss.expm1(np.dot(-X, self.w)))
        return probs 

    def predict(self, X):
        probs = self.predict_proba(X)
        probs = np.sign(2 * probs - 1)
        probs.resize((probs.shape[0],))
        return probs
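
A minimal end-to-end run of the corrected estimator (a sketch of mine, reusing the synthetic setup from the question; the exact accuracy depends on the random draw):

X, y = make_classification(n_features=100, n_samples=100)
y = 2.0 * (y - 0.5)                       # labels in {-1, +1}
clf = LR(lr=0.000001, l1=0.1, l2=0.1, num_iter=10).fit(X, y)
yp = clf.predict(X).reshape(-1, 1)        # fit resized y to a column in place
print("train accuracy:", (yp == y).mean())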