Implementing a neural network in Python

Date: 2018-05-17 17:17:44

Tags: python numpy neural-network

I am trying to implement a neural network (NN) in Python using numpy, and I have found that my NN does not work as expected.

I checked the gradients numerically and compared them with the gradients computed by backpropagation; it turned out they matched. But the cost decreases very slowly and bounces back up after some epochs.
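Concretely, the numerical check compares the backpropagation gradients against a central-difference estimate. A minimal sketch of such a check (numerical_gradient, cost_fn, and analytic_grad_W1 are illustrative names, not functions from my code below):

import numpy as np

def numerical_gradient(cost_fn, W, eps=1e-5):
    # Central-difference estimate of d(cost)/dW, one entry at a time.
    # cost_fn() takes no arguments and recomputes the total cost using
    # the current (temporarily perturbed) W, e.g. via a closure over W.
    grad = np.zeros_like(W)
    for idx in np.ndindex(*W.shape):
        old = W[idx]
        W[idx] = old + eps
        cost_plus = cost_fn()
        W[idx] = old - eps
        cost_minus = cost_fn()
        W[idx] = old  # restore the original entry
        grad[idx] = (cost_plus - cost_minus) / (2 * eps)
    return grad

The backprop result is then checked with something like np.allclose(numerical_gradient(cost_fn, W1), analytic_grad_W1, atol=1e-6).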

I am trying to solve the exclusive-or (XOR) problem, but my NN seems to ignore each sample's input vector and tends to predict, for every sample, the fraction of samples labeled 1 (for example, if I feed it 3 positive samples and 1 negative sample, it predicts roughly 0.75 for all four).

Can anyone help me with this? It has been bothering me for quite a while.

Here are the structure of the NN and some formulas:

[image: structure of NN]

[image: formula]
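(The images above did not survive; reconstructed from the code below, the forward pass and the per-sample cost are

a = W_1 x, \quad z = \sigma(a + b_1), \quad b = W_2 z, \quad \hat{y} = \sigma(b + b_2)

J(\hat{y}, y) = -y \log \hat{y} - (1 - y) \log(1 - \hat{y})

with \sigma(t) = 1 / (1 + e^{-t}).)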

Here is my code:

import numpy as np
import matplotlib.pyplot as plt
np.random.seed(565113221)

def sigmoid(x): # sigmoid function 
    return 1/(1+np.exp(-x))

def forward(x,W1,W2,b1,b2): # feed forward
    a = W1.dot(x)
    z = sigmoid(a+b1)
    b = W2.dot(z)
    y = sigmoid(b+b2)
    return a,z,b,y

def pred(X,W1,W2,b1,b2): # predict
    y_pred = np.zeros((X.shape[0],1))
    for i in range(X.shape[0]):
        _,_,_,y_pred[i] = forward(X[i,:].reshape((-1,1)),W1,W2,b1,b2) # use the i-th sample, not a leftover global x
    return y_pred

X = np.array([[0,0],[0,1],[1,0],[1,1]]) # features 4 * 2
Y = np.array([[0],[1],[1],[0]]) # labels 4 * 1

epsilon = 0.12 # initialize all weights between -0.12 ~ 0.12
W1 = np.random.random((2,2)) * epsilon * 2 - epsilon # map from input to hidden
b1 = np.random.random((2,1)) * epsilon * 2 - epsilon # bias1 
W2 = np.random.random((1,2)) * epsilon * 2 - epsilon # map from hidden to output
b2 = np.random.random((1,1)) * epsilon * 2 - epsilon # bias2
epoch = 50 # maximum training turns
alpha = 0.01 # learning rate
for turn in range(epoch):
    print('turn:',turn,end=' ')
    epoch_cost = 0
    for index in range(X.shape[0]):
        x = X[index,:].reshape((-1,1))
        y = Y[index,:].reshape((-1,1))
        a,z,b,y_pred = forward(x,W1,W2,b1,b2) # feed forward

        cost = -y.dot(np.log(y_pred)) - (1-y).dot(np.log(1-y_pred)) # calculate cost
        epoch_cost += cost # calculate cumulative cost of this epoch

        for k in range(W2.shape[0]): # update W2
            for j in range(W2.shape[1]):
                W2[k,j] -= alpha * (y_pred - y)[k,0] * z[j,0]

        for k in range(b2.shape[0]): # update b2
            b2[k,0] -= alpha * (y_pred - y)[k,0]


        for j in range(W1.shape[0]): # update W1
            for i in range(W1.shape[1]):
                for k in range(W2.shape[0]):
                    W1[j,i] -= alpha * (y_pred - y)[k,0] * W2[k,j] * z[j,0] * (1 - z[j,0]) * x[i,0]

        for j in range(b1.shape[0]): # update b1
            for k in range(W2.shape[0]): # iterate k here instead of reusing the stale k left over from the W1 loop
                b1[j,0] -= alpha * (y_pred - y)[k,0] * W2[k,j] * z[j,0] * (1 - z[j,0])

    print('cost:',epoch_cost)


print('prediction\n',pred(X,W1,W2,b1,b2))
print('ground-truth\n',Y)
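For reference, the per-sample update loops above are equivalent to the following vectorized form (same notation as the code; one caveat: the loops above reuse the already-updated W2 in the W1 and b1 updates, while this sketch computes delta1 from W2 before updating it, which is the standard backpropagation ordering):

delta2 = y_pred - y                       # (1,1) output-layer error for sigmoid + cross-entropy
delta1 = W2.T.dot(delta2) * z * (1 - z)   # (2,1) hidden-layer error
W2 -= alpha * delta2.dot(z.T)             # (1,2)
b2 -= alpha * delta2                      # (1,1)
W1 -= alpha * delta1.dot(x.T)             # (2,2)
b1 -= alpha * delta1                      # (2,1)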

1 Answer:

Answer 0 (score: 0)

Incomplete answer. I just replaced gradient descent with something similar to a simple evolution strategy (ES). It works, so there is probably no bug in your forward pass.

# [...] sigmoid(), forward(), pred() not modified

X = np.array([[0,0],[0,1],[1,0],[1,1]]) # features 4 * 2
Y = np.array([[0],[1],[1],[0]]) # labels 4 * 1

W1 = np.zeros((2,2)) # map from input to hidden
b1 = np.zeros((2,1)) # bias1 
W2 = np.zeros((1,2)) # map from hidden to output
b2 = np.zeros((1,1)) # bias2
epoch = 2000 # maximum training turns
for turn in range(epoch):
    print('turn:',turn,end=' ')
    epoch_cost = 0
    for index in range(X.shape[0]):
        x = X[index,:].reshape((-1,1))
        y = Y[index,:].reshape((-1,1))
        a,z,b,y_pred = forward(x,W1,W2,b1,b2) # feed forward

        cost = -y.dot(np.log(y_pred)) - (1-y).dot(np.log(1-y_pred)) # calculate cost
        epoch_cost += cost # calculate cumulative cost of this epoch

    if turn == 0 or epoch_cost < epoch_cost_best:
        epoch_cost_best = epoch_cost
        W1_best = W1
        b1_best = b1
        W2_best = W2
        b2_best = b2

    epsilon = 0.12 # perturb all weights between -0.12 ~ 0.12
    W1 = W1_best + np.random.random((2,2)) * epsilon * 2 - epsilon
    b1 = b1_best + np.random.random((2,1)) * epsilon * 2 - epsilon
    W2 = W2_best + np.random.random((1,2)) * epsilon * 2 - epsilon
    b2 = b2_best + np.random.random((1,1)) * epsilon * 2 - epsilon

    print('cost:',epoch_cost)


print('prediction\n',pred(X,W1_best,W2_best,b1_best,b2_best))
print('ground-truth\n',Y)
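A note on what the loop above does: each turn it keeps the best parameters seen so far and perturbs them with uniform noise in [-0.12, 0.12], i.e., a keep-the-best random search (a (1+1)-ES-style hill climb) rather than gradient descent, which is why no gradients are needed.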