Neural network training is too slow

Time: 2021-07-08 16:25:10

Tags: python neural-network mnist

I am implementing a neural network from scratch in Python and training it on the MNIST handwritten-digit dataset. I have tried to debug it, but I still can't figure out why it trains so slowly. When I plot the loss it does go down, yet when it predicts digits it seems to guess essentially at random. I have tested the same code on the Iris dataset and it predicts the classes correctly there, but not on the handwritten digits... Here is the code:

import numpy as np
import mnist
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from sklearn.metrics import accuracy_score

# ReLU activation (note: this modifies z in place and returns it)
def ReLU(z):
    z[z<0] = 0
    return z

# derivative of ReLU (also modifies z in place)
def ReLU_prime(z):
    z[z<0] = 0
    z[z>=0] = 1
    return z

# softmax along the class dimension (axis 0); each column is one example
def softmax(z):
    return np.exp(z)/np.sum(np.exp(z),axis=0)


class NN:
    def __init__(self,layers,lr=0.0001):
        self.layers = layers
        self.lr = lr
        self.n_layers = len(layers)

        self.weights = [np.random.randn(y,x)/np.sqrt(y) for x,y in zip(layers[:-1],layers[1:])]
        self.biases = [np.zeros((y,1)) for y in layers[1:]]
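        # weights[l] has shape (layers[l+1], layers[l]); biases[l] has shape (layers[l+1], 1)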

    def forward(self,X):
        A = X.copy()  # X is expected with shape (n_features, m_examples)
        if len(A.shape) == 1:
            A = A[:,np.newaxis]  # promote a single example to a column vector
        cache = [(None,A)]
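        # cache keeps (Z, A) for every layer so backprop can reuse the activations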

        for l in range(self.n_layers - 1):
            Z = np.dot(self.weights[l],A) + self.biases[l]
            if l < self.n_layers - 2:
                A = ReLU(Z)
            else:
                A = softmax(Z)

            cache.append((Z,A))
        return A,cache

    def backprop(self,X,Y):
        A,cache = self.forward(X)
        m = X.shape[1]
        dz = A - Y
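        # (for softmax with cross-entropy loss, the output-layer error is simply A - Y)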
        dwdbs = []
        for l in reversed(range(1,self.n_layers)):
            dw = (1/m)*np.dot(dz,cache[l-1][1].T)
            db = (1/m)*np.sum(dz,axis=1)[:,np.newaxis]

            if l > 1:
                # propagate the error back through the previous layer's ReLU
                da = np.dot(self.weights[l-1].T,dz)
                dz = da*ReLU_prime(cache[l-1][0])
            dwdbs.append((dw,db))

        # gradients were collected from the last layer back to the first
        for i,(dw,db) in enumerate(dwdbs):
            self.weights[self.n_layers - 2 - i] -= self.lr*dw
            self.biases[self.n_layers - 2 - i] -= self.lr*db
        return self.loss(X,Y)
    
    def loss(self,X,Y):
        A,_ = self.forward(X)
        m = Y.shape[1]
        idxs = np.where(Y==1)

        return -(1/m)*np.sum(Y[idxs]*np.log(A[idxs]),axis=0)

    def predict(self,a):
        return np.argmax(a)  # index of the largest output = predicted class

if __name__ == '__main__':                
    X,y = mnist.train_images()[:10_000],mnist.train_labels()[:10_000]

    X_train,X_test, y_train,y_test = train_test_split(X,y,test_size=0.1)
    # keep unflattened copies of the images for plotting later
    X_train_orig = X_train.copy()
    X_test_orig = X_test.copy()

    X_train = X_train/255
    X_train = np.reshape(X_train,(784,-1))
    N = X_train.shape[1]

    X_test = X_test/255
    X_test = np.reshape(X_test,(784,-1))
    y_train_ = np.zeros((10,y_train.shape[0]))
    y_train_[y_train,[x for x in range(y_train.shape[0])]] = 1
    y_train = y_train_.copy()
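    # y_train is now one-hot encoded, shape (10, number of training examples)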
    y_test = y_test
    nn = NN([784,150,150,50,10],0.1)  # three hidden layers; learning rate starts at 0.1

    epochs = 10_000
    losses = []

    for epoch in range(epochs):
        print('epoch',epoch,'lr',nn.lr)
        nn.lr *=0.99992  # decay the learning rate a little each epoch
        idxs = np.random.permutation(N)

        # one full-batch gradient step on the whole (shuffled) training set
        loss = nn.backprop(X_train[:,idxs],y_train[:,idxs])
        losses.append(loss)
        print(loss)

    plt.plot([x for x in range(len(losses))],losses)
    plt.show()
    # sanity check: predict a few random test images and display them
    acc = 0
    N_test = X_test.shape[1]
    preds = []
    for i in range(10):
        idx = np.random.choice(N_test)
        a,_ = nn.forward(X_test[:,idx])
        plt.imshow(X_test_orig[idx])
        plt.show()
        a = nn.predict(a)
        print('a',a,'y',y_test[idx])
    
        if a == y_test[idx]:
            acc +=1
    print('acc',acc)
    

0 Answers