Artificial neural network won't train

Asked: 2021-05-17 19:41:40

Tags: python machine-learning neural-network

I have written a Dense class for the FC layers of a CNN, but to test whether it works as a plain FC ANN, I tried training it on a dataset, and the loss never goes down. I can't seem to find the problem.

Here is the code:

import numpy as np


class Dense:
    
    # Constructor
    def __init__(self, size, in_size, activation = 'relu'):
        # Assign vars
        self.size = size; self.activation = activation
        # Initialize Weights and Biases
        weights_dims = (size, in_size)
        self.weights = np.random.standard_normal(weights_dims) * 0.01
        self.biases = np.zeros([size, 1])
        # Initialize Accumulators
        self.sigma_acc = self.biases * 0
        self.delta_acc = self.weights * 0
        
    # ReLU Activation Function
    def relu(self, arr):
        return arr * (arr > 0)
    
    # Softmax Activation Function
    def softmax(self, arr):
        # Subtract the max for numerical stability (done out of place,
        # so the caller's pre-activation array is not mutated)
        exp = np.exp(arr - arr.max())
        return exp / np.sum(exp)
    
    # Activation Manager Function
    def activate(self, arr):
        if self.activation == 'relu': return self.relu(arr)
        if self.activation == 'softmax': return self.softmax(arr)
        
    # Forward Propagation
    def step(self, vec):
        # Assign Input
        self._in = vec
        # Dot
        z = np.dot(self.weights, vec) + self.biases
        a = self.activate(z)
        # Return
        self.out = a
        return self.out
    
    # Back Propagation
    def back(self, grad):
        # Calculate sigma
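        # (for softmax, grad is assumed to already be dL/dz, i.e. the
        # softmax is paired with a cross-entropy loss upstream)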
        sigma = grad if self.activation == 'softmax' else grad * (self.out > 0)
        # Calculate delta
        delta = np.dot(sigma, self._in.T)
        # Accumulate
        self.sigma_acc += sigma
        self.delta_acc += delta
        # Return global gradient
        global_grad = np.dot(self.weights.T, sigma)
        return global_grad
    
    # Train
    def update(self, alpha, batch_size):
        dw = self.delta_acc / batch_size; self.delta_acc *= 0
        db = self.sigma_acc / batch_size; self.sigma_acc *= 0
        self.weights -= alpha * dw
        self.biases -= alpha * db

To connect them into a model, I simply append instances of this Dense class to a list and then loop over them, forward with the step() function and backward with the back() function; a minimal sketch of that loop is below.
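
For reference, this is roughly the driver loop I mean (the data, layer sizes, and the a - y gradient are illustrative placeholders: they assume one-hot labels and a softmax + cross-entropy pairing, so the gradient fed into the last layer's back() is probs - labels):

import numpy as np

# Toy driver loop: ReLU hidden layer, softmax output layer.
# The data is random and only exercises the shapes.
np.random.seed(0)
data = [(np.random.randn(4, 1), np.eye(3)[:, [i % 3]]) for i in range(30)]

layers = [Dense(8, 4), Dense(3, 8, activation='softmax')]
alpha = 0.1

for epoch in range(100):
    for x, y in data:
        a = x
        for layer in layers:                  # forward pass
            a = layer.step(a)
        grad = a - y                          # dL/dz for softmax + cross-entropy
        for layer in reversed(layers):        # backward pass
            grad = layer.back(grad)
    for layer in layers:                      # apply the accumulated batch update
        layer.update(alpha, batch_size=len(data))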

If you can spot any problems, please let me know! Thanks.

1 Answer:

Answer 0 (score: 1)

This is how I create my networks; maybe it can help you.

import numpy as np

X = np.array(([0, 0, 0], [0, 0, 1], [0, 1, 0],
             [0, 1, 1], [1, 0, 0], [1, 0, 1],
             [1, 1, 0], [1, 1, 1]), dtype=float)
y = np.array(([1], [0], [0], [0], [0], [0], [0], [1]), dtype=float)
xPredicted = np.array(([0, 0, 1]), dtype=float)

X = X/np.amax(X, axis=0)
xPredicted = xPredicted/np.amax(X, axis=0)

lossFile = open("Enter file", "w")  # "Enter file" is a placeholder path


class Neural_Network(object):

    def __init__(self, inputLayerSize, outputLayerSize, hiddenLayerSize):

        self.inputLayerSize = inputLayerSize
        self.outputLayerSize = outputLayerSize
        self.hiddenLayerSize = hiddenLayerSize

        self.W1 = \
            np.random.randn(self.inputLayerSize, self.hiddenLayerSize)
        self.W2 = \
            np.random.randn(self.hiddenLayerSize, self.outputLayerSize)

    def feedForward(self, X):

        self.z = np.dot(X, self.W1)
        self.z2 = self.activationSigmoid(self.z)
        self.z3 = np.dot(self.z2, self.W2)
        o = self.activationSigmoid(self.z3)
        return o

    def backwardPropagate(self, X, y, o):

        self.o_error = y - o
        self.o_delta = self.o_error*self.activationSigmoidPrime(o)
        self.z2_error = self.o_delta.dot(self.W2.T)
        self.z2_delta = self.z2_error*self.activationSigmoidPrime(self.z2)
        self.W1 += X.T.dot(self.z2_delta)
        self.W2 += self.z2.T.dot(self.o_delta)

    def trainNetwork(self, X, y):

        o = self.feedForward(X)
        self.backwardPropagate(X, y, o)

    def activationSigmoid(self, s):

        return 1/(1+np.exp(-s))

    def activationSigmoidPrime(self, s):

        # Note: s is expected to be the sigmoid *output*, so s*(1-s)
        # is the derivative with respect to the pre-activation
        return s * (1 - s)

    def saveSumSquaredLossList(self, i, error):

        lossFile.write(str(i)+","+str(error.tolist())+"\n")

    def saveWeights(self):

        np.savetxt("Enter file", self.W1, fmt="%s")  # "Enter file" is a placeholder;
        np.savetxt("Enter file", self.W2, fmt="%s")  # use two distinct real paths

    def predictOutput(self):

        print("Predicted XOR output data based on trained weights: ")
        print("Expected (X1-X3); \n" + str(X))
        print("Output (Y1): \n" + str(self.feedForward(xPredicted)))


myNeuralNetwork = Neural_Network(3, 1, 4)
trainingEpochs = 1000

for i in range(trainingEpochs):

    print("Epoch # " + str(i) + "\n")
    print("Network Input : \n" + str(X))
    print("Expected Output of XOR Gate Neural Network: \n" + str(y))
    print("Actual Output from XOR Gate Neural Network: \n" +
          str(myNeuralNetwork.feedForward(X)))
    Loss = np.mean(np.square(y - myNeuralNetwork.feedForward(X)))
    myNeuralNetwork.saveSumSquaredLossList(i, Loss)
    print("Sum Squared Loss: \n" + str(Loss))
    print("\n")
    myNeuralNetwork.trainNetwork(X, y)

myNeuralNetwork.saveWeights()
myNeuralNetwork.predictOutput()
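
One note on the update step: backwardPropagate applies the raw gradients to W1 and W2 with no learning rate. That can work on a tiny problem like this XOR set, but it tends to oscillate or diverge on larger ones. A variant with an explicit learning rate might look like this (the lr parameter and its value are illustrative, not tuned):

    def backwardPropagate(self, X, y, o, lr=0.5):

        # Same gradients as above, scaled by a learning rate
        # before being applied to the weights
        self.o_error = y - o
        self.o_delta = self.o_error*self.activationSigmoidPrime(o)
        self.z2_error = self.o_delta.dot(self.W2.T)
        self.z2_delta = self.z2_error*self.activationSigmoidPrime(self.z2)
        self.W1 += lr * X.T.dot(self.z2_delta)
        self.W2 += lr * self.z2.T.dot(self.o_delta)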