python - XOR反向传播神经网络收敛到0.5

时间:2018-06-10 22:42:22

标签: python neural-network backpropagation feed-forward

我读过像我这样的几个问题,但我似乎无法找到答案。

我试图构建一个使用Back Propagation作为使用python的学习方法的神经网络,我没有遵循任何类型的例子,我使用自己的架构,它看起来有点怪异所以我&#39 ; ll解释它是如何工作的。

该体系结构分为3个部分:Neuron,Layer和BPNeuralNetowrk,这是我存储激活函数的神经元类中最重要的部分(我使用sigmoid)和连接该神经元的突触的权重上一层。

我正在测试的特定网络有一个隐藏层有2个神经元和一个输出层有一个神经元,两个都有sigmoid作为激活函数,网有两个输入。

问题在于,无论我使用什么学习率(在我的算法中称为alpha),无论我为feedForward的输出运行多少个时期总是收敛到0.5,我无法理解为什么

这里是我写的代码,它可能有点乱:

import numpy as np
import sys
#np.random.seed(7)

def sigmoid(x):
    return np.divide(1.0, np.add(1.0, np.exp(-x)))

def sigmoid_derivate(x):
    return np.multiply(x, (np.add(1.0, -x)))

def linear(x):
    return x

class Neuron(object):

    def __init__(self, nWeights, actF, actFDer):
        # nWeights : int --> number of weights for this neuron
        # actF : function* --> pointer to the activation function

        # Initializing weight vector randomly and adding one more for bias weight
        self.weights = np.random.uniform(low = -1.0, high = 1.0, size=nWeights+1)

        # Saving pointer to activating function
        self.func = actF
        self.funcDer = actFDer

        # Initializing containers for delta value and output array
        self.delta = None
        self.output = None

    def getOutput(self, inp):
        # inp : NumpyArray<float> --> array with the inputs

        # Appending bias to the input array
        inp = np.append(np.array([1]), inp)

        # Calculating sum of weighed inputs trough activation function
        net = np.dot(inp, self.weights)
        self.output = self.func(net)

        #print(self.output)
        return self.output

class Layer(object):

    def __init__(self, size, prevSize, actF, actFDer):
        # size : int --> number of neurons in this layer
        # prevSize : int --> number of neurons in previous layer
        # actF : function* --> pointer to the activation function

        # Storing size data
        self.size = size

        # Creating and filling neuron list
        self.neurons = []
        for n in range(size):
            self.neurons.append(Neuron(
                nWeights = prevSize,
                actF = actF,
                actFDer = actFDer
            ))

    def getNeuronOutputs(self):
        outputs = []
        for neuron in self.neurons:
            if(neuron.output):
                outputs.append(neuron.output)
            else:
                print(' Tried to take the output from an unassigned nueron output')
                exit()

        return np.array(outputs)

class BPNeuralNetwork(object):

    def __init__(self, inputSize, outputSize, alpha, epochs):
        # inputSize : int --> number of neurons in the input layer
        # outputSize : int --> number of neurons in the output layer

        # Storing input and output size values
        self.inputSize = inputSize
        self.outputSize = outputSize

        # Layers list
        self.layers = []

        # Values for testing
        self.alpha = alpha
        self.epochs = epochs

        # Values for shit
        self.trainingErrorSq = []
        self.testErrorSq = []

    def addLayer(self, size, actF, actFDer):
        # size : int --> number of neurons in the layer
        # actF : function* --> pointer to the activation function

        if( not self.layers ):
            prevSize = self.inputSize
        else:
            prevSize = self.layers[-1].size

        self.layers.append(Layer(
            size = size,
            prevSize = prevSize,
            actF = actF,
            actFDer = actFDer
        ))

        return True

    def feedForward(self, inp):
        # inp : NumpyArray<float> --> array with the inputs

        lastOut = inp
        for layer in self.layers:
            output = np.array([])
            for neuron in layer.neurons:
                output = np.append(output, neuron.getOutput(lastOut))
            lastOut = output

        return lastOut

    def backPropagation(self, inp, target):
        # inp : NumpyArray<float> --> array with the inputs
        # target : NumpyArray<float> --> array with the desired outputs

        # Getting output from FF
        output = self.feedForward(inp)

        # This shouldnt be here, this is cableado, i suck
        error = np.add(target,  -output)
        sqError = np.square(error)
        self.trainingErrorSq.append(sqError)

        # Calculate output layer deltas
        outputLayer = self.layers[-1]
        for i, neuron in enumerate(outputLayer.neurons):
            neuronError = np.add(target[i], -neuron.output)
            neuron.delta = np.multiply(neuron.funcDer(neuron.output), neuronError)

        # Calculate hidden layers deltas
        for i in reversed(range(len(self.layers)-1)):
            layer = self.layers[i]
            nextLayer = self.layers[i+1]
            for j, neuron in enumerate(layer.neurons):
                aux = 0
                for k, outputNeuron in enumerate(nextLayer.neurons):
                    aux = np.add(aux, np.multiply(outputNeuron.weights[j+1], outputNeuron.delta))

                neuron.delta = np.multiply(neuron.funcDer(neuron.output), aux)

        # Update weights
        for i in range(0, len(self.layers)):
            layer = self.layers[i]

            # Getting output from previous layer
            if(i != 0):
                backLayerOutputs = self.layers[i-1].getNeuronOutputs()
            else:
                backLayerOutputs = inp


            for l, neuron in enumerate(layer.neurons):
                # Update weight of the bias
                backLayerNeuronOutput = 1

                deltaW = np.array([self.alpha, neuron.delta, backLayerNeuronOutput, neuron.weights[0]])
                deltaW = np.prod(deltaW)
                neuron.weights[0] = np.add(neuron.weights[0], deltaW)

                # Update weights
                for j in range(1, len(neuron.weights)):
                    backLayerNeuronOutput = backLayerOutputs[j-1]
                    deltaW = np.array([self.alpha, neuron.delta, backLayerNeuronOutput, neuron.weights[j]])
                    deltaW = np.prod(deltaW)
                    neuron.weights[j] = np.add(neuron.weights[j], deltaW)

        return True

    def train(self, inputs, outputs):
        if(len(inputs) != len(outputs)):
            print('Lengths for input and output on training data sets dont match')

        print('Training netowrk... ')

        epochErrorSqMean = 10000
        epoch = 0
        while(epochErrorSqMean > 0.2):
        #for epoch in range(self.epochs):
            print('Epoch: '+str(epoch))
            #print('%f%% done'%(epoch/self.epochs*100))
            for i in range(0, len(inputs)):
                NN.backPropagation( inputs[i], outputs[i] )

            epochErrorSq = self.trainingErrorSq[epoch*len(inputs):epoch*len(inputs)+len(inputs)]
            epochErrorSqMean = np.mean(epochErrorSq)
            print('Epoch square error mean: '+str(epochErrorSqMean))
            epoch += 1

        print('done!')
        print('\a')
        print('Layers: '+str(len(self.layers)-1))
        print('Epochs: '+str(self.epochs))
        print('Alpha: '+str(self.alpha))
        print('Training error mean: '+str(np.mean(self.trainingErrorSq)))

    def test(self, inputs, outputs, printRes=False):
        if(len(inputs) != len(outputs)):
            print('Lengths for input and output on testing data sets dont match')

        fakeNegatives = 0
        fakePositives = 0
        output = []
        for i in range(0, len(inputs)):
            out = self.feedForward(inputs[i])
            error = np.add(outputs[i], -out)
            sqError = np.square(error)

            self.testErrorSq.append(sqError)
            output.append(out)

            if(printRes):
                print('INPUT: '+str(inputs[i]))
                print('Expected=%f, Got=%.15f' % (outputs[i], out))

        return output

if __name__ == "__main__":

    if(len(sys.argv) > 1):
        NN = BPNeuralNetwork(
            inputSize = 2,
            outputSize = 1,
            alpha = float(sys.argv[1]),
            epochs = int(sys.argv[2])
        )
    else:
        NN = BPNeuralNetwork(
            inputSize = 2,
            outputSize = 1,
            alpha = 0.1,
            epochs = 100
        )

    if('-xor' in sys.argv):
        NN.addLayer(2, sigmoid, sigmoid_derivate)
        NN.addLayer(1, sigmoid, sigmoid_derivate)

        dataset = np.array([
            np.array([0, 0, 0]),
            np.array([0, 1, 1]),
            np.array([1, 0, 1]),
            np.array([1, 1, 0]),
        ])
        X = dataset[:,0:NN.inputSize]
        Y = dataset[:,NN.inputSize:NN.inputSize+NN.outputSize]
        NN.train(X, Y)
        NN.test(X, Y, True)

如果你想测试它只是运行: python alpha epochs -xor

epochs参数不能正常工作,因为我已经将它配置为在错误之后停止&lt; 0.2

您将看到的输出是该特定时期的平方误差的平均值

我感谢对可能出错的任何回复或见解

0 个答案:

没有答案