XOR neural network not converging with Python implementation

Asked: 2017-02-20 18:13:01

Tags: python neural-network

I am trying to get into neural networks, but I am very new to this, and I have never really understood the different implementations I have found.

So, following Virginie MATHIVET's book, I tried to write a simple implementation of the XOR problem with a multilayer perceptron and backpropagation, but my algorithm does not converge. I have tried many things, but the result does not change. Here is my code:

import math
from random import random

class Neuron:

    def __init__(self, nb_inputs, bias):
        self.nb_inputs = nb_inputs
        self.bias = bias
        self.weights = [random()*2.0-1 for _ in range(nb_inputs+1)]
        self.deltas = [0.0 for _ in range(nb_inputs+1)]
        self.output = None

    def init_deltas(self):
        self.deltas = [0.0 for _ in range(self.nb_inputs+1)]

    def aggregation(self, inputs):
        # Weighted sum of the inputs; the last weight is the bias weight, multiplied by self.bias
        return sum([self.weights[i] * inputs[i] for i in range(self.nb_inputs)]) + self.bias * self.weights[self.nb_inputs]

    def activation(self, value):
        # Sigmoid activation
        return 1.0/(1.0+math.exp(-value))

    def compute_output(self, inputs):
        self.output = self.activation(self.aggregation(inputs))
        return self.output

class NeuralNetwork:

    def __init__(self, nb_inputs, nb_hidden, nb_outputs, learning_rate):
        self.nb_inputs = nb_inputs
        self.nb_hidden = nb_hidden
        self.nb_outputs = nb_outputs
        self.learning_rate = learning_rate

        # Hidden layer is fed by the network inputs, output layer by the hidden layer; each neuron gets a constant bias input of 1.0
        self.output_layer = [Neuron(self.nb_hidden, 1.0) for _ in range(self.nb_outputs)]
        self.hidden_layer = [Neuron(self.nb_inputs, 1.0) for _ in range(self.nb_hidden)]

    def compute_outputs(self, inputs):
        hidden_outputs = [neuron.compute_output(inputs) for neuron in self.hidden_layer]
        outputs = [neuron.compute_output(hidden_outputs) for neuron in self.output_layer]
        return outputs[0]

    def init_deltas(self):
        for neuron in self.output_layer + self.hidden_layer:
            neuron.init_deltas()

    def train(self, data, nb_iterations):
        for _ in range(nb_iterations):
            self.init_deltas()

            for inputs in data:
                si, yi = self.compute_outputs(inputs), data[inputs]
                # si: network output for this pattern, yi: expected output
                print(si, yi)
                # For each output neuron
                for neuron in self.output_layer:
                    # For each weight
                    for i in range(neuron.nb_inputs):
                        neuron.deltas[i] = si * (1 - si) * (yi - si)

                # For each hidden neuron
                for i in range(self.nb_hidden):
                    hidden_neuron = self.hidden_layer[i]
                    # For each weight
                    for k in range(hidden_neuron.nb_inputs):
                        total = 0.0
                        # For each output neuron
                        for output_neuron in self.output_layer:
                            total += output_neuron.deltas[i] * output_neuron.weights[i]
                        hidden_neuron.deltas[k] = hidden_neuron.output * (1 - hidden_neuron.output) * total

                # adjust weights in output_layer
                for neuron in self.output_layer:
                    for i in range(self.nb_hidden):
                        neuron.weights[i] += self.learning_rate * neuron.deltas[i] * neuron.output
                    neuron.weights[self.nb_hidden] += self.learning_rate * neuron.deltas[self.nb_hidden] * neuron.bias

                # adjust weights in hidden_layer
                for neuron in self.hidden_layer:
                    for i in range(self.nb_inputs):
                        neuron.weights[i] += self.learning_rate * neuron.deltas[i] * neuron.output
                    neuron.weights[self.nb_inputs] += self.learning_rate * neuron.deltas[self.nb_inputs] * neuron.bias

    def predict(self, inputs):
        return self.compute_outputs(inputs)

And to test it:

DATA = {
    (0, 0): 0,
    (1, 0): 1,
    (0, 1): 1,
    (1, 1): 0
}
nn = NeuralNetwork(2, 3, 1, 0.2)
nn.train(DATA, 50000)
nn.predict([0,0])
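
A quick sanity check (just a suggestion, reusing the DATA dict and the predict method defined above) is to print the prediction for all four patterns next to the expected values:

for pattern, expected in DATA.items():
    print("%s -> %.3f (expected %d)" % (pattern, nn.predict(pattern), expected))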

Thanks in advance for your help.
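
For reference, here is a minimal, self-contained sketch (not taken from the book; the helper names sigmoid, forward and train_xor are only illustrative) of the same kind of 2-input / 3-hidden / 1-output sigmoid network trained with plain online backpropagation on XOR. It only illustrates the usual conventions: one delta per neuron rather than one per weight, and each weight update scaled by the input feeding that weight. With these settings it normally converges, although an unlucky random initialisation can still get stuck:

import math
from random import random

def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))

def forward(x1, x2, w_hidden, w_output):
    # Hidden activations, then a single sigmoid output neuron
    hidden = [sigmoid(w[0] * x1 + w[1] * x2 + w[2]) for w in w_hidden]
    out = sigmoid(sum(wo * h for wo, h in zip(w_output, hidden)) + w_output[-1])
    return hidden, out

def train_xor(nb_hidden=3, learning_rate=0.5, nb_iterations=20000):
    # Per hidden neuron: [weight for x1, weight for x2, bias weight]
    w_hidden = [[random() * 2 - 1 for _ in range(3)] for _ in range(nb_hidden)]
    # Output neuron: one weight per hidden neuron plus a bias weight
    w_output = [random() * 2 - 1 for _ in range(nb_hidden + 1)]
    data = [((0, 0), 0), ((1, 0), 1), ((0, 1), 1), ((1, 1), 0)]

    for _ in range(nb_iterations):
        for (x1, x2), target in data:
            hidden, out = forward(x1, x2, w_hidden, w_output)

            # One delta per neuron, using the sigmoid derivative out * (1 - out)
            delta_out = out * (1 - out) * (target - out)
            delta_hidden = [h * (1 - h) * delta_out * w_output[i] for i, h in enumerate(hidden)]

            # Each weight moves by learning_rate * delta of its neuron * input feeding that weight
            for i, h in enumerate(hidden):
                w_output[i] += learning_rate * delta_out * h
            w_output[-1] += learning_rate * delta_out * 1.0  # bias input is 1

            for i in range(nb_hidden):
                w_hidden[i][0] += learning_rate * delta_hidden[i] * x1
                w_hidden[i][1] += learning_rate * delta_hidden[i] * x2
                w_hidden[i][2] += learning_rate * delta_hidden[i] * 1.0  # bias input is 1

    return w_hidden, w_output

w_hidden, w_output = train_xor()
for x1, x2 in [(0, 0), (1, 0), (0, 1), (1, 1)]:
    print("%s -> %.3f" % ((x1, x2), forward(x1, x2, w_hidden, w_output)[1]))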

0 Answers:
