Python列表重复为整个列表插入的最后一个元素

时间:2017-11-03 18:40:09

标签: python neural-network

我正在研究一些神经网络代码。我已经编写了自己的神经元课程,可以找到here。现在,我正在编写脑类,这应该会使NN中使用的大多数代码变得非常有用。在这个类中,self.Real_Outputs收集所有输出并将其放入要在之后使用的列表中。

我正在沸腾我的大脑以找出为什么当我在self中添加元素时.Real_Outputs整个列表都会收到此值。我发现here in this topic类似于我的讨论,但是,在我的情况下,我已经使用过' self'声明。你能帮助我吗?

班级大脑:

def __init__(self, training_set, desired_outputs, bias, learning_tax):
    self.Training_Set = training_set
    self.Desired_Outputs = desired_outputs
    self.Bias = bias
    self.Learning_Tax = learning_tax
    self.Hidden_Layer = []
    self.Hidden_Layer_Outputs = []
    self.Hidden_Layer_Errors = []
    self.Output_Layer = []
    self.Output_Layer_Outputs = []
    self.Output_Layer_Errors = []
    self.Real_Outputs = [0 for x in self.Desired_Outputs]

def set_hidden_layers(self, number_of_layers, number_of_neurons, activation_function):
    self.Hidden_Layer = [[Neuron.Neuron(len(self.Training_Set[0]), activation_function, 1, self.Bias)
                         for x in range(number_of_neurons)]
                         for y in range(number_of_layers)]

    self.Hidden_Layer_Outputs = [[0 for x in range(number_of_neurons)]
                                 for y in range(number_of_layers)]

    self.Hidden_Layer_Errors = [[0 for x in range(number_of_neurons)]
                                for y in range(number_of_layers)]

def set_output_layer(self, number_of_neurons, activation_function):
    self.Output_Layer = [Neuron.Neuron(len(self.Hidden_Layer[0]), activation_function, 0, self.Bias)
                         for x in range(number_of_neurons)]

    self.Output_Layer_Outputs = [0 for x in range(number_of_neurons)]

    self.Output_Layer_Errors = [0 for x in range(number_of_neurons)]

def start_converging(self):
        j=0
        while j < 10:

            # Here we're coming inside the training set. If was the n-th time
            # you pass here, it's the n-th iteration over the Training Set.
            # 'a' represents the Training Set index

            for a in range(len(self.Training_Set)):

                # Here we're running over the hidden layers
                # 'b' represent the layer index

                for b in range(len(self.Hidden_Layer)):

                    # Here we're running over the neurons in the layers
                    # 'c' represents the neuron index

                    for c in range(len(self.Hidden_Layer[b])):
                        if b == 0:
                            self.Hidden_Layer[b][c].initialize_inputs(self.Training_Set[a])
                            self.Hidden_Layer[b][c].get_sum()
                            self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()
                        else:
                            self.Hidden_Layer[b][c].initialize_inputs(self.Hidden_Layer_Outputs[b-1])
                            self.Hidden_Layer[b][c].get_sum()
                            self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()

                # Here we're running over the output layer
                # 'd' represents the neuron index

                for d in range(len(self.Output_Layer)):
                    self.Output_Layer[d].initialize_inputs(self.Hidden_Layer_Outputs[-1])
                    self.Output_Layer[d].get_sum()
                    self.Output_Layer_Outputs[d] = self.Output_Layer[d].get_output()
                    self.Output_Layer_Errors[d] = self.Output_Layer[d].get_error(0, self.Desired_Outputs[a])
                    self.Output_Layer[d].update_weights(0, self.Learning_Tax)

                self.Real_Outputs[a] = self.Output_Layer_Outputs

                # We're updating the hidden layers now. Notice that we should pass backwards, from
                # last to first, so, we're using [-(e+1)] indexes.
                # '[-(e+1)]' represents the layers index.

                for e in range(len(self.Hidden_Layer)):
                    for f in range(len(self.Hidden_Layer[-(e+1)])):
                        if e == 0:
                            self.Hidden_Layer_Errors[-(e + 1)][-(f + 1)] = self.Hidden_Layer[-(e + 1)][-(f + 1)].get_error(0, self.Output_Layer_Errors)
                            self.Hidden_Layer[-(e + 1)][-(f + 1)].update_weights(0, self.Learning_Tax)
                        else:
                            self.Hidden_Layer[-(e + 1)][-(f + 1)].get_error(0, self.Hidden_Layer_Errors[- (e + 1)])
                            self.Hidden_Layer[-(e + 1)][-(f + 1)].update_weights(0, self.Learning_Tax)
            j += 1
            print (self.Desired_Outputs)
            print (self.Real_Outputs)

0 个答案:

没有答案