Prediction is always 1 or 0

Date: 2018-10-12 05:14:40

Tags: python python-3.x machine-learning neural-network backpropagation

Edit: when I squash the inputs down into the 0-1 range, the output of every neuron is about 0.5 for every data set.

After training, the output is always 1 no matter which set of inputs I feed in. However, if I change the learning rate from positive to negative, or vice versa, the output is always 0.

import math
import random

LN = -0.05

def Matrix(numI, numO):
    matrix = []
    for i in range(0, numO):
        matrix.append([])
        for c in range(0, numI):
            if c > numI:
                rw = random.random()
                matrix[i].append(rw)
            else:
                rw = random.random()
                matrix[i].append(rw)
    return matrix


class Neuralnetwork:

    def __init__(self, numI, numO):
        self.Output_layer = Output_layer(numI, numO)
        self.Feed_forward = self.Output_layer.Feed_forward

    def train(self, t_inputs, t_targets):
        for n in range(len(self.Output_layer.Neurons)):
            self.Output_layer.new_weight(t_inputs, t_targets, n)


class Output_layer:

    def __init__(self, numI, numO):

        self.Bias = 1
        self.Matrix = Matrix(numI, numO)
        self.Neurons = []

        for o in range(numO):
            self.Neurons.append(Neuron(self.Matrix, o))

    def Feed_forward(self, inputs):
        outputs = []
        for i in self.Neurons:
            outputs.append(i.Output(inputs, self.Bias))
        print(outputs)

    def new_weight(self, t_inputs, t_targets, a):
        for aw in range(len(self.Neurons[a].Weights)):
            totalsw = []
            totalsb = []
            for i in range(len(t_inputs)):
                pd_c_wrt_output = 2 * (self.Neurons[a].Output(t_inputs[i], self.Bias) - t_targets[i][a])
            pd_output_wrt_net = self.Neurons[a].Output(t_inputs[i], self.Bias) * (1 - self.Neurons[a].Output(t_inputs[i], self.Bias))
            pd_net_wrt_weight = t_inputs[aw][aw]
            pd_c_wrt_weight = pd_c_wrt_output * pd_output_wrt_net * pd_net_wrt_weight
            totalsw.append(pd_c_wrt_weight)
            pd_c_wrt_output = 2 * (self.Neurons[a].Output(t_inputs[i], self.Bias) - t_targets[i][a])
            pd_output_wrt_net = self.Neurons[a].Output(t_inputs[i], self.Bias) * (1 - self.Neurons[a].Output(t_inputs[i], self.Bias))
            pd_net_wrt_bias = 1
            pd_c_wrt_bias = pd_c_wrt_output * pd_output_wrt_net * pd_net_wrt_bias
            totalsb.append(pd_c_wrt_bias)
        pd_weight = sum(totalsw)
        pd_bias = sum(totalsb)
        self.Neurons[a].Weights[aw] -= LN * pd_weight
        self.Bias -= LN * pd_bias


class Neuron:
    def __init__(self, matrix, index_of_M):

        self.Weights = matrix[index_of_M]

    def Weighted_sum(self, weights, inputs, bias):
        ind = 0
        weightedI = []
        for i in weights:
            output = i * inputs[ind]
            weightedI.append(output)
            ind += 1

        list = sum(weightedI) + bias
        return list

    def Sigmoid(self, prediction):
        e = math.exp(-prediction)
        prediction = 1 / (1 + e)
        return round(prediction, 8)

    def Output(self, inputs, bias):
        output = self.Sigmoid(self.Weighted_sum(self.Weights, inputs, bias))
        return output


nn = Neuralnetwork(2, 2)
nn.Feed_forward([10, 20])

for i in range(100000):
    nn.train([[10, 20], [15, 30], [8, 16], [3, 9], [6, 18], [2, 6]],
             [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])

It worked fine in my first neural network, and I really can't find the error. I have tried different things, such as putting new_weight into the Neuron class, different numbers of inputs and outputs, and so on.

1 answer:

Answer 0 (score: 0)

Try assigning the weight values randomly; this helps break the symmetry between the weights. Also set the bias to 1. You have two output classes, so I suggest using a loss function such as mean squared error with a gradient descent optimizer. Also set the learning rate to 0.001 or 0.01.
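A minimal sketch of what those suggestions might look like for a single sigmoid neuron: random weight initialization, bias starting at 1, a mean squared error gradient step, and a positive learning rate of 0.01. The class name, helper functions, and data below are made up for illustration and are not taken from the question's code.

    import math
    import random

    LEARNING_RATE = 0.01  # positive, as suggested in the answer


    def sigmoid(x):
        return 1.0 / (1.0 + math.exp(-x))


    class SigmoidNeuron:
        def __init__(self, num_inputs):
            # Random initialization breaks the symmetry between weights.
            self.weights = [random.uniform(-1.0, 1.0) for _ in range(num_inputs)]
            self.bias = 1.0

        def output(self, inputs):
            net = sum(w * x for w, x in zip(self.weights, inputs)) + self.bias
            return sigmoid(net)

        def train_step(self, inputs, target):
            out = self.output(inputs)
            # d(MSE)/d(out) = 2 * (out - target); d(out)/d(net) = out * (1 - out)
            delta = 2.0 * (out - target) * out * (1.0 - out)
            for i, x in enumerate(inputs):
                self.weights[i] -= LEARNING_RATE * delta * x  # gradient descent
            self.bias -= LEARNING_RATE * delta


    # Made-up inputs already squashed into the 0-1 range.
    data = [([0.9, 0.8], 1.0), ([0.8, 0.95], 1.0),
            ([0.1, 0.2], 0.0), ([0.15, 0.05], 0.0)]

    neuron = SigmoidNeuron(2)
    for _ in range(20000):
        for inputs, target in data:
            neuron.train_step(inputs, target)

    for inputs, target in data:
        print(inputs, "->", round(neuron.output(inputs), 3), "target", target)

Note that with gradient descent the update subtracts the gradient, so the learning rate should stay positive; making it negative turns every step into gradient ascent on the loss.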

You can read more here.