Why does a negative bias give this perceptron wrong results?

Asked: 2018-08-26 23:57:00

Tags: python-3.x neural-network perceptron

I am learning neural networks from the book "Artificial Neural Networks: A Practical Course" and I am building a perceptron for one of the book's exercises. The book tells me to use a bias equal to -1, but when I do that I get wrong results for some inputs and I don't know why. When I change the bias to 1 or another positive number, I get the right answer for every input. I think it happens because this number is updated together with the weights on every iteration, which I thought should not happen, but it is happening. I would be very grateful if anyone could help me or try to help me, and sorry for my bad English. Here is my code, followed by a short sketch of the bias-as-a-weight rule for reference:

#!/usr/bin/python3

from random import uniform

input_train = [[-0.6508, 0.1097, 4.0009], [-1.4492, 0.8896, 4.4005], [2.0850, 0.6876, 12.0710], [0.2626, 1.1476, 7.7985], [0.6418, 1.0234, 7.0427],
               [0.2569, 0.6730, 8.3265], [1.1155, 0.6043, 7.4446], [0.0914, 0.3399, 7.0677], [0.0121, 0.5256, 4.6316], [-0.0429, 0.4660, 5.4323],
               [0.4340, 0.6870, 8.2287], [0.2735, 1.0287, 7.1934], [0.4839, 0.4851, 7.4850], [0.4089, -0.1267, 5.5019], [1.4391, 0.1614, 8.5843],
               [-0.9115, -0.1973, 2.1962], [0.3654, 1.0475, 7.4858], [0.2144, 0.7515, 7.1699], [0.2013, 1.0014, 6.5489], [0.6483, 0.2183, 5.8991],
               [-0.1147, 0.2242, 7.2435], [-0.7970, 0.8795, 3.8762], [-1.0625, 0.6366, 2.4707], [0.5307, 0.1285, 5.6883], [-1.2200, 0.7777, 1.7252],
               [0.3957, 0.1076, 5.6623], [-0.1013, 0.5989, 7.1812], [2.4482, 0.9455, 11.2095], [2.0149, 0.6192, 10.9263], [0.2012, 0.2611, 5.4631]]

output_train = [-1.0000, -1.0000, -1.0000, 1.0000, 1.0000,
                -1.0000, 1.0000, -1.0000, 1.0000, 1.0000,
                -1.0000, 1.0000, -1.0000, -1.0000, -1.000,
                -1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
                -1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
                -1.0000, -1.0000, 1.0000, -1.0000, 1.0000]

class perceptron:
    "Perceptron implementation: Hebb rule"

    def __init__(self, data, output, epoch, bias, learning_rate = 0.01):
        self.data = data
        self.output = output
        self.epoch = epoch
        self.bias = bias
        self.learning_rate = learning_rate
        self.n_samples = len(data)
        self.n_atributtes = len(data[0])
        self.weights = self.GenWeights()

    def train(self):
        for epoch in range(self.epoch):
            erro = False
            for row in range(self.n_samples):
                result = self.Predict(self.data[row])

                if result is not self.output[row]:
                    self.bias = self.UpdateBias(result, self.output[row])
                    for weights in range(self.n_atributtes):
                        self.weights[weights] = self.UpdateWeights(self.weights[weights], result, self.output[row], self.data[row][weights])
                    erro = True

            if not erro: break

        return

    def Predict(self, datarow):
        sum = self.bias
        for ele in range(self.n_atributtes):
            sum += datarow[ele] * self.weights[ele]
        return 1.0000 if sum >= 0.0 else -1.0000

    def UpdateWeights(self, weight, predict, expected, inputc):
        return weight + (((expected - predict) * self.learning_rate) * inputc)

    def UpdateBias(self, predict, expected):
        return self.bias + (((expected - predict) * self.learning_rate) * -1 )

    def GenWeights(self):
        weights = list()
        for atributtes in range(self.n_atributtes):
            weights.insert(atributtes, uniform(0, 1))
        return weights

    def NewEntry(self, newrow):
        sum = self.bias
        for ele in range(self.n_atributtes):
            sum += newrow[ele] * self.weights[ele]
        return 1.0000 if sum >= 0.0 else -1.0000

def main():

    first_neural_network = perceptron(input_train, output_train, 1000, -1)
    print("Initial Weights : {}" .format(first_neural_network.weights))
    first_neural_network.train()
    print("Trained Weights : {} and final bias : {} ".format(first_neural_network.weights, first_neural_network.bias))
    a = 0
    newlist = []
    while True:
        a = float(input("Enter With the first number : "))
        newlist.append(a)
        a = float(input("Enter with the second number : "))
        newlist.append(a)
        a = float(input("Enter with the third number : "))
        newlist.append(a)
        print(newlist)
        print("result is : {0}", format(first_neural_network.NewEntry(newlist)))
        newlist.clear()

    del first_neural_network
    return

if __name__ == "__main__":
    main()
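For reference, here is a minimal sketch (my own illustration, not the book's code) of the convention in which the threshold is treated as a weight whose input is fixed at -1: the prediction and the bias update then use the same sign, and the bias is updated with exactly the same rule as the other weights.

# Illustration only: perceptron step with the threshold treated as a weight
# whose input is fixed at -1, so prediction and bias update share one sign.
def predict_ref(weights, bias, x):
    activation = sum(w * xi for w, xi in zip(weights, x)) + bias * (-1)
    return 1.0 if activation >= 0.0 else -1.0

def update_ref(weights, bias, x, expected, predicted, lr=0.01):
    delta = lr * (expected - predicted)
    new_weights = [w + delta * xi for w, xi in zip(weights, x)]
    new_bias = bias + delta * (-1)  # same rule as the weights, input is -1
    return new_weights, new_bias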

1 answer:

Answer 0: (score: 0)

Modify your main function to compute the error rate:

def main():
    first_neural_network = perceptron(input_train, output_train, 1000, 1)
    print("Initial Weights : {}" .format(first_neural_network.weights))
    first_neural_network.train()
    print("Trained Weights : {} and final bias : {} ".format(
        first_neural_network.weights, first_neural_network.bias))
    # a = 0
    # newlist = []
    # while True:
    #     a = float(input("Enter With the first number : "))
    #     newlist.append(a)
    #     a = float(input("Enter with the second number : "))
    #     newlist.append(a)
    #     a = float(input("Enter with the third number : "))
    #     newlist.append(a)
    #     print(newlist)
    #     print("result is : {0}".format(
    #         first_neural_network.NewEntry(newlist)))
    #     newlist.clear()
    nb_error = 0
    for newlist, expected in zip(input_train, output_train):
        pred = first_neural_network.NewEntry(newlist)
        print('true: {:2f} pred: {:2f}'.format(expected, pred))
        nb_error += pred != expected
    print('error rate%:{:.2f}'.format(float(nb_error)/first_neural_network.n_samples))

    del first_neural_network
    return

In your configuration, the error rate is around 0.47. Tuning the bias does not play a crucial role in this metric. I think your data is not linearly separable, and you need a more complex model to improve accuracy.
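One way to sanity-check the separability claim is to fit an off-the-shelf linear classifier on the same training data and look at its training accuracy. A rough sketch, assuming scikit-learn is installed (its Perceptron class is only used as a reference here and is not part of the original program):

# Rough sketch: fit scikit-learn's Perceptron on the same data as a reference.
# Assumes scikit-learn is installed; not part of the original program.
from sklearn.linear_model import Perceptron

clf = Perceptron(max_iter=1000, tol=None)  # run the full 1000 epochs
clf.fit(input_train, output_train)

train_acc = clf.score(input_train, output_train)
print("reference Perceptron training accuracy: {:.2f}".format(train_acc))

A training accuracy of 1.00 here would mean the data is linearly separable; a value well below that is at least consistent with the reading that a single perceptron cannot fit it.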