This is a simple MLP with backpropagation that I wrote for binary image classification:
import numpy

class MLP:
    def __init__(self, size, epochs=1000, learning_rate=1):
        # One hidden layer of 3 units; the extra row holds the bias weights
        self.l1weights = numpy.random.random((size + 1, 3))
        self.l2weights = numpy.random.random(3)
        self.epochs = epochs
        self.learning_rate = learning_rate

    def predict(self, _input_):
        # Append bias at the beginning of the input
        l1output = self.sigmoid(numpy.dot(numpy.append([1], _input_), self.l1weights))
        l2output = self.sigmoid(numpy.dot(l1output, self.l2weights))
        return l1output, l2output

    def train(self, training_set, training_goal):
        for epoch in range(self.epochs):
            l1squared_error = 0
            l2squared_error = 0
            for set_index in range(training_goal.shape[0]):
                sample = training_set[set_index]
                l1output, l2output = self.predict(sample)
                # Output error, backpropagated to the hidden layer
                l2error = training_goal[set_index] - l2output
                l1error = l2error * self.dsigmoid(l2output) * self.l2weights
                # Update the layer-1 bias weights
                self.l1weights[0] = self.l1weights[0] + self.learning_rate * l1error
                # Update the remaining layer-1 weights, then the layer-2 weights
                for index in range(len(self.l1weights) - 1):
                    self.l1weights[index + 1] += self.learning_rate * l1error * self.dsigmoid(l1output)
                for index in range(len(self.l2weights)):
                    self.l2weights[index] += self.learning_rate * l2error * self.dsigmoid(l2output)
                l1squared_error += sum(l1error ** 2)
                l2squared_error += l2error ** 2
            print("Squared error at epoch " + str(epoch) + " : "
                  + str(l1squared_error) + ", " + str(l2squared_error))

    def sigmoid(self, _input_):
        # Sigmoid activation function
        return 1 / (1 + numpy.exp(-_input_))

    def dsigmoid(self, _input_):
        # Derivative of the sigmoid, taking the sigmoid's output as input
        return _input_ * (1 - _input_)
When run, sometimes all outputs converge to 1; but for some reason the predictions for 0 converge toward 0.5 while the predictions for 1 stay around 0.75, and in the relatively more successful runs the layer-2 error stops changing after ~1000 epochs. I am testing it on 2x2 image classification with the following code:
def image_class(pixel_count):
    return 1 if pixel_count >= 2 else 0

training_set = ((numpy.arange(2**4)[:, None] & (1 << numpy.arange(4))) != 0)
training_goals = numpy.array([image_class(sum(i)) for i in training_set])

mlp = MLP(size=4)
mlp.train(training_set, training_goals)
Answer (score 0):
I was able to get past this, at least for the 2x2 recognition case, by adding a step activation after the output layer instead of the sigmoid, and training that step separately from the initial network.
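As a rough illustration of that idea, here is a minimal sketch, assuming the trained mlp, training_set, and training_goals from the question. The fit_step_threshold helper and the grid of candidate cutoffs are my own hypothetical reading of what "training the step separately" could mean; they are not given in the answer:

import numpy

def fit_step_threshold(mlp, training_set, training_goals):
    # "Train" the step stage separately from the network: scan candidate
    # cutoffs over the sigmoid outputs and keep the one with the best
    # training accuracy. (Hypothetical helper, not from the original answer.)
    outputs = numpy.array([mlp.predict(sample)[1] for sample in training_set])
    candidates = numpy.linspace(0, 1, 101)
    accuracies = [numpy.mean((outputs >= t) == training_goals) for t in candidates]
    return candidates[numpy.argmax(accuracies)]

def classify(mlp, _input_, threshold):
    # Step activation appended after the output layer: hard 0/1 decision
    return 1 if mlp.predict(_input_)[1] >= threshold else 0

threshold = fit_step_threshold(mlp, training_set, training_goals)
print([classify(mlp, sample, threshold) for sample in training_set])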