I've implemented a neural network class from scratch — it always has exactly one hidden layer, and it uses no libraries, not even numpy. I've done everything the way I understand it should be done, but it simply isn't learning; in fact the loss keeps increasing, and even after looking at many examples online I can't find where I've gone wrong.
Here is my MLP class and a demo of it trying to learn the XOR function:
import random
from math import exp

class MLP:
    def __init__(self, numInputs, numHidden, numOutputs):
        # MLP architecture sizes
        self.numInputs = numInputs
        self.numHidden = numHidden
        self.numOutputs = numOutputs
        # MLP weights
        self.IH_weights = [[random.random() for i in range(numHidden)] for j in range(numInputs)]
        self.HO_weights = [[random.random() for i in range(numOutputs)] for j in range(numHidden)]
        # Gradients corresponding to weight matrices, computed during backprop
        self.IH_gradients = [[0 for i in range(numHidden)] for j in range(numInputs)]
        self.HO_gradients = [[0 for i in range(numOutputs)] for j in range(numHidden)]
        # Input, hidden and output neuron values
        self.I = None
        self.H = [0 for i in range(numHidden)]
        self.O = [0 for i in range(numOutputs)]
        self.H_deltas = [0 for i in range(numHidden)]
        self.O_deltas = [0 for i in range(numOutputs)]

    # Sigmoid
    def activation(self, x):
        return 1 / (1 + exp(-x))

    # Derivative of sigmoid (expects the already-activated value)
    def activationDerivative(self, x):
        return x * (1 - x)

    # Squared error
    def calculateError(self, prediction, label):
        return (prediction - label) ** 2

    def forward(self, input):
        self.I = input
        for i in range(self.numHidden):
            for j in range(self.numInputs):
                self.H[i] += self.I[j] * self.IH_weights[j][i]
            self.H[i] = self.activation(self.H[i])
        for i in range(self.numOutputs):
            for j in range(self.numHidden):
                self.O[i] += self.activation(self.H[j] * self.HO_weights[j][i])
            self.O[i] = self.activation(self.O[i])
        return self.O

    def backwards(self, label):
        if label != list:
            label = [label]
        error = 0
        for i in range(self.numOutputs):
            neuronError = self.calculateError(self.O[i], label[i])
            error += neuronError
            self.O_deltas[i] = neuronError * self.activationDerivative(self.O[i])
            for j in range(self.numHidden):
                self.HO_gradients[j][i] += self.O_deltas[i] * self.H[j]
        for i in range(self.numHidden):
            neuronError = 0
            for j in range(self.numOutputs):
                neuronError += self.HO_weights[i][j] * self.O_deltas[j]
            self.H_deltas[i] = neuronError * self.activationDerivative(self.H[i])
            for j in range(self.numInputs):
                self.IH_gradients[j][i] += self.H_deltas[i] * self.I[j]
        return error

    def updateWeights(self, learningRate):
        for i in range(self.numInputs):
            for j in range(self.numHidden):
                self.IH_weights[i][j] += learningRate * self.IH_gradients[i][j]
        for i in range(self.numHidden):
            for j in range(self.numOutputs):
                self.HO_weights[i][j] += learningRate * self.HO_gradients[i][j]
        # Reset accumulated gradients after each update
        self.IH_gradients = [[0 for i in range(self.numHidden)] for j in range(self.numInputs)]
        self.HO_gradients = [[0 for i in range(self.numOutputs)] for j in range(self.numHidden)]

data = [
    [[0, 0], 0],
    [[0, 1], 1],
    [[1, 0], 1],
    [[1, 1], 0]
]

mlp = MLP(2, 5, 1)
for epoch in range(100):
    epochError = 0
    for i in range(len(data)):
        mlp.forward(data[i][0])
        epochError += mlp.backwards(data[i][1])
    print(epochError / len(data))
    mlp.updateWeights(0.001)
Answer 0 (score: 1)
If I understand your implementation correctly, then I think your problem is in the calculation of the weight updates in the backwards function: the update should be the error (not the error squared) multiplied by the sigmoid derivative, so I would take a look at / redo that calculation.
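A minimal sketch of that fix, assuming the posted class: the output delta uses the error derivative, (prediction - label), rather than the squared error itself (the constant factor of 2 from differentiating (prediction - label)**2 can be folded into the learning rate). The output-layer loop in backwards() would become something like:

    for i in range(self.numOutputs):
        error += self.calculateError(self.O[i], label[i])
        # use the error derivative (prediction - label), not the squared error
        self.O_deltas[i] = (self.O[i] - label[i]) * self.activationDerivative(self.O[i])
        for j in range(self.numHidden):
            self.HO_gradients[j][i] += self.O_deltas[i] * self.H[j]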
Answer 1 (score: 1)
How did you go about this? I showed it to a friend — we both found your goal of writing the algorithm without much abstraction instructive, even though it made the mistake hard to find.
The improvement he found is that updateWeights needs to be a negative feedback loop, so changing "+=" to "-=" on two lines gives:
self.IH_weights[i][j] -= learningRate * self.IH_gradients[i][j]
and
self.HO_weights[i][j] -= learningRate * self.HO_gradients[i][j]
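For reference, this is just the standard gradient-descent update rule — each weight moves against the gradient of the error, with \eta the learning rate:

    w_{ij} \leftarrow w_{ij} - \eta \frac{\partial E}{\partial w_{ij}}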
The other factor was raising the learning rate. With those changes the error dropped to about 16% (for me — I may have made another change I'm not seeing), then started climbing back up to 27% — probably overtraining caused by a learning rate that is too high.
I made my learning rate depend on the epoch:
mlp.updateWeights(0.1/(0.01 * (epoch+1)))
and with that the error steadily decreases and settles at 0.161490...
But if you get the predictions out of forward, it always predicts about 0.66 — the input has been washed out. So... that's bad.
- Input Data: [0, 0] | Prediction: [0.6610834017294481] | Truth: 0
- Input Data: [0, 1] | Prediction: [0.6616502691118376] | Truth: 1
- Input Data: [1, 0] | Prediction: [0.6601936411430607] | Truth: 1
- Input Data: [1, 1] | Prediction: [0.6596122207209283] | Truth: 0
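A possible reason for the washed-out predictions, judging from the posted code: forward() never resets self.H and self.O to zero between calls, so activations accumulate across samples and drown out the input, and the hidden-to-output loop applies the sigmoid twice (once per term inside the sum, and again on the total). A corrected sketch of forward() under that reading:

    def forward(self, input):
        self.I = input
        for i in range(self.numHidden):
            # accumulate into a fresh total instead of the stale self.H[i]
            total = 0
            for j in range(self.numInputs):
                total += self.I[j] * self.IH_weights[j][i]
            self.H[i] = self.activation(total)
        for i in range(self.numOutputs):
            total = 0
            for j in range(self.numHidden):
                # sum the raw weighted inputs; apply the sigmoid once, below
                total += self.H[j] * self.HO_weights[j][i]
            self.O[i] = self.activation(total)
        return self.O

Note also that the posted network has no bias terms, which can make XOR hard to fit even once the other issues are corrected.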