I implemented the BP (backpropagation) algorithm. I tested it, and after training the network still produces wrong outputs. So where did I go wrong?
double OpenNNL::_changeWeightsByBP(double * trainingInputs, double *trainingOutputs, double speed, double sample_weight)
{
    double * localGradients = new double[_neuronsCount];
    double * outputs = new double[_neuronsCount];
    double * derivatives = new double[_neuronsCount];

    // forward pass: compute each neuron's output and activation derivative
    calculateNeuronsOutputsAndDerivatives(trainingInputs, outputs, derivatives);

    // output layer: local gradient is the output error (target - actual)
    for(int j=0;j<_neuronsPerLayerCount[_layersCount-1];j++)
    {
        localGradients[indexByLayerAndNeuron(_layersCount-1, j)] = trainingOutputs[j] - outputs[indexByLayerAndNeuron(_layersCount-1, j)];
    }

    // hidden layers: propagate the local gradients backwards through the weights
    if(_layersCount > 1)
    {
        for(int i=_layersCount-2;i>=0;i--)
        {
            for(int j=0;j<_neuronsPerLayerCount[i];j++)
            {
                localGradients[indexByLayerAndNeuron(i, j)] = 0;

                for(int k=0;k<_neuronsPerLayerCount[i+1];k++)
                {
                    localGradients[indexByLayerAndNeuron(i, j)] += _neuronsInputsWeights[indexByLayerNeuronAndInput(i+1, k, j)]
                            * localGradients[indexByLayerAndNeuron(i+1, k)];
                }
            }
        }
    }

    // first layer: update weights against the raw training inputs
    for(int j=0;j<_neuronsPerLayerCount[0];j++)
    {
        for(int k=0;k<_inputsCount;k++)
        {
            _neuronsInputsWeights[indexByLayerNeuronAndInput(0, j, k)] += speed * localGradients[indexByLayerAndNeuron(0, j)]
                    * derivatives[indexByLayerAndNeuron(0, j)] * trainingInputs[k];
        }
    }

    // remaining layers: update each neuron's incoming weights
    for(int i=1;i<_layersCount;i++)
    {
        for(int j=0;j<_neuronsPerLayerCount[i];j++)
        {
            for(int k=0;k<_neuronsPerLayerCount[i-1];k++)
            {
                _neuronsInputsWeights[indexByLayerNeuronAndInput(i, j, k)] += speed * localGradients[indexByLayerAndNeuron(i, j)]
                        * derivatives[indexByLayerAndNeuron(i, j)] * outputs[indexByLayerAndNeuron(i, j)];
            }
        }
    }

    delete[] localGradients;
    delete[] outputs;
    delete[] derivatives;
}
Also, how should I compute the network's output error so that I can stop the training process?
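For context, a common stopping criterion (not something taken from OpenNNL itself) is the mean squared error over the whole training set after each epoch, with training stopped once it falls below a tolerance or stops improving. A minimal self-contained sketch; the function name and the flat sample-by-sample layout of the arrays are assumptions for illustration:

#include <cstddef>

// Hypothetical helper: mean squared error over one epoch.
// "targets" and "actuals" each hold outputsCount values per sample, sample by sample.
double meanSquaredError(const double *targets, const double *actuals,
                        std::size_t samplesCount, std::size_t outputsCount)
{
    double sum = 0.0;
    for (std::size_t s = 0; s < samplesCount; ++s)
        for (std::size_t o = 0; o < outputsCount; ++o)
        {
            double diff = targets[s * outputsCount + o] - actuals[s * outputsCount + o];
            sum += diff * diff;
        }
    return sum / static_cast<double>(samplesCount * outputsCount);
}

// A training loop could then look roughly like:
// while (epoch < maxEpochs && meanSquaredError(...) > 1e-4) { /* call _changeWeightsByBP per sample */ }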
And how should I update the neurons' biases?
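In standard backpropagation the bias behaves like a weight whose input is fixed at 1, so its update uses the same local gradient and derivative as the weight update but without an input factor. A minimal sketch under that assumption; none of these names come from OpenNNL:

// Hypothetical sketch: bias update for one neuron, mirroring the weight update
// with an implicit input of 1.
inline void updateBias(double &bias, double localGradient, double derivative, double speed)
{
    bias += speed * localGradient * derivative;
}

Inside _changeWeightsByBP this would be applied once per neuron, next to the weight updates, using the same localGradients[...] and derivatives[...] entries.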
If you need it, my full code is at https://github.com/NicholasShatokhin/OpenNNL.
Answer (score 0):
I found the problem. In the last loop, instead of outputs[indexByLayerAndNeuron(i, j)] I should have written outputs[indexByLayerAndNeuron(i-1, k)].
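For reference, this is the last loop from the question with only that index corrected: the factor multiplying the gradient must be the weight's actual input, i.e. the k-th output of the previous layer (i-1), not the updated neuron's own output.

// corrected last loop of _changeWeightsByBP
for(int i=1;i<_layersCount;i++)
    for(int j=0;j<_neuronsPerLayerCount[i];j++)
        for(int k=0;k<_neuronsPerLayerCount[i-1];k++)
            _neuronsInputsWeights[indexByLayerNeuronAndInput(i, j, k)] += speed
                    * localGradients[indexByLayerAndNeuron(i, j)]
                    * derivatives[indexByLayerAndNeuron(i, j)]
                    * outputs[indexByLayerAndNeuron(i-1, k)];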