I created a neural network with newff in Matlab for handwritten digit recognition.
I have trained it to recognize only the values 0 & 1 from images.
It has 3 layers: the input layer has 9 neurons, the hidden layer has 5 neurons, and the output layer has 1 neuron; the network takes 9 inputs.
My outputs are 0.1 & 0.2, and the transfer function of all layers is "tansig".
I tested it in Matlab and the network works fine. Now I want to build this network in C++. I wrote the code and copied over all the weights and biases (146 weights in total), but when I feed the same input data into the network, the output values are not correct.
Can any of you guide me?
Here is my network code:
public class Neuron
{
public Neuron()
{ }
public Neuron(int SumOfInputs)
{
m_SumOfInputs = SumOfInputs;
}
public double act(double[] Input, double[] weight, double bias)
{
double tmp = bias;
for (int i = 0; i < m_SumOfInputs; i++)
tmp += (Input[i] * weight[i]);
m_output = 1.0 / (1.0 + Math.Exp(-tmp));
return m_output;
}
public double m_output;
private int m_SumOfInputs;
};
public class Net
{
public Net()
{
int i;
//net1 , net2
//initializing inputLayer Neurons
for (i = 0; i < 9; i++)
InputLayer[i] = new Neuron(9);
//initializing HiddenLayer Neurons
for (i = 0; i < 5; i++)
HiddenLayer[i] = new Neuron(9);
//initializing OutputLayer
OutputLayer = new Neuron(5);
}
public double Calculate(double[] inputs)
{
double[] ILay_Outputs = new double[9];
double[] HLay_Outputs = new double[5];
//inputLayer acting
ILay_Outputs[0] = InputLayer[0].act(inputs, IW1, Ib[0]);
ILay_Outputs[1] = InputLayer[1].act(inputs, IW2, Ib[1]);
ILay_Outputs[2] = InputLayer[2].act(inputs, IW3, Ib[2]);
ILay_Outputs[3] = InputLayer[3].act(inputs, IW4, Ib[3]);
ILay_Outputs[4] = InputLayer[4].act(inputs, IW5, Ib[4]);
ILay_Outputs[5] = InputLayer[5].act(inputs, IW6, Ib[5]);
ILay_Outputs[6] = InputLayer[6].act(inputs, IW7, Ib[6]);
ILay_Outputs[7] = InputLayer[7].act(inputs, IW8, Ib[7]);
ILay_Outputs[8] = InputLayer[8].act(inputs, IW9, Ib[8]);
//HiddenLayer acting
HLay_Outputs[0] = HiddenLayer[0].act(ILay_Outputs, HW1, Hb[0]);
HLay_Outputs[1] = HiddenLayer[1].act(ILay_Outputs, HW2, Hb[1]);
HLay_Outputs[2] = HiddenLayer[2].act(ILay_Outputs, HW3, Hb[2]);
HLay_Outputs[3] = HiddenLayer[3].act(ILay_Outputs, HW4, Hb[3]);
HLay_Outputs[4] = HiddenLayer[4].act(ILay_Outputs, HW5, Hb[4]);
//OutputLayer acting
OutputLayer.act(HLay_Outputs, OW, Ob);
return OutputLayer.m_output;
}
//variables
Neuron[] InputLayer = new Neuron[9];
Neuron[] HiddenLayer = new Neuron[5];
Neuron OutputLayer;
//net2 tansig tansig tansig
double[] IW1 = { 0.726312035124743, 1.01034015912570, 0.507178716484559, -0.254689455765290, 0.475299816659036, 0.0336358919735363, -0.715890843015230, 0.466632424349648, 0.565406467159982 };
double[] IW2 = { 0.866482591050076, -0.672473224929341, 0.915599891389326, 0.310163265280920, -0.373812653648686, -0.0859927887021936, 0.0100063635393257, 0.816638798257382, -0.540771172965867 };
double[] IW3 = { 0.138868216294952, 1.93121321568871, -0.564704445249800, 0.834275586326333, 3.08348295981989, 0.899715248285303, -0.661916798988641, 6.00562393127300, 6.11939776912678 };
double[] IW4 = { 0.578089791487308, 0.885170493965113, -0.992514702569606, 0.415980526304333, -0.706140252063166, 0.442017877881589, -0.449053823645690, -0.0894051386719344, -0.348622179369911 };
double[] IW5 = { -0.407756482945129, 0.0786764402198765, 0.972408690276837, -0.959955597431701, -0.977769442966978, 1.52121267506016, 0.503296357838885, -3.31593633455649, -3.47834004737816 };
double[] IW6 = { -1.17474983226852, 0.870140308892922, 1.50545637070446, 0.369712493398677, -0.569857993006262, -0.732502911495791, -0.668984976457441, -1.48023312055586, -0.893472571240467 };
double[] IW7 = { -0.860518592120001, -1.48432158859269, 0.957060799463945, -0.680797771869510, -0.270752283410268, -0.218766920514208, 0.168091770241510, -2.50326075864844, -0.800988078966455 };
double[] IW8 = { 0.436492138260917, 0.280081066366966, 0.484813099857825, -0.310693876078844, 1.60359045377467, 1.57343220231689, -1.21552190886612, 2.03276547165735, 1.27245062411707 };
double[] IW9 = { 1.66853306274827, -1.59142022586958, 0.862315766588855, 0.676048095028997, -2.22623540036057, -1.48036066273542, -0.0386781503608105, -5.18214728910353, -5.21258509200432 };
double[] HW1 = { 0.577543862468449, 0.452264642610010, -0.869014797322399, 0.122435296258077, 0.507631314535324, 0.0386430216115630, -0.398222802253669, -0.614601040619812, 1.43324133164016 };
double[] HW2 = { 0.163344332215885, 0.434728230081814, -3.04877964757120, -0.118300732191499, -2.63220585865390, 0.443163977179405, -2.11883915836372, 2.07955461474729, -3.94441429060856 };
double[] HW3 = { -0.156103043064606, -0.482049683802527, 1.24788068138172, -1.05731056687422, -0.615321348655331, 0.214815967784408, 0.375762477817552, -0.728649292060764, -0.212151944122515 };
double[] HW4 = { 1.78276088127139, 1.15086535250306, 1.25967219208841, -0.446026243031773, -3.94742837475153, -1.33311929047378, -2.09356929069216, 0.0736879745054291, 1.51472991137144 };
double[] HW5 = { 0.744372844550077, 0.400815326319268, -4.94686055701529, 0.444773365537176, 2.65351865321717, 1.87143709824455, 1.74346707204902, -3.28220218001754, 5.78321274609173 };
double[] OW = { -1.09112204235009, -7.13508015318964, -1.02533926874837, 3.80439015418632, -4.16711367340349 };
double[] Ib = {-1.77988445077976,
-1.37323967952292,
-0.547465218997906,
0.331535304175263,
-0.0167810612906040,
0.734128501831859,
-0.543321122358485,
-1.13525462762255,
1.82870615182942};
double[] Hb = {1.68321697741393,
-0.862080862212137,
-0.536310792063381,
-0.772019935790668,
1.51470472867250};
double Ob = -0.156343477742835;
};
Thanks.
Arta.
Answer 0 (score: 2)
You mention in your description that you want to use the tansig activation function, but in your code you have implemented the logsig activation function. Tansig can be computed as:
2/(1+Math.Exp(-2*tmp))-1
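As a minimal sketch (assuming the rest of the Neuron class stays exactly as posted), the act method with tansig instead of logsig could look like this:

public double act(double[] Input, double[] weight, double bias)
{
    double tmp = bias;
    for (int i = 0; i < m_SumOfInputs; i++)
        tmp += Input[i] * weight[i];
    // tansig instead of logsig: maps the weighted sum into (-1, 1)
    m_output = 2.0 / (1.0 + Math.Exp(-2.0 * tmp)) - 1.0;
    return m_output;
}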
I am also not sure how you obtained weights for the input layer; those are probably the hidden layer's weights. Since the inputs connect directly to the first (hidden) layer, Matlab does not generate weights for the input layer: net.IW holds the weights of the first (hidden) layer, and the weights of the subsequent layers (including the output layer) are given by net.LW.
Apart from the above, I don't see any obvious bug in your code. Perhaps try a simpler network first and train it on the good old XOR relation.
Finally, I'd like to mention that if you are writing this code for a microcontroller, it is easier to implement in plain C without objects; your code will be smaller and faster. A step-by-step example is here.
Answer 1 (score: 0)
I found the problem, guys.
In Matlab, before the inputs are fed into the network, they are passed through the function applyminmax in mapminmax.m, and that function's output is what the network actually receives.
After the network simulation is done, the output is passed through the reverse function in the same .m file. That function's output is the final output of the neural network.
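For reference, Matlab's mapminmax by default rescales each input linearly from its training range [xmin, xmax] to [-1, 1], and the reverse step undoes that mapping on the output. A minimal sketch of this pre/post-processing in the same C# style (the xmin/xmax and tmin/tmax values here are hypothetical placeholders; the real values come from the mapminmax settings saved when the network was trained in Matlab):

// Hypothetical helper: replicates mapminmax('apply', x, ps) with ymin = -1, ymax = 1.
// xmin/xmax are the per-input ranges taken from the Matlab training data.
public static double[] ApplyMinMax(double[] x, double[] xmin, double[] xmax)
{
    double[] y = new double[x.Length];
    for (int i = 0; i < x.Length; i++)
        y[i] = 2.0 * (x[i] - xmin[i]) / (xmax[i] - xmin[i]) - 1.0;
    return y;
}

// Hypothetical helper: replicates mapminmax('reverse', y, ps) for the single network output.
public static double ReverseMinMax(double y, double tmin, double tmax)
{
    return (y + 1.0) * (tmax - tmin) / 2.0 + tmin;
}

With these helpers, the call would look like ReverseMinMax(net.Calculate(ApplyMinMax(rawInput, xmin, xmax)), tmin, tmax).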
Thanks for all your help.
Arta.