Incorrect learning with a single-layer perceptron

Date: 2016-10-01 21:13:01

Tags: java neural-network artificial-intelligence perceptron

I need help with my single-layer perceptron. I use the sigmoid as the transfer function and the backpropagation algorithm for learning. I want to build a simple neural network that computes A AND B (logical AND). My problem is that after training, when I feed it two values (for example 0 and 0), my AI always answers 0.99. I have read through the code three times and I cannot see why my program gives the wrong answer after training. Please help me.

Neuron.java:

public class Neuron {
  public double value;
  public double[] weights;
  public double bias;
  public double deltas;

public Neuron(int nb_entree){
    weights = new double[nb_entree];

    value =  Math.random() / 10000000000000.0;
    bias = Math.random() / 10000000000000.0;
    deltas = Math.random() / 10000000000000.0;

    for(int i = 0 ; i < weights.length ; i++){
        weights[i] = Math.random() / 10000000000000.0;
    }
}

/**
 * Evaluates the neuron with a sigmoid activation function.
 * @param input : the list of input values
 * @return the result of the sigmoid function
 */
public double evaluate(double[] input){
    double x = 0.0;

    for(int i = 0 ; i < input.length ; i++){
        x += input[i] * weights[i];
    }
    x += bias;

    value = 1 / (1 + Math.pow(Math.E, x));

    return value;
}

//Resets the neuron's value to zero
protected void delete(){
    value = 0.0;
}
}

NeuralNetwork.java:

public class NeuralNetwork {
  public Neuron[] neurons_hidden;
  public Neuron[] neurons_output;
  public double rate_learning;
  public int nb_hidden;
  public int nb_output;

public NeuralNetwork(int nb_input, int nb_hid, int nb_out, double rate){
    nb_hidden = nb_hid;
    nb_output = nb_out;
    rate_learning = rate;

    neurons_hidden = new Neuron[nb_hidden];
    neurons_output = new Neuron[nb_output];

    //Create hidden neurons
    for(int i = 0 ; i < nb_hidden ; i++){
        neurons_hidden[i] = new Neuron(nb_input);
    }

    //Create output neurons
    for(int i = 0 ; i < nb_output ; i++){
        neurons_output[i] = new Neuron(nb_hidden);
    }
}

public double[] evaluate(double[] input){
    double[] output_hidden = new double[nb_hidden];
    double[] outputs = new double[nb_output];

    //Reset the values of the hidden neurons
    for(Neuron n : neurons_hidden){
        n.delete();
    }

    //Reset the values of the output neurons
    for(Neuron n : neurons_output){
        n.delete();
    }

    //For each hidden neuron
    for(int i = 0 ; i < nb_hidden ; i++){
        output_hidden[i] = neurons_hidden[i].evaluate(input);   
    }

    //For each output neuron
    for(int i = 0 ; i < nb_output ; i++){
        outputs[i] = neurons_output[i].evaluate(output_hidden);
    }

    return outputs;
}


public double backPropagate(double[] input, double[] output){

    double[] output_o = evaluate(input);
    double error;
    int i;
    int k;

    //For each output neuron, compute its delta
    for(i = 0 ; i < nb_output ; i++){
            error = output[i] - output_o[i];
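            //output_o[i] - output_o[i]^2 equals sigmoid * (1 - sigmoid), the derivative of the sigmoid at this output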
            neurons_output[i].deltas = error * (output_o[i] - Math.pow(output_o[i], 2));
    }

    //For each hidden neuron, compute its delta
    for(i = 0 ; i < nb_hidden ; i++){
        error = 0.0;
        for(k = 0 ; k < nb_output ; k++){
            error += neurons_output[k].deltas * neurons_output[k].weights[i];
        }
        neurons_hidden[i].deltas = error * (neurons_hidden[i].value - Math.pow(neurons_hidden[i].value, 2));            
    }


    //For each output neuron, update its weights and bias
    for(i = 0 ; i < nb_output ; i++){
        for(k = 0 ; k <  nb_hidden ; k++){
            neurons_output[i].weights[k] += rate_learning * 
                                                neurons_output[i].deltas * 
                                                    neurons_hidden[k].value;
        }
        neurons_output[i].bias += rate_learning * neurons_output[i].deltas;
    }


    //For each hidden neuron, update its weights and bias
    for(i = 0 ; i < nb_hidden ; i++){
        for(k = 0 ; k < input.length ; k++){
            neurons_hidden[i].weights[k] += rate_learning * neurons_hidden[i].deltas * input[k];        
        }
        neurons_hidden[i].bias += rate_learning * neurons_hidden[i].deltas;
    }

    error = 0.0;
    for(i = 0 ; i < output.length ; i++){
        error += Math.abs(output_o[i] - output[i]);
    }

    error = error / output.length;

    return error;
}
}

Test.java:

public class Test {

public static void main(String[] args) {

    NeuralNetwork net = new NeuralNetwork(2, 2, 1, 0.6);

    /* Learning */
    for(int i = 0 ; i < 10000 ; i++)
    {   
        double[] inputs = new double[]{Math.round(Math.random()), Math.round(Math.random())};
        double[] output = new double[1];
        double error;

        if((inputs[0] == inputs[1]) && (inputs[0] == 1))
            output[0] = 1.0;
        else
            output[0] = 0.0;

        System.out.println(inputs[0]+" and "+inputs[1]+" = "+output[0]);

        error = net.backPropagate(inputs, output);
        System.out.println("Error at step "+i+" is "+error);
    }

    System.out.println("Learning finish!");

    /* Test */
    double[] inputs = new double[]{0.0, 0.0};
    double[] output = net.evaluate(inputs);

    System.out.println(inputs[0]+" and "+inputs[1]+" = "+output[0]+""); 
}
}

Thanks for helping me.

1 answer:

Answer 0 (score: 0):

Your sigmoid function is incorrect. It needs a negative t:

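A minimal sketch of the corrected evaluate method, assuming the only change is negating the exponent of the sigmoid:

public double evaluate(double[] input){
    double x = 0.0;

    for(int i = 0 ; i < input.length ; i++){
        x += input[i] * weights[i];
    }
    x += bias;

    //sigmoid(x) = 1 / (1 + e^(-x)): note the negated exponent
    value = 1 / (1 + Math.exp(-x));

    return value;
}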

I am not sure whether that is the only error.

Also, for the conjunction "and" you only need one layer.
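To illustrate why one layer is enough (the weights below are hand-picked for illustration, not taken from the post): AND is linearly separable, so a single sigmoid neuron with suitable weights already computes it.

public class AndDemo {
    public static void main(String[] args) {
        //Hand-picked weights; AND is linearly separable, so one neuron suffices
        double w1 = 10.0, w2 = 10.0, bias = -15.0;
        for (double a : new double[]{0.0, 1.0}) {
            for (double b : new double[]{0.0, 1.0}) {
                double x = w1 * a + w2 * b + bias;
                double out = 1 / (1 + Math.exp(-x)); //close to 0 except for 1 and 1 (about 0.99)
                System.out.println(a + " and " + b + " = " + out);
            }
        }
    }
}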

Finally, you handle the bias separately in your backpropagation method. This can be simplified by adding an input node that has the constant 1 as its input and the bias as its weight. See https://en.wikipedia.org/wiki/Perceptron#Definitions
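A sketch of that simplification (the class and helper below are hypothetical, not part of the posted code): append a constant 1 to every input vector and treat the bias as one more weight, so the same update loop adjusts it.

public class BiasAsWeight {
    //Extends the input with a constant 1 so that the last weight plays the role of the bias
    static double[] withBiasInput(double[] input) {
        double[] extended = new double[input.length + 1];
        System.arraycopy(input, 0, extended, 0, input.length);
        extended[input.length] = 1.0;
        return extended;
    }

    public static void main(String[] args) {
        double[] weights = {0.3, -0.2, 0.5}; //the last entry is the former bias
        double[] x = withBiasInput(new double[]{1.0, 0.0});

        double sum = 0.0;
        for (int i = 0; i < x.length; i++) {
            sum += x[i] * weights[i]; //no separate "+ bias" term needed
        }
        System.out.println(1 / (1 + Math.exp(-sum)));
    }
}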