如何用MLP进行汽车评估?

时间:2016-07-02 17:02:42

标签: java machine-learning neural-network perceptron

我想根据资格和价格对汽车进行分类。我打算用MLP来做,但除了XOR示例之外找不到任何其他示例。我有6个属性,我把它们以独热编码(one-hot)的方式转换成形如[1,0,0,0]的double数组。(这些属性在我链接的UCI数据集中。)

下面是我的MLP代码。我想用UCI数据集来训练它,该如何把数据集适配到这份代码上?

编辑:让我更清楚一点,我不是说除了XOR问题之外没有其他任何例子。我的意思是我需要一个输入集的例子,而不是像[1,0]我需要超过2个输入。

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;



/**
 * A fully connected multi-layer perceptron trained with on-line
 * back-propagation (weights are updated after every single training sample).
 *
 * Layer 0 is the input layer: its neurons only hold values and have no
 * incoming weights. Every other layer is fully connected to the previous one.
 */
public class MultiLayerPerceptron implements Cloneable, Serializable
{
// Required by save()/load(): without Serializable, writeObject(this) always
// threw NotSerializableException, which was swallowed by the catch block, so
// save() silently returned false on every call.
// NOTE(review): Layer, Neuron and TransferFunction must be Serializable as
// well for serialization to actually succeed — confirm in their definitions.
private static final long serialVersionUID = 1L;

protected double            fLearningRate = 0.6;
protected Layer[]           fLayers;
protected TransferFunction  fTransferFunction;


/**
 * Builds the network topology.
 *
 * @param layers       neuron count per layer, e.g. {6, 4, 4} means 6 inputs,
 *                     one hidden layer of 4 neurons and 4 outputs
 * @param learningRate step size used by {@link #backPropagate}
 * @param fun          activation function applied to every hidden/output neuron
 */
public MultiLayerPerceptron(int[] layers, double learningRate, TransferFunction fun)
{
    fLearningRate = learningRate;
    fTransferFunction = fun;

    fLayers = new Layer[layers.length];

    for(int i = 0; i < layers.length; i++)
    {
        // The input layer (i == 0) has no incoming weights.
        fLayers[i] = new Layer(layers[i], i == 0 ? 0 : layers[i - 1]);
    }
}


/**
 * Forward pass: feeds {@code input} through the network and returns the
 * activations of the output layer.
 *
 * @param input one value per input neuron; length must equal
 *              {@link #getInputLayerSize()}
 * @return output layer activations, length {@link #getOutputLayerSize()}
 * @throws IllegalArgumentException if the input length does not match the
 *         input layer size (previously a too-short array caused an
 *         ArrayIndexOutOfBoundsException and a too-long one was silently
 *         truncated)
 */
public double[] execute(double[] input)
{
    if (input.length != fLayers[0].Length)
    {
        throw new IllegalArgumentException(
                "expected " + fLayers[0].Length + " inputs, got " + input.length);
    }

    Layer outputLayer = fLayers[fLayers.length - 1];
    double[] output = new double[outputLayer.Length];

    // Load the sample into the input layer.
    for(int i = 0; i < fLayers[0].Length; i++)
    {
        fLayers[0].Neurons[i].Value = input[i];
    }

    // Propagate through hidden and output layers: weighted sum + bias,
    // squashed by the transfer function.
    for(int k = 1; k < fLayers.length; k++)
    {
        for(int i = 0; i < fLayers[k].Length; i++)
        {
            double sum = fLayers[k].Neurons[i].Bias;
            for(int j = 0; j < fLayers[k - 1].Length; j++)
            {
                sum += fLayers[k].Neurons[i].Weights[j] * fLayers[k - 1].Neurons[j].Value;
            }
            fLayers[k].Neurons[i].Value = fTransferFunction.evalute(sum);
        }
    }

    // Copy the output layer activations out.
    for(int i = 0; i < output.length; i++)
    {
        output[i] = outputLayer.Neurons[i].Value;
    }

    return output;
}


/**
 * Not implemented. The original stub silently returned 0.0, which made any
 * caller believe the network had been trained to zero error while in fact
 * nothing happened at all. Failing loudly is safer than a no-op trainer.
 *
 * @throws UnsupportedOperationException always
 */
public double backPropagateMultiThread(double[] input, double[] output, int nthread)
{
    throw new UnsupportedOperationException(
            "multi-threaded back-propagation is not implemented; use backPropagate");
}


/**
 * One on-line back-propagation step: forward pass on {@code input}, then
 * gradient update of every weight and bias towards {@code output}.
 *
 * @param input  training sample, length {@link #getInputLayerSize()}
 * @param output expected (target) values, length {@link #getOutputLayerSize()}
 * @return mean absolute error between the network output and the target,
 *         computed BEFORE the weight update
 */
public double backPropagate(double[] input, double[] output)
{
    double[] new_output = execute(input);
    double error;

    // Output layer deltas: (target - actual) * f'(actual).
    Layer outputLayer = fLayers[fLayers.length - 1];
    for(int i = 0; i < outputLayer.Length; i++)
    {
        error = output[i] - new_output[i];
        outputLayer.Neurons[i].Delta = error * fTransferFunction.evaluteDerivate(new_output[i]);
    }

    // Walk backwards. At each step k: (1) compute layer-k deltas from the
    // still-unmodified layer-(k+1) weights, (2) then update layer-(k+1)
    // weights and biases. The ordering matters — deltas must be read before
    // the weights they depend on are changed.
    for(int k = fLayers.length - 2; k >= 0; k--)
    {
        // The input layer (k == 0) has no weights of its own, so its deltas
        // are never used; the original code computed them anyway.
        if (k > 0)
        {
            for(int i = 0; i < fLayers[k].Length; i++)
            {
                error = 0.0;
                for(int j = 0; j < fLayers[k + 1].Length; j++)
                {
                    error += fLayers[k + 1].Neurons[j].Delta * fLayers[k + 1].Neurons[j].Weights[i];
                }
                fLayers[k].Neurons[i].Delta = error * fTransferFunction.evaluteDerivate(fLayers[k].Neurons[i].Value);
            }
        }

        // Gradient step on layer k+1.
        for(int i = 0; i < fLayers[k + 1].Length; i++)
        {
            for(int j = 0; j < fLayers[k].Length; j++)
            {
                fLayers[k + 1].Neurons[i].Weights[j] += fLearningRate * fLayers[k + 1].Neurons[i].Delta *
                        fLayers[k].Neurons[j].Value;
            }
            fLayers[k + 1].Neurons[i].Bias += fLearningRate * fLayers[k + 1].Neurons[i].Delta;
        }
    }

    // Mean absolute error of this sample (pre-update forward pass).
    error = 0.0;
    for(int i = 0; i < output.length; i++)
    {
        error += Math.abs(new_output[i] - output[i]);
    }
    return error / output.length;
}

/**
 * Serializes this network to {@code path} via Java object serialization.
 * Uses try-with-resources so the stream is closed even on failure (the
 * original leaked the FileOutputStream when writeObject threw).
 *
 * @param path destination file
 * @return true on success, false on any I/O or serialization failure
 */
public boolean save(String path)
{
    try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(path)))
    {
        oos.writeObject(this);
        return true;
    }
    catch (IOException e)
    {
        return false;
    }
}


/**
 * Loads a network previously written by {@link #save}.
 *
 * SECURITY NOTE(review): Java native deserialization of untrusted files is
 * dangerous; only load files this application wrote itself.
 *
 * @param path source file
 * @return the deserialized network, or null on any failure
 */
public static MultiLayerPerceptron load(String path)
{
    try (ObjectInputStream ois = new ObjectInputStream(new FileInputStream(path)))
    {
        return (MultiLayerPerceptron) ois.readObject();
    }
    catch (IOException | ClassNotFoundException | ClassCastException e)
    {
        return null;
    }
}


/** @return the current learning rate. */
public double getLearningRate()
{
    return fLearningRate;
}

/** @param rate new learning rate used by subsequent backPropagate calls. */
public void setLearningRate(double rate)
{
    fLearningRate = rate;
}


/** @param fun new activation function used by subsequent forward passes. */
public void setTransferFunction(TransferFunction fun)
{
    fTransferFunction = fun;
}


/** @return number of neurons in the input layer. */
public int getInputLayerSize()
{
    return fLayers[0].Length;
}


/** @return number of neurons in the output layer. */
public int getOutputLayerSize()
{
    return fLayers[fLayers.length - 1].Length;
}
}

1 个答案:

答案 0 :(得分:0)

在ANN领域,XOR是非线性分类的一个简单基准:它有2个输入和1个输出(例如 [0,1] => [1]),它不只是一个玩具示例。

对于您的问题,简单的答案是将其视为与实施XOR的神经网络(MLP)相同,

不同之处在于您需要5个输入和1个输出。(参考您的uci数据集)

您也可以尝试以下链接:

http://scikit-learn.org/dev/modules/neural_networks_supervised.html

http://neuroph.sourceforge.net/