OpenNN返回缩放的输出值

时间:2015-04-10 15:58:03

标签: c++ neural-network

我对OpenNN有一个问题,遗憾的是文档缺乏相当多。

我正在尝试用一些随机生成的数据创建一个神经网络,但代码给出的结果是经过缩放的(我猜这就是问题所在)。我找不到能在不报错的情况下关闭缩放的选项,也不知道该如何对输出进行反缩放。

以下是我创建NN的代码:

// One training instance: grouped measurement series plus the target
// values the network could learn to predict.
struct DataHold {
        std::vector<std::vector<double>> dataPoints;  // measurement groups; CreateNNModel/GetNNValue select groups by index
        double learnValueGlucose;      // target actually trained on in CreateNNModel
        double learnValueHarn;         // additional target (not used by the code shown)
        double learnValueCholesterin;  // additional target (not used by the code shown)
        std::string debugString;       // free-form label for debugging
    };

/// Builds, trains and saves a neural network from the given instances.
///
/// @param input    training instances; dataPoints groups selected via @p indices
///                 are flattened into one input row per instance, with
///                 learnValueGlucose as the single target column.
/// @param fileName currently unused (kept for interface compatibility).
/// @param indices  which dataPoints groups of each DataHold to use as inputs.
/// @throws std::invalid_argument if @p input is empty.
void MachLearning::CreateNNModel(std::vector<MachLearning::DataHold>& input, std::string fileName, std::vector<int>& indices) {
    // A bare `throw;` outside a catch handler calls std::terminate;
    // throw a real exception object so callers can actually catch it.
    if(input.empty()) {
        throw std::invalid_argument("CreateNNModel: input is empty");
    }

    // Total number of input variables = sum of the sizes of the selected
    // groups (taken from the first instance; assumes all instances have
    // identically sized groups — TODO confirm with the data producer).
    std::size_t size = 0;
    for(std::size_t index = 0; index < indices.size(); ++index) {
        size += input.at(0).dataPoints.at(indices.at(index)).size();
    }

    // One row per instance; +1 column for the target value.
    OpenNN::DataSet data_set(input.size(), size, 1);
    OpenNN::Matrix<double> dataMatrix(input.size(), size + 1);
    OpenNN::Vector<double> dataVector;
    dataVector.resize(size + 1);

    for(std::size_t index = 0; index < input.size(); ++index) {
        std::size_t runIndex = 0;
        // Flatten all selected groups of this instance into one row.
        for(std::size_t runningIndex = 0; runningIndex < indices.size(); ++runningIndex) {
            const std::vector<double>& group = input.at(index).dataPoints.at(indices.at(runningIndex));
            for(std::size_t thisIndex = 0; runIndex < dataVector.size() - 1 && thisIndex < group.size(); ++runIndex, ++thisIndex) {
                dataVector.at(runIndex) = group.at(thisIndex);
            }
        }
        // Target goes in the last column. Write the row ONCE per instance —
        // the original wrote it redundantly inside the group loop, storing
        // partially filled rows that were only correct on the last pass.
        dataVector.at(dataVector.size() - 1) = input.at(index).learnValueGlucose;
        dataMatrix.set_row(index, dataVector);
    }

    data_set.set_data(dataMatrix);

    // Name/describe the input variables for the generated expression/XML.
    OpenNN::VariablesInformation* variables_information_pointer = data_set.get_variables_information_pointer();

    for(std::size_t index = 0; index < size; ++index) {
        variables_information_pointer->set_name(index, std::string(std::string("frequency").append(std::to_string(index))));
        variables_information_pointer->set_units(index, "hertzs");
        variables_information_pointer->set_description(index, "No Text");
    }

    const OpenNN::Vector<OpenNN::Vector<std::string> > inputs_targets_information = variables_information_pointer->arrange_inputs_targets_information();
    // Scales the data set in place and returns the statistics needed to
    // unscale network outputs later.
    const OpenNN::Vector< OpenNN::Vector<double> > inputs_targets_statistics = data_set.scale_inputs_targets();

    // Random training/generalization/testing split.
    OpenNN::InstancesInformation* instances_information_pointer = data_set.get_instances_information_pointer();
    instances_information_pointer->split_random_indices();

    const unsigned int inputs_number = variables_information_pointer->count_inputs_number();
    const unsigned int hidden_perceptrons_number = 9;  // NOTE(review): magic size — tune/justify
    const unsigned int outputs_number = variables_information_pointer->count_targets_number();

    // neural_network is a raw owning member pointer; release any previous
    // model before building the new one. (Prefer std::unique_ptr when the
    // class definition can be changed.)
    delete neural_network;
    neural_network = new OpenNN::NeuralNetwork(inputs_number, hidden_perceptrons_number, outputs_number);
    neural_network->set_inputs_outputs_information(inputs_targets_information);
    neural_network->set_inputs_outputs_statistics(inputs_targets_statistics);
    neural_network->set_scaling_unscaling_layers_flag(true);

    OpenNN::PerformanceFunctional performance_functional(neural_network, &data_set);

    OpenNN::TrainingStrategy training_strategy(&performance_functional);

    // NOTE(review): ownership of this pointer after handing it to the
    // training strategy is not visible here — confirm against the OpenNN
    // version in use to rule out a leak.
    OpenNN::QuasiNewtonMethod* quasi_Newton_method_pointer = new OpenNN::QuasiNewtonMethod( &performance_functional );
    quasi_Newton_method_pointer->set_minimum_performance_increase( 1.0e-6 );
    quasi_Newton_method_pointer->set_reserve_evaluation_history( true );
    training_strategy.set_main_training_algorithm_pointer(quasi_Newton_method_pointer);
    OpenNN::TrainingStrategy::Results training_strategy_results = training_strategy.perform_training();

    // Re-enable min-max scaling/unscaling for deployment-time predictions.
    neural_network->set_inputs_scaling_outputs_unscaling_methods("MinimumMaximum");
    neural_network->set_scaling_unscaling_layers_flag(true);

    OpenNN::TestingAnalysis testing_analysis(neural_network, &data_set);
    OpenNN::FunctionRegressionTesting* function_regression_testing_pointer = testing_analysis.get_function_regression_testing_pointer();

    OpenNN::FunctionRegressionTesting::LinearRegressionAnalysisResults linear_regression_analysis_results = function_regression_testing_pointer->perform_linear_regression_analysis();

    // Persist model and training artifacts. NOTE(review): fileName is never
    // used — these hard-coded paths are probably meant to derive from it.
    neural_network->save("neural_network.xml");
    neural_network->save_expression("expression.txt");

    performance_functional.save("performance_functional.xml");

    training_strategy.save("training_strategy.xml");

    training_strategy_results.save("training_strategy_results.dat");
    linear_regression_analysis_results.save("linear_regression_analysis_results.dat");

    return;
}

计算输出的代码:

/// Runs the trained network on one flattened instance and returns the
/// first (and only) output value.
///
/// @param input   measurement groups of the instance to evaluate.
/// @param indices which groups to use, in the same order as in
///                CreateNNModel (the layout must match training).
/// @return the network's first output component.
double MachLearning::GetNNValue(std::vector<std::vector<double>>& input, std::vector<int>& indices) {
    // Total length of the flattened input row.
    std::size_t size = 0;
    for(std::size_t index = 0; index < indices.size(); ++index) {
        size += input.at(indices.at(index)).size();
    }

    OpenNN::Vector<double> dataVector;
    dataVector.resize(size);

    // Flatten the selected groups, in order, into a single vector.
    std::size_t runIndex = 0;
    for(std::size_t index = 0; index < indices.size(); ++index) {
        const std::vector<double>& group = input.at(indices.at(index));
        for(std::size_t smallIndex = 0; smallIndex < group.size(); ++smallIndex, ++runIndex) {
            dataVector.at(runIndex) = group.at(smallIndex);
        }
    }

    // NOTE(review): whether this value is scaled or unscaled depends on the
    // network's scaling/unscaling layer configuration at training time.
    std::vector<double> test;
    test = neural_network->calculate_outputs(dataVector);
    return test.at(0);  // .at() throws std::out_of_range if the network produced no output
}

我用一些随机生成的向量做了测试:期望值与神经网络计算值之间的相关性达到 0.999(…),但输出被缩放到了大约 -1 到 1 之间(我也得到过 1.2 的结果,猜测是那一次随机生成的输入恰好更大)。

有谁知道 OpenNN 的接口应该如何正确使用?我试过 scaling_unscaling 标志,但完全没有效果。

1 个答案:

答案 0(得分:0)

您似乎正在使用旧版本的OpenNN。您可以从SourceForge下载最新版本:https://sourceforge.net/projects/opennn

关于缩放/取消缩放的内容,您可以通过执行以下操作来禁用它:

scaling_layer.set_scaling_method(NoScaling);

unscaling_layer.set_unscaling_method(NoUnscaling);