结合多个神经网络模型

时间:2013-11-03 18:07:19

标签: r machine-learning neural-network

我运行了200次循环,其中我:

  • 将我的数据集随机分成训练和测试集

  • 在训练集上使用R nnet()命令拟合神经网络模型

  • 评估测试集的性能

我将每个模型保存到列表中。

现在我想使用组合模型进行样本外预测。为此,我以前曾对randomForest对象使用过combine函数。对于nnet对象,是否有类似的组合命令?

我无法上传数据集,但下面是我正在使用的代码。它按原样工作,除了我寻求命令组合模型的最后一行。

    # Number of resampling iterations (random train/test splits).
    n <- 200

    # Per-iteration out-of-sample metrics, preallocated as n x 1 matrices.
    nnet_preds     <- matrix(NA, ncol = 1, nrow = n)  # accuracy rate
    nnet_predstp   <- matrix(NA, ncol = 1, nrow = n)  # true-positive rate
    nnet_predstn   <- matrix(NA, ncol = 1, nrow = n)  # true-negative rate
    nnet_predsfptp <- matrix(NA, ncol = 1, nrow = n)  # FP:TP ratio
    nnet_predsfntp <- matrix(NA, ncol = 1, nrow = n)  # FN:TP ratio

    # Summary table filled after the loop: mean/sd pairs for five metrics.
    NN_predictions <- matrix(NA, ncol = 1, nrow = 10)

    # Binary outcome vector -- assumes testframe2 is already in the
    # workspace with a "ytest" column; TODO confirm before running.
    outcome_2010_NN <- testframe2[, "ytest"]

    # One fitted nnet object per iteration, kept for later ensembling.
    nn_model <- vector("list", n)

    # The 50 (scaled) predictor columns used by the model.
    data <- testframe2[, c("sctownpop", "sctownhh", "scnum_tribes",
      "sctownmusl_percap", "scmuslim", "scprop_stranger",
      "sctownstrg_percap", "scprop_domgroup", "scexcom",
      "sctownexcm_percap", "sctownretn_percap", "scprop_under30",
      "scprop_male", "scedulevel", "scprop_noeduc", "scprop_anypeace",
      "scgroup_prtcptn", "scpubcontr", "scsafecommdum", "scciviccommdum",
      "scoll_action_add", "scngodependent", "scgovtdependent",
      "scpolicourtscorr", "screlmarry", "scmslmnolead", "sccrime_scale",
      "scviol_scale", "sclandconf_scale", "sctownnlnd_percap",
      "scnolandnofarm", "scfarmocc", "scunemployed", "scwealthindex",
      "scwealth_inequality", "scviol_experienced", "scviol_part",
      "scanylndtake", "scdisp_ref", "sfacilities", "sfreq_visits",
      "sctot_resources", "scmeanprice_above75", "scmosquesdum",
      "scmnrt_ldrshp", "scany_majorconf", "sstate_presence", "sremote",
      "scmobilec", "scradio_low")]

    # Prepend the outcome so `data` holds everything the model formula
    # needs (outcome + 50 predictors, nothing else).
    data <- cbind(outcome_2010_NN, data)

    # 80/20 split point: first 4/5 of the shuffled rows train, rest test.
    sampleSplit <- round(nrow(data) - (nrow(data) / 5))

    # Fit one network per random split and score it on the hold-out rows.
    for (i in seq_len(n)) {

      # Re-seed per iteration so every split (and the nnet weight
      # initialization that follows) is reproducible.
      set.seed(06511 + i)

      # Permute all rows once; sample(n) draws n of 1..n without
      # replacement, identical to the longer sample(1:n, n, replace=FALSE).
      data_randomization <- data[sample(nrow(data)), ]

      train <- data_randomization[seq_len(sampleSplit), ]
      test  <- data_randomization[(sampleSplit + 1):nrow(data), ]

      # `data` contains exactly the outcome plus the 50 predictors, so the
      # dot shorthand is equivalent to listing every predictor explicitly.
      # size  = units in the single hidden layer
      # decay = weight-decay penalty (nnet default is 0)
      nn_model[[i]] <- nnet(outcome_2010_NN ~ ., data = train,
                            size = 3, decay = 0.1)

      predictions <- predict(nn_model[[i]], test)

      # Classify the top-ranked predictions as 1. The cutoff 24 was
      # manually optimized for this test-set size -- TODO confirm it still
      # fits nrow(test) if the split fraction ever changes.
      nnpredorder <- rank(predictions)
      nncvpredictionsA50 <- ifelse(nnpredorder > 24, 1, 0)

      # Confusion matrix: rows = observed outcome, cols = predicted class.
      # NOTE(review): the [1,1]/[2,2] indexing below assumes both classes
      # appear among observations AND predictions; a degenerate iteration
      # would produce a smaller table and error here.
      errors <- table(test[, "outcome_2010_NN"], nncvpredictionsA50)

      accuracy.rate <- (errors[1, 1] + errors[2, 2]) / sum(errors)
      true.pos.rate <- errors[2, 2] / (errors[2, 2] + errors[2, 1])
      true.neg.rate <- errors[1, 1] / (errors[1, 1] + errors[1, 2])
      FPTP <- errors[1, 2] / errors[2, 2]  # false-pos : true-pos ratio
      FNTP <- errors[2, 1] / errors[2, 2]  # false-neg : true-pos ratio

      nnet_preds[i, ]     <- accuracy.rate
      nnet_predstp[i, ]   <- true.pos.rate
      nnet_predstn[i, ]   <- true.neg.rate
      nnet_predsfptp[i, ] <- FPTP
      nnet_predsfntp[i, ] <- FNTP
    }

    # Quick console check of ensemble-level performance across the n fits.
    mean(nnet_preds); sd(nnet_preds)
    mean(nnet_predstp); sd(nnet_predstp)

    # Fill the 10 x 1 summary matrix in one vectorized assignment.
    # Rows 1-2: sensitivity (TP rate) mean/sd
    # Rows 3-4: specificity (TN rate) mean/sd
    # Rows 5-6: overall accuracy mean/sd
    # Rows 7-8: FP:TP ratio mean/sd
    # Rows 9-10: FN:TP ratio mean/sd
    NN_predictions[, 1] <- c(
      mean(nnet_predstp),   sd(nnet_predstp),
      mean(nnet_predstn),   sd(nnet_predstn),
      mean(nnet_preds),     sd(nnet_preds),
      mean(nnet_predsfptp), sd(nnet_predsfptp),
      mean(nnet_predsfntp), sd(nnet_predsfntp)
    )

    print(NN_predictions)

### Combine NN models ----
# randomForest::combine() merges decision-tree ensembles only, so
# do.call(combine, nn_model) errors on nnet objects. Ensemble the fitted
# networks instead by averaging their predictions (model averaging).
#
# models  : list of fitted models that respond to predict(model, newdata)
# newdata : data frame of observations to score
# returns : numeric vector, one averaged prediction per row of newdata
predict_nn_ensemble <- function(models, newdata) {
  # One column of predictions per model; average across models row-wise.
  preds <- vapply(
    models,
    function(m) as.numeric(predict(m, newdata)),
    numeric(nrow(newdata))
  )
  rowMeans(preds)
}

# Averaged out-of-sample predictions of the 200 networks on the last
# hold-out set (replaces the unusable `do.call(combine, nn_model)`).
aggNNmodel <- predict_nn_ensemble(nn_model, test)

1 个答案:

答案 0 :(得分:1)

您无法使用Random Forest的combine方法,因为它只适用于决策树。但随机森林本身是一种集成(ensemble)算法,因此你应该能够用类似的集成方法来组合神经网络模型。

Boosting是一种组合弱学习者的方法,但并没有规定不能用神经网络这样较强的学习者来做boosting。

Can a set of weak learners create a single strong learner?

使用像AdaBoost这样的提升算法,将神经网络作为弱学习者。 google search显示了R中的几个增强包。