caret and classProbs = TRUE

Date: 2019-04-07 15:42:16

Tags: r caret

library(dplyr)
library(caret)
library(doParallel)

cl <- makeCluster(3, outfile = '')
registerDoParallel(cl)
set.seed(2019)
fit1 <- train(x = X_train %>% head(1000) %>% as.matrix(),
              y = y_train %>% head(1000),
              method = 'ranger', 
              verbose = TRUE,
              trControl = trainControl(method = 'oob',
                                       verboseIter = TRUE,
                                       allowParallel = TRUE,
                                       classProbs = TRUE),
              tuneGrid = expand.grid(mtry = 2:3,
                                     min.node.size = 1, 
                                     splitrule = 'gini'),
              num.tree = 100,
              metric = 'Accuracy',
              importance = 'permutation')
stopCluster(cl)

The code above results in the following error:

  

Aggregating results
Something is wrong; all the Accuracy metric values are missing:
    Accuracy       Kappa
 Min.   : NA   Min.   : NA
 1st Qu.: NA   1st Qu.: NA
 Median : NA   Median : NA
 Mean   :NaN   Mean   :NaN
 3rd Qu.: NA   3rd Qu.: NA
 Max.   : NA   Max.   : NA
 NA's   :2     NA's   :2
Error: Stopping

I have searched SO for this error and found that there are many possible causes behind it. Unfortunately, nothing I found applies to my case. Here the problem seems to be classProbs = TRUE: when I remove it and fall back to the default of FALSE, the model trains successfully. However, based on the documentation I do not understand why this should be a problem:

  

classProbs: a logical; should class probabilities be computed for classification models (along with predicted values) in each resample?
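For reference, the same call runs without error for me once classProbs is dropped; nothing else changes (fit0 is just a placeholder name for this working variant):

fit0 <- train(x = X_train %>% head(1000) %>% as.matrix(),
              y = y_train %>% head(1000),
              method = 'ranger',
              verbose = TRUE,
              # identical to the failing call except classProbs is left at its default (FALSE)
              trControl = trainControl(method = 'oob',
                                       verboseIter = TRUE,
                                       allowParallel = TRUE),
              tuneGrid = expand.grid(mtry = 2:3,
                                     min.node.size = 1,
                                     splitrule = 'gini'),
              num.tree = 100,
              metric = 'Accuracy',
              importance = 'permutation')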

Data sample:

X_train <- structure(list(V5 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), V1 = c(41.5, 
5.3, 44.9, 58.7, 67.9, 56.9, 3.7, 43.4, 38.6, 34.2, 42.3, 29.1, 
27.6, 44.2, 55.6, 53.7, 48, 58.4, 54, 7.1, 35.9, 36, 61.2, 24.1, 
20.3, 10.8, 13, 69.4, 71.5, 45.6, 34.4, 17.1, 30.1, 68.9, 25.1, 
37.4, 55.5, 58.9, 49.8, 47.2, 29.5, 19.9, 24.1, 27, 33.3, 41.9, 
33.2, 27.9, 48.4, 41.2), V2 = c(33.1, 35.4, 66.2, 1.8, 5, -0.9, 
32.8, 35.8, 36, 4, 65.5, 64, 61, 68.9, 69.3, 59.7, 29.8, 24.4, 
62.7, 12.2, 6, -1.2, 63.5, 7.5, 22.9, 40.5, 47.3, 1.6, -1.5, 
33.3, 53.3, 23.7, 2.7, 61, 2.4, 13.5, 8.1, 55.1, 29.6, 36.8, 
26.8, 26, 30.8, 53.8, 10.6, 1.9, 10.2, 29.1, 51.4, 33.1), V3 = c(0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0), V4 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), class = c("tbl_df", 
"tbl", "data.frame"), row.names = c(NA, -50L))
y_train <- structure(c(2L, 2L, 2L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 
2L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 
1L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
1L, 1L, 1L, 1L, 1L), .Label = c("plus", "minus"), class = "factor")
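For completeness, one common cause of classProbs-related failures, outcome levels that are not syntactically valid R variable names, does not apply here: the levels are "plus" and "minus". A quick base-R check confirms it:

# classProbs = TRUE needs outcome levels that are valid R variable names
levels(y_train)
#> [1] "plus"  "minus"
identical(levels(y_train), make.names(levels(y_train)))
#> [1] TRUE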

1 answer:

Answer 0 (score: 0)

Based on the answers to https://stats.stackexchange.com/questions/23763/is-there-a-way-to-disable-the-parameter-tuning-grid-feature-in-caret, I tried the suggestion of setting trainControl's method to "none", which allowed the training to run successfully. The second answer there hints that complicated method settings should not be used with the random forest method, so I also set the mtry parameter to a single value, although I am not sure that was necessary. (I had earlier tried removing the parallel cluster, which had no effect on the error.) With these changes, the code below no longer throws the error.

fit1 <- train(x = X_train[, 2:3],
              y = y_train,
              method = 'ranger',
              verbose = TRUE,
              # method = "none" skips resampling entirely, so no OOB metrics are computed
              trControl = trainControl(method = 'none'),
              # with method = "none", tuneGrid must contain a single parameter combination
              tuneGrid = expand.grid(mtry = 2,
                                     min.node.size = 1,
                                     splitrule = 'gini'),
              num.tree = 100,
              metric = 'Accuracy',
              importance = 'permutation')
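One caveat worth adding (not part of the original answer): with method = "none" caret skips resampling entirely, so no Accuracy or Kappa estimates are reported; the model is simply fit once on the supplied data. Hard class predictions can still be obtained from the fitted object, roughly like this:

# sketch: predict classes on the same predictors used for fitting
pred <- predict(fit1, newdata = X_train[, 2:3])
table(predicted = pred, observed = y_train)

Getting class probabilities back via predict(..., type = "prob") would presumably still require classProbs = TRUE in trainControl, which is exactly the setting the question is struggling with, so this sketch covers only class predictions.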