The latest version of xgboost (0.7) can explain individual predictions when the predcontrib argument of predict() is set to TRUE. I am trying to modify the default mlr xgboost learner so that it returns these feature contributions alongside the predictions. Here is the code (the only lines I added are between the START and END markers):
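For reference, this is what the contribution output looks like when calling xgboost directly, outside of mlr (a minimal sketch on simulated data; with predcontrib = TRUE, predict() returns one column per feature plus a BIAS column, and each row sums to the raw margin prediction):

library(xgboost)

set.seed(1)
x = matrix(rnorm(300), ncol = 3, dimnames = list(NULL, c("f1", "f2", "f3")))
y = as.integer(x[, 1] + rnorm(100) > 0)

bst = xgboost(data = x, label = y, nrounds = 5,
  objective = "binary:logistic", verbose = 0)

# one row per observation, columns f1, f2, f3, BIAS
contrib = predict(bst, newdata = x, predcontrib = TRUE)
head(contrib)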
#' @export
makeRLearner.classif.xgboost.c = function() {
  makeRLearnerClassif(
    cl = "classif.xgboost.c",
    package = "xgboost",
    par.set = makeParamSet(
      # we pass all of what goes in 'params' directly to ... of xgboost
      # makeUntypedLearnerParam(id = "params", default = list()),
      makeDiscreteLearnerParam(id = "booster", default = "gbtree", values = c("gbtree", "gblinear", "dart")),
      makeUntypedLearnerParam(id = "watchlist", default = NULL, tunable = FALSE),
      makeNumericLearnerParam(id = "eta", default = 0.3, lower = 0, upper = 1),
      makeNumericLearnerParam(id = "gamma", default = 0, lower = 0),
      makeIntegerLearnerParam(id = "max_depth", default = 6L, lower = 1L),
      makeNumericLearnerParam(id = "min_child_weight", default = 1, lower = 0),
      makeNumericLearnerParam(id = "subsample", default = 1, lower = 0, upper = 1),
      makeNumericLearnerParam(id = "colsample_bytree", default = 1, lower = 0, upper = 1),
      makeNumericLearnerParam(id = "colsample_bylevel", default = 1, lower = 0, upper = 1),
      makeIntegerLearnerParam(id = "num_parallel_tree", default = 1L, lower = 1L),
      makeNumericLearnerParam(id = "lambda", default = 1, lower = 0),
      makeNumericLearnerParam(id = "lambda_bias", default = 0, lower = 0),
      makeNumericLearnerParam(id = "alpha", default = 0, lower = 0),
      makeUntypedLearnerParam(id = "objective", default = "binary:logistic", tunable = FALSE),
      makeUntypedLearnerParam(id = "eval_metric", default = "error", tunable = FALSE),
      makeNumericLearnerParam(id = "base_score", default = 0.5, tunable = FALSE),
      makeNumericLearnerParam(id = "max_delta_step", lower = 0, default = 0),
      makeNumericLearnerParam(id = "missing", default = NULL, tunable = FALSE, when = "both",
        special.vals = list(NA, NA_real_, NULL)),
      makeIntegerVectorLearnerParam(id = "monotone_constraints", default = 0, lower = -1, upper = 1),
      makeNumericLearnerParam(id = "tweedie_variance_power", lower = 1, upper = 2, default = 1.5, requires = quote(objective == "reg:tweedie")),
      makeIntegerLearnerParam(id = "nthread", lower = 1L, tunable = FALSE),
      makeIntegerLearnerParam(id = "nrounds", default = 1L, lower = 1L),
      # FIXME nrounds seems to have no default in xgboost(), if it has 1, par.vals is redundant
      makeUntypedLearnerParam(id = "feval", default = NULL, tunable = FALSE),
      makeIntegerLearnerParam(id = "verbose", default = 1L, lower = 0L, upper = 2L, tunable = FALSE),
      makeIntegerLearnerParam(id = "print_every_n", default = 1L, lower = 1L, tunable = FALSE,
        requires = quote(verbose == 1L)),
      makeIntegerLearnerParam(id = "early_stopping_rounds", default = NULL, lower = 1L, special.vals = list(NULL), tunable = FALSE),
      makeLogicalLearnerParam(id = "maximize", default = NULL, special.vals = list(NULL), tunable = FALSE),
      makeDiscreteLearnerParam(id = "sample_type", default = "uniform", values = c("uniform", "weighted"), requires = quote(booster == "dart")),
      makeDiscreteLearnerParam(id = "normalize_type", default = "tree", values = c("tree", "forest"), requires = quote(booster == "dart")),
      makeNumericLearnerParam(id = "rate_drop", default = 0, lower = 0, upper = 1, requires = quote(booster == "dart")),
      makeNumericLearnerParam(id = "skip_drop", default = 0, lower = 0, upper = 1, requires = quote(booster == "dart")),
      ########## START ##########
      makeLogicalLearnerParam(id = "predcontrib", default = FALSE, tunable = FALSE),
      makeLogicalLearnerParam(id = "approxcontrib", default = FALSE, tunable = FALSE),
      ########## END ##########
      makeUntypedLearnerParam(id = "callbacks", default = list(), tunable = FALSE)
    ),
    par.vals = list(nrounds = 1L, verbose = 0L),
    properties = c("twoclass", "multiclass", "numerics", "prob", "weights", "missings", "featimp"),
    name = "eXtreme Gradient Boosting",
    short.name = "xgboost.c",
    note = "All settings are passed directly, rather than through `xgboost`'s `params` argument. `nrounds` has been set to `1` and `verbose` to `0` by default. `num_class` is set internally, so do not set this manually.",
    callees = "xgboost"
  )
}
#' @export
trainLearner.classif.xgboost.c = function(.learner, .task, .subset, .weights = NULL, ...) {
  td = getTaskDesc(.task)
  parlist = list(...)
  nc = length(td$class.levels)
  if (is.null(parlist$objective))
    parlist$objective = ifelse(nc == 2L, "binary:logistic", "multi:softprob")
  if (.learner$predict.type == "prob" && parlist$objective == "multi:softmax")
    stop("objective = 'multi:softmax' does not work with predict.type = 'prob'")
  # if we use softprob or softmax as objective we have to add the number of classes 'num_class'
  if (parlist$objective %in% c("multi:softprob", "multi:softmax"))
    parlist$num_class = nc
  task.data = getTaskData(.task, .subset, target.extra = TRUE)
  label = match(as.character(task.data$target), td$class.levels) - 1
  parlist$data = xgboost::xgb.DMatrix(data = data.matrix(task.data$data), label = label)
  if (!is.null(.weights))
    xgboost::setinfo(parlist$data, "weight", .weights)
  if (is.null(parlist$watchlist))
    parlist$watchlist = list(train = parlist$data)
  do.call(xgboost::xgb.train, parlist)
}
#' @export
predictLearner.classif.xgboost.c = function(.learner, .model, .newdata, ...) {
  td = .model$task.desc
  m = .model$learner.model
  cls = td$class.levels
  nc = length(cls)
  obj = .learner$par.vals$objective
  if (is.null(obj))
    .learner$par.vals$objective = ifelse(nc == 2L, "binary:logistic", "multi:softprob")
  p = predict(m, newdata = data.matrix(.newdata), ...)
  if (nc == 2L) { # binaryclass
    if (.learner$par.vals$objective == "multi:softprob") {
      y = matrix(p, nrow = length(p) / nc, ncol = nc, byrow = TRUE)
      colnames(y) = cls
    } else {
      y = matrix(0, ncol = 2, nrow = nrow(.newdata))
      colnames(y) = cls
      y[, 1L] = 1 - p
      y[, 2L] = p
    }
    if (.learner$predict.type == "prob") {
      # quick-and-dirty attempt to also return the raw predictions (see questions below):
      # return(cbind(y, p))
      return(y)
    } else {
      p = colnames(y)[max.col(y)]
      names(p) = NULL
      p = factor(p, levels = colnames(y))
      return(p)
    }
  } else { # multiclass
    if (.learner$par.vals$objective == "multi:softmax") {
      p = as.factor(p) # special handling for multi:softmax which directly predicts class levels
      levels(p) = cls
      return(p)
    } else {
      p = matrix(p, nrow = length(p) / nc, ncol = nc, byrow = TRUE)
      colnames(p) = cls
      if (.learner$predict.type == "prob") {
        return(p)
      } else {
        ind = max.col(p)
        cns = colnames(p)
        return(factor(cns[ind], levels = cns))
      }
    }
  }
}
#' @export
getFeatureImportanceLearner.classif.xgboost.c = function(.learner, .model, ...) {
  mod = getLearnerModel(.model, more.unwrap = TRUE)
  imp = xgboost::xgb.importance(feature_names = .model$features,
    model = mod, ...)
  fiv = imp$Gain
  setNames(fiv, imp$Feature)
}
Below is a reproducible example demonstrating how I call the code (after registering the methods for the new learner):
library(mlr)
library(dplyr)
library(titanic)

set.seed(1)  # make the 70/30 split reproducible
sample <- sample.int(n = nrow(titanic_train), size = floor(0.7 * nrow(titanic_train)), replace = FALSE)
train <- titanic_train[sample, ] %>%
  select(Pclass, Sex, Age, SibSp, Fare, Survived) %>%
  mutate(Sex = ifelse(Sex == 'male', 0, 1),
         Survived = as.factor(Survived))  # makeClassifTask requires a factor target

mlr::configureMlr(on.par.without.desc = "quiet")
lrn <- mlr::makeLearner(cl = 'classif.xgboost.c',
                        predict.type = "prob",
                        fix.factors.prediction = TRUE,
                        tree_method = 'exact',
                        early_stopping_rounds = 5)
lrn <- mlr::makeImputeWrapper(lrn,
                              classes = list(integer = mlr::imputeMedian(),
                                             numeric = mlr::imputeHist(),
                                             factor = mlr::imputeMode()),
                              dummy.classes = "factor")
classif.task <- mlr::makeClassifTask(data = train,
                                     target = "Survived",
                                     positive = "1")
mod <- mlr::train(lrn, classif.task)

test <- titanic_train[-sample, ] %>%
  select(Pclass, Sex, Age, SibSp, Fare, Survived) %>%
  mutate(Sex = ifelse(Sex == 'male', 0, 1),
         Survived = as.factor(Survived))

pred <- predict(mod, newdata = test, predcontrib = TRUE)
calculateConfusionMatrix(pred)
I have the following two questions:

1. The extra arguments never reach predictLearner: if I call list(...) inside it, I get a list of length zero. Is it possible to pass arguments such as predcontrib through to the predict function?

2. If instead I call the predict function twice inside the learner, once with predcontrib hard-coded to TRUE and once without (a quick-and-dirty hack to avoid changing too much of the current code; the full variant is sketched at the end of this post):
contrib = predict(m, newdata = data.matrix(.newdata), predcontrib = TRUE, ...)
p = predict(m, newdata = data.matrix(.newdata), ...)
and return both together:
return(cbind(y, p))
then I get the following error:
Error in checkPredictLearnerOutput(.learner, .model, p) :
predictLearner for classif.xgboost.c.imputed has returned not the class
levels as column names: 0,1,p
Is there a way to change the expected output of the learner?
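For completeness, the quick-and-dirty variant inside the binary-class branch of predictLearner looks roughly like this (a sketch of my attempt, not working code):

contrib = predict(m, newdata = data.matrix(.newdata), predcontrib = TRUE, ...)
p = predict(m, newdata = data.matrix(.newdata), ...)
# y is built from p as in the code above; appending an extra column is
# what makes checkPredictLearnerOutput fail, because the column names of
# the returned matrix must be exactly the class levels:
return(cbind(y, p))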