Binning a continuous variable by IV value in R

Time: 2011-08-10 23:01:42

Tags: r

I am building a logistic regression model in R. I want to bin the continuous predictor variables in the way that best relates them to the target variable. I know of two approaches:

  1. Bin the continuous variable so that its IV (information value) is maximized

  2. Maximize the chi-square statistic of the two-way contingency table, where the target takes the two values 0 and 1 and the bins of the continuous variable form the buckets

Does anyone know of any function in R that can perform this kind of binning?

Thanks a lot for your help.

3 Answers:

Answer 0 (score: 4)

For point 1, you can use the package woeBinning, which bins using weight of evidence (WoE) and can optimize the number of bins for maximum IV.

library(woeBinning)

# get the bin cut points from your dataframe
cutpoints <- woe.binning(dataset, "target_name", "Variable_name")
woe.binning.plot(cutpoints)

# apply the cutpoints to your dataframe
dataset_woe <- woe.binning.deploy(dataset, cutpoints, add.woe.or.dum.var = "woe")

It returns the dataset with two extra columns:

  • Variable_name.binned, which contains the bin labels
  • Variable_name.woe.binned, which contains the replacement WoE values; you can feed this column into your regression instead of Variable_name (see the sketch below)
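
For instance, a minimal sketch of plugging the WoE column into the logistic regression (assuming a binary 0/1 target column named target_name, as in the call above):

# hypothetical usage: regress the target on the WoE-encoded column
model <- glm(target_name ~ Variable_name.woe.binned,
             data = dataset_woe, family = binomial)
summary(model)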

For point 2, on the chi-square criterion, the package discretization seems to handle it, but I haven't tested it.
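
An untested sketch of how the ChiMerge routine in that package might be called. chiM() expects a data frame whose last column is the class label; the dataset, Variable_name, and target_name names are reused from above:

library(discretization)

# ChiMerge: bottom-up merging of adjacent intervals via chi-square tests
chi_input  <- dataset[, c("Variable_name", "target_name")]  # class label must be last
chi_result <- chiM(chi_input, alpha = 0.05)

chi_result$cutp             # cut points chosen for the continuous column
head(chi_result$Disc.data)  # data with the variable replaced by bin indices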

Answer 1 (score: 2)

You could consider the approach regression splines use for setting knot locations; the rpart package may have relevant code. You do need to penalize the inferential statistics, because this strategy of moving breaks around to get the best fit entails hidden degrees of freedom. Another common approach is to specify breaks at equally spaced quantiles (quartiles or quintiles) within the subset with IV = 1. Something like this untested code:

cont.var.vec <- # names of all your continuous variables
breaks <- function(x, n) quantile(x,
                                  probs = seq(0, 1, length.out = n),
                                  na.rm = TRUE)
lapply(dfrm[dfrm$IV == 1, cont.var.vec], breaks, n = 5)
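
A hedged follow-up, using a hypothetical column x1 in dfrm, that shows how one of the computed break vectors could then be applied with cut():

# bin the raw values at the quantile breaks (x1 is a placeholder name)
brks <- breaks(dfrm$x1, n = 5)
dfrm$x1.binned <- cut(dfrm$x1, breaks = unique(brks), include.lowest = TRUE)
table(dfrm$x1.binned)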

Answer 2 (score: -4)

setwd("D:")
rm(list = ls())
options(scipen = 999)
dt <- read.csv("dummy_data.txt")

head(dt)
summary(dt)
mydata <- dt
head(mydata)
summary(mydata)
## Capping: limit each numeric column to 1.5 * its 75th percentile + 1
for (i in 1:ncol(mydata)) {
  if (is.numeric(mydata[, i])) {
    val.quant <- unname(quantile(mydata[, i], probs = 0.75))
    mydata[, i] <- pmin(mydata[, i], 1.5 * val.quant + 1)
  }
}

library(randomForest)
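## predictor matrix without the ID (Cust_Key) and target (Y) columns; fit a random forest to rank variables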
x <- mydata[,!names(mydata) %in% c("Cust_Key","Y")]
y <- as.factor(mydata$Y)

set.seed(21)
fit <- randomForest(x, y, importance = TRUE, ntree = 70)

mydata2 <- mydata[,!names(mydata) %in% c("Cust_Key")]
mydata2$Y <- as.factor(mydata2$Y)
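# importance scores from the initial forest, used to guide the variable reduction below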
fit$importance
####var reduction#####
vartoremove <- ncol(mydata2) - 20
library(rminer)
##### 
for (i in 1:vartoremove) {
  rf <- fit(Y ~ ., data = mydata2, model = "randomForest", mtry = 10, ntree = 100)
  varImportance <- Importance(rf, mydata2, method = "sensg")
  Z <- order(varImportance$imp, decreasing = FALSE)
  IND <- Z[2]  # Z[1] is presumably the response column itself; drop the least important predictor
  var_to_remove <- names(mydata2)[IND]
  mydata2[IND] <- NULL
  print(paste("iteration", i, "removed", var_to_remove))
}
###########
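## optimal binning with smbinning; result$ivtable reports the information value per bin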
library(smbinning)
inp <- as.data.frame(mydata2)
summary(inp)
str(inp)
# recode the factor Y into a numeric 0/1 target column for smbinning
inp$target <- as.numeric(inp$Y) - 1
table(inp$target)
ftable(inp$Y, inp$target)
result <- smbinning(df = inp, y = "target", x = "X37", p = 0.0005)
result$ivtable
smbinning.plot(result, option = "badrate", sub = "test")
boxplot(inp$X2 ~ inp$Y, horizontal = TRUE, frame = FALSE, col = "red", main = "Distribution")
###Sample
require(caTools)
inp$Y <- NULL
sample <- sample.split(inp$target, SplitRatio = 0.7)
train <- subset(inp, sample == TRUE)
test <- subset(inp, sample == FALSE)
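# sample.split stratifies on target, keeping the 0/1 ratio similar in train and test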
head(train)
nrow(train)

# logistic regression on the training split; use target ~ . so the response is excluded from the predictors
fit1 <- glm(target ~ ., data = train, family = binomial)

summary(fit1)
prediction1 <- data.frame(actual = test$target,
                          predicted = predict(fit1, test, type = "response"))

result <- smbinning(df = prediction1, y = "actual", x = "predicted", p = 0.005)
result$ivtable

smbinning.plot(result, option = "badrate", sub = "test")

tail(prediction1)

write.csv(prediction1 , "test_pred_logistic.csv")
predict_train <- data.frame(actual = train$target,
                            predicted = predict(fit1, train, type = "response"))
write.csv(predict_train, "train_pred_logistic.csv")
result <- smbinning(df = predict_train, y = "actual", x = "predicted", p = 0.005)
result$ivtable
smbinning.plot(result, option = "badrate", sub = "train")


####random forest

# rminer random forest on the numeric 0/1 target (regression mode, so the
# predictions are continuous scores); score the held-out test set
rf <- fit(target ~ ., data = train, model = "randomForest", mtry = 10, ntree = 200)

prediction2 <- data.frame(actual = test$target, predicted = predict(rf, test))
result <- smbinning(df = prediction2, y = "actual", x = "predicted", p = 0.005)
result$ivtable
smbinning.plot(result, option = "badrate", sub = "test")

###########IV

library(devtools)
install_github("riv","tomasgreif")
library(woe)

##### K-fold Validation ########

library(caret)
cv_fold_count <- 2
folds <- createFolds(mydata2$Y, cv_fold_count, list = TRUE)

# fit and score each fold in turn
for (i in 1:cv_fold_count) {
  smpl <- folds[[i]]
  g_train <- mydata2[-smpl, !names(mydata2) %in% c("Y")]
  g_test  <- mydata2[smpl,  !names(mydata2) %in% c("Y")]

  cost_train <- mydata2[-smpl, "Y"]
  cost_test  <- mydata2[smpl,  "Y"]

  rf <- randomForest(g_train, cost_train)
  logit.data <- cbind(cost_train, g_train)
  logit.fit <- glm(cost_train ~ ., data = logit.data, family = binomial)

  prediction <- data.frame(actual = cost_test, predicted = predict(rf, g_test))
}