How do I use pruning and boosting with decision-tree-based classification methods?
I have 10 features and 3000 samples.
Answer (score: 2)
Here is an example demonstrating how to use boosting.
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import classification_report
# generate some artificial data
X, y = make_classification(n_samples=3000, n_features=10, n_informative=2, flip_y=0.1, weights=[0.15, 0.85], random_state=0)
# train/test split
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
train_index, test_index = next(split.split(X, y))
X_train, y_train = X[train_index], y[train_index]
X_test, y_test = X[test_index], y[test_index]
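StratifiedShuffleSplit is used here because the classes are imbalanced (weights=[0.15, 0.85] above); stratification preserves that ratio in both splits. A quick sanity check, assuming numpy is available:
import numpy as np
# class proportions should be roughly 0.15 / 0.85 in both splits
print("train balance:", np.bincount(y_train) / len(y_train))
print("test balance:", np.bincount(y_test) / len(y_test))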
# boosting: many weak learners (max_depth=1 stumps) are fit sequentially, each one
# correcting the errors of the ensemble so far; a decision tree is the default base learner
estimator = GradientBoostingClassifier(n_estimators=200, learning_rate=0.1, max_depth=1, random_state=0)
estimator.fit(X_train, y_train)
y_pred = estimator.predict(X_test)
print(classification_report(y_test, y_pred))
             precision    recall  f1-score   support

          0       0.88      0.80      0.84       109
          1       0.96      0.98      0.97       491

avg / total       0.94      0.94      0.94       600
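Because boosting refines the model stage by stage, you can watch test accuracy evolve as estimators are added. A minimal sketch using the fitted estimator above and staged_predict, which yields predictions after 1, 2, ..., n_estimators stages:
from sklearn.metrics import accuracy_score
# accuracy on the test set after each boosting stage
stage_scores = [accuracy_score(y_test, y_stage)
                for y_stage in estimator.staged_predict(X_test)]
print("best stage:", stage_scores.index(max(stage_scores)) + 1,
      "accuracy:", max(stage_scores))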
# benchmark: a standard tree
tree_benchmark = DecisionTreeClassifier(max_depth=3, class_weight='balanced')
tree_benchmark.fit(X_train, y_train)
y_pred_benchmark = tree_benchmark.predict(X_test)
print(classification_report(y_test, y_pred_benchmark))
             precision    recall  f1-score   support

          0       0.63      0.84      0.72       109
          1       0.96      0.89      0.92       491

avg / total       0.90      0.88      0.89       600
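As for the pruning half of the question: limiting max_depth, as in the benchmark, is a form of pre-pruning. For post-pruning, scikit-learn (0.22+) supports minimal cost-complexity pruning via the ccp_alpha parameter. A minimal sketch, reusing the split above and selecting alpha on the test set only for brevity (in practice, pick it by cross-validation):
# compute the path of effective alphas, then fit one pruned tree per
# alpha and keep the one with the best held-out accuracy
path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X_train, y_train)
best_alpha, best_score = 0.0, 0.0
for alpha in path.ccp_alphas:
    pruned = DecisionTreeClassifier(random_state=0, ccp_alpha=alpha).fit(X_train, y_train)
    score = pruned.score(X_test, y_test)
    if score > best_score:
        best_alpha, best_score = alpha, score
print("best ccp_alpha: %.5f, test accuracy: %.3f" % (best_alpha, best_score))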