I would like to combine an XGBoost model with input scaling and feature-space reduction via PCA. In addition, the hyperparameters of the model, as well as the number of components used in the PCA, should be tuned with cross-validation. And to prevent the model from overfitting, early stopping should be added.
To combine the individual steps, I decided to use sklearn's Pipeline functionality.
In the beginning, I had some problems making sure that the PCA is also applied to the validation set. But I think using XGB__eval_set does the trick.
The code actually runs without any errors, but it seems to run forever (at some point the CPU usage of all cores drops to zero, yet the process keeps running for hours; the session had to be killed at some point).
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
# Train / Test split
X_train, X_test, y_train, y_test = train_test_split(X_with_features, y, test_size=0.2, random_state=123)
# Train / Validation split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=123)
# Pipeline
pipe = Pipeline(steps=[("Scale", StandardScaler()),
                       ("PCA", PCA()),
                       ("XGB", XGBRegressor())])
# Hyper-parameter grid (Test only)
grid_param_pipe = {'PCA__n_components': [5],
                   'XGB__n_estimators': [1000],
                   'XGB__max_depth': [3],
                   'XGB__reg_alpha': [0.1],
                   'XGB__reg_lambda': [0.1]}
# Grid object
grid_search_pipe = GridSearchCV(estimator=pipe,
                                param_grid=grid_param_pipe,
                                scoring="neg_mean_squared_error",
                                cv=5,
                                n_jobs=5,
                                verbose=3)
# Run CV
grid_search_pipe.fit(X_train, y_train,
                     XGB__early_stopping_rounds=10,
                     XGB__eval_metric="rmse",
                     XGB__eval_set=[[X_val, y_val]])
Answer 0 (score: 3)
The problem is that the fit method needs an eval set created externally, but we cannot create one before the transformation by the pipeline.
This is a bit hacky, but the idea is to create a thin wrapper around the xgboost regressor/classifier that prepares the eval set internally.
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor, XGBClassifier
class XGBoostWithEarlyStop(BaseEstimator):
    def __init__(self, early_stopping_rounds=5, test_size=0.1,
                 eval_metric='mae', **estimator_params):
        self.early_stopping_rounds = early_stopping_rounds
        self.test_size = test_size
        self.eval_metric = eval_metric
        if self.estimator is not None:
            self.set_params(**estimator_params)

    def set_params(self, **params):
        # Forward parameters to the wrapped xgboost estimator
        return self.estimator.set_params(**params)

    def get_params(self, **params):
        return self.estimator.get_params()

    def fit(self, X, y):
        # Split an eval set off the (already transformed) training data
        x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=self.test_size)
        self.estimator.fit(x_train, y_train,
                           early_stopping_rounds=self.early_stopping_rounds,
                           eval_metric=self.eval_metric, eval_set=[(x_val, y_val)])
        return self

    def predict(self, X):
        return self.estimator.predict(X)


class XGBoostRegressorWithEarlyStop(XGBoostWithEarlyStop):
    def __init__(self, *args, **kwargs):
        self.estimator = XGBRegressor()
        super(XGBoostRegressorWithEarlyStop, self).__init__(*args, **kwargs)


class XGBoostClassifierWithEarlyStop(XGBoostWithEarlyStop):
    def __init__(self, *args, **kwargs):
        self.estimator = XGBClassifier()
        super(XGBoostClassifierWithEarlyStop, self).__init__(*args, **kwargs)
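Because the wrapper is the last step of a pipeline, the X it receives in fit() has already been scaled and PCA-transformed, so the eval set split off inside fit() goes through exactly the same transformations as the training folds. As a sketch (not part of the original answer), the question's pipeline could be rebuilt around the wrapper roughly like this, with the wrapper left at its defaults, X_train/y_train taken from the question's split, and no fit parameters passed to GridSearchCV:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV

# Sketch: the question's pipeline with the early-stopping wrapper as final step
pipe_es = Pipeline(steps=[("Scale", StandardScaler()),
                          ("PCA", PCA()),
                          ("XGB", XGBoostRegressorWithEarlyStop())])
grid_param_es = {'PCA__n_components': [5],
                 'XGB__n_estimators': [1000],
                 'XGB__max_depth': [3],
                 'XGB__reg_alpha': [0.1],
                 'XGB__reg_lambda': [0.1]}
grid_search_es = GridSearchCV(estimator=pipe_es,
                              param_grid=grid_param_es,
                              scoring="neg_mean_squared_error",
                              cv=5,
                              n_jobs=5,
                              verbose=3)
# No XGB__eval_set / XGB__early_stopping_rounds fit parameters are needed any more;
# the wrapper creates the eval set and applies early stopping internally.
grid_search_es.fit(X_train, y_train)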
Here is a test.
from sklearn.datasets import load_diabetes
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
x, y = load_diabetes(return_X_y=True)
print(x.shape, y.shape)
# (442, 10) (442,)
pipe = Pipeline([
    ('pca', PCA(5)),
    ('xgb', XGBoostRegressorWithEarlyStop())
])
param_grid = {
    'pca__n_components': [3, 5, 7],
    'xgb__n_estimators': [10, 20, 30, 50]
}
grid = GridSearchCV(pipe, param_grid, scoring='neg_mean_absolute_error')
grid.fit(x, y)
print(grid.best_params_)
If a feature request were submitted to the developers, the easiest extension would be to allow XGBRegressor to create the eval set internally when none is provided. That way, no extension of scikit-learn would be needed (I guess).
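For illustration only, such behaviour could look roughly like the following subclass, which falls back to an internally created eval set whenever the caller does not pass one (a hypothetical sketch, not an existing xgboost API; the class name is made up here):
# Hypothetical sketch of the proposed behaviour, layered on top of XGBRegressor
class XGBRegressorAutoEval(XGBRegressor):
    def fit(self, X, y, eval_set=None, **fit_params):
        if eval_set is None:
            # Create the eval set internally when none is provided
            x_tr, x_val, y_tr, y_val = train_test_split(X, y, test_size=0.1)
            return super().fit(x_tr, y_tr, eval_set=[(x_val, y_val)], **fit_params)
        return super().fit(X, y, eval_set=eval_set, **fit_params)

# It could then be used directly as the final pipeline step, e.g. ('XGB', XGBRegressorAutoEval())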