I want to keep track of the indices of my categorical features inside an sklearn pipeline so I can pass them to CatBoostClassifier.
I start with a known set of categorical features before the pipeline's fit(). The pipeline itself changes the structure of the data and drops features in a feature-selection step.
How can I know in advance which categorical features will be dropped or added inside the pipeline? I need the updated list of indices at the time fit() is called. The problem is that my dataset may change after the transformations.
Here is a sample of my dataframe:
import numpy as np
import pandas as pd

data = pd.DataFrame({'pet': ['cat', 'dog', 'dog', 'fish', np.nan, 'dog', 'cat', 'fish'],
                     'children': [4., 6, 3, np.nan, 2, 3, 5, 4],
                     'salary': [90., 24, np.nan, 27, 32, 59, 36, 27],
                     'gender': ['male', 'male', 'male', 'male', 'male', 'male', 'male', 'male'],
                     'happy': [0, 1, 1, 0, 1, 1, 0, 0]})
categorical_features = ['pet', 'gender']
numerical_features = ['children', 'salary']
target = 'happy'
print(data)
pet children salary gender happy
0 cat 4.0 90.0 male 0
1 dog 6.0 24.0 male 1
2 dog 3.0 NaN male 1
3 fish NaN 27.0 male 0
4 NaN 2.0 32.0 male 1
5 dog 3.0 59.0 male 1
6 cat 5.0 36.0 male 0
7 fish 4.0 27.0 male 0
Now I want to run a pipeline with several steps. One of these steps is VarianceThreshold(), which in my case will cause 'gender' to be dropped from the dataframe.
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.feature_selection import VarianceThreshold
from catboost import CatBoostClassifier

X, y = data.drop(columns=[target]), data[target]

pipeline = Pipeline(steps=[
    (
        'preprocessing',
        ColumnTransformer(transformers=[
            (
                'categoricals',
                Pipeline(steps=[
                    ('fillna_with_frequent', SimpleImputer(strategy='most_frequent')),
                    ('ordinal_encoder', OrdinalEncoder())
                ]),
                categorical_features
            ),
            (
                'numericals',
                Pipeline(steps=[
                    ('fillna_with_mean', SimpleImputer(strategy='mean'))
                ]),
                numerical_features
            )
        ])
    ),
    (
        'feature_selection',
        VarianceThreshold()
    ),
    (
        'estimator',
        CatBoostClassifier()
    )
])
Now, when I try to build the list of categorical feature indices for CatBoost, I cannot tell that 'gender' is no longer part of my dataframe.
cat_features = [data.columns.get_loc(col) for col in categorical_features]
print(cat_features)
[0, 3]
The indices 0 and 3 are wrong, because after VarianceThreshold feature 3 (gender) will have been dropped.
pipeline.fit(X, y, estimator__cat_features=cat_features)
---------------------------------------------------------------------------
CatBoostError Traceback (most recent call last)
<ipython-input-230-527766a70b4d> in <module>
----> 1 pipeline.fit(X, y, estimator__cat_features=cat_features)
~/anaconda3/lib/python3.7/site-packages/sklearn/pipeline.py in fit(self, X, y, **fit_params)
265 Xt, fit_params = self._fit(X, y, **fit_params)
266 if self._final_estimator is not None:
--> 267 self._final_estimator.fit(Xt, y, **fit_params)
268 return self
269
~/anaconda3/lib/python3.7/site-packages/catboost/core.py in fit(self, X, y, cat_features, sample_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot, column_description, verbose_eval, metric_period, silent, early_stopping_rounds, save_snapshot, snapshot_file, snapshot_interval, init_model)
2801 self._fit(X, y, cat_features, None, sample_weight, None, None, None, None, baseline, use_best_model,
2802 eval_set, verbose, logging_level, plot, column_description, verbose_eval, metric_period,
-> 2803 silent, early_stopping_rounds, save_snapshot, snapshot_file, snapshot_interval, init_model)
2804 return self
2805
~/anaconda3/lib/python3.7/site-packages/catboost/core.py in _fit(self, X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot, column_description, verbose_eval, metric_period, silent, early_stopping_rounds, save_snapshot, snapshot_file, snapshot_interval, init_model)
1231 _check_train_params(params)
1232
-> 1233 train_pool = _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description)
1234 if train_pool.is_empty_:
1235 raise CatBoostError("X is empty.")
~/anaconda3/lib/python3.7/site-packages/catboost/core.py in _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description)
689 raise CatBoostError("y has not initialized in fit(): X is not catboost.Pool object, y must be not None in fit().")
690 train_pool = Pool(X, y, cat_features=cat_features, pairs=pairs, weight=sample_weight, group_id=group_id,
--> 691 group_weight=group_weight, subgroup_id=subgroup_id, pairs_weight=pairs_weight, baseline=baseline)
692 return train_pool
693
~/anaconda3/lib/python3.7/site-packages/catboost/core.py in __init__(self, data, label, cat_features, column_description, pairs, delimiter, has_header, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names, thread_count)
318 )
319
--> 320 self._init(data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names)
321 super(Pool, self).__init__()
322
~/anaconda3/lib/python3.7/site-packages/catboost/core.py in _init(self, data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names)
638 cat_features = _get_cat_features_indices(cat_features, feature_names)
639 self._check_cf_type(cat_features)
--> 640 self._check_cf_value(cat_features, features_count)
641 if pairs is not None:
642 self._check_pairs_type(pairs)
~/anaconda3/lib/python3.7/site-packages/catboost/core.py in _check_cf_value(self, cat_features, features_count)
360 raise CatBoostError("Invalid cat_features[{}] = {} value type={}: must be int().".format(indx, feature, type(feature)))
361 if feature >= features_count:
--> 362 raise CatBoostError("Invalid cat_features[{}] = {} value: must be < {}.".format(indx, feature, features_count))
363
364 def _check_pairs_type(self, pairs):
CatBoostError: Invalid cat_features[1] = 3 value: must be < 3.
I expect cat_features to be [0], but the actual output is [0, 3].
Answer 0 (score: 0)
You could try passing cat_features to the CatBoostClassifier constructor instead of to fit().
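A minimal sketch of that suggestion; the [0] index is an assumption based on 'gender' being dropped by VarianceThreshold in the question's pipeline, and the indices must refer to the matrix the estimator actually receives:

# Sketch: cat_features supplied to the constructor instead of fit().
# [0] assumes 'pet' is the only categorical column that survives feature selection.
model = CatBoostClassifier(cat_features=[0], verbose=0)

Used as the 'estimator' step, this removes the need for the estimator__cat_features fit parameter, but it does not by itself solve the index-tracking problem.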
Answer 1 (score: 0)
The problem is not with catboost but with how ColumnTransformer works: ColumnTransformer concatenates the outputs of its transformers in the order in which they are listed, so the column order (and therefore the column indices) of the transformed data no longer matches the original dataframe.
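As a quick check (a sketch using the question's own preprocessing step), the categorical sub-pipeline is listed first, so its columns come first in the transformed matrix:

# Sketch: inspect the output of the preprocessing step on its own.
# Column order is [pet, gender, children, salary] because the
# 'categoricals' transformer is listed before 'numericals'.
preprocessing = pipeline.named_steps['preprocessing']
Xt = preprocessing.fit_transform(X)
print(Xt.shape)  # (8, 4)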
Answer 2 (score: 0)
The underlying problem here is that transformers do not follow a predefined output schema, which means a single column can be turned into three (for example a categorical column after encoding).
So you need to keep track of how many features you generate yourself.
My solution is to organize the pipeline in such a way that I know in advance which indices correspond to the categorical columns in the last step (the CatBoost estimator). In general, I isolate and wrap all category-related operations in a single transformer (you can nest sub-transformations inside it) and keep track of how many columns it will output. Crucially, I make this transformer the first transformer in the pipeline. That guarantees that the first indices of X are categorical, and I can pass that list of indices to the final catboost cat_features parameter.
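A minimal sketch of that layout. Two adaptations of the question's pipeline are assumed here: feature selection is moved into the numerical sub-pipeline so the categorical block is never dropped, and the ordinal-encoded block is cast back to int because CatBoost only accepts integer or string values for categorical features:

from sklearn.preprocessing import FunctionTransformer

n_cat = len(categorical_features)

def cast_cat_block_to_int(a):
    # The ColumnTransformer output is a float array; cast the leading
    # categorical block back to int so CatBoost accepts it.
    return pd.DataFrame(a).astype({i: int for i in range(n_cat)})

pipeline2 = Pipeline(steps=[
    ('preprocessing', ColumnTransformer(transformers=[
        ('categoricals', Pipeline(steps=[
            ('fillna_with_frequent', SimpleImputer(strategy='most_frequent')),
            ('ordinal_encoder', OrdinalEncoder())
        ]), categorical_features),
        ('numericals', Pipeline(steps=[
            ('fillna_with_mean', SimpleImputer(strategy='mean')),
            ('numeric_feature_selection', VarianceThreshold())
        ]), numerical_features)
    ])),
    ('cast_categoricals', FunctionTransformer(cast_cat_block_to_int)),
    ('estimator', CatBoostClassifier(verbose=0))
])

# The categorical block comes first, so its indices are simply 0 .. n_cat - 1.
cat_features = list(range(n_cat))
pipeline2.fit(X, y, estimator__cat_features=cat_features)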
Answer 3 (score: 0)
The reason you get the error is that your current cat_features are derived from the non-transformed dataset. To fix this, you have to derive cat_features after the dataset has been transformed. This is how I keep track of mine: I fit the transformer to the dataset, retrieve the transformed dataset, convert it into a pandas dataframe, and then retrieve the categorical indices.
from sklearn.preprocessing import MinMaxScaler

# numerical_idx is assumed to be the list of numerical columns/positions in X
column_transform = ColumnTransformer([('n', MinMaxScaler(), numerical_idx)], remainder='passthrough')
scaled_X = column_transform.fit_transform(X)
new_df = pd.DataFrame(scaled_X)
new_df = new_df.infer_objects()  # converts each column to its most accurate datatype
cat_features_new = [new_df.columns.get_loc(col) for col in new_df.select_dtypes(include=['object', 'bool']).columns]
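A hypothetical continuation of that approach, fitting CatBoost on the transformed frame with the recovered indices (it assumes missing categorical values have already been handled, since CatBoost only accepts integer or string categorical values):

# Sketch: train on the transformed dataframe using the recovered categorical indices.
model = CatBoostClassifier(verbose=0)
model.fit(new_df, y, cat_features=cat_features_new)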