I'm still fairly new to machine learning, and all of my previous ML projects ran on the CPU. I've now built a random forest regressor and I'm using Optuna to tune the hyperparameters for 18 target variables (one model trained per target). However, the run takes a very long time to finish even though my dataset only has about 2,000 rows. I tried speeding up training with a GPU on Google Colab, but I found it did nothing for my model. Is there a way to use a GPU with my random forest? Here is my code:
import numpy as np
import optuna
from functools import partial
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RepeatedKFold, cross_val_score

# The last 18 columns of df_crime are the crime targets; the rest are predictors.
target_vars = df_crime.columns.tolist()[-18:]
predictor_vars = df_crime.columns.tolist()[:-18]

def otimize_RF(trial, x, y):
    # Note: scikit-learn >= 1.0 renames these criteria to
    # 'squared_error' / 'absolute_error' ('mse' / 'mae' were removed in 1.2).
    criterion = trial.suggest_categorical('criterion', ['mse', 'mae'])
    n_estimators = trial.suggest_int('n_estimators', 10, 1500)
    max_depth = trial.suggest_int('max_depth', 3, 20)
    # suggest_uniform is deprecated in Optuna 3.x in favour of suggest_float
    max_features = trial.suggest_uniform('max_features', 0.01, 1)
    model = RandomForestRegressor(
        criterion=criterion,
        n_estimators=n_estimators,
        max_depth=max_depth,
        max_features=max_features,
    )
    # 10-fold CV repeated 3 times -> 30 fits per Optuna trial
    cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, x, y, cv=cv,
                             scoring='neg_mean_squared_error')
    return -1 * np.mean(scores)

dict_ = dict()
for crime in target_vars:
    # predictor_df_stand holds the standardized predictors; rows where the
    # current target is missing are dropped.
    X = predictor_df_stand[df_crime[crime].notnull()]
    y = df_crime[crime][df_crime[crime].notnull()].values
    optimization_function = partial(otimize_RF, x=X, y=y)
    study = optuna.create_study(direction='minimize')
    study.optimize(optimization_function, n_trials=100)
    dict_2 = study.best_params
    dict_2['mse'] = study.best_value
    dict_[crime] = dict_2
print(dict_)
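
For reference, the only GPU route I've found so far is RAPIDS cuML, which appears to provide a GPU-based RandomForestRegressor with a scikit-learn-like interface. Below is a minimal sketch of what I think that would look like, assuming cuML is installed on a GPU runtime; I haven't verified parameter compatibility (for instance, the 'mse'/'mae' criterion strings from my code are apparently not accepted there), so this is a starting point rather than a drop-in replacement:

import numpy as np
from cuml.ensemble import RandomForestRegressor as cuRFR  # RAPIDS cuML, GPU-based

# cuML prefers float32 inputs; X and y are the arrays from the loop above
X32 = np.asarray(X, dtype=np.float32)
y32 = np.asarray(y, dtype=np.float32)

gpu_model = cuRFR(n_estimators=500, max_depth=10)  # illustrative values only
gpu_model.fit(X32, y32)          # training runs on the GPU
preds = gpu_model.predict(X32)

I also understand that passing n_jobs=-1 to both RandomForestRegressor and cross_val_score would at least parallelize the tree building and the CV folds across all CPU cores. Is that the intended way to speed this up, or is there a proper GPU option for random forests?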