How to define a parameter grid for RandomizedSearchCV

Time: 2019-02-17 17:21:50

Tags: scikit-learn neural-network

I am trying to figure out how to define a parameter grid with two hidden layers for an MLPRegressor, to feed into scikit-learn's RandomizedSearchCV.

Below is what I have been trying. So, how do I randomly assign hidden_layer_sizes for RandomizedSearchCV?

import numpy as np
import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import RandomizedSearchCV
boston = load_boston()
X = boston.data
y = boston.target


params = {'activation':['logistic', 'relu'],
          'learning_rate':['adaptive'],
          'alpha':np.logspace(0.0001, 100, 10),
          'max_iter':[1000],
          'hidden_layer_sizes':[(10,10), (30,10), (50,20), (60,30)]}


reg = MLPRegressor()
random_search = RandomizedSearchCV(estimator=reg,
                                   param_distributions=params,
                                   n_iter=10,
                                   scoring='neg_mean_squared_error',
                                   cv=3,
                                   n_jobs=-3,
                                   pre_dispatch='2*n_jobs',
                                   return_train_score=True)
random_search.fit(X,y)

df = pd.DataFrame(random_search.cv_results_)
df['train_RMSE'] = np.sqrt(-df['mean_train_score'])
df['test_RMSE'] = np.sqrt(-df['mean_test_score'])
print(random_search.best_params_)

PS: If anyone also has any opinions on the parameters I chose, feel free to comment. The parameters will be used for a regression problem with at most 7 inputs.

Any ideas?

1 Answer:

Answer 0 (score: 0):

Yes, you did it right. In addition, you can raise the verbose level to see the hyperparameters used in each cross-validation run, e.g. [CV] activation=tanh, alpha=1e+100, hidden_layer_sizes=(30, 10), score=-4.180054117738231, total= 2.7s.

I chose a GridSearchCV instead of a RandomizedSearchCV to find the best parameter set; on my machine it took five minutes. (A RandomizedSearchCV variant that samples hidden_layer_sizes randomly is sketched after the code and output below.)

import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import explained_variance_score

X, y = load_boston(return_X_y=True)

# Split data for final evaluation:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=42)

# Define base regressor:
base_reg = MLPRegressor(learning_rate='adaptive', max_iter=5000, random_state=42)

# Define search space:
params = {
    'activation': ['logistic', 'relu', 'tanh'],  # <-- added 'tanh' as third non-linear activation function
    'alpha': np.logspace(0.0001, 100, 10),
    'hidden_layer_sizes': [
        (10, 10), (20, 10), (30, 10),
        (40, 10), (90, 10), (90, 30, 10)  # <-- added more neurons or layers
    ]
}

# Find best hyper params and then refit on all training data:
reg = GridSearchCV(estimator=base_reg, param_grid=params,
                   n_jobs=8, cv=3, refit=True, verbose=5)  # <-- verbose=5
reg.fit(X_train, y_train)

print(reg.best_estimator_)
# MLPRegressor(activation='logistic', alpha=1.0002302850208247,
#              batch_size='auto', beta_1=0.9, beta_2=0.999, early_stopping=False,
#              epsilon=1e-08, hidden_layer_sizes=(30, 10),
#              learning_rate='adaptive', learning_rate_init=0.001, max_iter=5000,
#              momentum=0.9, n_iter_no_change=10, nesterovs_momentum=True,
#              power_t=0.5, random_state=42, shuffle=True, solver='adam',
#              tol=0.0001, validation_fraction=0.1, verbose=False,
#              warm_start=False)

print(reg.best_params_)
# {'activation': 'logistic', 'alpha': 1.0002302850208247, 'hidden_layer_sizes': (30, 10)}

# Evaluate on the training data:
err = explained_variance_score(y_train, reg.predict(X_train))
print(err)  # 0.8936815412058757

# Evaluate on unseen test data:
err = explained_variance_score(y_test, reg.predict(X_test))
print(err)  # 0.801353064635174