I have an MLPRegressor that works very well with my dataset. Here is a stripped-down version of my code, with some of the unnecessary bits removed:
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn import preprocessing
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.tree import export_graphviz
from datetime import datetime
def str_to_num(arr):
    le = preprocessing.LabelEncoder()
    new_arr = le.fit_transform(arr)
    return new_arr

def compare_values(arr1, arr2):
    thediff = 0
    thediffs = []
    for thing1, thing2 in zip(arr1, arr2):
        thediff = abs(thing1 - thing2)
        thediffs.append(thediff)
    return thediffs

def minmaxscale(data):
    scaler = MinMaxScaler()
    df_scaled = pd.DataFrame(scaler.fit_transform(data), columns=data.columns)
    return df_scaled
data = pd.read_csv('reg.csv')
label = data['TOTAL']
data = data.drop('TOTAL', axis=1)
data = minmaxscale(data)
mlp = MLPRegressor(
    activation = 'tanh',
    alpha = 0.005,
    learning_rate = 'invscaling',
    learning_rate_init = 0.01,
    max_iter = 200,
    momentum = 0.9,
    solver = 'lbfgs',
    warm_start = True
)
X_train, X_test, y_train, y_test = train_test_split(data, label, test_size = 0.2)
mlp.fit(X_train, y_train)
preds = mlp.predict(X_test)
score = compare_values(y_test, preds)
print("Score: ", np.average(score))
It works great! It produces: Score: 7.246851606714535
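(As a side note, compare_values is just computing the mean absolute error by hand; a minimal equivalent sketch, assuming the same y_test and preds from above, using sklearn's built-in metric:)

from sklearn.metrics import mean_absolute_error

# Same value as np.average(compare_values(y_test, preds))
print("Score: ", mean_absolute_error(y_test, preds))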
However, I would like to see the feature importances for this model. I understand that this isn't always meaningful for a neural network, but there is a business reason for it, so it's necessary. I found LIME through the LIME Paper and I want to use it. Since this is regression, I tried to follow the example here.
So I added the following lines:
import lime
import lime.lime_tabular

categorical_features = np.argwhere(np.array([len(set(data[:,x])) for x in range(data.shape[1])]) <= 10).flatten()
explainer = lime.lime_tabular.LimeTabularExplainer(
    X_train,
    feature_names=X_train.columns,
    class_names=['TOTAL'],
    verbose=True,
    categorical_features = categorical_features,
    mode='regression')
But now I get this error:
Traceback (most recent call last):
  File "c:\Users\jerry\Desktop\mlp2.py", line 65, in <module>
    categorical_features = np.argwhere(np.array([len(set(data[:,x])) for x in range(data.shape[1])]) <= 10).flatten()
  File "c:\Users\J39304\Desktop\mlp2.py", line 65, in <listcomp>
    categorical_features = np.argwhere(np.array([len(set(data[:,x])) for x in range(data.shape[1])]) <= 10).flatten()
  File "C:\Python35-32\lib\site-packages\pandas\core\frame.py", line 2927, in __getitem__
    indexer = self.columns.get_loc(key)
  File "C:\Python35-32\lib\site-packages\pandas\core\indexes\base.py", line 2657, in get_loc
    return self._engine.get_loc(key)
  File "pandas\_libs\index.pyx", line 108, in pandas._libs.index.IndexEngine.get_loc
  File "pandas\_libs\index.pyx", line 110, in pandas._libs.index.IndexEngine.get_loc
TypeError: '(slice(None, None, None), 0)' is an invalid key
Why am I getting this error, and what should I do about it? I don't know how to integrate LIME properly.
I see others have had this issue, but I don't know how to fix it.
Answer 0 (score: 0)
I needed to convert everything to NumPy arrays first, since a pandas DataFrame can't be indexed NumPy-style with data[:, x], which is what raised the TypeError:
# Save the column names first, since a NumPy array has no .columns attribute
class_names = X_train.columns
X_train = X_train.to_numpy()
X_test = X_test.to_numpy()
y_train = y_train.to_numpy()
y_test = y_test.to_numpy()
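(If you still want the categorical_features argument from the original attempt, it can now be computed on the NumPy array; a minimal sketch, assuming the same rule of at most 10 distinct values per categorical column, which you could then pass as categorical_features=categorical_features to the explainer below:)

# Count distinct values per column of the NumPy array instead of the DataFrame
categorical_features = np.argwhere(
    np.array([len(set(X_train[:, x])) for x in range(X_train.shape[1])]) <= 10
).flatten()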
Then feed those into the explainer:
explainer = lime.lime_tabular.LimeTabularExplainer(
    X_train,
    feature_names=class_names,
    class_names=['TOTAL'],
    verbose=True,
    mode='regression')
exp = explainer.explain_instance(X_test[5], mlp.predict)
exp = exp.as_list()
print(exp)
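(If you want a quick visual instead of the raw list, the explanation can also be plotted; a minimal sketch, assuming matplotlib is installed and calling the methods on the Explanation object before it is overwritten by as_list():)

import matplotlib.pyplot as plt

explanation = explainer.explain_instance(X_test[5], mlp.predict)
print(explanation.as_list())          # same list of (feature, weight) pairs as above
fig = explanation.as_pyplot_figure()  # bar chart of the local feature weights
plt.show()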