Sklearn计算多标签问题的accuracy_score
(文档)是子集准确度:只有当一个样本的每个标签都预测正确时,该样本才计为预测正确。而 Keras 的默认 'accuracy'
是二进制(逐元素)准确度:
def accuracy(y_true, y_pred):
    """Keras' default binary accuracy for sigmoid outputs.

    Rounds each predicted probability to 0/1 and compares it to the true
    label element-wise, then averages over the last axis — i.e. it scores
    each label independently, NOT each full label vector.
    """
    # K.equal is element-wise; the mean over axis=-1 averages per-label hits.
    return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
所以 Keras 给出的准确率值偏高。我想 K.equal
是逐元素比较相等性,对吧?那要如何按整个向量来比较相等性呢?
答案 0(得分:0)
我认为你需要为此写一个callback。这是一个例子。
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import Callback
from sklearn.metrics.classification import accuracy_score
# Toy multi-label problem: 20 samples, 10 features, 3 independent binary labels.
n_labels = 3
n_samples = 20
n_feats = 10

x = np.random.normal(size=(n_samples, n_feats))
y = np.random.randint(2, size=(n_samples, n_labels))

# One sigmoid unit per label so each label is predicted independently;
# binary cross-entropy is the matching loss for multi-label targets.
model = Sequential()
model.add(Dense(50, input_shape=(n_feats, )))
model.add(Dense(n_labels, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
class SkLearnHistory(Callback):
    """Keras callback that records sklearn's subset accuracy after each epoch.

    sklearn's ``accuracy_score`` on multi-label targets counts a sample as
    correct only when its WHOLE predicted label vector matches, unlike
    Keras' per-label binary accuracy.

    NOTE(review): the file imports ``accuracy_score`` from
    ``sklearn.metrics.classification``, a private module removed in
    scikit-learn 0.24 — the public path is ``sklearn.metrics``; verify.

    Args:
        x: input samples to score on at the end of every epoch.
        y: ground-truth label matrix matching ``x``.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    # ``logs`` defaults changed from the mutable ``{}`` to ``None``:
    # a shared dict default is a Python anti-pattern, and Keras always
    # passes ``logs`` explicitly anyway.
    def on_train_begin(self, logs=None):
        # Reset the score history at the start of each `fit` call.
        self.accuracy = []

    def on_epoch_end(self, epoch, logs=None):
        # Threshold the sigmoid outputs at 0.5, then let sklearn compute
        # the exact-match (subset) accuracy over the whole label vectors.
        y_pred = np.round(self.model.predict(self.x))
        score = accuracy_score(self.y, y_pred)
        self.accuracy.append(score)
# Train while recording both metrics, then compare them side by side.
history_cb = SkLearnHistory(x, y)
fit_result = model.fit(x, y, epochs=5, verbose=0, callbacks=[history_cb])

# Keras' built-in per-label binary accuracy, one value per epoch:
fit_result.history['acc']
# [0.46666663885116577,
#  0.48333334922790527,
#  0.51666665077209473,
#  0.58333337306976318,
#  0.60000002384185791]

# sklearn's subset (exact-match) accuracy from the callback — much lower:
history_cb.accuracy
# [0.14999999999999999, 0.20000000000000001, 0.25, 0.25, 0.25]