使用张量流后端进行张量数学运算

时间:2017-08-24 19:47:30

标签: python tensorflow keras

我正在尝试使用keras训练我的LSTM时添加自定义指标。请参阅以下代码:

from keras.models import Sequential
from keras.layers import Dense, LSTM, Masking, Dropout
from keras.optimizers import SGD, Adam, RMSprop
import keras.backend as K
import numpy as np

_Xtrain = np.random.rand(1000,21,47)
_ytrain = np.random.randint(2, size=1000)

_Xtest = np.random.rand(200,21,47)
_ytest = np.random.randint(1, size=200)

def t1(y_true, y_pred):
    """Metric: number of negative (label == 0) samples in the batch.

    BUG FIX: Keras invokes metrics positionally as metric(y_true, y_pred).
    The original signature was (y_pred, y_true), so the variable named
    `y_true` actually received the sigmoid predictions — which are never
    exactly 0 or 1 — making the count always equal the batch size (the
    "both metrics print 5.0000" symptom). Swapping the parameter names
    binds the real labels to `y_true`.
    """
    return K.tf.count_nonzero((1 - y_true))

def t2(y_true, y_pred):
    """Metric: number of positive (label == 1) samples in the batch.

    BUG FIX: same argument-order defect as t1 — Keras passes
    (y_true, y_pred), so the parameters must be declared in that order
    for `y_true` to hold the actual labels rather than the predictions.
    """
    return K.tf.count_nonzero(y_true)

def build_model():
    """Assemble and compile the stacked-LSTM binary classifier.

    Returns a compiled Sequential model that maps (21, n_features)
    sequences to a single sigmoid probability, reporting the custom
    t1/t2 metrics during training.
    """
    net = Sequential()
    # Masking skips timesteps whose features are all equal to mask_value.
    net.add(Masking(mask_value=0, input_shape=(21, _Xtrain[0].shape[1])))
    net.add(LSTM(32, return_sequences=True))
    net.add(LSTM(64, return_sequences=False))
    net.add(Dense(1, activation='sigmoid'))
    optimizer = RMSprop(lr=.001, decay=.001)
    net.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=[t1, t2])
    return net

# Build the network and train for a single epoch, validating on the
# held-out split every epoch.
model = build_model()
hist = model.fit(
    _Xtrain,
    _ytrain,
    epochs=1,
    batch_size=5,
    validation_data=(_Xtest, _ytest),
    shuffle=True,
)

以上代码的输出如下:

训练1000个样本,验证200个样本 Epoch 1/1 1000/1000 [==============================] - 5s - loss: 0.6958 - t1: 5.0000 - t2: 5.0000 - val_loss: 0.6975 - val_t1: 5.0000 - val_t2: 5.0000

所以似乎方法t1和t2都产生完全相同的输出,这让我感到困惑。可能出现什么问题,怎样才能得到y_true的互补张量?

背景故事:我正在尝试为我的模型编写自定义指标(F1得分)。 Keras似乎没有那些随时可用的。如果有人知道更好的方法,请帮助我指出正确的方向。

1 个答案:

答案 0 :(得分:1)

处理此问题的一种简单方法是使用回调。遵循此issue的逻辑,您可以指定使用scikit-learn计算任何指标的指标回调。例如,如果要计算f1,则可以执行以下操作:

from keras.models import Sequential
from keras.layers import Dense, LSTM, Masking, Dropout
from keras.optimizers import SGD, Adam, RMSprop
import keras.backend as K
from keras.callbacks import Callback
import numpy as np

from sklearn.metrics import f1_score

_Xtrain = np.random.rand(1000,21,47)
_ytrain = np.random.randint(2, size=1000)

_Xtest = np.random.rand(200,21,47)
_ytest = np.random.randint(2, size=200)

class MetricsCallback(Callback):
    """Keras callback that computes the F1 score on the validation set at
    the end of every epoch, via scikit-learn (Keras ships no F1 metric).

    Scores accumulate in ``self.f1_scores``, one entry per epoch.
    """

    def __init__(self, train_data, validation_data):
        super().__init__()
        self.validation_data = validation_data
        # Kept for interface compatibility; not used by this callback yet.
        self.train_data = train_data
        self.f1_scores = []
        # Probability threshold above which a prediction counts as class 1.
        self.cutoff = .5

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: the original used the mutable default `logs={}`, which is
        # shared across calls; `logs=None` is the safe (and Keras-standard) form.
        X_val = self.validation_data[0]
        y_val = self.validation_data[1]

        # predict() returns shape (n, 1) for a single sigmoid unit; ravel to
        # 1-D so f1_score compares it against y_val without a shape warning.
        preds = self.model.predict(X_val).ravel()

        f1 = f1_score(y_val, (preds > self.cutoff).astype(int))
        self.f1_scores.append(f1)


def build_model():
    """Assemble and compile the stacked-LSTM binary classifier.

    Returns a compiled Sequential model ending in a single sigmoid unit,
    trained with binary cross-entropy and tracking plain accuracy (the
    F1 score is handled separately by MetricsCallback).
    """
    net = Sequential()
    # Masking skips timesteps whose features are all equal to mask_value.
    net.add(Masking(mask_value=0, input_shape=(21, _Xtrain[0].shape[1])))
    net.add(LSTM(32, return_sequences=True))
    net.add(LSTM(64, return_sequences=False))
    net.add(Dense(1, activation='sigmoid'))
    optimizer = RMSprop(lr=.001, decay=.001)
    net.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['acc'])
    return net

# Train for two epochs; the callback records the validation F1 score
# after each one.
metrics_cb = MetricsCallback((_Xtrain, _ytrain), (_Xtest, _ytest))
model = build_model()
hist = model.fit(
    _Xtrain,
    _ytrain,
    epochs=2,
    batch_size=5,
    validation_data=(_Xtest, _ytest),
    shuffle=True,
    callbacks=[metrics_cb],
)