我想基于auc在Keras中保存最佳模型,并且我有以下代码:
def MyMetric(yTrue, yPred):
    # NOTE(review): tf.metrics.auc returns a (value_tensor, update_op) tuple,
    # not a single tensor, so Keras cannot use this return value directly as a
    # metric — this is the cause of the warning described below.
    auc = tf.metrics.auc(yTrue, yPred)
    return auc
# 'MyMetric' was never registered via model.compile(metrics=...), so the
# monitored quantity is absent from the training logs and ModelCheckpoint
# skips saving (hence the RuntimeWarning quoted below).
best_model = [ModelCheckpoint(filepath='best_model.h5', monitor='MyMetric', save_best_only=True)]
train_history = model.fit([train_x],
                          [train_y], batch_size=batch_size, epochs=epochs, validation_split=0.05,
                          callbacks=best_model, verbose = 2)
然而，我的模型运行不正常，并且收到了以下警告：
RuntimeWarning: Can save best model only with MyMetric available, skipping.
'skipping.' % (self.monitor), RuntimeWarning)
如果能告诉我这是正确的方法,那是很好的,否则我该怎么办?
答案 0（得分：2）
您必须将要监视的指标传递给model.compile。
https://keras.io/metrics/#custom-metrics
# Register the custom metric with the model so Keras computes and logs it
# every epoch, making it available to callbacks.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[MyMetric])
此外,tf.metrics.auc返回一个包含张量和update_op的元组。 Keras希望自定义指标功能仅返回张量。
def MyMetric(yTrue, yPred):
    """Keras-compatible AUC metric.

    tf.metrics.auc yields a (value_tensor, update_op) pair, while Keras
    expects a custom metric to return a single tensor — so only the value
    tensor is handed back.
    """
    import tensorflow as tf
    value, _update_op = tf.metrics.auc(yTrue, yPred)
    return value
在此步骤之后,您将获得有关未初始化值的错误。请查看以下主题:
https://github.com/keras-team/keras/issues/3230
How to compute Receiving Operating Characteristic (ROC) and AUC in keras?
答案 1（得分：2）
您可以通过以下方式定义一个自定义指标,该指标调用tensorflow来计算AUROC:
def as_keras_metric(method):
    """Adapt a tf.metrics-style function into a Keras metric.

    tf.metrics functions return a (value, update_op) pair, whereas Keras
    expects a metric to return a single tensor.  The returned wrapper
    initializes the metric's local variables, ties the update op to the
    value via a control dependency, and returns the value tensor alone.

    Args:
        method: a callable following the tf.metrics contract, i.e. returning
            (value_tensor, update_op).

    Returns:
        A callable with the same signature that returns only the value tensor.
    """
    import functools
    from keras import backend as K
    import tensorflow as tf

    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        """ Wrapper for turning tensorflow metrics into keras metrics """
        # Forward all positional arguments (typically y_true, y_pred) so the
        # wrapper works for metrics of any arity — the original signature
        # (self, args) hard-coded exactly two positionals under misleading names.
        value, update_op = method(*args, **kwargs)
        # tf.metrics.* create local variables that must be initialized before
        # the first evaluation.
        K.get_session().run(tf.local_variables_initializer())
        # Ensure the streaming update runs whenever the value is evaluated.
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value
    return wrapper
@as_keras_metric
def AUROC(y_true, y_pred, curve='ROC'):
    """Streaming area under the ROC curve via TensorFlow's AUC metric."""
    auc_pair = tf.metrics.auc(y_true, y_pred, curve=curve)
    return auc_pair
然后,您需要使用以下指标来编译模型:
# Compile with the wrapped metric; Keras logs it under its function name
# ('AUROC', and 'val_AUROC' for the validation split).
model.compile(loss=train_loss, optimizer='adam', metrics=['accuracy',AUROC])
最后:通过以下方式检查模型:
# Checkpoint on the validation-split AUROC; the 'val_' prefix selects the
# value computed on the validation data.
model_checkpoint = keras.callbacks.ModelCheckpoint(path_to_save_model, monitor='val_AUROC',
                                                   verbose=0, save_best_only=True,
                                                   save_weights_only=False, mode='auto', period=1)
但是请注意：我相信验证集 AUROC 是按批次计算再取平均的，因此可能给检查点带来一些误差。一个好的做法是：在模型训练结束后，用 sklearn.metrics 计算已训练模型预测的 AUROC，并验证它与 TensorFlow 在训练和检查点时报告的数值是否一致。
答案 2（得分：0）
假设您使用 TensorBoard，那么所有纪元的全部度量计算历史都已保存下来（以 tfevents 文件的形式）；此时 tf.keras.callbacks.Callback 正是您想要的。
我将 tf.keras.callbacks.ModelCheckpoint 与 save_freq: 'epoch' 一起使用，以便将每个纪元的权重保存为 h5 文件或 tf 文件。
为避免模型文件填满硬盘，可以编写一个新的 Callback——或扩展 ModelCheckpoint 类——并实现 on_epoch_end：
def on_epoch_end(self, epoch, logs=None):
    """Delete saved model files that are no longer among the best ones.

    After the parent callback has run for this epoch, parse the TensorBoard
    event log for the monitored metric, determine the top `self._keep_best`
    entries, and remove every other saved model file from `self._model_dir`.
    """
    super(DropWorseModels, self).on_epoch_end(epoch, logs)
    # Let the first `_keep_best` epochs accumulate before deleting anything.
    if epoch < self._keep_best:
        return
    # All saved model files currently on disk, matched by file extension.
    model_files = frozenset(
        filter(lambda filename: path.splitext(filename)[1] == SAVE_FORMAT_WITH_SEP,
               listdir(self._model_dir)))
    if len(model_files) < self._keep_best:
        return
    # Best entries first: (index, metric_value) pairs sorted by value
    # descending — presumably one event per epoch (TODO confirm).
    tf_events_logs = tuple(islice(log_parser(tfevents=path.join(self._log_dir,
                                                                self._split),
                                             tag=self.monitor),
                                  0,
                                  self._keep_best))
    # Filenames to keep, built from the indices of the best entries.
    keep_models = frozenset(map(self._filename.format,
                                map(itemgetter(0), tf_events_logs)))
    if len(keep_models) < self._keep_best:
        return
    # Remove every saved model not among the keepers; it_consumes drains
    # the lazy map so the remove() calls actually execute.
    it_consumes(map(lambda filename: remove(path.join(self._model_dir, filename)),
                    model_files - keep_models))
附录(导入和实用功能实现):
from itertools import islice
from operator import itemgetter
from os import path, listdir, remove
from collections import deque
import tensorflow as tf
from tensorflow.core.util import event_pb2
def log_parser(tfevents, tag):
    """Collect the values logged under `tag` from a tfevents file.

    Returns a tuple of (index, value) pairs sorted by value, descending,
    i.e. best-scoring entries first.
    """
    collected = []
    for serialized in tf.data.TFRecordDataset(tfevents):
        parsed = event_pb2.Event.FromString(tf.get_static_value(serialized))
        if not parsed.HasField('summary'):
            continue
        first = parsed.summary.value.pop(0)
        if first.tag == tag:
            collected.append(first.simple_value)
    ranked = sorted(enumerate(collected), key=itemgetter(1), reverse=True)
    return tuple(ranked)
def it_consumes(it, n=None):
    """Consume an iterator entirely, or advance it by `n` items.

    This is the itertools 'consume' recipe; it was previously a lambda
    assigned to a name, which PEP 8 advises against.

    Args:
        it: any iterator.
        n: number of items to skip; None (default) drains the iterator.

    Returns:
        An empty deque when draining, otherwise the (discarded) next() result
        — callers treat the return value as meaningless.
    """
    if n is None:
        # maxlen=0 drains the iterator at C speed without storing items.
        return deque(it, maxlen=0)
    # islice(it, n, n) advances the iterator exactly n steps (or fewer if
    # exhausted) while yielding nothing.
    return next(islice(it, n, n), None)


# File extension used for saved models, e.g. '.h5' on POSIX/Windows.
SAVE_FORMAT = 'h5'
SAVE_FORMAT_WITH_SEP = '{}{}'.format(path.extsep, SAVE_FORMAT)
为完整起见,该课程的其余部分:
class DropWorseModels(tf.keras.callbacks.Callback):
    """Callback that emulates `save_best_only` for arbitrary metrics and
    thresholds between metrics, keeping only the best saved models."""

    def __init__(self, model_dir, monitor, log_dir, keep_best=2, split='validation'):
        """
        Args:
            model_dir: directory to save weights. Files will have format
                       '{model_dir}/{epoch:04d}.h5'.
            monitor: quantity to monitor.
            log_dir: the path of the directory where to save the log files to
                     be parsed by TensorBoard.
            keep_best: number of models to keep, sorted by monitor value.
            split: dataset split to analyse, e.g., one of 'train', 'test',
                   'validation'.
        """
        super(DropWorseModels, self).__init__()
        self.monitor = monitor
        self._keep_best = keep_best
        self._log_dir = log_dir
        self._model_dir = model_dir
        self._split = split
        # Saved-model filename pattern, e.g. 'model-0007.h5'.
        self._filename = 'model-{:04d}' + SAVE_FORMAT_WITH_SEP
这还有一个额外的优势,就是能够在一个Callback中保存和删除多个模型文件。您可以轻松地通过不同的阈值支持进行扩展,例如,将所有具有AUC的模型文件保持在阈值或将TP,FP,TN,FN保持在阈值之内。