I've been looking at the loss on my training and validation datasets, and even when they are the very same dataset I still see the validation loss come out smaller than the training loss. I'm trying to understand why that is.
I'm training a TensorFlow model to predict some time-series data. The model creation and preprocessing are as follows:

window_size = 40
batch_size = 32
forecast_period = 6
model_name = "LSTM"
tf.keras.backend.clear_session()
_seed = 42
tf.random.set_seed(_seed)
def _sub_to_batch(sub):
    return sub.batch(window_size, drop_remainder=True)

def _return_input_output(tensor):
    _input = tensor[:, :-forecast_period, :]   # drop the last forecast_period steps
    _output = tensor[:, forecast_period:, :]   # drop the first forecast_period steps
    return _input, _output

def _reshape_tensor(tensor):
    tensor = tf.expand_dims(tensor, axis=-1)   # (time, series) -> (time, series, 1)
    tensor = tf.transpose(tensor, [1, 0, 2])   # (time, series, 1) -> (series, time, 1)
    return tensor
# total elements after unbatch(): 3813
train_ts_dataset = tf.data.Dataset.from_tensor_slices(train_ts)\
    .window(window_size, shift=1)\
    .flat_map(_sub_to_batch)\
    .map(_reshape_tensor)\
    .map(_return_input_output)
    # .unbatch().shuffle(buffer_size=500, seed=_seed).batch(batch_size)\
    # .map(_return_input_output)
valid_ts_dataset = tf.data.Dataset.from_tensor_slices(valid_ts)\
    .window(window_size, shift=1)\
    .flat_map(_sub_to_batch)\
    .map(_reshape_tensor)\
    .unbatch().shuffle(buffer_size=500, seed=_seed).batch(batch_size)\
    .map(_return_input_output)
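To make the windowing concrete, here is a minimal sketch (my addition, not from the original post) that runs the same pipeline on a toy array standing in for train_ts, assuming the helper functions and window_size above are already defined:

import numpy as np

toy_ts = np.random.rand(50, 3).astype("float32")  # 50 time steps, 3 series
toy_ds = tf.data.Dataset.from_tensor_slices(toy_ts)\
    .window(window_size, shift=1)\
    .flat_map(_sub_to_batch)\
    .map(_reshape_tensor)\
    .map(_return_input_output)
for x, y in toy_ds.take(1):
    # prints (3, 34, 1) (3, 34, 1): series first, then window_size - forecast_period steps
    print(x.shape, y.shape)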
def _forecast_mae(y_pred, y_true):
    _y_pred = y_pred[:, -forecast_period:, :]
    _y_true = y_true[:, -forecast_period:, :]
    mae = tf.losses.MAE(_y_true, _y_pred)
    return mae
def _accuracy(y_pred, y_true):
    # print(y_true) => Tensor("sequential/time_distributed/Reshape_1:0", shape=(None, 34, 1), dtype=float32)
    # y_true[-forecast_period:, :] => Tensor("strided_slice_4:0", shape=(None, 34, 1), dtype=float32)
    # y_true[:, -forecast_period:, :] => Tensor("strided_slice_4:0", shape=(None, 6, 1), dtype=float32)
    _y_pred = y_pred[:, -forecast_period:, :]
    _y_pred = tf.reshape(_y_pred, shape=[-1, forecast_period])
    _y_true = y_true[:, -forecast_period:, :]
    _y_true = tf.reshape(_y_true, shape=[-1, forecast_period])
    # MAPE: Tensor("Mean_1:0", shape=(None, 1), dtype=float32)
    MAPE = tf.math.reduce_mean(tf.math.abs((_y_pred - _y_true) / _y_true), axis=1, keepdims=True)
    accuracy = 1 - MAPE
    accuracy = tf.where(accuracy < 0, tf.zeros_like(accuracy), accuracy)
    accuracy = tf.reduce_mean(accuracy)
    return accuracy
model = k.models.Sequential([
    k.layers.Bidirectional(k.layers.LSTM(units=100, return_sequences=True), input_shape=(None, 1)),
    k.layers.Bidirectional(k.layers.LSTM(units=100, return_sequences=True)),
    k.layers.TimeDistributed(k.layers.Dense(1))
])

model_name = []
model_name_symbols = {"bidirectional": "BILSTM_1", "bidirectional_1": "BILSTM_2", "time_distributed": "td"}
for l in model.layers:
    model_name.append(model_name_symbols.get(l.name, l.name))
model_name = "_".join(model_name)
print(model_name)

for i, (x, y) in enumerate(train_ts_dataset):
    print(i, x.numpy().shape, y.numpy().shape)
The output of the dataset shapes is as follows (123 is the number of series in each window, and 34 = window_size - forecast_period = 40 - 6):
BILSTM_1_BILSTM_2_td
0 (123, 34, 1) (123, 34, 1)
1 (123, 34, 1) (123, 34, 1)
2 (123, 34, 1) (123, 34, 1)
3 (123, 34, 1) (123, 34, 1)
4 (123, 34, 1) (123, 34, 1)
5 (123, 34, 1) (123, 34, 1)
6 (123, 34, 1) (123, 34, 1)
7 (123, 34, 1) (123, 34, 1)
8 (123, 34, 1) (123, 34, 1)
Then:
_datetime = datetime.datetime.now().strftime("%Y%m%d-%H-%M-%S")
_log_dir = os.path.join(".", "logs", "fit7", model_name, _datetime)
tensorboard_cb = k.callbacks.TensorBoard(log_dir=_log_dir)
model.compile(loss="mae", optimizer=tf.optimizers.Adam(learning_rate=0.001), metrics=[_forecast_mae, _accuracy])
history = model.fit(train_ts_dataset, epochs=100, validation_data=train_ts_dataset, callbacks=[tensorboard_cb])
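As an extra sanity check (my addition, a sketch), one can also evaluate the just-trained model on the very same dataset with frozen weights; these numbers are computed the same way the validation metrics are, so they should line up with the last epoch's val_* values rather than with the running training averages:

final_metrics = model.evaluate(train_ts_dataset, verbose=0)
print(dict(zip(model.metrics_names, final_metrics)))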
I've been watching the loss on the training and validation datasets, and I kept seeing the validation loss come out smaller than the training loss. Perhaps the model simply wasn't fitting well. So, as a simple test, I substituted the training set for the validation set, in order to monitor loss and accuracy on the very same data during training and evaluation. But I still get better validation accuracy than training accuracy. Here is the accuracy over the whole training and validation datasets:

What is strange to me is that I get higher validation accuracy than training accuracy even though I use the same dataset for both. And there is no dropout, no BatchNormalization layer, and so on.

Any hints on the cause of this behavior would be greatly appreciated!
====================================================================
I made some modifications to the code here to check whether the batch size had an effect. Also, to rule out any doubts about tf.data.Dataset, I used numpy arrays as input. The new code is as follows:
custom_train_ts = train_ts.transpose(1, 0)[..., np.newaxis]
custom_train_ts_x = custom_train_ts[:, :window_size, :] # size: 123, window_size, 1
custom_train_ts_y = custom_train_ts[:, -window_size:, :] # size: 123, window_size, 1
custom_valid_ts = valid_ts.transpose(1, 0)[..., np.newaxis]
custom_valid_ts_x = custom_valid_ts[:, :window_size, :]
custom_valid_ts_y = custom_valid_ts[:, -window_size:, :]
custom_valid_ts = (custom_valid_ts_x, custom_valid_ts_y)
Second, to make sure the accuracy is computed over the whole dataset and does not depend on the batch size, I fed the dataset in as-is, without batching. I also implemented the following custom metric:
def _accuracy(y_true, y_pred):
    # print(y_true) => Tensor("sequential/time_distributed/Reshape_1:0", shape=(None, 34, 1), dtype=float32)
    # y_true[-forecast_period:, :] => Tensor("strided_slice_4:0", shape=(None, 34, 1), dtype=float32)
    # y_true[:, -forecast_period:, :] => Tensor("strided_slice_4:0", shape=(None, 6, 1), dtype=float32)
    _y_pred = y_pred[:, -forecast_period:, :]
    _y_pred = tf.reshape(_y_pred, shape=[-1, forecast_period])
    _y_true = y_true[:, -forecast_period:, :]
    _y_true = tf.reshape(_y_true, shape=[-1, forecast_period])
    # MAPE: Tensor("Mean_1:0", shape=(None, 1), dtype=float32)
    MAPE = tf.math.reduce_mean(tf.math.abs((_y_pred - _y_true) / _y_true), axis=1, keepdims=True)
    accuracy = 1 - MAPE
    accuracy = tf.where(accuracy < 0, tf.zeros_like(accuracy), accuracy)
    accuracy = tf.reduce_mean(accuracy)
    return accuracy
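For reference, the metric can be sanity-checked eagerly on toy tensors (a sketch, my addition, assuming forecast_period = 6 as above); a constant 10% relative error over the forecast window should give an accuracy of about 0.9:

y_true = tf.ones([2, 34, 1])
y_pred = tf.fill([2, 34, 1], 0.9)
print(_accuracy(y_true, y_pred).numpy())  # ~0.9, since MAPE = 0.1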
class MyAccuracy(tf.keras.metrics.Metric):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.accuracy_function = _accuracy
        self.y_true_lst = []
        self.y_pred_lst = []

    def update_state(self, y_true, y_pred, sample_weight=None):
        self.y_true_lst.append(y_true)
        self.y_pred_lst.append(y_pred)

    def result(self):
        y_true_concat = tf.concat(self.y_true_lst, axis=0)
        y_pred_concat = tf.concat(self.y_pred_lst, axis=0)
        accuracy = self.accuracy_function(y_true_concat, y_pred_concat)
        self.y_true_lst = []
        self.y_pred_lst = []
        return accuracy

    def get_config(self):
        base_config = super().get_config()
        return {**base_config}
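As an aside, tf.keras expects stateful metrics to keep their state in variables and to clear it in reset_state(); appending tensors to Python lists inside update_state can misbehave under graph execution. Here is a variable-based sketch of the same idea (my addition, not from the original post; the method is named reset_states in older TF 2.x releases). Accumulating the per-batch mean weighted by sample count gives the same result as concatenating all batches:

class StreamingAccuracy(tf.keras.metrics.Metric):
    def __init__(self, name="streaming_accuracy", **kwargs):
        super().__init__(name=name, **kwargs)
        self.total = self.add_weight(name="total", initializer="zeros")
        self.count = self.add_weight(name="count", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None):
        n = tf.cast(tf.shape(y_true)[0], tf.float32)          # samples in this batch
        self.total.assign_add(_accuracy(y_true, y_pred) * n)  # per-batch mean, re-weighted
        self.count.assign_add(n)

    def result(self):
        return self.total / self.count

    def reset_state(self):
        self.total.assign(0.0)
        self.count.assign(0.0)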
Finally, the model is compiled and fit as follows:
model.compile(loss="mae", optimizer=tf.optimizers.Adam(hparams["learning_rate"]),
metrics=[tf.metrics.MAE, MyAccuracy()])
history = model.fit(custom_train_ts_x, custom_train_ts_y, epochs=120, batch_size=123, validation_data=custom_valid_ts,
callbacks=[tensorboard_cb])
When I look at the training and validation accuracy in TensorBoard, I get the following:

So clearly this makes no sense. Moreover, in this case the accuracy is guaranteed to be computed only once, at the end of the epoch when result() is called. Yet when looking at the losses, I still see the training loss sitting below the validation loss:
Answer 0 (score: -1):
They are different because the optimizer updates the parameters at the end of each batch: val_loss is computed once at the end of the epoch, while train_loss is computed along the way, while the weights are still changing.
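You can watch this running average directly: in recent tf.keras versions, the loss reported at the end of each training batch is already the mean over the epoch so far. A small sketch of a logging callback (my addition, not from the original answer):

class BatchLossLogger(tf.keras.callbacks.Callback):
    def on_train_batch_end(self, batch, logs=None):
        # logs["loss"] is the running mean of the batch losses in this epoch
        print(f"batch {batch}: running mean loss = {logs['loss']:.4f}")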
Even when there is just one sample per batch and just one batch per epoch, they will differ from each other, because the network does a forward pass on your sample and computes the loss, which is reported as train_loss, and after updating the parameters it computes the loss again, this time reported as val_loss (in this case, the next epoch's train_loss will be equal to the current val_loss).
So if you want to check whether what I just said is correct, simply set the optimizer's learning_rate to 0, and you will get the same loss.

Here is the code I tested on MNIST for the same problem (you can temporarily view the code and results in my Colab here):
Compiling and fitting for several cases:
# ---------------------------------
# Importing stuff
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras.utils import to_categorical
# ---------------------------------
(trainX, trainy), (testX, testy) = keras.datasets.mnist.load_data()
# one-hot
trainy = to_categorical(trainy, 10)
testy = to_categorical(testy, 10)
# image should be in shape of (28, 28, 1) not (28, 28)
trainX = np.expand_dims(trainX, -1)
testX = np.expand_dims(testX, -1)
# normalize
trainX = trainX/255.
testX = testX/255.
# ---------------------------------
# Build the model
model = Sequential()
model.add(Input(trainX.shape[1:]))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()
# training on 1 sample, but with learning_rate != 0
opt = keras.optimizers.Adam(learning_rate = 0.001)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['categorical_accuracy'])
batchX = trainX[0].reshape(1, 28, 28, 1)
batchy = trainy[0].reshape(1, 10)
model.fit(batchX, batchy, validation_data = (batchX, batchy), batch_size = 1,
          shuffle = False, validation_batch_size = 1, epochs = 5)
# You will see that the loss and val_loss are different and the
# next steps loss is equal to the current steps val_loss
# training on 1 sample, with learning_rate == 0
opt = keras.optimizers.Adam(learning_rate = 0)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['categorical_accuracy'])
batchX = trainX[0].reshape(1, 28, 28, 1)
batchy = trainy[0].reshape(1, 10)
model.fit(batchX, batchy, validation_data = (batchX, batchy), batch_size = 1,
          shuffle = False, validation_batch_size = 1, epochs = 5)
# You will see that the loss and val_loss are equal because
# the parameters will not change
# training on the complete dataset but with learning_rate != 0
opt = keras.optimizers.Adam(learning_rate = 0.001)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['categorical_accuracy'])
model.fit(trainX, trainy, validation_data = (trainX, trainy), batch_size = 32,
          shuffle = False, validation_batch_size = 32, epochs = 5)
# this is similar to the case you asked
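A quick numerical check of the one-sample claim above (a sketch, my addition): with a single sample and a single batch per epoch, each epoch's loss in the History object should equal the previous epoch's val_loss.

opt = keras.optimizers.Adam(learning_rate = 0.001)
model.compile(optimizer = opt, loss='categorical_crossentropy')
hist = model.fit(batchX, batchy, validation_data = (batchX, batchy), batch_size = 1,
                 shuffle = False, validation_batch_size = 1, epochs = 5, verbose = 0)
print(np.allclose(hist.history['loss'][1:], hist.history['val_loss'][:-1]))  # expected: True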