I built a multi-output model and asked a similar question about it last time. I know how to get the averaged loss and accuracy values, but my model still does not recognize the averaged val_loss and val_acc. Could you tell me how to fix this? The code is attached below. Thanks.
This is the code for obtaining the averaged loss and accuracy:
```python
class MergeMetrics(tf.keras.callbacks.Callback):
    ...  # loss/accuracy averaging from my previous question, omitted here

model = Model(inputs=visible, outputs=listDense)
losses = {"output{}".format(j+1):'mse' for j in range(len(listDense))}
# tie losses together
model.compile(optimizer='adam', loss=losses, metrics=["mse", "mae", r_square])
#averaging loss and accuracy
checkpoint = MergeMetrics()
# fit model
hist = model.fit(
    X_tr,
    [listofdepth_tr[s] for s in range(len(listofdepth_tr))],
    validation_data=(X_te, [listofdepth_te[s] for s in range(len(listofdepth_te))]),
    epochs=100,
    callbacks=[checkpoint],
    use_multiprocessing=True,
    workers=6,
    verbose=0,
)
#-----------------------------------------------------------------------------
# Plot learning curves including R^2 and MSE
#-----------------------------------------------------------------------------
# plot training curve for R^2 (beware of scale, starts very low negative)
fig = plt.figure()
ax1 = fig.add_subplot(3,1,1)
ax1.plot(hist.history['merge_r_square'])
ax1.plot(hist.history['val_merge_r_square'])
ax1.set_title('Accuracy : model R^2')
ax1.set_ylabel('R^2')
ax1.legend(['train', 'test'], loc='upper left')
# plot training curve for mse
ax2 = fig.add_subplot(3,1,2)
ax2.plot(hist.history['merge_mse'])
ax2.plot(hist.history['val_merge_mse'])
ax2.set_title('Accuracy : mse')
ax2.set_ylabel('mse')
ax2.legend(['train', 'test'], loc='upper left')
# plot training curve for the loss (mse)
ax3 = fig.add_subplot(3,1,3)
ax3.plot(hist.history['loss'])
ax3.plot(hist.history['val_loss'])
ax3.set_title('Loss : mse')
ax3.set_ylabel('mse')
ax3.set_xlabel('epoch')
ax3.legend(['train', 'test'], loc='upper left')
```
The above is the code for my model, together with the resulting loss plots.
Answer 0 (score: 1)
One thing to be careful about when using validation: there is no log key containing just "val_mse", because the keys have the form "val_outputname_mse". So if you also use validation, make sure you do not mix the training MSE with the validation MSE when averaging. Below is the correct way:
from string import digits  # <=== import digits

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

def clear_name(output_name):
    # strip the digits so that e.g. 'dense_1_mse' and 'dense_2_mse' map to the same key
    return output_name.translate(str.maketrans('', '', digits))

class MergeMetrics(Callback):
    def __init__(self, **kargs):
        super(MergeMetrics, self).__init__(**kargs)

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # average the per-output training metrics into single 'merge_*' entries
        logs['merge_mse'] = np.mean([logs[m] for m in logs.keys() if clear_name(m) == 'dense__mse'])
        logs['merge_mae'] = np.mean([logs[m] for m in logs.keys() if clear_name(m) == 'dense__mae'])
        # do the same separately for the validation metrics
        logs['val_merge_mse'] = np.mean([logs[m] for m in logs.keys() if clear_name(m) == 'val_dense__mse'])
        logs['val_merge_mae'] = np.mean([logs[m] for m in logs.keys() if clear_name(m) == 'val_dense__mae'])
# dummy data: 1000 samples, 10 features, two regression targets
X = np.random.uniform(0, 1, (1000, 10))
y1 = np.random.uniform(0, 1, 1000)
y2 = np.random.uniform(0, 1, 1000)

# simple two-output model
inp = Input((10,))
x = Dense(32, activation='relu')(inp)
out1 = Dense(1)(x)
out2 = Dense(1)(x)
m = Model(inp, [out1, out2])
m.compile('adam', 'mae', metrics=['mse', 'mae'])

# the callback adds the merged train/validation metrics to the history
checkpoint = MergeMetrics()
hist = m.fit(X, [y1, y2], epochs=10, callbacks=[checkpoint], validation_split=0.1)
plt.plot(hist.history['merge_mse'])
plt.plot(hist.history['val_merge_mse'])
plt.title('Accuracy : mse')
plt.ylabel('mse')
plt.legend(['train', 'test'], loc='upper left')
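For the question's own model, where the loss dict names the outputs "output1", "output2", ... and r_square is tracked as an extra metric, the same idea should carry over. Below is a minimal, untested sketch: the class name MergeNamedOutputMetrics is made up here, and the exact log keys ('output1_mse', 'val_output1_r_square', ...) are an assumption based on Keras' default "<output name>_<metric name>" naming; clear_name and np come from the code above.

```python
# Hypothetical variant of the callback for outputs named output1, output2, ...
class MergeNamedOutputMetrics(Callback):
    def on_epoch_end(self, epoch, logs={}):
        def merged(stripped_key):
            # average all log entries whose digit-stripped name matches
            vals = [v for k, v in logs.items() if clear_name(k) == stripped_key]
            return np.mean(vals) if vals else np.nan
        logs['merge_mse'] = merged('output_mse')            # from 'output1_mse', 'output2_mse', ...
        logs['merge_r_square'] = merged('output_r_square')
        logs['val_merge_mse'] = merged('val_output_mse')    # from 'val_output1_mse', ...
        logs['val_merge_r_square'] = merged('val_output_r_square')
```

Passing an instance of this in callbacks=[...] when fitting should then make hist.history['val_merge_mse'] and hist.history['val_merge_r_square'] available for the plots in the question.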