How do I train a neural network twice without re-initializing the model?

Date: 2021-01-12 23:16:45

Tags: python tensorflow machine-learning keras deep-learning

Suppose I have this model:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model


def mask_layer(tensor):
    # Element-wise multiply by a fixed all-ones mask (no trainable weights).
    return layers.Multiply()([tensor, tf.ones([1, 128])])


def get_model():

    inp_1 = keras.Input(shape=(64, 101, 1), name="input")
    x = layers.Conv2D(256, kernel_size=(3, 3), kernel_regularizer=l2(1e-6),
                      strides=(3, 3), padding="same")(inp_1)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Conv2D(128, kernel_size=(3, 3), kernel_regularizer=l2(1e-6),
                      strides=(3, 3), padding="same")(x)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(512)(x)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Dense(256)(x)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Dense(128, name="output1")(x)
    mask = layers.Lambda(mask_layer, name="lambda_layer")(x)
    out2 = layers.Dense(40000, name="output2")(mask)

    model = keras.Model(inp_1, [mask, out2], name="2_out_model")

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss="mean_squared_error")
    plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
    model.summary()
    return model

Then I train my network:

model = get_model()
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)
history = model.fit(X_train, [Y_train, Z_train], validation_data=(X_val, [Y_val, Z_val]), epochs=500,
                    batch_size=32,
                    callbacks=[es])
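# With two outputs, evaluate() returns the total loss followed by one loss
# per output, hence the three return values below.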
test_loss, _, _ = model.evaluate(X_test, [Y_test, Z_test], verbose=1)

I want to retrain the already-trained network on another training set, but with a changed Lambda layer definition; say the function this time returns:

return layers.Multiply()([tensor, tf.ones([1, 128])*1.2])

Do I need to call get_model() again (since I redefined a layer) and then fit again? Is there a risk that the model weights get re-initialized? Thanks in advance :)

1 answer:

Answer 0 (score: 1)

Your Lambda layer is not a trainable layer, so you can safely move your trained weights into another model with the same structure but a different Lambda layer.

Here is an example:

def mask_layer1(tensor):
    return layers.Multiply()([tensor, tf.ones([1, 128])])

def mask_layer2(tensor):
    return layers.Multiply()([tensor, tf.ones([1, 128])*1.2])


def get_model(mask_kind):

    inp = keras.Input(shape=(64, 101, 1), name="input")
    
    x = layers.Conv2D(256, kernel_size=(3, 3), kernel_regularizer=l2(1e-6), 
                      strides=(3, 3), padding="same")(inp)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Conv2D(128, kernel_size=(3, 3), kernel_regularizer=l2(1e-6), 
                      strides=(3, 3), padding="same")(x)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(512)(x)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Dense(256)(x)
    x = layers.LeakyReLU(alpha=0.3)(x)
    x = layers.Dense(128, name="output1")(x)
    
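    # Both mask variants are weight-free Lambda layers, so the choice does
    # not change the set of trainable weights.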
    if mask_kind == 1:
        mask = layers.Lambda(mask_layer1, name="lambda_layer")(x)
    elif mask_kind == 2:
        mask = layers.Lambda(mask_layer2, name="lambda_layer")(x)
    else:
        mask = x
    
    out = layers.Dense(40000, name="output2")(mask)

    model = keras.Model(inp, [mask, out], name="2_out_model")
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), 
                  loss="mean_squared_error")
    
    return model


model1 = get_model(mask_kind=1)
model1.fit(...)

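# Rebuild the same architecture with the new Lambda layer, then copy the
# trained weights from the first model instead of starting from scratch.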
model2 = get_model(mask_kind=2)
model2.set_weights(model1.get_weights())
model2.fit(...)
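If you want to confirm that no weights were re-initialized during the transfer, a minimal sanity check (a sketch, assuming numpy is available) is to compare the two weight lists:

import numpy as np

# Every weight array in model2 should now equal its counterpart in model1.
for w1, w2 in zip(model1.get_weights(), model2.get_weights()):
    assert np.allclose(w1, w2)

Going through disk works equally well: model1.save_weights("weights.h5") followed by model2.load_weights("weights.h5"), since the two models have identical layer shapes.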