我正在尝试使用VGG16编码器构建U-Net模型。这是模型代码。
from keras.applications.vgg16 import VGG16
base_pretrained_model = VGG16(input_shape = shape, include_top = False, weights = 'imagenet')
base_pretrained_model.trainable = False
base_pretrained_model.summary()
inp = Input(shape=shape,name = 'image_input')
output_vgg16_conv = base_pretrained_model(inp)
# output and start upsampling
conv_1 = Conv2D(512, (3,3), activation='relu', padding='same')(output_vgg16_conv)
up_conv = Conv2DTranspose(256, (3,3), strides=(2,2), activation='relu', padding='same')(conv_1)
# first concatenation block
concat_1 = concatenate([base_pretrained_model.get_layer('block5_conv3').output, up_conv], axis=-1, name='concat_1')
conv_2 = Conv2D(512, (3,3), activation='relu', padding='same')(concat_1)
up_conv_2 = Conv2DTranspose(256, (3,3), strides=(2,2), activation='relu', padding='same')(conv_2)
# second concatenation block
concat_2 = concatenate([up_conv_2, base_pretrained_model.get_layer('block4_conv3').output])
conv_3 = Conv2D(512, (3,3), activation='relu', padding='same')(concat_2)
up_conv_3 = Conv2DTranspose(128, (3,3), strides=(2,2), activation='relu', padding='same')(conv_3)
# third concatenation block
concat_3 = concatenate([up_conv_3, base_pretrained_model.get_layer('block3_conv3').output])
conv_4 = Conv2D(256, (3,3), activation='relu', padding='same')(concat_3)
up_conv_4 = Conv2DTranspose(64, (3,3), strides=(2,2), activation='relu', padding='same')(conv_4)
# fourth concatenation block
concat_4 = concatenate([up_conv_4, base_pretrained_model.get_layer('block2_conv2').output])
conv_5 = Conv2D(128, (3,3), activation='relu', padding='same')(concat_4)
up_conv_5 = Conv2DTranspose(32, (3,3), strides=(2,2), activation='relu', padding='same')(conv_5)
# fifth concatenation block
concat_4 = concatenate([up_conv_5, base_pretrained_model.get_layer('block1_conv2').output])
conv_6 = Conv2D(128, (3,3), activation='sigmoid', padding='same')(concat_4)
finalModel = Model(inp, output = conv_6)
我收到以下错误消息。
ValueError:图形已断开连接:无法在“ input_1”层获取张量Tensor(“ input_1:0”,shape =(None,512,512,3),dtype = float32)的值。可以顺利访问以下先前的图层:[]
注意:input_1
是VGG16模型的输入层
答案 0 :(得分:1)
您无需定义其他Input
层,因为Input
模型中已经有一个VGG16
层,您可以使用来访问
inp = base_pretrained_model.input
您还可以通过使用
获取VGG16
模型的输出
output_vgg16_conv = base_pretrained_model.output
此外,要保持基础模型VGG16
的权重不变,您可以使每一层都不可训练:
for layer in base_pretrained_model.layers:
layer.trainable = False
希望这会有所帮助。