从 VGG16 训练 UNET 时，必须为占位符 *_sample_weights 输入值

时间:2018-08-16 12:06:56

标签: tensorflow keras

我正在尝试使用VGG16作为第一层来创建UNET。

def BuildUNet2():
    """Build a U-Net whose contracting path is a pre-trained VGG16.

    The VGG16 convolutional base (``include_top=False``) is the encoder;
    skip connections are taken from the ``block*_conv*`` outputs and
    concatenated into a ``Conv2DTranspose`` expanding path.  The encoder
    layers are frozen so only the decoder is trained.

    Returns:
        A compiled Keras ``Model`` mapping ``(PATCH_SIZE, PATCH_SIZE, 3)``
        images to a single-channel sigmoid mask of the same spatial size,
        optimized with Adam on ``metric.dice_coef_loss``.
    """
    keras.backend.set_learning_phase(1)

    inputs = keras.layers.Input(shape=(PATCH_SIZE, PATCH_SIZE, 3), name="inputs")
    vggModel = keras.applications.VGG16(include_top=False, input_tensor=inputs)
    layers = dict([(layer.name, layer) for layer in vggModel.layers])

    print("Layers", len(layers), layers)

    # Skip-connection sources from the VGG16 encoder, shallowest to deepest.
    block1_conv2 = layers["block1_conv2"].output
    block2_conv2 = layers["block2_conv2"].output
    block3_conv3 = layers["block3_conv3"].output
    block4_conv3 = layers["block4_conv3"].output
    vggTop = layers["block5_conv3"].output

    # Expanding path: upsample 2x with Conv2DTranspose, concatenate the
    # matching encoder feature map (channel axis 3), then refine with two
    # 3x3 "same" convolutions.
    up6 = keras.layers.concatenate([keras.layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding="same")(vggTop), block4_conv3], axis=3)
    conv61 = keras.layers.Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up6)
    conv62 = keras.layers.Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv61)

    up7 = keras.layers.concatenate([keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding="same")(conv62), block3_conv3], axis=3)
    conv71 = keras.layers.Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up7)
    conv72 = keras.layers.Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv71)

    up8 = keras.layers.concatenate([keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same")(conv72), block2_conv2], axis=3)
    conv81 = keras.layers.Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up8)
    conv82 = keras.layers.Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv81)

    up9 = keras.layers.concatenate([keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same")(conv82), block1_conv2], axis=3)
    conv91 = keras.layers.Conv2D(32, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up9)
    conv92 = keras.layers.Conv2D(32, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv91)
    # 1x1 convolution to a single sigmoid channel: the predicted mask.
    conv93 = keras.layers.Conv2D(1, (1, 1), activation="sigmoid")(conv92)

    # FIX: the legacy kwargs input=/output= are deprecated (and removed in
    # newer Keras); inputs=/outputs= are accepted by all Keras 2 versions.
    model = keras.models.Model(inputs=[inputs], outputs=[conv93])

    # Freeze the VGG16 encoder (first 19 layers) so only the decoder trains.
    for layer in model.layers[:19]:
        layer.trainable = False

    model.compile(optimizer=keras.optimizers.Adam(lr=1e-5), loss=metric.dice_coef_loss,
              metrics=[metric.dice_coef, "accuracy"])

    model.summary()

    return model

我正在训练:

    with h5py.File(parms.training, "r") as trainingsFile:

        # Load the full sample array and shuffle it reproducibly.
        samples = trainingsFile["work"].value
        np.random.seed(42)
        np.random.shuffle(samples)

        # 80/20 train/validation split, each wrapped in a batch generator.
        split = int(samples.shape[0] * 0.8)
        train_gen = DataGenerator(samples[:split], parms.batchSize)
        val_gen = DataGenerator(samples[split:], parms.batchSize)

        # Snapshot of the best model so far, judged by validation loss.
        best_checkpoint = keras.callbacks.ModelCheckpoint(
            "best.h5", monitor="val_loss", save_best_only=True, save_weights_only=False)

        # Unconditional checkpoint every 10 epochs.
        periodic_checkpoint = keras.callbacks.ModelCheckpoint("checkpoint-{epoch:04d}.h5", period=10)

        # Append per-epoch metrics to a CSV log.
        csv_logger = keras.callbacks.CSVLogger("log.csv", append=True)

        # One TensorBoard run directory per invocation, timestamped.
        run_name = datetime.datetime.now().isoformat("@")[:19].replace(":", "-")
        tensorboard_cb = keras.callbacks.TensorBoard(log_dir="./logs/%s/" % run_name)

        # Cut the learning rate when validation loss stops improving.
        lr_scheduler = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.2, patience=10, cooldown=5)

        model.fit_generator(train_gen,
                            epochs=parms.epochs,
                            steps_per_epoch=len(train_gen),
                            validation_data=val_gen,
                            validation_steps=len(val_gen),
                            callbacks=[best_checkpoint, periodic_checkpoint, csv_logger,
                                       tensorboard_cb, lr_scheduler],
                            use_multiprocessing=False,
                            )

DataGenerator定义为:

class DataGenerator(keras.utils.Sequence):
    """Keras ``Sequence`` yielding randomly augmented (image, mask) batches.

    ``data`` is expected to be a 4-D array of shape (samples, H, W, 4)
    whose first three channels are the RGB image and whose fourth channel
    is the mask (inferred from the channel slicing below — confirm against
    the producer of the HDF5 "work" dataset).
    """

    def __init__(self, data, batchSize):
        self.data = data            # (N, H, W, 4) sample array
        self.batchSize = batchSize  # maximum samples per batch

    def __len__(self):
        # Number of batches, rounded up so a partial final batch is kept.
        return int((self.data.shape[0] + self.batchSize - 1) / (self.batchSize))

    def __getitem__(self, item):
        """Return batch ``item`` as (X, Y) with random flip/rotate augmentation."""
        X = np.zeros((self.batchSize, self.data.shape[1], self.data.shape[2], 3), dtype=np.float32)
        Y = np.zeros((self.batchSize, self.data.shape[1], self.data.shape[2]), dtype=np.float32)

        j = 0
        for i in range(item * self.batchSize, min((item + 1) * self.batchSize, self.data.shape[0])):
            wrk = self.data[i, :, :, :]

            # Random horizontal and vertical flips, each with p = 0.5.
            if random.random() < 0.5:
                wrk = wrk[:, ::-1, :]
            if random.random() < 0.5:
                wrk = wrk[::-1, :, :]

            # Random rotation by 0, 90, 180 or 270 degrees.
            direction = int(random.random() * 4) * 90
            if direction:
                wrk = imutils.rotate(wrk, direction)

            X[j, :, :, :] = wrk[:, :, 0: 3]
            Y[j, :, :] = wrk[:, :, 3]
            j += 1

        # BUG FIX: ndarray.resize() resizes IN PLACE and returns None, so the
        # original "X = X.resize(...)" made __getitem__ return (None, None),
        # which surfaced as the misleading Keras error about the
        # 'conv2d_9_sample_weights' placeholder.  Slice instead to trim a
        # possible partial final batch.
        return X[:j], Y[:j]

尝试训练模型结果

tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_9_sample_weights' with dtype float and shape [?]

即使从 DataGenerator 中显式返回 sample_weight（额外附加一个 np.ones((j), dtype=np.float32) 数组）也不能解决问题。

怎么了?

我该如何纠正?

1 个答案:

答案 0（得分：0）

问题出在 DataGenerator.__getitem__()：ndarray.resize() 不会返回新的 numpy 数组，它就地修改原数组且不返回任何内容。因此 __getitem__ 方法实际返回了 (None, None)。Keras 的错误消息具有误导性。