Why does my code throw KeyError: 'epochs' when implementing Fully Convolutional Networks in Keras?

Time: 2018-08-11 11:32:57

Tags: python tensorflow machine-learning keras conv-neural-network

I am trying to implement an FCN in TensorFlow using Keras. After training for the first epoch, I get this error:

KeyError: 'epochs'

I think it must be related to ModelCheckpoint() and model.fit(), because when I remove the callbacks from model.fit(), training completes all epochs.

Any help would be greatly appreciated. Thanks.

Here is part of my code:

from __future__ import print_function

import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.callbacks import *
from keras.utils.vis_utils import plot_model
import shutil

import matplotlib.pyplot as plt
from pylab import *
from keras.regularizers import l2
from keras.layers import *
from keras.engine import Layer
from keras.applications.vgg16 import *
from keras.models import *
from keras.applications.imagenet_utils import _obtain_input_shape
import tensorflow as tf
import time

K.set_image_data_format('channels_last')  # TF dimension ordering in this code

starttime = time.clock()

img_rows = 512
img_cols = 512

smooth = 1.


def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)




def FCN32():
    # https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5
    inputs = Input((img_rows, img_cols, 1))
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(conv1)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(conv1)
    # Block 2

    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(pool1)
    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(conv2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(conv2)

    # Block 3
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(pool2)
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(conv3)
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(conv3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(conv3)

    # Block 4

    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(pool3)

    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(conv4)
    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(conv4)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(conv4)

    # Block 5

    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(conv5)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(conv5)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(conv5)


    up1 = (Conv2D(32, (7, 7), activation='relu', padding='same'))(pool5)
    drop1 = Dropout(0.5)(up1)
    up2 = (Conv2D(32, (1, 1), activation='relu', padding='same'))(drop1)
    drop2 = Dropout(0.5)(up2)

    up3 = (Conv2D(1, (1, 1), kernel_initializer='he_normal'))(drop1)
    up4 = Conv2DTranspose(1, kernel_size=(64, 64), strides=(30, 30), use_bias=False)(up3)

    crop1 = Cropping2D(cropping=((1, 1), (1, 1)))(up4)

    model = Model(inputs=[inputs], outputs=[crop1])
    model.compile(optimizer=SGD(lr=0.005), loss='binary_crossentropy', metrics=[dice_coef])

    return model

def train_and_predict():
    print('-' * 30)
    print('Loading train data...')
    print('-' * 30)
    imgs_train = np.load('train_data.npy') 
    imgs_label_train = np.load('train_label.npy')

    imgs_train = imgs_train.reshape(1000, 512, 512, 1)
    imgs_label_train = imgs_label_train.reshape(1000, 512, 512, 1)

    #    imgs_train = preprocess(imgs_train)
    ##imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_label_train = imgs_label_train.astype('float32')
    imgs_label_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    log_filepath = '/logs'
    model = FCN32()


    model_checkpoint = ModelCheckpoint('fcn32_weights.{epochs:02d-{dice_coef:.2f}}.h5', monitor='val_loss',
                                       save_best_only=True)
    tb_cb = TensorBoard(log_dir=log_filepath, write_images=False, histogram_freq=1, write_graph=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)

    model.fit(imgs_train, imgs_label_train, batch_size=10, epochs=10, verbose=1, shuffle=True,
              validation_split=0.02,
              callbacks=[model_checkpoint, tb_cb])

    print('-' * 30)
    print('Loading test data...')
    print('-' * 30)
    imgs_test = np.load('test_data.npy') 
    imgs_test = imgs_test.reshape(100, 512, 512, 1)


    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('fcn32_weights.h5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, batch_size=10, verbose=1)

    np.save('imgs_mask_test.npy', imgs_mask_test)




if __name__ == '__main__':
    train_and_predict()

endtime = time.clock()
print("The train_and_predict running time is %g s" % (endtime - starttime))

1 Answer:

Answer 0 (score: 1):

Since the error appears at the end of the first epoch, it is most likely caused by the ModelCheckpoint callback: the filename template uses the key epochs instead of epoch, and the first } is in the wrong place, so the placeholders cannot be resolved.

Try:

model_checkpoint = ModelCheckpoint('fcn32_weights.{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=True)
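For context, ModelCheckpoint builds the filename at the end of each epoch by formatting the filepath with the epoch number and the metrics in the logs dict, so only epoch (singular) and the logged metric names (here loss, dice_coef, val_loss, val_dice_coef) are valid placeholders, and each placeholder needs its own pair of braces. Below is a minimal sketch of the corrected callback wired into the training call, reusing model, imgs_train, imgs_label_train and tb_cb from the code above; the dice_coef placeholder is only an illustration:

from keras.callbacks import ModelCheckpoint

# Each placeholder gets its own {...}; valid keys are 'epoch' plus the
# metric names Keras reports in the logs dict at the end of every epoch.
model_checkpoint = ModelCheckpoint(
    'fcn32_weights.{epoch:02d}-{val_loss:.2f}-{dice_coef:.2f}.h5',
    monitor='val_loss',
    save_best_only=True)

model.fit(imgs_train, imgs_label_train, batch_size=10, epochs=10, verbose=1,
          shuffle=True, validation_split=0.02,
          callbacks=[model_checkpoint, tb_cb])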