我的模型的准确度是0.99549,但是预测的图像是空白的

时间:2019-01-30 13:00:16

标签: tensorflow keras unet

我正在使用 U-net 模型进行 MRI 图像分割。运行训练后,模型的精度显示为 0.99549,但是当我从预测数组绘制预测图像时,它什么也没显示。

K.set_image_data_format('channels_last') # TF dimension ordering: (batch, H, W, channels)
# Xtr presumably has shape (batch, H, W, channels); keep only the spatial
# (H, W) dims as the network input shape -- TODO confirm against the loader.
_, *input_image_shape, _ = Xtr.shape 
input_image_shape = tuple(input_image_shape) 
smooth = 1.  # smoothing constant for the Dice coefficient (avoids 0/0 on empty masks)
use_dropout = True  # enable Dropout layers in the expanding path (combats overfitting)
use_regularizers = True  # enable BatchNormalization after each conv level (combats overfitting)
dropout_rate = 0.5 
number_of_epochs = 40
batch_size = 32
kernel_size = (5, 5)  # spatial kernel used by every Conv2D in the U-net
initial_volume_size = 64  # filters at the first level; doubled on each downsampling
def dice_coef_per_image_in_batch(y_true, y_pred):
    """Smoothed Dice coefficient computed per image, averaged over the batch.

    Flattens each sample to a vector, forms (2*|A∩B| + smooth) /
    (|A| + |B| + smooth) per sample, and returns the batch mean.
    """
    true_flat = K.batch_flatten(y_true)
    pred_flat = K.batch_flatten(y_pred)
    overlap = K.sum(true_flat * pred_flat, axis=1, keepdims=True)
    totals = K.sum(true_flat, axis=1, keepdims=True) + K.sum(pred_flat, axis=1, keepdims=True)
    per_image_dice = (2. * overlap + smooth) / (totals + smooth)
    return K.mean(per_image_dice)
def dice_coef_loss(y_true, y_pred):
    """Negated Dice score: minimizing this loss maximizes the Dice overlap."""
    dice = dice_coef_per_image_in_batch(y_true, y_pred)
    return -dice
def dice_coef_accur(y_true, y_pred):
    """Dice score exposed under a metric name for Keras monitoring/logging."""
    score = dice_coef_per_image_in_batch(y_true, y_pred)
    return score
def setup_regularizers(conv_layer):
    """Append BatchNormalization to `conv_layer` when the global flag is on."""
    if not use_regularizers:
        return conv_layer
    return BatchNormalization()(conv_layer)
def setup_dropout(conv_layer):
    """Append Dropout(dropout_rate) to `conv_layer` when the global flag is on."""
    if not use_dropout:
        return conv_layer
    return Dropout(dropout_rate)(conv_layer)

# ---------------- U-net architecture ----------------
# Contracting path: two 5x5 convs per level, optional BatchNorm, then 2x2
# max-pooling; filter count doubles each level (64 -> 1024 at the bottleneck).
inputs = Input((*input_image_shape, 1)) 
conv1 = Conv2D(initial_volume_size, kernel_size, activation='relu', padding='same')(inputs) 
conv1 = Conv2D(initial_volume_size, kernel_size, activation='relu', padding='same')(conv1) 
conv1 = setup_regularizers(conv1) 
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(initial_volume_size*2, kernel_size, activation='relu', padding='same')(pool1) 
conv2 = Conv2D(initial_volume_size*2, kernel_size, activation='relu', padding='same')(conv2) 
conv2 = setup_regularizers(conv2) 
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(initial_volume_size*4,kernel_size, activation='relu', padding='same')(pool2) 
conv3 = Conv2D(initial_volume_size*4, kernel_size, activation='relu', padding='same')(conv3) 
conv3 = setup_regularizers(conv3) 
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(initial_volume_size*8, kernel_size, activation='relu', padding='same')(pool3) 
conv4 = Conv2D(initial_volume_size*8, kernel_size, activation='relu', padding='same')(conv4) 
conv4 = setup_regularizers(conv4) 
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
# Bottleneck level (no pooling afterwards).
conv5 = Conv2D(initial_volume_size*16, kernel_size, activation='relu', padding='same')(pool4) 
conv5 = Conv2D(initial_volume_size*16, kernel_size, activation='relu', padding='same')(conv5) 
conv5 = setup_regularizers(conv5)
# Expanding path: upsample, concatenate the matching contracting-level
# feature map (skip connection) along the channel axis, optional dropout,
# then two convs; filter count halves each level.
up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3) 
up6 = setup_dropout(up6) 
conv6 = Conv2D(initial_volume_size*8, kernel_size, activation='relu', padding='same')(up6) 
conv6 = Conv2D(initial_volume_size*8, kernel_size, activation='relu', padding='same')(conv6)
up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3) 
up7 = setup_dropout(up7) 
conv7 = Conv2D(initial_volume_size*4, kernel_size, activation='relu', padding='same')(up7) 
conv7 = Conv2D(initial_volume_size*4,kernel_size, activation='relu', padding='same')(conv7)
up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3) 
up8 = setup_dropout(up8) 
conv8 = Conv2D(initial_volume_size*2, kernel_size, activation='relu', padding='same')(up8) 
conv8 = Conv2D(initial_volume_size*2, kernel_size, activation='relu', padding='same')(conv8)
up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=3) 
up9 = setup_dropout(up9) 
conv9 = Conv2D(initial_volume_size, kernel_size, activation='relu', padding='same')(up9) 
conv9 = Conv2D(initial_volume_size, kernel_size, activation='relu', padding='same')(conv9)
# 1x1 sigmoid conv: per-pixel foreground probability in [0, 1].
conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
model = Model(inputs=[inputs], outputs=[conv10])
# NOTE(review): lr=1e-5 is very small; with only 40 epochs the sigmoid head
# may stay near its initialization -- worth confirming on the loss curve.
model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef_accur]) 
print("Size of the CNN: %s" % model.count_params())
print(model.summary())
# Keeps the weights with the best validation Dice.
# NOTE(review): mode is left 'auto'; Keras infers max from the 'acc'
# substring in the monitor name -- confirm for the Keras version in use.
model_checkpoint = ModelCheckpoint( saved_model_filename, monitor='val_dice_coef_accur',save_best_only=True, verbose=1 ) 
csv_logger = CSVLogger(csv_logger_training, append=True, separator=';')
history = model.fit(Xtr, ytr, batch_size=batch_size, epochs=number_of_epochs, verbose=2, shuffle=True, 
                    callbacks=[model_checkpoint, csv_logger], validation_data=(Xva, yva))
# NOTE(review): this first evaluation runs on the FINAL-epoch weights,
# before the best checkpoint is restored below, so the two
# "Test Accuracy Mean" prints can legitimately differ.
test_loss, accuracy_test = model.evaluate(Xte, yte, verbose=0) 
print("Test Accuracy Mean: "+str(accuracy_test))
# Restore the best weights saved by ModelCheckpoint.
# NOTE(review): path is hard-coded; presumably it equals
# saved_model_filename used above -- verify.
model.load_weights("datasets/dataset1/test-3.hdf5") 
print(model.metrics_names) 
test_loss, accuracy_test = model.evaluate(Xte, yte, verbose=0) 
print("Test Accuracy Mean: "+str(accuracy_test)) 
# Per-pixel sigmoid probabilities, same shape as yte (values in [0, 1]).
imgs_mask_test = model.predict(Xte, verbose=1)
# --- Qualitative inspection: original image | ground-truth mask | prediction ---
ncols = 3  # one column each for input, ground truth, and prediction
# Clamp the row count to the test-set size: the original hard-coded 30
# raises IndexError on Xte[k] whenever the test set holds fewer samples.
nrows = min(30, len(Xte))  # looking at all images takes some time
# squeeze=False guarantees a 2-D axes array, so axes[k, c] indexing also
# works in the nrows == 1 edge case.
_, axes = plt.subplots(nrows, ncols, figsize=(17, 17*nrows/ncols), squeeze=False)
for axis in axes.flatten():
    axis.set_axis_off()
    axis.set_aspect('equal')
for k in range(nrows):
    # Drop the trailing channel dimension for 2-D grayscale display.
    im_test_original = Xte[k].reshape(*input_image_shape)
    im_result = imgs_mask_test[k].reshape(*input_image_shape)
    im_ground_truth = yte[k].reshape(*input_image_shape)
    axes[k, 0].set_title("Original Test Image")
    axes[k, 0].imshow(im_test_original, cmap='gray')
    axes[k, 1].set_title("Ground Truth")
    axes[k, 1].imshow(im_ground_truth, cmap='gray')
    axes[k, 2].set_title("Predicted")
    # Pin the display range to the sigmoid's [0, 1] interval: imshow's
    # default per-image autoscaling stretches a near-constant probability
    # map into noise, which is one way a prediction "shows nothing".
    axes[k, 2].imshow(im_result, cmap='gray', vmin=0, vmax=1)

如果有人可以告诉我我做错了什么,那将非常有帮助。

0 个答案:

没有答案