到目前为止,这是我使用https://github.com/zhixuhao/unet中的代码尝试过的。我使用了自己的训练和测试图像,而不是使用该链接中的图像:
from model import *
from data import *
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.metrics import roc_auc_score
import sys
import cv2
###get X_train, y_train, X_test, Y_test
# Load the training images and build a binary mask per image by OR-ing
# together every label file whose trailing name matches the image's.
n = 0
img_list = []
IMG_HEIGHT = 1024
IMG_WIDTH = 1024
num_train_imgs = 48
X_train = np.zeros((num_train_imgs, IMG_HEIGHT, IMG_WIDTH), dtype=np.uint8)
# FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24 — use builtin bool.
Y_train = np.zeros((num_train_imgs, IMG_HEIGHT, IMG_WIDTH), dtype=bool)
TRAIN_PATH1 = '/data/membrane/train' + '/image/'
TRAIN_PATH2 = '/data/membrane/train' + '/label/'
folder = '/home/me/unet'
for filename in os.listdir(folder + TRAIN_PATH1):
    # FIX: the as_grey keyword was renamed to as_gray in scikit-image 0.16.
    img = io.imread(folder + TRAIN_PATH1 + filename, as_gray=True)
    if img is not None:
        # NOTE(review): io.imread(..., as_gray=True) may return floats in [0, 1];
        # storing those into a uint8 array truncates them to 0 — confirm the dtype.
        X_train[n] = img
        mask = np.zeros((IMG_HEIGHT, IMG_WIDTH), dtype=bool)
        imgname = folder + TRAIN_PATH1 + filename
        img_rs = img.reshape([IMG_HEIGHT, IMG_WIDTH, 1])
        img_list.append(img_rs)
        # Match label files to this image by the last 9 characters before the
        # extension (positions [-13:-4]) and merge all matching masks.
        for mskname in os.listdir(folder + TRAIN_PATH2):
            if mskname[-13:-4] == imgname[-13:-4]:
                mask_ = cv2.imread(folder + TRAIN_PATH2 + mskname)
                mask_ = cv2.cvtColor(mask_, cv2.COLOR_BGR2GRAY)
                mask = np.maximum(mask, mask_)
        Y_train[n] = mask
        n += 1
img_list = np.array(img_list)
# Get and resize test images
# Read each test image as grayscale, remember its original size, and stack
# the pixels into X_test.
num_test_imgs = 18
X_test = np.zeros((num_test_imgs, IMG_HEIGHT, IMG_WIDTH), dtype=np.uint8)
sizes_test = []
n = 0
test_dir = folder + '/data/membrane/test/'
for filename in os.listdir(test_dir):
    gray = cv2.cvtColor(cv2.imread(test_dir + filename), cv2.COLOR_BGR2GRAY)
    sizes_test.append([gray.shape[0], gray.shape[1]])
    X_test[n] = gray
    n += 1
# Hyperparameter search space for hyperopt.  Every entry is a discrete
# hp.choice, so fmin reports the *index* of the winning option, not its value.
space = {
    'lr': hp.choice('lr', [1e-6, 5e-5, 1e-5, 5e-4, 1e-4, 5e-3, 1e-3]),  # Adam learning rate
    'batchi': hp.choice('batchi', [0, 1]),                              # 1 -> add BatchNormalization
    'dropout1': hp.choice('dropout1', [.3, .4, .5, .6, .7]),
    'dropout2': hp.choice('dropout2', [.3, .4, .5, .6, .7]),
    'steps_per_epoch': hp.choice('steps_per_epoch', [5, 10, 20]),
    'epochs': hp.choice('epochs', [1, 2, 3, 4]),
    'down_activation': hp.choice('down_activation', ['relu', 'elu']),   # encoder activation
    'up_activation': hp.choice('up_activation', ['relu', 'elu']),       # decoder activation
}
def _down_block(x, filters, activation, use_batchnorm):
    # Encoder building block: two 3x3 'same' convs (he_normal init),
    # each optionally followed by batch normalization.
    for _ in range(2):
        x = Conv2D(filters, 3, activation=activation, padding='same',
                   kernel_initializer='he_normal')(x)
        if use_batchnorm:
            x = BatchNormalization()(x)
    return x


def _up_block(x, skip, filters, activation):
    # Decoder building block: 2x upsample + 2x2 conv, concatenate with the
    # encoder skip connection on the channel axis, then two 3x3 convs.
    up = Conv2D(filters, 2, activation=activation, padding='same',
                kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
    merged = concatenate([skip, up], axis=3)
    merged = Conv2D(filters, 3, activation=activation, padding='same',
                    kernel_initializer='he_normal')(merged)
    return Conv2D(filters, 3, activation=activation, padding='same',
                  kernel_initializer='he_normal')(merged)


def unet_batch_hyper(space, pretrained_weights=None):
    """Build, train and evaluate a U-Net for one hyperopt trial.

    Parameters
    ----------
    space : dict
        One sample from the hyperopt search space ('lr', 'batchi',
        'dropout1', 'dropout2', 'steps_per_epoch', 'epochs',
        'down_activation', 'up_activation').
    pretrained_weights : str, optional
        Path to an HDF5 weights file to load before training.

    Returns
    -------
    dict
        {'loss': -metric, 'status': STATUS_OK, 'model': model} in the format
        hyperopt's fmin expects ('loss' must be a plain float).
    """
    # NOTE(review): the model expects 256x256x1 inputs, but the arrays built
    # above are 1024x1024 with no channel axis — resize/reshape them (e.g.
    # X_train = X_train.reshape(-1, H, W, 1)) before fitting, or change this.
    input_size = (256, 256, 1)
    use_bn = space['batchi'] == 1
    down_act = space['down_activation']
    up_act = space['up_activation']

    inputs = Input(input_size)

    # Contracting path.
    conv1 = _down_block(inputs, 64, down_act, use_bn)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _down_block(pool1, 128, down_act, use_bn)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _down_block(pool2, 256, down_act, use_bn)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _down_block(pool3, 512, down_act, use_bn)
    # FIX: the sampled dropout rates were never used (both Dropouts were
    # hard-coded to 0.5), so 'dropout1'/'dropout2' had no effect on the search.
    drop4 = Dropout(space['dropout1'])(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    conv5 = _down_block(pool4, 1024, down_act, use_bn)
    drop5 = Dropout(space['dropout2'])(conv5)

    # Expanding path with skip connections.
    dec6 = _up_block(drop5, drop4, 512, up_act)
    dec7 = _up_block(dec6, conv3, 256, up_act)
    dec8 = _up_block(dec7, conv2, 128, up_act)
    dec9 = _up_block(dec8, conv1, 64, up_act)
    dec9 = Conv2D(2, 3, activation=up_act, padding='same',
                  kernel_initializer='he_normal')(dec9)
    outputs = Conv2D(1, 1, activation='sigmoid')(dec9)

    # FIX: Model(input=..., output=...) is the deprecated Keras 1 spelling.
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(lr=space['lr']), loss='binary_crossentropy',
                  metrics=[mean_iou1])
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    lr = space['lr']
    filepath = "weights-LR_" + str(lr) + "-S_20-E_{epoch:02d}-L_{loss:.2f}-batchN.hdf5"
    model_checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1,
                                       save_best_only=True)
    # FIX: 'y_train' and 'val_call' were undefined names — the arrays above are
    # named Y_train, and the checkpoint callback created here was never passed.
    model.fit(X_train, Y_train, steps_per_epoch=space['steps_per_epoch'],
              epochs=space['epochs'], verbose=0, callbacks=[model_checkpoint])

    # NOTE(review): Y_test is never constructed in this script (only X_test is);
    # build the test labels the same way as Y_train before running this.
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    # FIX: hyperopt calls float() on 'loss'; return a plain Python float so a
    # non-scalar value cannot raise "float() argument must be ... not 'function'".
    return {'loss': -float(acc), 'status': STATUS_OK, 'model': model}
# Run the TPE search over the space above; Trials keeps the per-trial records.
trials = Trials()
best = fmin(
    fn=unet_batch_hyper,
    space=space,
    algo=tpe.suggest,
    max_evals=50,
    trials=trials,
)
# hp.choice entries are reported as indices into their option lists.
print('best: ')
print(best)
但是它给出了错误:
TypeError Traceback (most recent call last)
~/anaconda3/lib/python3.6/site-packages/hyperopt/base.py in evaluate(self, config, ctrl, attach_attachments)
860 try:
--> 861 dict_rval['loss'] = float(dict_rval['loss'])
862 except (TypeError, KeyError):
TypeError: float() argument must be a string or a number, not 'function'
During handling of the above exception, another exception occurred:
InvalidLoss
我不确定返回字典中 `loss` 应该是什么内容。我在网上读了一些关于如何在 Keras 中使用 `hyperopt` 的教程,但是找不到任何针对 U-Net 的例子。我该如何修复这段代码?