我有一组大小为 32x32x32 的 MR 图像,并为其搭建了一个卷积网络。然而,我在最后一层遇到如下错误:ValueError: 检查模型目标时出错:预期 activation_8 具有形状 (None, 1),但得到的数组形状为 (3, 24)。我花了很多时间尝试调试这个错误,但没有成功。
我的代码的关键部分如下所示。我使用 train_on_batch、批量大小为 24 来训练数据。如有任何关于出错原因的指点,我将不胜感激。
def create_network():
    """Build and compile the CNN classifier used on the MR data.

    Architecture: three conv stages (each conv followed by BatchNorm+ReLU,
    each stage closed by 2x2 max-pooling), then Flatten -> Dense(256) ->
    ReLU -> Dense(1) -> sigmoid.  Compiled with SGD and binary
    crossentropy, so it is a binary classifier: targets fed to it must
    have shape (num_samples, 1).

    Returns:
        The compiled Keras Sequential model.

    NOTE(review): with dim_ordering='th', input_shape=(32, 32, 32) is
    interpreted as 32 *channels* of 32x32 2-D images, not a 3-D volume.
    If the MR data is truly volumetric, Convolution3D with
    input_shape=(1, 32, 32, 32) is probably what is wanted -- confirm
    against the data loader.
    """

    def add_bn_relu(model):
        # Every conv layer is followed by this identical BatchNorm + ReLU
        # pair; extracted to remove five copies of the same call.
        model.add(BatchNormalization(epsilon=0.001, mode=2, axis=1,
                                     momentum=0.99, weights=None,
                                     beta_init='zero', gamma_init='one',
                                     gamma_regularizer=None,
                                     beta_regularizer=None))
        model.add(Activation('relu'))

    print('Setting up first layer')
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode='valid', subsample=(1, 1),
                            init='glorot_uniform',
                            input_shape=(32, 32, 32), dim_ordering='th'))
    add_bn_relu(model)
    print('First layer setup')

    print('Setting up second layer')
    model.add(Convolution2D(32, 3, 3, border_mode='same', subsample=(1, 1),
                            init='glorot_uniform'))
    add_bn_relu(model)
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None))

    print('Setting up third layer')
    model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1),
                            init='glorot_uniform'))
    add_bn_relu(model)
    # BUG FIX: the original read `subsample = 1,1)` -- the tuple's opening
    # parenthesis was missing, which is a SyntaxError.
    model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1),
                            init='glorot_uniform'))
    add_bn_relu(model)
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None))

    model.add(Convolution2D(32, 3, 3, border_mode='same', subsample=(1, 1),
                            init='glorot_uniform'))
    add_bn_relu(model)
    model.add(Convolution2D(32, 3, 3, border_mode='same', subsample=(1, 1),
                            init='glorot_uniform'))
    add_bn_relu(model)
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None))

    # Classifier head: single sigmoid unit -> targets must be (N, 1).
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # BUG FIX: the original constructed `sgd` and then compiled with the
    # *string* 'sgd', silently discarding the configured learning rate,
    # decay, momentum and nesterov flag.  Pass the object itself.
    sgd = SGD(lr=1e-8, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
if __name__ == "__main__":
model = create_network()
batch_size = 24
X_train,y_train = load_train_data()
for i in range(300):
model.train_on_batch(X_train,y_train)