我正在尝试训练用于图像分割的UNet,并且我的网络中包含以下内容。
def inception_block(inputs, depth, splitted=True, activation='relu'):
    """Inception-style block: four parallel branches concatenated on channels.

    Branches: 1x1 bottleneck | 1x1->3x3 | 1x1->5x5 | 3x3 max-pool->1x1.
    When ``splitted`` is True the 3x3 and 5x5 convolutions are factored into
    1xN + Nx1 pairs (with BatchNormalization + activation in between).

    Args:
        inputs: 4D tensor. NOTE(review): BatchNormalization and concatenate
            both use axis=1, which assumes channels-first data — confirm this
            matches the backend's image_data_format.
        depth: total output filter budget, split across the four branches.
        splitted: factor the larger convolutions into separable 1xN/Nx1 pairs.
        activation: kept for interface compatibility; the block applies
            LeakyReLU internally on every branch regardless of this value.

    Returns:
        The concatenated, batch-normalized, activated tensor.
    """
    actv = LeakyReLU

    # Branch 1: plain 1x1 bottleneck (no intermediate activation).
    c1_1 = Conv2D(int(depth / 4), (1, 1), padding='same')(inputs)

    # Branch 2: 1x1 reduce, activation, then the 3x3 stage.
    c2_1 = Conv2D(int(depth / 8 * 3), (1, 1), padding='same')(inputs)
    c2_1 = actv()(c2_1)
    if splitted:
        c2_2 = Conv2D(int(depth / 2), (1, 3), padding='same')(c2_1)
        c2_2 = BatchNormalization(axis=1)(c2_2)
        c2_2 = actv()(c2_2)
        c2_3 = Conv2D(int(depth / 2), (3, 1), padding='same')(c2_2)
    else:
        c2_3 = Conv2D(int(depth / 2), (3, 3), padding='same')(c2_1)

    # Branch 3: 1x1 reduce, activation, then the 5x5 stage.
    # BUG FIX: the original passed activation='relu' to this Conv2D and then
    # applied LeakyReLU on top — a double activation, inconsistent with
    # branch 2. The inline 'relu' is removed so every branch follows the
    # same Conv -> LeakyReLU pattern.
    c3_1 = Conv2D(int(depth / 16), (1, 1), padding='same')(inputs)
    c3_1 = actv()(c3_1)
    if splitted:
        c3_2 = Conv2D(int(depth / 8), (1, 5), padding='same')(c3_1)
        c3_2 = BatchNormalization(axis=1)(c3_2)
        c3_2 = actv()(c3_2)
        c3_3 = Conv2D(int(depth / 8), (5, 1), padding='same')(c3_2)
    else:
        c3_3 = Conv2D(int(depth / 8), (5, 5), padding='same')(c3_1)

    # Branch 4: 3x3 max-pool (stride 1, same padding) then 1x1 projection.
    p4_1 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same')(inputs)
    c4_2 = Conv2D(int(depth / 8), (1, 1), padding='same')(p4_1)

    # Merge the branches along the channel axis, then normalize and activate.
    res = concatenate([c1_1, c2_3, c3_3, c4_2], axis=1)
    res = BatchNormalization(axis=1)(res)
    res = actv()(res)
    return res
和
def N2(inputs,Z):
splitted=True
act='relu'
strides=(2,2)
padding='valid'
conv1 = (inception_block(inputs, 32, splitted= splitted, activation= act))
pool1 = poolConvolution2D(32, filters=(3, 3), strides=(2,2))(conv1)
pool1=Dropout(Z.drop)(pool1)
conv2 = inception_block(pool1, 64, splitted= splitted, activation=act)
pool2 = poolConvolution2D(64, filters=(3, 3), strides=(2,2))(conv2)
pool2=Dropout(Z.drop)(pool2)
conv3 = inception_block(pool2, 128, splitted=splitted, activation=act)
pool3 = poolConvolution2D(128, filters=(3, 3), strides=(2,2))(conv3)
pool3=Dropout(Z.drop)(pool3)
...
def poolConvolution2D(nb_filter, filters, strides=(1, 1)):
    """Build a learned-pooling stage: strided Conv2D -> BatchNorm -> ELU.

    Args:
        nb_filter: number of output filters for the convolution.
        filters: kernel size tuple passed to Conv2D (despite the name,
            this is the kernel size, not a filter count).
        strides: convolution strides; (2, 2) gives 2x spatial downsampling.

    Returns:
        A closure that applies the conv/norm/activation stack to a tensor.
    """
    def apply(tensor):
        # The strided convolution performs the downsampling itself.
        convolved = Conv2D(nb_filter, filters, strides=strides,
                           padding='same')(tensor)
        # axis=1 assumes channels-first data — confirm against data format.
        normalized = BatchNormalization(axis=1)(convolved)
        return ELU()(normalized)
    return apply
运行代码时,我得到一个ValueError: Initializer for variable conv2d/kernel/ is from inside a control-flow construct, such as a loop or conditional. When creating a variable inside a loop or conditional, use a lambda as the initializer.
是的,我已经浏览过Github和SO上的相关帖子,它们提到这个错误有时是由于将float32/float64类型与int类型连接(concatenate)导致的,但这里似乎并非如此。我的输入格式为Tensor("main_input", shape=(?, 7, 96, 96), dtype=float32)。
我对使用TF还比较陌生,如能提供任何帮助,我将不胜感激。