[Image: the intended parallel architecture][1]

I am trying to build a parallel ANN. Here is what I intend:
import keras
from keras.layers import Input, Dense, Conv2D, Flatten
from keras.models import Model


def conv_net():
    input_shape = [120, 120, 1]
    inp = Input(shape=input_shape)
    print(type(inp))
    print(inp.shape)
    row_layers = []
    col_layers = []
    # fn = lambda x: self.conv(x)
    for i in range(0, 120, 40):
        row_layers = []
        for j in range(0, 120, 40):
            # out = (self.conv(inp[:,i:i+39,j:j+39]))
            inputs = inp[:, i:i + 40, j:j + 40]
            x = Dense(64, activation='relu')(inputs)
            out = Dense(64, activation='relu')(x)
            print(out.shape)
            row_layers.append(out)
        col_layers.append(keras.layers.concatenate(row_layers, axis=2))
    print(len(col_layers))
    merged = keras.layers.concatenate(col_layers, axis=1)
    print(merged.shape)
    con = Conv2D(1, kernel_size=5, strides=2, padding='same', activation='relu')(merged)
    print(con.shape)
    output = Flatten()(con)
    output = Dense(1)(output)
    print(output.shape)
    model = Model(inputs=inp, outputs=output)
    # plot_model(model, to_file='model.png')
    return model
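For reference, here is a small NumPy sketch (not part of the original code) of the 3x3 tiling geometry the two loops implement: 40-pixel tiles cut from the 120x120 image, stitched back together first along the width (axis 2) and then along the height (axis 1). The Dense layers above change the last dimension of each tile, but the concatenation axes follow the same pattern:

import numpy as np

img = np.arange(120 * 120, dtype='float32').reshape(1, 120, 120, 1)  # batch of one image

rows = []
for i in range(0, 120, 40):
    tiles = [img[:, i:i + 40, j:j + 40] for j in range(0, 120, 40)]
    rows.append(np.concatenate(tiles, axis=2))   # stitch the 3 tiles of a row along the width
full = np.concatenate(rows, axis=1)              # stitch the 3 rows along the height

print(full.shape)                  # (1, 120, 120, 1)
print(np.array_equal(full, img))   # True: the 9 tiles cover the image exactly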
I get a 'NoneType' object has no attribute '_inbound_nodes' error.

I debugged it, and the error comes from this line:

    inputs = inp[:, i:i + 40, j:j + 40]

Error:
Traceback (most recent call last):
  File "C:/Users/Todd Letcher/machine_learning_examples/unsupervised_class3/slicing_img.py", line 83, in <module>
    conv_net()
  File "C:/Users/Todd Letcher/machine_learning_examples/unsupervised_class3/slicing_img.py", line 80, in conv_net
    model = Model(inputs=inp, outputs = output)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\engine\network.py", line 91, in __init__
    self._init_graph_network(*args, **kwargs)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\engine\network.py", line 235, in _init_graph_network
    self.inputs, self.outputs)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\engine\network.py", line 1406, in _map_graph_network
    tensor_index=tensor_index)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\engine\network.py", line 1393, in build_map
    node_index, tensor_index)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\engine\network.py", line 1393, in build_map
    node_index, tensor_index)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\engine\network.py", line 1393, in build_map
    node_index, tensor_index)
  File "C:\Users\Todd Letcher\AppData\Local\Programs\Python\Python36\lib\site-packages\keras\engine\network.py", line 1365, in build_map
    node = layer._inbound_nodes[node_index]
AttributeError: 'NoneType' object has no attribute '_inbound_nodes'
Any help is appreciated. Thanks.

P.S.: When I remove the slicing line inp[:, i:i+39, j:j+39], it runs fine.
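As an aside, a minimal sketch of what seems to go wrong, assuming standalone Keras 2.x on the TensorFlow 1.x backend (the versions implied by the traceback paths): slicing the Input tensor directly returns a plain backend tensor that carries no Keras layer history, so Model() cannot trace the graph back through it and records a None inbound layer, which is where the _inbound_nodes error comes from. Wrapping the slice in a Lambda layer keeps it inside the Keras graph:

from keras.layers import Input, Lambda

inp = Input(shape=(120, 120, 1))

raw_slice = inp[:, 0:40, 0:40]                           # plain backend slice
lambda_slice = Lambda(lambda x: x[:, 0:40, 0:40])(inp)   # the same slice inside a layer

# The graph walker in network.py relies on this metadata being present:
print(hasattr(raw_slice, '_keras_history'))     # False -> Model() hits a None layer
print(hasattr(lambda_slice, '_keras_history'))  # True  -> the graph can be traced

(Newer tf.keras versions wrap such slicing ops in a layer automatically, so whether this error appears depends on the Keras version.)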
The image shows what I intend to do. The only difference is that I want to split the image into 9 tiles; in the picture, the same image is fed to all of the parallel conv nets.
[1]: https://i.stack.imgur.com/Z7nt0.png
Answer 0 (score: 1)
I finally arrived at an answer. I would still like to know why the earlier code fails, but I simply added a Lambda layer to do the slicing:
def conv_net(self):  # add dropout if overfitting
    input_shape = [120, 120, 1]
    inp = Input(shape=input_shape)
    col_layers = []

    def sliced(x, i, j):
        return x[:, i:i + 40, j:j + 40]

    for i in range(0, 120, 40):
        row_layers = []
        for j in range(0, 120, 40):
            # out = (self.conv(inp[:,i:i+39,j:j+39]))
            inputs = Lambda(sliced, arguments={'i': i, 'j': j})(inp)
            # inputs = Input(shape=input_shape_small)
            out = self.conv(inputs)
            print(out.shape)
            row_layers.append(out)
        col_layers.append(keras.layers.concatenate(row_layers, axis=2))
    print(len(col_layers))
    merged = keras.layers.concatenate(col_layers, axis=1)
    print(merged.shape)
    # merged = Reshape((3,3,1))(merged)
    print(merged.shape)
    con = Conv2D(1, kernel_size=5, strides=2, padding='same', activation='relu')(merged)
    con = BatchNormalization(momentum=0.8)(con)
    print(con.shape)
    # con = Conv2D(1,kernel_size=5,strides=2,padding='same',activation='relu')(inp)
    output = Flatten()(con)
    output = Dense(1)(output)
    print(output.shape)
    model = Model(inputs=inp, outputs=output)
    # plot_model(model, to_file='model.png')
    print(model.summary())
    plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return model
This runs without errors.
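Since self.conv is not shown in the answer, here is a self-contained adaptation for anyone who wants to run the idea end to end. The per-tile sub-network (tile_conv) and the compile/fit settings are placeholders, not the original author's, and it assumes the same standalone Keras used in the post:

import numpy as np
import keras
from keras.layers import Input, Lambda, Conv2D, BatchNormalization, Flatten, Dense
from keras.models import Model


def tile_conv(x):
    # Hypothetical stand-in for self.conv: a small per-tile sub-network.
    return Conv2D(8, kernel_size=3, padding='same', activation='relu')(x)


def sliced(x, i, j):
    # Same slicing helper as in the answer, wrapped in a Lambda below.
    return x[:, i:i + 40, j:j + 40]


def build_model():
    inp = Input(shape=(120, 120, 1))
    col_layers = []
    for i in range(0, 120, 40):
        row_layers = []
        for j in range(0, 120, 40):
            tile = Lambda(sliced, arguments={'i': i, 'j': j})(inp)
            row_layers.append(tile_conv(tile))
        col_layers.append(keras.layers.concatenate(row_layers, axis=2))
    merged = keras.layers.concatenate(col_layers, axis=1)
    con = Conv2D(1, kernel_size=5, strides=2, padding='same', activation='relu')(merged)
    con = BatchNormalization(momentum=0.8)(con)
    return Model(inputs=inp, outputs=Dense(1)(Flatten()(con)))


model = build_model()
model.compile(optimizer='adam', loss='mse')

# Quick smoke test on random data, just to confirm the graph is wired correctly.
x = np.random.rand(4, 120, 120, 1).astype('float32')
y = np.random.rand(4, 1).astype('float32')
model.fit(x, y, epochs=1, batch_size=2, verbose=0)
print(model.output_shape)  # (None, 1)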