I am currently implementing an autoencoder in Keras to upsample images.
The idea is to use the information from neighboring images to create an upsampled center image. The vertical and the horizontal neighbors should share weights in the encoder; at the bottleneck, the vertical and horizontal image stacks are concatenated and decoding begins.
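In Keras this weight sharing means instantiating each encoder layer once and calling it on both input stacks. A minimal sketch of the pattern (shapes taken from the full code below):

from keras.layers import Input, Conv3D, Concatenate
# both branches go through the same Conv3D instance, so they share one kernel
in_v = Input(shape=[5, 12, 12, 3])   # vertical neighbor stack
in_h = Input(shape=[5, 12, 12, 3])   # horizontal neighbor stack
shared_conv = Conv3D(24, (3, 3, 3), padding='same')  # created once
feat_v = shared_conv(in_v)
feat_h = shared_conv(in_h)           # reuses the same weights as feat_v
merged = Concatenate()([feat_v, feat_h])  # fused, as at the bottleneck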
The code is as follows:
import keras  # needed further down for keras.optimizers.Adam
from keras.models import Model
from keras.layers import Input, Concatenate
from keras.layers import Conv3D, Conv3DTranspose, Conv2DTranspose
from keras.utils import np_utils
from keras import backend as K
# number of feature maps
L = 16
L0 = 24
L1 = 2*L #32
L2 = 4*L #64
L3 = 6*L #96
# input layers
input_LF_v = Input(shape=[5,12,12,3])
input_LF_h = Input(shape=[5,12,12,3])
# 5x12x12
# Encoding path
encoded_1 = Conv3D(L0,(3,3,3),strides=(1, 1, 1),input_shape=[5, 12, 12, 3], padding='SAME',data_format="channels_last")
encoded_2 = Conv3D(L0,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")
encoded_3 = Conv3D(L1,(3,3,3),strides=(1, 2, 2),padding='SAME',data_format="channels_last")
# now 5x6x6
encoded_4 = Conv3D(L1,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")
encoded_5 = Conv3D(L1,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")
encoded_6 = Conv3D(L2,(3,3,3),strides=(1, 2, 2),padding='SAME',data_format="channels_last")
# now 5x3x3
encoded_v1 = encoded_1(input_LF_v)
encoded_v2 = encoded_2(encoded_v1)
encoded_v3 = encoded_3(encoded_v2)
encoded_v4 = encoded_4(encoded_v3)
encoded_v5 = encoded_5(encoded_v4)
encoded_v6 = encoded_6(encoded_v5)
encoded_h1 = encoded_1(input_LF_h)
encoded_h2 = encoded_2(encoded_h1)
encoded_h3 = encoded_3(encoded_h2)
encoded_h4 = encoded_4(encoded_h3)
encoded_h5 = encoded_5(encoded_h4)
encoded_h6 = encoded_6(encoded_h5)
# skip paths
encoded_vh2 = Concatenate()([encoded_v2,encoded_h2]) # 5x12x12x48
encoded_vh5 = Concatenate()([encoded_v5,encoded_h5]) # 5x6x6x64
# bottle neck of encoder
encoded_end = Concatenate()([encoded_v6,encoded_h6]) # 5x3x3x128
# print(encoded_vh2.shape)
# print(encoded_vh5.shape)
# print(encoded_end.shape)
# '''
# decoder starts here
decoded_6 = Conv3DTranspose(L3*2,(3,3,3),strides=(1, 1, 1),input_shape=[5, 3, 3, L3*2],padding='SAME',data_format="channels_last")(encoded_end)
decoded_5 = Conv3DTranspose(L3*2,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")(decoded_6)
decoded_4 = Conv3DTranspose(L3,(3,3,3),strides=(1, 2, 2),padding='SAME',data_format="channels_last")(decoded_5)
# 5x6x6x64
decoded_vh4 = Concatenate()([decoded_4,encoded_vh5]) # 5x6x6x128
#
decoded_3 = Conv3DTranspose(L3*2,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")(decoded_vh4)
decoded_2 = Conv3DTranspose(L3,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")(decoded_3)
decoded_1 = Conv3DTranspose(L0*2,(3,3,3),strides=(1, 2, 2),padding='SAME',data_format="channels_last")(decoded_2)
# 5x12x12x48
decoded_vh1 = Concatenate()([decoded_1,encoded_vh2]) # 5x12x12x96
decoded_0 = Conv3DTranspose(L0*4,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")(decoded_vh1)
decoded_00 = Conv3DTranspose(L0*2,(3,3,3),strides=(1, 1, 1),padding='SAME',data_format="channels_last")(decoded_0)
decoded_up1 = Conv3DTranspose(L0,(3,3,3),strides=(1, 2, 2),padding='SAME',data_format="channels_last")(decoded_00)
# 5x24x24x24
decoded_vh_up = Concatenate()([decoded_up1[:,0,:,:,:],decoded_up1[:,1,:,:,:],decoded_up1[:,2,:,:,:],decoded_up1[:,3,:,:,:],decoded_up1[:,4,:,:,:]])
# 24x24x120
decoded_vh_up0 = Conv2DTranspose(L0*5,(3,3),strides= (1,1),padding='SAME',data_format="channels_last")(decoded_vh_up)
decoded_vh_up1 = Conv2DTranspose(L2,(3,3),strides=(1,1),padding='SAME',data_format="channels_last")(decoded_vh_up0)
decoded_vh_up2 = Conv2DTranspose(L1,(3,3),strides=(1,1),padding='SAME',data_format="channels_last")(decoded_vh_up1)
decoded_vh_up3 = Conv2DTranspose(L,(3,3),strides=(2,2),padding='SAME',data_format="channels_last")(decoded_vh_up2)
# 48x48x16
decoded_vh_up4 = Conv2DTranspose(L,(3,3),strides=(1,1),padding='SAME',data_format="channels_last")(decoded_vh_up3)
decoded_vh_up5 = Conv2DTranspose(3,(1,1),strides=(1,1),padding='SAME',data_format="channels_last")(decoded_vh_up4)
# 48x48x3
learning_rate = 0.0001
adam = keras.optimizers.Adam(lr=learning_rate)
batch = 20
epoch = 5000
SR_Autoencoder = Model(inputs=[input_LF_v, input_LF_h], outputs = decoded_vh_up5)
SR_Autoencoder.compile(optimizer = adam, loss = 'mse')
SR_Autoencoder.fit([stack_v,stack_h], cv, batch_size = batch, epochs = epoch, validation_split = 0.1, shuffle = True)
When I try to run it, I get the following error message:
Using TensorFlow backend.
Traceback (most recent call last):
  File "/home/z/PycharmProjects/SR/SR_keras.py", line 129, in <module>
    SR_Autoencoder = Model(inputs=[input_LF_v, input_LF_h], outputs = decoded_vh_up5)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1734, in __init__
    build_map_of_graph(x, finished_nodes, nodes_in_progress)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/home/z/tensorflow/lib/python3.4/site-packages/keras/engine/topology.py", line 1695, in build_map_of_graph
    layer, node_index, tensor_index = tensor._keras_history
AttributeError: 'Tensor' object has no attribute '_keras_history'
I have searched through some answers on Stack Overflow, but I cannot find any layer in my code that is not a Keras layer.
Can anyone give me a hint?
Thanks!
Answer (score: 1)
Every operation on tensors must be performed by a Keras layer; you cannot do it outside of one. Slicing decoded_up1 with [:,i,:,:,:] produces plain backend tensors that have no _keras_history, so when Model() traces the graph back through the Concatenate layer it fails with exactly the error you see. Wrap the slicing and concatenation in a Lambda layer instead:
from keras.layers import Lambda

# slicing and concatenation done inside a Lambda, so the output keeps its _keras_history
decoded_vh_up = Lambda(lambda x: K.concatenate(
    [x[:,0,:,:,:], x[:,1,:,:,:], x[:,2,:,:,:], x[:,3,:,:,:], x[:,4,:,:,:]]
))(decoded_up1)
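The Lambda output is a proper Keras tensor of shape (batch, 24, 24, 120), so the Conv2DTranspose layers that follow it work unchanged. If you prefer built-in layers over a Lambda, the same merge of the five views into the channel axis can, as far as I can tell, also be written with Permute and Reshape (a sketch, assuming decoded_up1 has the 5x24x24x24 shape stated in your comments):

from keras.layers import Permute, Reshape
# move the view axis next to the channels, then fold it into them
x = Permute((2, 3, 1, 4))(decoded_up1)        # (batch, 24, 24, 5, 24)
decoded_vh_up = Reshape((24, 24, 5 * 24))(x)  # (batch, 24, 24, 120)

Either way, the rule is the same: any slicing, reshaping or backend call on a Keras tensor has to live inside a layer, otherwise Model() cannot trace the graph.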