I have a Keras autoencoder model that takes MNIST images and a shape vector (a vector of shape (12,)). I am trying to add the shape vector to the feature maps obtained in the decoding stage (see the spatial tiling in Figure 4 of https://arxiv.org/pdf/1603.02199.pdf). The model produces a strange error related to the Lambda layer, and I cannot work out its cause from the traceback.
Source code:
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Reshape, Lambda, Add
from keras import backend as K

input_img = Input(shape=(28, 28, 1))
shape_input = Input(shape=(12,))
shape_input_reshaped = Reshape((1, 1, 12))(shape_input)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(12, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(12, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
# at this point the representation is (4, 4, 12), i.e. 192-dimensional
x = Conv2D(12, (3, 3), activation='relu', padding='same')(encoded)
shape_vec = Lambda(lambda x: K.tile(x, [-1, 4, 4, 1]))(shape_input_reshaped)
x = Add()([x, shape_vec])
x = UpSampling2D((2, 2))(x)
x = Conv2D(12, (3, 3), activation='relu', padding='same')(x)
shape_vec = Lambda(lambda x: K.tile(x, [-1, 8, 8, 1]))(shape_input_reshaped)
x = Add()([x, shape_vec])
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = Model(inputs=[input_img, shape_input], outputs=decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
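The model is trained with a fit call along the following lines (a reconstruction for context, pieced together from the traceback below; the variable names x_train, train_shapes, x_train_transformed and the epochs/batch_size values are assumptions — only shuffle, validation_data and callbacks appear verbatim in the traceback):
from keras.callbacks import TensorBoard

# Hypothetical training call: the model takes two inputs (images and shape
# vectors) and reconstructs the transformed image as its single output.
autoencoder.fit([x_train, train_shapes], x_train_transformed,
                epochs=50,
                batch_size=128,
                shuffle=True,
                validation_data=([x_test, test_shapes], x_test_transformed),
                callbacks=[TensorBoard(log_dir='./logs/2')])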
Error log:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-8-adda46da1298> in <module>()
5 shuffle=True,
6 validation_data=([x_test, test_shapes], x_test_transformed),
----> 7 callbacks=[TensorBoard(log_dir='./logs/2')])
8
/media/zero/data1/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1703 initial_epoch=initial_epoch,
1704 steps_per_epoch=steps_per_epoch,
-> 1705 validation_steps=validation_steps)
1706
1707 def evaluate(self, x=None, y=None,
/media/zero/data1/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
1233 ins_batch[i] = ins_batch[i].toarray()
1234
-> 1235 outs = f(ins_batch)
1236 if not isinstance(outs, list):
1237 outs = [outs]
/media/zero/data1/anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2476 session = get_session()
2477 updated = session.run(fetches=fetches, feed_dict=feed_dict,
-> 2478 **self.session_kwargs)
2479 return updated[:len(self.outputs)]
2480
/media/zero/data1/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
903 try:
904 result = self._run(None, fetches, feed_dict, options_ptr,
--> 905 run_metadata_ptr)
906 if run_metadata:
907 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/media/zero/data1/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1138 if final_fetches or final_targets or (handle and feed_dict_tensor):
1139 results = self._do_run(handle, final_targets, final_fetches,
-> 1140 feed_dict_tensor, options, run_metadata)
1141 else:
1142 results = []
/media/zero/data1/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1319 if handle is None:
1320 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1321 run_metadata)
1322 else:
1323 return self._do_call(_prun_fn, handle, feeds, fetches)
/media/zero/data1/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1338 except KeyError:
1339 pass
-> 1340 raise type(e)(node_def, op, message)
1341
1342 def _extend_graph(self):
InvalidArgumentError: Expected multiples[0] >= 0, but got -1
[[Node: lambda_1/Tile = Tile[T=DT_FLOAT, Tmultiples=DT_INT32, _device="/job:localhost/replica:0/task:0/device:GPU:0"](reshape_1/Reshape, lambda_1/Tile/multiples)]]
[[Node: loss/mul/_213 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_1303_loss/mul", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Answer 0 (score: 1)
I was able to train your model by replacing -1 with 1 in K.tile.
Code snippet:
x = Conv2D(12, (3, 3), activation='relu', padding='same')(encoded)
shape_vec = Lambda(lambda x: K.tile(x, [1, 4, 4, 1]))(shape_input_reshaped)
x = Add()([x, shape_vec])
x = UpSampling2D((2, 2))(x)
x = Conv2D(12, (3, 3), activation='relu', padding='same')(x)
shape_vec = Lambda(lambda x: K.tile(x, [1, 8, 8, 1]))(shape_input_reshaped)
x = Add()([x, shape_vec])
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
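Why this works (a note added for context, not part of the original answer): the multiples argument of K.tile, which maps to tf.tile, must contain only non-negative integers; -1 is not an "infer this axis" wildcard the way it is in Reshape, which is exactly what the InvalidArgumentError about multiples[0] is complaining about. Since shape_input_reshaped already carries the batch axis, a multiple of 1 on axis 0 leaves it untouched and only the two spatial axes are repeated. A minimal sketch (assuming the TensorFlow backend) that checks the resulting shape:
import numpy as np
from keras import backend as K

# A (batch, 1, 1, 12) dummy tensor standing in for shape_input_reshaped.
dummy = K.constant(np.random.rand(2, 1, 1, 12).astype('float32'))

# Repeat only the two spatial axes; batch and channel axes keep a multiple of 1.
tiled = K.tile(dummy, [1, 4, 4, 1])
print(K.int_shape(tiled))  # (2, 4, 4, 12) -- matches the decoder feature map it is added to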