InvalidArgumentError: Conv2DCustomBackpropInput: input and out_backprop must have the same batch size

Asked: 2019-03-12 06:41:22

Tags: python tensorflow keras deep-learning

I get this error when trying to implement custom layers for GatedConv2D and GatedDeConv2D.

Generator:

    inp = Input(shape=(self.vars.INP_SHAPE[0], self.vars.INP_SHAPE[1], 9))
    cnum = 64
    x1, mask1 = self.GatedConv2D(inp, cnum, (7, 7), (2,2), use_lrn=False)
    x2, mask2 = self.GatedConv2D(x1, 2*cnum, (5, 5), (2, 2))
    x3, mask3 = self.GatedConv2D(x2, 4*cnum, (5, 5), (2, 2))
    x4, mask4 = self.GatedConv2D(x3, 8*cnum, (3, 3), (2, 2))
    x5, mask5 = self.GatedConv2D(x4, 8*cnum, (3, 3), (2, 2))
    x6, mask6 = self.GatedConv2D(x5, 8*cnum, (3, 3), (2, 2))
    x7, mask7 = self.GatedConv2D(x6, 8*cnum, (3, 3), (2, 2))

    x7, _ = self.GatedConv2D(x7, 8*cnum, (3, 3), (1, 1), dilation=2)
    x7, _ = self.GatedConv2D(x7, 8*cnum, (3, 3), (1, 1), dilation=4)
    x7, _ = self.GatedConv2D(x7, 8*cnum, (3, 3), (1, 1), dilation=8)
    x7, _ = self.GatedConv2D(x7, 8*cnum, (3, 3), (1, 1), dilation=16)

    x8, _ = self.GatedDeConv2D(x7, [self.vars.SCFEGAN_BATCH_SIZE, int(self.vars.INP_SHAPE[0]/64), int(self.vars.INP_SHAPE[1]/64), 8*cnum])
    x8 = Concatenate(axis=0)([x6, x8])
    x8, mask8 = self.GatedConv2D(x8, 8*cnum, (3, 3), (1, 1))

    x9, _ = self.GatedDeConv2D(x8, [self.vars.SCFEGAN_BATCH_SIZE, int(self.vars.INP_SHAPE[0]/32), int(self.vars.INP_SHAPE[1]/32), 8*cnum])
    x9 = Concatenate(axis=0)([x5, x9])
    x9, mask9 = self.GatedConv2D(x9, 8*cnum, (3, 3), (1, 1))

    x10, _ = self.GatedDeConv2D(x9, [self.vars.SCFEGAN_BATCH_SIZE, int(self.vars.INP_SHAPE[0]/16), int(self.vars.INP_SHAPE[1]/16), 8*cnum])
    x10 = Concatenate(axis=0)([x4, x10])
    x10, mask10 = self.GatedConv2D(x10, 8*cnum, (3, 3), (1, 1))

    x11, _ = self.GatedDeConv2D(x10, [self.vars.SCFEGAN_BATCH_SIZE, int(self.vars.INP_SHAPE[0]/8), int(self.vars.INP_SHAPE[1]/8), 4*cnum])
    x11 = Concatenate(axis=0)([x3, x11])
    x11, mask11 = self.GatedConv2D(x11, 4*cnum, (3, 3), (1, 1))

    x12, _ = self.GatedDeConv2D(x11, [self.vars.SCFEGAN_BATCH_SIZE, int(self.vars.INP_SHAPE[0]/4), int(self.vars.INP_SHAPE[1]/4), 2*cnum])
    x12 = Concatenate(axis=0)([x2, x12])
    x12, mask12 = self.GatedConv2D(x12, 2*cnum, (3, 3), (1, 1))

    x13, _ = self.GatedDeConv2D(x12, [self.vars.SCFEGAN_BATCH_SIZE, int(self.vars.INP_SHAPE[0]/2), int(self.vars.INP_SHAPE[1]/2), cnum])
    x13 = Concatenate(axis=0)([x1, x13])
    x13, mask13 = self.GatedConv2D(x13, cnum, (3, 3), (1, 1))

    x14, _ = self.GatedDeConv2D(x13, [self.vars.SCFEGAN_BATCH_SIZE, int(self.vars.INP_SHAPE[0]), int(self.vars.INP_SHAPE[1]), 9])
    x14 = Concatenate(axis=0)([inp, x14])
    x14, mask14 = self.GatedConv2D(x14, 3, (3, 3), (1, 1))

    x14 = Activation('tanh')(x14)

    model = Model(inputs=inp, outputs=[x14, mask14])
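
Note that the skip connections use Concatenate(axis=0), which for channels_last tensors is the batch axis. For reference, this is my understanding of what axis=0 vs. axis=-1 concatenation does to the shapes (a minimal numpy sketch with dummy sizes); I am not sure yet whether it is related to the error below:

    import numpy as np

    a = np.zeros((1, 4, 4, 8))   # (batch, height, width, channels)
    b = np.zeros((1, 4, 4, 8))

    print(np.concatenate([a, b], axis=0).shape)   # (2, 4, 4, 8)  -> batch becomes 2
    print(np.concatenate([a, b], axis=-1).shape)  # (1, 4, 4, 16) -> batch stays 1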

GatedConv2D:

def GatedConv2D(self, x, filters, kernel_size, strides, dilation=1, activation='leaky_relu', use_lrn=True):
    inp = x
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=dilation, padding='same')(x)
    if use_lrn:
        x = LRNLayer()(x)

    if activation == 'leaky_relu':
        x = LeakyReLU()(x)
    else:
        x = Activation(activation)(x)

    g = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same', dilation_rate=dilation)(inp)
    g = Activation('sigmoid')(g)

    x = multiply([x, g])

    return x, g
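
The intent of GatedConv2D is a gated convolution: a feature branch multiplied element-wise by a sigmoid gate computed from the same input. A minimal standalone version with plain Keras layers (dummy shapes, no LRN), just to show the computation I am aiming for:

    from keras.layers import Input, Conv2D, LeakyReLU, Activation, multiply
    from keras.models import Model

    inp = Input(shape=(64, 64, 9))                   # dummy input, channels_last
    feat = Conv2D(32, (3, 3), padding='same')(inp)   # feature branch
    feat = LeakyReLU()(feat)
    gate = Conv2D(32, (3, 3), padding='same')(inp)   # gate branch
    gate = Activation('sigmoid')(gate)               # gate values in [0, 1]
    out = multiply([feat, gate])                     # gated features

    Model(inputs=inp, outputs=out).summary()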

GatedDeConv2D:

class GatedDeConv(Layer):
    def __init__(self, out_shape, kernel_size, strides, std_dev):
        super(GatedDeConv, self).__init__()
        self.out_shape = out_shape
        self.kernel_size = kernel_size
        self.strides = strides
        self.std_dev = std_dev

    def call(self, x):
        inp = x

        kernel = K.random_uniform_variable(shape=(self.kernel_size[0], self.kernel_size[1], self.out_shape[-1], int(x.get_shape()[-1])), low=0, high=1)

        deconv = K.conv2d_transpose(x, kernel=kernel, strides=self.strides, output_shape=self.out_shape, padding='same')

        biases = K.zeros(shape=(self.out_shape[-1]))

        deconv = K.reshape(K.bias_add(deconv, biases), deconv.get_shape())
        deconv = LeakyReLU()(deconv)

        g = K.conv2d_transpose(inp, kernel, output_shape=self.out_shape, strides=self.strides, padding='same')
        biases2 = K.zeros(shape=(self.out_shape[-1]))
        g = K.reshape(K.bias_add(g, biases2), deconv.get_shape())

        g = K.sigmoid(g)

        deconv = tf.multiply(deconv, g)

        outputs = [deconv, g]

        output_shapes = self.compute_output_shape(x.shape)
        for output, shape in zip(outputs, output_shapes):
            output._keras_shape = shape

        return [deconv, g]

    def compute_output_shape(self, input_shape):
        return [self.out_shape, self.out_shape]

    def compute_mask(self, input, input_mask=None):
        return 2 * [None]
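
As far as I understand, K.conv2d_transpose maps onto tf.nn.conv2d_transpose, so the kernel has shape (height, width, out_channels, in_channels) and output_shape has to include the batch size explicitly. Since I pass self.vars.SCFEGAN_BATCH_SIZE there, the batch entry is fixed at graph-construction time, while the actual batch size of x only becomes known when data is fed. A minimal sketch of that call in isolation (dummy sizes, channels_last; the mismatch comment at the end is just my guess at what is happening):

    import numpy as np
    from keras import backend as K

    h, w, in_ch, out_ch = 8, 8, 16, 32
    x = K.placeholder(shape=(None, h, w, in_ch))
    kernel = K.random_uniform_variable(shape=(3, 3, out_ch, in_ch), low=0, high=1)

    # output_shape[0] is hard-coded to 1 here; it must match the batch size
    # of whatever tensor is actually fed in at run time.
    y = K.conv2d_transpose(x, kernel=kernel, output_shape=(1, h * 2, w * 2, out_ch),
                           strides=(2, 2), padding='same')

    f = K.function([x], [y])
    print(f([np.zeros((1, h, w, in_ch), dtype='float32')])[0].shape)  # (1, 16, 16, 32)
    # Feeding 2 samples instead of 1 gives me the same kind of
    # "input and out_backprop must have the same batch size" error.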

LRNLayer:

class LRNLayer(Layer):
    def __init__(self, alpha=1e-4, beta=0.75, k=2, n=5):
        super(LRNLayer, self).__init__()
        self.alpha = alpha
        self.beta = beta
        self.k = k
        self.n = n

    def call(self, x):
        op = []
        nc = np.shape(x)[-1]
        for i in range(nc):
            sq = K.sum((x[:,:,:,max(0, int(i-self.n/2)):min(nc-1, i+int(self.n/2))+1]) ** 2)
            op.append(x[:,:,:,i]/((self.k + self.alpha * sq) ** self.beta))

        op = tf.convert_to_tensor(op)

        op = tf.transpose(op, perm=[1,2,3,0])

        op_shape = self.compute_output_shape(np.shape(x))

        op._keras_shape = op_shape

        return op

    def compute_output_shape(self, input_shape):
        return input_shape

    def compute_mask(self, input, input_mask):
        return 1*[None]
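
As a sanity check for LRNLayer I also looked at the built-in op tf.nn.local_response_normalization, wrapped in a Lambda layer. The mapping of depth_radius/bias to the n/k parameters above is my own assumption, not something I have verified:

    import tensorflow as tf
    from keras.layers import Input, Lambda

    # depth_radius ~ n // 2, bias ~ k; alpha and beta as above (my assumption).
    lrn = Lambda(lambda t: tf.nn.local_response_normalization(
        t, depth_radius=2, bias=2.0, alpha=1e-4, beta=0.75))

    x = Input(shape=(32, 32, 64))
    y = lrn(x)   # same shape as x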

Discriminator:

    inp = Input(shape=self.vars.INP_SHAPE)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2), activation='relu')(inp)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), strides=(2, 2), activation='relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=(2, 2), activation='relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=(2, 2), activation='relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=(2, 2), activation='relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), strides=(2, 2), activation='relu')(x)

    model = Model(inputs=inp, outputs=x)

Error:

---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-14-f1979e2d5f34> in <module>()
  1 listdir('/var')
----> 2 model.train()
  3 # except:
  4 #   print('Failed')
  5 get_ipython().system('cat /var/log/colab-jupyter.log')

<ipython-input-9-790e38d234ac> in train(self)
    332                         fakes = np.zeros((self.vars.SCFEGAN_BATCH_SIZE, *self.vars.SCFEGAN_DISC_OP_SIZE))
    333 
--> 334                         gen_imgs = self.generator.predict(inp)
    335 
    336                         cmp_images = self.complete_imgs(images, masks, gen_imgs)

/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in predict(self, x, batch_size, verbose, steps)
   1167                                             batch_size=batch_size,
   1168                                             verbose=verbose,
-> 1169                                             steps=steps)
   1170 
   1171     def train_on_batch(self, x, y,

/usr/local/lib/python3.6/dist-packages/keras/engine/training_arrays.py in predict_loop(model, f, ins, batch_size, verbose, steps)
    292                 ins_batch[i] = ins_batch[i].toarray()
    293 
--> 294             batch_outs = f(ins_batch)
    295             batch_outs = to_list(batch_outs)
    296             if batch_index == 0:

/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2713                 return self._legacy_call(inputs)
   2714 
-> 2715             return self._call(inputs)
   2716         else:
   2717             if py_any(is_tensor(x) for x in inputs):

/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
   2673             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]
   2677 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
   1437           ret = tf_session.TF_SessionRunCallable(
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:
   1441           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
    526             None, None,
    527             compat.as_text(c_api.TF_Message(self.status.status)),
--> 528             c_api.TF_GetCode(self.status.status))
    529     # Delete the underlying status object from memory otherwise it stays alive
    530     # as there is a reference to status from this from the traceback due to

InvalidArgumentError: Conv2DCustomBackpropInput: input and out_backprop must have the same batch size. input batch: 1, out_backprop batch: 2, batch_dim: 0
         [[{{node gated_de_conv_8/conv2d_transpose_1}}]]

I have not found any solution for my case. I am trying to implement the SCFEGAN model and am running into this error. Thanks in advance.

TensorFlow version: 1.13.1, Keras version: 2.2.4, Python version: 3.6

0 answers