DCGAN: ValueError: Input 0 of layer deconv is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 100]

Date: 2018-11-06 01:58:50

Tags: python tensorflow deep-learning generative-adversarial-network

My code (a DCGAN for the CelebA dataset) is neither complete nor correct, because I cannot get past this error (so please help me figure out how to fix this first error):

tf.reset_default_graph()
LOGDIR = "logs"

def train(args):
    data_loader = Dataset(args.data_path, args.num_images, args.image_size) 
    #batch_z = sample_z(args.dim_z, args.batch_size)
    print(args.dim_z)
    print(args.image_size)
    X = tf.placeholder(tf.float32, shape=[args.dim_z, 3,args.image_size , args.image_size])
    Z = tf.placeholder(tf.float32, shape=[None, args.dim_z])
    G_sample, _ = generator(Z, args)
    #G_sample, _ = generator(batch_z, args)
    print(type(G))
    #D_real, D_real_logits = discriminator(real_batch, args, reuse=False)
    D_real, D_real_logits = discriminator(X, args, reuse=False)
    D_fake, D_fake_logits = discriminator(G_sample, args, reuse=True)
    tf.summary.image('generated image', tf.reshape(G, [-1, 28, 28, 1], 3))
    d_loss, g_loss = get_losses(d_real_logits, d_fake_logits)
    z_sum = tf.histogram.summary('z', batch_z)
    d_sum = tf.histogram.summary('d', D_real)
    G_sum = tf.histogram.summary('g', G)
    d_loss_sum = tf.scalar.summary('d_loss', d_loss)
    g_loss_sum = tf.scalar.summary('g_loss', g_loss)
    d_sum = tf.merged_summary([z_sum, d_sum, d_loss_sum])
    g_sum = tf.merged_summary([z_sum, G_sum, g_loss_sum])
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('log', sess.graph)

        for epoch in range(args.n_epoch):
            for itr, real_batch in enumerate(data_loader.get_nextbatch(args.batch_size)):
                #fill with your codes
                sample = sess.run(G_sample, feed_dict={Z:sample_z(args.dim_z, args.batch_size)})
                d_optimizer, g_optimizer = get_optimizers(agrs.learning_rate, args.beta1, args.beta2)
                d_step, g_step = optimize(d_optimizer, g_optimizer, d_loss, g_loss)
                writer = tf.train.SummaryWriter(train_dir, sess.gLraph)  
                #background = np.ones((10 + (112 + 10) * 5, 10 + (112 + 10) * 8, 3)).astype(np.uint8) * 255
                #scipy.misc.imsave(os.path.join('log', 'generated-ep-' + str(ep) + '.jpg'), background)
                tf.summary.image('raw image', tf.reshape())
                tf.summary.image('generated', image_output)    
                summary_op = tf.summary.merge_all()
                writer = tf.summary.FileWriter(logdir, graph)
                summary = sess.run(summary_op)
                writer.add_summary(image, step)
                writer.add_summary(merged_summary, itr)
                d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
                g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
                merged_summary = tf.summary.merge_all()
                #latest_checkpoint = tf.train.latest_checkpoint(LOGDIR+"/checkpoints/")
                writer.add_graph(sess.graph)
                saver.save(sess, save_path='./gan.ckpt')


train(args)

Basically, how do I fix the following error?

100
64

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-51-6622063733a2> in <module>()
     54 
     55 
---> 56 train(args)

<ipython-input-51-6622063733a2> in train(args)
      9     X = tf.placeholder(tf.float32, shape=[args.dim_z, 3,args.image_size , args.image_size])
     10     Z = tf.placeholder(tf.float32, shape=[None, args.dim_z])
---> 11     G_sample, _ = generator(Z, args)
     12     #G_sample, _ = generator(batch_z, args)
     13     print(type(G))

<ipython-input-31-15807fb5df23> in generator(x, args, reuse)
     10                                              padding='valid',
     11                                              use_bias=False,
---> 12                                              name='deconv')
     13                 batch_norm1=tf.layers.batch_normalization(deconv1,
     14                                              name = 'batch_norm')

/share/pkg/tensorflow/r1.10/install/py3-gpu/lib/python3.6/site-packages/tensorflow/python/layers/convolutional.py in conv2d_transpose(inputs, filters, kernel_size, strides, padding, data_format, activation, use_bias, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, activity_regularizer, kernel_constraint, bias_constraint, trainable, name, reuse)
   1266       _reuse=reuse,
   1267       _scope=name)
-> 1268   return layer.apply(inputs)
   1269 
   1270 

/share/pkg/tensorflow/r1.10/install/py3-gpu/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in apply(self, inputs, *args, **kwargs)
    803       Output tensor(s).
    804     """
--> 805     return self.__call__(inputs, *args, **kwargs)
    806 
    807   def _set_learning_phase_metadata(self, inputs, outputs):

/share/pkg/tensorflow/r1.10/install/py3-gpu/lib/python3.6/site-packages/tensorflow/python/layers/base.py in __call__(self, inputs, *args, **kwargs)
    360 
    361       # Actually call layer
--> 362       outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
    363 
    364     if not context.executing_eagerly():

/share/pkg/tensorflow/r1.10/install/py3-gpu/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
    718 
    719         # Check input assumptions set before layer building, e.g. input rank.
--> 720         self._assert_input_compatibility(inputs)
    721         if input_list and self._dtype is None:
    722           try:

/share/pkg/tensorflow/r1.10/install/py3-gpu/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in _assert_input_compatibility(self, inputs)
   1422                            'expected ndim=' + str(spec.ndim) + ', found ndim=' +
   1423                            str(ndim) + '. Full shape received: ' +
-> 1424                            str(x.shape.as_list()))
   1425       if spec.max_ndim is not None:
   1426         ndim = x.shape.ndims

ValueError: Input 0 of layer deconv is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 100]

1 Answer:

Answer 0 (score: 0):

I was missing the step of preprocessing the CelebA dataset images. I did that in the code below, and once I did, the error above went away:

Basically, it is this one line:

data_loader.preprocess_and_save_images('preprocessed', 'results_celebA') #preprocess the images once

inside the code below:

def train(args):
    tf.reset_default_graph()
    data_loader = Dataset(args.data_path, args.num_images, args.image_size)
    data_loader.preprocess_and_save_images('preprocessed', 'results_celebA') #preprocess the images once
    X = tf.placeholder(tf.float32, shape=[args.batch_size, args.image_size, args.image_size, 3])  # rank-4 NHWC image batch
    Z = tf.placeholder(tf.float32, shape=[args.batch_size, 1, 1, args.dim_z])  # rank-4 latent: a 1x1 "image" with dim_z channels

    G_sample, _ = generator(Z, args)
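
For context on why the original placeholder triggered the error: the 'deconv' layer in the generator is a tf.layers.conv2d_transpose, which only accepts rank-4 (NHWC) inputs, while the original Z was rank-2 ([None, 100]). The sketch below is a minimal, standalone illustration; the filter count, kernel size, and the dim_z/batch_size values are assumptions chosen for the example, not necessarily the asker's exact settings:

import tensorflow as tf

tf.reset_default_graph()
dim_z, batch_size = 100, 64  # illustrative values

# Rank-2 latent, like the original Z = tf.placeholder(tf.float32, [None, dim_z]).
z_2d = tf.placeholder(tf.float32, shape=[None, dim_z])
# Feeding it to the first transposed convolution reproduces the error:
# tf.layers.conv2d_transpose(z_2d, 512, 4, name='deconv')
#   -> ValueError: expected ndim=4, found ndim=2. Full shape received: [None, 100]

# Rank-4 latent: treat the noise vector as a 1x1 "image" with dim_z channels,
# which is what the fixed Z placeholder above does.
z_4d = tf.placeholder(tf.float32, shape=[batch_size, 1, 1, dim_z])
deconv1 = tf.layers.conv2d_transpose(z_4d, filters=512, kernel_size=4,
                                     strides=1, padding='valid',
                                     use_bias=False, name='deconv')
print(deconv1.shape)  # (64, 4, 4, 512)

# Equivalent alternative: keep Z rank-2 and reshape it inside the generator
# before the first deconvolution.
z_reshaped = tf.reshape(z_2d, [-1, 1, 1, dim_z])

Note that the preprocessing call alone does not change the rank of Z; the rank-4 Z placeholder in the fixed code is what satisfies the deconv layer's ndim=4 requirement.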