DCGAN模型中的模型损失完全没有变化

时间:2018-07-04 12:08:55

标签: generative-adversarial-network

我无法在自己的数据集(32x32x3)上训练DCGAN模型。鉴别器和发生器的损耗完全没有变化。我将在下面共享网络。

def discriminator(x, reuse=tf.AUTO_REUSE):
    with tf.variable_scope("discriminator", reuse=reuse):
        d_net=tf.layers.conv2d(x,64,(5,5),(2,2),activation=tf.nn.leaky_relu,padding='SAME')

        d_net=tf.layers.conv2d(d_net,128,(5,5),(2,2),padding='SAME')
        d_net=tf.layers.batch_normalization(d_net)
        d_net=tf.nn.leaky_relu(d_net)

        d_net=tf.layers.conv2d(d_net,256,(5,5),(2,2),padding='SAME')
        d_net=tf.layers.batch_normalization(d_net)
        d_net=tf.nn.leaky_relu(d_net)

        d_net=tf.layers.conv2d(d_net,512,(5,5),(2,2),padding='SAME')
        d_net=tf.layers.batch_normalization(d_net)
        d_net=tf.nn.leaky_relu(d_net)

        d_net = tf.layers.flatten(d_net)
        return tf.layers.dense(d_net,1,activation=tf.nn.sigmoid)





def generator(z, reuse=tf.AUTO_REUSE):
    with tf.variable_scope("generator", reuse=reuse):
        h_size = 32
        h_size_2 = 16
        h_size_4 = 8
        h_size_8 = 4
        h_size_16 = 2

        net=tf.layers.dense(z,512*2*2)
        net=tf.reshape(net,[-1,h_size_16,h_size_16,512])
        net=tf.layers.batch_normalization(net)
        net=tf.nn.relu(net)
        print("G:",net.get_shape())

        net=tf.layers.conv2d_transpose(net,filters=256,kernel_size=(5,5),strides=(2,2),padding='SAME',activation=tf.nn.relu)
        print("G:",net.get_shape())

        net=tf.layers.conv2d_transpose(net,filters=128,kernel_size=(5,5),strides=(2,2),padding='SAME',activation=tf.nn.relu)
        print("G:",net.get_shape())

        net=tf.layers.conv2d_transpose(net,filters=64,kernel_size=(5,5),strides=(2,2),padding='SAME',activation=tf.nn.relu)
        print("G:",net.get_shape())

        net=tf.layers.conv2d_transpose(net,filters=32,kernel_size=(5,5),strides=(2,2),padding='SAME',activation=tf.nn.relu)
        print("G:",net.get_shape())
        out = tf.layers.conv2d_transpose(net,3,(5,5),padding='SAME',activation=tf.nn.tanh)
        print("G:",out.get_shape())
        return out

鉴别器和生成器的学习率分别为1e-3和1e-4。我使用的批次大小为256。优化参数如下所示:

d_inputs=tf.placeholder(tf.float32, shape = [None,32,32,3],name='d_inputs')
g_inputs=tf.placeholder(tf.float32, shape = [None,sample_size],name='g_inputs')
labels=tf.placeholder(tf.float32,shape=[None,1],name='labels')

g_output    = generator(g_inputs)
d_logits    = discriminator(d_inputs)
fake_logits = discriminator(g_output)

d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits,labels=labels))
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_logits,labels=tf.ones_like(fake_logits)))

gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope="generator")
disc_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope="discriminator")

d_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(d_loss,var_list=disc_vars)
g_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(g_loss,var_list=gen_vars)

请社区帮助我解决该问题。

0 个答案:

没有答案
相关问题