What do multiple occurrences of the same network/object in TensorBoard mean?

Asked: 2017-12-28 12:24:55

Tags: tensorflow tensorboard

I would like to know how to interpret multiple occurrences of the same network/object in TensorBoard.

[screenshot: TensorBoard graph showing the duplicated generator/discriminator nodes]

Looking at this example of a graph with two networks, a generator and a discriminator, I see both networks appear several times, with suffixes "_1", "_2", and so on. The intention was to use exactly one generator and one discriminator.

My question is: is there a bug that created multiple networks with independent weights, or do these nodes merely refer to the same instances, i.e. the same values in the variables? (A quick way to check is sketched after the code.) Here is my graph:

import tensorflow as tf  # TF 1.x API

# init_weights, init_bias, conv2d, deconv2d, bn, avg_pool, image_batch, logdir
# and the constants HEIGHT, WIDTH, CHANNELS are defined elsewhere in the notebook.

def discriminator(x, batch_size, reuse=False):
    with tf.variable_scope('discriminator') as scope:
        if reuse:
            # reuse the variables created by the first discriminator() call
            tf.get_variable_scope().reuse_variables()

        s = 2
        f = 3

        n_ch1 = 3
        w = init_weights('d_wc1', [f, f, CHANNELS, n_ch1])
        b = init_bias('d_bc1', [n_ch1])
        h = conv2d(x, w, s, b)
        h = bn(h, 'd_bn1')
        h = tf.nn.relu(h)
        h = tf.nn.dropout(h, 0.9)

        n_ch2 = 3
        w = init_weights('d_wc2', [f, f, n_ch1, n_ch2])
        b = init_bias('d_bc2', [n_ch2])
        h = conv2d(h, w, s, b) 
        h = bn(h, 'd_bn2')
        h = tf.nn.relu(h)

        dimensions = n_ch2*HEIGHT*WIDTH//s**4 
        w = init_weights('d_w1', [dimensions, 1])
        b = init_bias('d_b1', [1])
        h_flat = tf.reshape(h, [-1, dimensions])
        output = tf.nn.sigmoid(tf.matmul(h_flat, w) + b)

    return output


def generator(x, batch_size, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = 1
        f = 1 
        keep_prob = 0.9
        blow_up_factor = 2
        output_shape = [batch_size, blow_up_factor*HEIGHT, 
                        blow_up_factor*WIDTH, CHANNELS]
        w = init_weights('g_wdc0', [f, f, CHANNELS, CHANNELS])
        b = init_bias('g_bdc0', [CHANNELS])
        h = deconv2d(x, w, blow_up_factor, b, output_shape)        
        h = bn(h, "g_bnd0")                
        h = tf.nn.relu(h)
        h = tf.nn.dropout(h, keep_prob)

        h = avg_pool(h, blow_up_factor, 1)

        w = init_weights('g_wc0', [f, f, CHANNELS, CHANNELS])
        b = init_bias('g_bc0', [CHANNELS])
        h = conv2d(h, w, blow_up_factor, b)
        h = bn(h, 'g_bn0')
        h = tf.nn.relu(h)        
        h = tf.nn.dropout(h, keep_prob)
        h = h + x

        n_ch1 = 32
        w = init_weights('g_wc1', [f, f, CHANNELS, n_ch1])
        b = init_bias('g_bc1', [n_ch1])
        h = conv2d(h, w, s, b)
        h = bn(h, 'g_bn1')
        h = tf.nn.relu(h)
        h = tf.nn.dropout(h, keep_prob)

        n_ch2 = 128
        w = init_weights('g_wc2', [f, f, n_ch1, n_ch2])
        b = init_bias('g_bc2', [n_ch2])
        h = conv2d(h, w, s, b)
        h = bn(h, "g_bn2")                
        h = tf.nn.relu(h)
        h = tf.nn.dropout(h, keep_prob)

        n_ch3 = 128
        w = init_weights('g_wc3', [f, f, n_ch2, n_ch3])  # filter shape is [f, f, in_ch, out_ch]
        b = init_bias('g_bc3', [n_ch3])
        h = conv2d(h, w, 1, b)
        h = bn(h, "g_bn3")                
        h = tf.nn.relu(h)        
        h = tf.nn.dropout(h, keep_prob)

        output_shape = [batch_size, HEIGHT//s, WIDTH//s, n_ch1]
        w = init_weights('g_wdc2', [f, f, n_ch1, n_ch3])
        b = init_bias('g_bdc2', [n_ch1])
        h = deconv2d(h, w, s, b, output_shape)        
        h = bn(h, "g_bnd2")                
        h = tf.nn.relu(h)

        output_shape = [batch_size, HEIGHT, WIDTH, CHANNELS]
        w = init_weights('g_wdc1', [f, f, CHANNELS, n_ch1])
        b = init_bias('g_bdc1', [CHANNELS])
        output = deconv2d(h, w, s, b, output_shape)

    return tf.nn.sigmoid(output+x)
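
# --- Script: build the graph. discriminator() is intentionally called twice
# below; the second call passes reuse=True so that D(y) and D(G(x)) share a
# single set of weights.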

logs_path = logdir()  
output_frequency = 500  
batch_size = 4
iterations = 10005
norm_weight = 0.0
learning_rate = 0.0001

with tf.variable_scope('Rainy_batch'):
    rainy_image_batch = image_batch('./rainy/*.jpeg')
with tf.variable_scope('Sunny_batch'):    
    sunny_image_batch = image_batch('./sunny/*.jpeg')
with tf.variable_scope('Test_batch'):
    rainy_test_batch = image_batch('./test/*.jpeg')

# NHWC ordering, matching the [batch, HEIGHT, WIDTH, CHANNELS] output shapes above
x_placeholder = tf.placeholder("float", shape=[None, HEIGHT, WIDTH, CHANNELS], name='Rainy')
y_placeholder = tf.placeholder("float", shape=[None, HEIGHT, WIDTH, CHANNELS], name='Sunny')

Dy = discriminator(y_placeholder, batch_size)  # discriminator prediction probabilities for sunny images
Gx = generator(x_placeholder, batch_size, reuse=False)  # generated images
Dg = discriminator(Gx, batch_size, reuse=True)  # discriminator prediction probabilities for generated images
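# The second discriminator() call shares the weights created by the first call
# (reuse=True), but it still adds a fresh copy of every op to the graph. The
# name scope 'discriminator' is already taken, so TensorFlow uniquifies it to
# 'discriminator_1': one source of the suffixed copies in TensorBoard.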

# Standard GAN losses (non-saturating generator loss), plus an optional
# L1 pull of G(x) toward its input (disabled here since norm_weight = 0.0).
d_loss = -tf.reduce_mean(tf.log(Dy) + tf.log(1. - Dg))
g_loss_logit = -tf.reduce_mean(tf.log(Dg))
g_loss_norm = norm_weight * tf.norm(Gx - x_placeholder, ord=1)
g_loss = g_loss_logit + g_loss_norm

tvars = tf.trainable_variables()
# split by the 'd_' / 'g_' prefixes used in the variable names above
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]

tf.summary.scalar('gLoss', g_loss)
tf.summary.scalar('dLoss', d_loss)

adam = tf.train.AdamOptimizer(learning_rate=learning_rate)

with tf.variable_scope('discriminator') as scope:
    trainerD = adam.minimize(d_loss, var_list=d_vars)

with tf.variable_scope('generator') as scope:   
    trainerG = adam.minimize(g_loss, var_list=g_vars)
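# Re-entering tf.variable_scope('discriminator') / 'generator' here opens yet
# another uniquified name scope ('discriminator_2', 'generator_1', ...), and
# minimize() builds its gradient ops inside it. These appear as further copies
# of the networks in the TensorBoard graph, even though var_list reuses the
# existing d_vars / g_vars and no new network weights are created.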

Full notebook
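
To check whether duplicate weights were actually created, one option is to list
the trainable variables after the graph has been built: every weight should
appear exactly once, no matter how many suffixed copies TensorBoard draws. A
minimal sketch under the setup above (TF 1.x):

for v in tf.trainable_variables():
    # expect a single 'discriminator/...' or 'generator/...' entry per weight,
    # with no '_1'/'_2' suffix in the variable names
    print(v.name, v.shape)

# The suffixed TensorBoard nodes are name-scoped ops, not variables:
reused_ops = [op.name for op in tf.get_default_graph().get_operations()
              if op.name.startswith('discriminator_1/')]
print(len(reused_ops), "ops under discriminator_1/ (same weights, duplicated ops)")

If each variable name is unique, the networks share a single set of weights and
the extra boxes in TensorBoard are just additional op copies under uniquified
name scopes.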

0 Answers:

No answers yet.