TensorFlow scope problem: ValueError: Variable w1_d/Adam/ does not exist

Asked: 2018-03-23 19:28:00

Tags: python tensorflow

This question has already been asked here: TensorFlow ValueError: Variable does not exist, or was not created with tf.get_variable()

However, I tried implementing the solution from that question in my code example and I still get the error. The error is the same as the one in the link, namely: ValueError: Variable w1_d/Adam/ does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
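
For context, the fix suggested in the linked question is essentially to build the optimizers inside a variable scope with reuse explicitly set to False, so that Adam can create its slot variables (such as w1_d/Adam). A minimal sketch of that pattern (the loss and variable list here just stand in for whatever is being minimized):

# build the training ops with reuse explicitly disabled on the current scope
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
    trainer = tf.train.AdamOptimizer(0.0001).minimize(d_loss, var_list=d_vars)

As you can see at the bottom of my code, I already do exactly this, yet the error persists.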

Here is my code (note that even with the error fixed it will not run, since there is no sess.run in it, but as you can see the code is already quite long, so I trimmed it down to a length that still reproduces the same error as the full code):

# code largely adapted from the EZGAN GitHub repo

from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
np.random.seed(112)
tf.set_random_seed(232)

data = []



datalist = [[[0.0 for each2 in range(26)]for each in range(100)] for each in range(20)]
datasum = [0.0 for each in range(20)]



def discriminator(x_data, y_data, drate, is_training, market, reuse=False):
    if (reuse):
        tf.get_variable_scope().reuse_variables()

    hidden = 64

    input111 = tf.concat([x_data, y_data, market], 1)

    features = 26



    sdev = np.sqrt(float(1/features))

    w1_d = tf.get_variable("w1_d",[features, hidden],initializer = tf.random_normal_initializer(stddev=sdev))
    sdev = np.sqrt(float(1/hidden))

    weights2_d = tf.get_variable("w2_d", [hidden, hidden], initializer = tf.random_normal_initializer(stddev=sdev))


    weights17_d = tf.get_variable("w3_d", [hidden, 1], initializer = tf.random_normal_initializer(stddev=sdev))

    bias1_d = tf.get_variable("b1_d", [hidden], initializer = tf.random_normal_initializer(stddev=0))
    bias2_d = tf.get_variable("b2_d", [hidden],initializer = tf.random_normal_initializer(stddev=0))
    bias17_d = tf.get_variable("b3_d", [], initializer = tf.random_normal_initializer(stddev=0))


    mul_d = tf.matmul(input111, w1_d) + bias1_d
    layer1_d = mul_d
    layer16_d = tf.matmul(layer1_d, weights2_d) + bias2_d + layer1_d
    out_d = tf.matmul(layer16_d, weights17_d) + bias17_d
    sigout = tf.sigmoid(out_d)*.9999998+.0000001               


    return sigout

def generator(x_in, drate, is_training, samples):

    demlen = 4
    normdraws = np.random.randn()
    demodraws = np.random.rand(demlen,1)

    demodraws = demodraws.astype(np.float32)


    features = 6


    hidden = 64


    sdev = tf.sqrt(1.0/features)

    weights1_g = tf.get_variable("w1_g", [features, hidden], initializer = tf.random_normal_initializer(stddev=sdev))
    delta_g= tf.get_variable("delta_g", [features, 1], initializer = tf.random_normal_initializer(stddev=sdev))
    demweights_g = tf.get_variable("dw_g", [features, demlen], initializer = tf.random_normal_initializer(stddev=sdev))
    sdev = tf.sqrt(1/hidden)

    weights2_g = tf.get_variable("w2_g", [hidden,1], initializer = tf.random_normal_initializer(stddev=sdev))


    bias1_g = tf.get_variable("b1_g", [hidden], initializer = tf.random_normal_initializer(stddev=0))
    bias2_g = tf.get_variable("b2_g", [], initializer=tf.random_normal_initializer(stddev=0))
    biasdelt_g = tf.get_variable("bdelt2_g", [], initializer=tf.random_normal_initializer(stddev=0))

    mul_g = tf.matmul(x_in, weights1_g) + bias1_g
    layer1_g = mul_g
    out_g = tf.matmul(layer1_g, weights2_g) + bias2_g



    utility_g = tf.matmul(x_in,delta_g) + biasdelt_g + tf.exp(out_g)*normdraws+tf.matmul(x_in,tf.matmul(demweights_g,demodraws))

    output_g = tf.nn.softmax(utility_g, 0)#*samples

    return (x_in, output_g)



sess = tf.Session()

x_placeholder = tf.placeholder("float", shape = [None, 6])
y_placeholder = tf.placeholder("float", shape = [None, 1])
drate1 = tf.placeholder("float")
is_training1= tf.placeholder("bool")
market_place = tf.placeholder("float", shape = [None, 19])
shape1 = tf.placeholder("float", shape = [])

Gz = generator(x_placeholder, drate1, is_training1, shape1)

Dx = discriminator(x_placeholder, y_placeholder, drate1, is_training1, market_place)

# second discriminator call reuses the variables created by the first call
Dg = discriminator(Gz[0], Gz[1], drate1, is_training1, market_place, reuse=True)

g_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.ones_like(Dg)))

d_loss_real = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dx, labels=tf.fill([tf.shape(x_placeholder)[0], 1], 1.0)))
d_loss_fake = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.zeros_like(Dg)))
d_loss = d_loss_real + d_loss_fake

tvars = tf.trainable_variables()

d_vars = [var for var in tvars if '_d' in var.name]
g_vars = [var for var in tvars if '_g' in var.name]
# attempted fix from the linked question: force reuse=False while the Adam optimizers create their slot variables
with tf.variable_scope(tf.get_variable_scope(), reuse=False) as scope:
    d_trainer_fake = tf.train.AdamOptimizer(0.0001).minimize(d_loss_fake, var_list=d_vars)
    d_trainer_real = tf.train.AdamOptimizer(0.0001).minimize(d_loss_real, var_list=d_vars)
    # Train the generator
    # Decreasing from 0.004 in GitHub version
    g_trainer = tf.train.AdamOptimizer(0.0001).minimize(g_loss, var_list=g_vars)

0 answers:

No answers yet