在使用 TensorBoard 时我想创建几个网络层。我定义了所有变量,并给每个变量都指定了名称。当然,我使用了 init = tf.global_variables_initializer()
和 sess.run('init')
来确保每个变量都被初始化。但奇怪的是,错误
"Attempting to use uninitialized value Layers/output_layer/weight_out/W_out" 仍然出现在 W_out 上,W_Hidden_1 和 W_Hidden_2 也同样如此……
有人可以帮助我吗?以下是我的代码:
# Graph construction: a 2-hidden-layer feed-forward network (TF 1.x graph mode).
# NOTE(review): the pasted snippet had lost all indentation, which by itself is
# a SyntaxError in Python; the structure is restored here. All variable names
# and name_scope strings are kept exactly as in the original.
with tf.name_scope('Layers'):
    # Initializers.
    # variance_scaling_initializer(factor=1.0, mode="FAN_AVG", uniform=True)
    # is the Xavier/Glorot-uniform scheme.
    weight_initializer = tf.contrib.layers.variance_scaling_initializer(
        factor=1.0, mode="FAN_AVG", uniform=True)
    bias_initializer = tf.zeros_initializer()

    # Hidden layer 1: [n_stocks] -> [n_neurons_1], ReLU activation.
    # (X, n_stocks, n_neurons_1, n_neurons_2 are defined elsewhere in the file.)
    with tf.name_scope('Hidden_layer_1'):
        with tf.name_scope('weight_1'):
            W_hidden_1 = tf.Variable(
                weight_initializer([n_stocks, n_neurons_1]), name='W_Hidden_1')
        with tf.name_scope('bias_1'):
            bias_hidden_1 = tf.Variable(
                bias_initializer([n_neurons_1]), name='Bias_Hidden_1')
        with tf.name_scope('hidden_1_output'):
            hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))

    # Hidden layer 2: [n_neurons_1] -> [n_neurons_2], ReLU activation.
    with tf.name_scope('Hidden_layer_2'):
        with tf.name_scope('weight_2'):
            W_hidden_2 = tf.Variable(
                weight_initializer([n_neurons_1, n_neurons_2]), name='W_Hidden_2')
        with tf.name_scope('bias_2'):
            bias_hidden_2 = tf.Variable(
                bias_initializer([n_neurons_2]), name='Bias_Hidden_2')
        with tf.name_scope('hidden_2_output'):
            hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))

    # Output layer: [n_neurons_2] -> [1], linear; transposed so the
    # prediction is a row vector matching Y's layout (presumably — confirm
    # against how Y is fed elsewhere).
    with tf.name_scope('output_layer'):
        with tf.name_scope('weight_out'):
            W_out = tf.Variable(
                weight_initializer([n_neurons_2, 1]), name='W_out')
        with tf.name_scope('bias_out'):
            bias_out = tf.Variable(bias_initializer([1]), name='Bias_out')
        with tf.name_scope('output'):
            output = tf.transpose(tf.add(tf.matmul(hidden_2, W_out), bias_out))
# Loss: mean squared error between prediction and target.
# (Indentation restored — the pasted snippet had lost it.)
with tf.name_scope('loss'):
    # Cost function (MSE); also logged as a scalar summary for TensorBoard.
    loss = tf.reduce_mean(tf.square(output - Y))
    tf.summary.scalar('loss', loss)
# Training op: Adam with default hyperparameters.
# (Indentation restored — the pasted snippet had lost it.)
# NOTE: AdamOptimizer.minimize() itself creates slot variables (m/v per
# weight), so it must run BEFORE tf.global_variables_initializer() is
# created — which it does here.
with tf.name_scope('train'):
    # Optimizer
    train_step = tf.train.AdamOptimizer().minimize(loss)
# Init: created last, AFTER every tf.Variable above (including Adam's slot
# variables), so it initializes all of them.
init = tf.global_variables_initializer()
merged = tf.summary.merge_all()

with tf.Session() as sess:
    # Run the initializer OP OBJECT, not the string 'init' — the question
    # text shows sess.run('init'), which fetches a graph element by name
    # rather than this Python variable. sess.run(init) is the correct form.
    #
    # NOTE(review): "Attempting to use uninitialized value ..." typically
    # means the initializer was run in a DIFFERENT tf.Session than the one
    # executing train_step (each Session holds its own variable values), or
    # that the init op was created before some variables. Confirm that all
    # later sess.run(train_step, ...) calls happen inside this same
    # `with tf.Session()` block.
    sess.run(init)
    writer = tf.summary.FileWriter('logs/', sess.graph)