以下是一段 hello-world 级别的 TensorFlow 代码:
import tensorflow as tf
import numpy as np
def model():
    """Build a two-layer graph: a hidden layer feeding an output layer.

    Intended to be called inside an enclosing variable scope (the script
    wraps calls in scope 'model'). Returns the output-layer tensor.
    """
    hidden = layer('layer1', None)
    prediction = layer('output_layer', hidden)
    # Both inner scopes have been exited here, so the current variable
    # scope is the enclosing one again.
    print(tf.get_variable_scope().name)  # print 1
    return prediction
def layer(scope_name, x):
    """Create (or reuse) a one-weight layer inside variable scope *scope_name*.

    Args:
        scope_name: name of the variable scope opened for this layer.
        x: optional input tensor; when None the raw weight is returned.

    Returns:
        ``w + x`` when ``x`` is given, otherwise the weight variable itself.
    """
    with tf.variable_scope(scope_name):
        # Inside the scope, the full nested scope path is visible.
        print(tf.get_variable_scope().name)  # print 2
        weight = tf.get_variable(
            'w',
            shape=(1,),
            dtype=tf.float32,
            initializer=tf.truncated_normal_initializer(mean=0, stddev=1),
        )
        if x is None:
            return weight
        return weight + x
with tf.Graph().as_default():
    # Build the model twice inside the same 'model' variable scope; the
    # second call reuses the *variables* created by the first.
    #
    # NOTE(review): variable-scope reuse shares variables only. The op
    # name scopes are still uniquified per call, which is why TensorBoard
    # shows a second node group named 'output_layer_1' — the ops of the
    # second model() call cannot reuse the name 'model/output_layer'.
    with tf.variable_scope('model') as scope:
        prediction_1 = model()
        scope.reuse_variables()
        prediction_2 = model()
    with tf.Session() as sess:
        # FIX: tf.train.SummaryWriter and tf.initialize_all_variables were
        # deprecated and removed in TF 1.0; the supported replacements are
        # tf.summary.FileWriter and tf.global_variables_initializer.
        summary_writer = tf.summary.FileWriter('log', graph=sess.graph)
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Both predictions come from the same shared weights.
        [p1, p2] = sess.run([prediction_1, prediction_2])
        print('p1={} p2={}'.format(p1, p2))
        summary_writer.close()
上述代码中的 print 1 和 print 2 会依次输出:
model/layer1
model/output_layer
model
model/layer1
model/output_layer
model
这与我的预期一致。但它生成的计算图(graph)中却出现了两个 output_layer 节点组——为什么会多出一个 output_layer_1?