I store my placeholder variables inside a name scope called 'inputs'. After training and saving the model, I try to reload it and evaluate it on test data, but I cannot figure out how to rebuild the feed_dict. I have also noticed that I end up with multiple 'inputs' scopes, labeled 'inputs_#'. When I run
graph = tf.get_default_graph()
e1 = graph.get_tensor_by_name("inputs/e1:0")
e2 = graph.get_tensor_by_name("inputs/e2:0")
rel = graph.get_tensor_by_name("inputs/rel:0")
y = graph.get_tensor_by_name("inputs/y:0")
I get an error. The placeholders e1 and e2 are found, but y is not:
KeyError: "The name 'inputs/y:0' refers to a Tensor which does not exist. The operation, 'inputs/y', does not exist in the graph."
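For context, the restore step that precedes these lookups follows the standard import_meta_graph workflow, roughly like this sketch (the checkpoint path here is a placeholder, not my actual path):

sess = tf.Session()
# Re-import the saved graph definition, then restore the trained weights.
saver = tf.train.import_meta_graph('./model/ckpt.meta')  # placeholder path
saver.restore(sess, './model/ckpt')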
My questions are: why can I not load the placeholder y, why does it not appear under the name scope 'inputs', and why are there additional 'inputs' scopes? Below are my code and a sample of the output. Thanks for looking!
My model:
import pickle
import tensorflow as tf

with open('./data/generated.data/initialized.vectors-normalized', 'rb') as f:
    vecs = pickle.load(f)

k = 3
d = 100
r = 11
lamb = 0.0001
lr = 0.01
words = 26770  # number of words in corpus; we add one row to it to get a zero-value row

with tf.name_scope('inputs'):
    e1 = tf.placeholder(tf.int32, shape=[None, 10], name='e1')
    rel = tf.placeholder(tf.int32, shape=[None, 1], name='rel')
    e2 = tf.placeholder(tf.int32, shape=[None, 10], name='e2')
    y = tf.placeholder(tf.float32, shape=[None], name='y')

with tf.name_scope('parameters'):
    # initialization process for E; maintain E[0,:] = 0 vector for the dummy variable c
    glove_rows = tf.constant(vecs, dtype='float32')
    E_init = tf.Variable(tf.random_uniform([words - vecs.shape[0] + 1, d], -0.001, 0.001))
    E = tf.concat([glove_rows, E_init], 0, name='E')
    #E = E_init  # tf.scatter_update(E_init, [0], [[0 for i in range(d)]])
    W = tf.Variable(tf.random_uniform([r, d*d*k], -1/(d**.5), 1/(d**.5)), name='W')
    V = tf.Variable(tf.random_uniform([r, 2*d*k], 0, 1), name='R')
    B = tf.Variable(tf.zeros([r, k]), name='B')
    U = tf.Variable(tf.ones([r, k]), name='U')
    Bias = tf.Variable(tf.ones([r, 1]), name='Bias')

with tf.name_scope('equation'):
    # E1 calculation
    e1_words = e1[:, :9]
    e1_wordcount = tf.to_float(e1[:, 9:], name='e1_wordcount')
    E1_entity_wordvecs = tf.nn.embedding_lookup(E, e1_words, name='E1_entity_wordvecs')
    E1_unscaled_entvecs = tf.reduce_sum(E1_entity_wordvecs, axis=1, name='E1_unscaled_entvecs')
    E1 = tf.divide(E1_unscaled_entvecs, e1_wordcount, name='E1')

    ## 3 terms in parentheses of the score function
    #
    # E2 calculation
    e2_words = e2[:, :9]
    e2_wordcount = tf.to_float(e2[:, 9:], name='e2_wordcount')
    E2_entity_wordvecs = tf.nn.embedding_lookup(E, e2_words, name='E2_entity_wordvecs')
    E2_unscaled_entvecs = tf.reduce_sum(E2_entity_wordvecs, axis=1, name='E2_unscaled_entvecs')
    E2 = tf.divide(E2_unscaled_entvecs, e2_wordcount, name='E2')

    # Term1 calculation
    Wr = tf.reshape(tf.nn.embedding_lookup(W, rel), [-1, d, d, k], name='Wr')
    Term1 = tf.einsum('ij,ijmk,im->ik', E1, Wr, E2)

    # Term2 calculation
    Vr = tf.reshape(tf.nn.embedding_lookup(V, rel), [-1, 2*d, k], name='Vr')
    E1E2 = tf.concat([E1, E2], axis=1, name='E1E2')
    Term2 = tf.einsum('ijk,ij->ik', Vr, E1E2)

    # Term3 calculation
    Br = tf.squeeze(tf.nn.embedding_lookup(B, rel), [1], name='Br')
    Term3 = Br

    # Score, a.k.a. F, calculation
    Ur = tf.squeeze(tf.nn.embedding_lookup(U, rel), [1], name='Ur')
    Biasr = tf.squeeze(tf.nn.embedding_lookup(Bias, rel), [1], name='Biasr')
    Tanh = tf.tanh(tf.add_n([Term1, Term2, Term3], name='Tanh'))
    F = tf.einsum('ij,ij->i', Ur, Tanh) + Biasr

with tf.name_scope('loss'):
    with tf.name_scope('cross_entropy_term'):
        compare_matrix = tf.maximum(1 - F * y, 0)
        Sum = tf.reduce_sum(compare_matrix)
    with tf.name_scope('regularization_term'):
        E_flat = tf.reshape(E, [-1])
        W_flat = tf.reshape(W, [-1])
        V_flat = tf.reshape(V, [-1])
        B_flat = tf.reshape(B, [-1])
        U_flat = tf.reshape(U, [-1])
        Params = tf.concat([E_flat, W_flat, V_flat, B_flat, U_flat], axis=0)
        Reg = lamb * tf.norm(Params, ord=2, name='Reg')
    loss = tf.add(Sum, Reg, name='loss')

with tf.name_scope('optimizer'):
    train_step = tf.train.AdagradOptimizer(lr).minimize(loss)
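For completeness, the graph above is driven by a training loop of roughly this shape; the batching helper and checkpoint path below are illustrative stand-ins, not my exact code:

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):  # illustrative step count
        b_e1, b_rel, b_e2, b_y = next_batch()  # hypothetical batching helper
        sess.run(train_step, feed_dict={e1: b_e1, rel: b_rel, e2: b_e2, y: b_y})
    saver.save(sess, './model/ckpt')  # illustrative checkpoint path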
After training and reloading, the output of

for i in tf.get_default_graph().get_operations():
    if i.name[:6] == 'inputs':
        print(i.name)

looks like this:
inputs/e1
inputs/rel
inputs/e2
inputs_1/e1
inputs_1/rel
inputs_1/e2
inputs_2/e1
inputs_2/rel
inputs_2/e2
inputs_3/e1
inputs_3/rel
inputs_3/e2
inputs_4/e1
inputs_4/rel
inputs_4/e2
inputs_5/e1
inputs_5/rel
inputs_5/e2
inputs_5/y
inputs_6/e1
inputs_6/rel
inputs_6/e2
inputs_6/y
inputs_7/e1
inputs_7/rel
inputs_7/e2
inputs_7/y
inputs_8/e1
inputs_8/rel
inputs_8/e2
inputs_8/y
inputs_9/e1
inputs_9/rel
inputs_9/e2
inputs_9/y
inputs_10/e1
inputs_10/rel
inputs_10/e2
inputs_10/y
inputs_11/e1
inputs_11/rel
inputs_11/e2
inputs_11/y
inputs/e1_1
inputs/rel_1
inputs/e2_1
inputs_1/e1_1
inputs_1/rel_1
inputs_1/e2_1
inputs_2/e1_1
inputs_2/rel_1
inputs_2/e2_1
inputs_3/e1_1
inputs_3/rel_1
inputs_3/e2_1
inputs_4/e1_1
inputs_4/rel_1
inputs_4/e2_1
inputs_5/e1_1
inputs_5/rel_1
inputs_5/e2_1
inputs_5/y_1
inputs_6/e1_1
inputs_6/rel_1
inputs_6/e2_1
inputs_6/y_1
inputs_7/e1_1
inputs_7/rel_1
inputs_7/e2_1
inputs_7/y_1
inputs_8/e1_1
inputs_8/rel_1
inputs_8/e2_1
inputs_8/y_1
inputs_9/e1_1
inputs_9/rel_1
inputs_9/e2_1
inputs_9/y_1
inputs_10/e1_1
inputs_10/rel_1
inputs_10/e2_1
inputs_10/y_1
inputs_11/e1_1
inputs_11/rel_1
inputs_11/e2_1
inputs_11/y_1
inputs/e1_2
inputs/rel_2
inputs/e2_2
inputs_1/e1_2
inputs_1/rel_2
inputs_1/e2_2
inputs_2/e1_2
inputs_2/rel_2
inputs_2/e2_2
inputs_3/e1_2
inputs_3/rel_2
inputs_3/e2_2
inputs_4/e1_2
inputs_4/rel_2
inputs_4/e2_2
inputs_5/e1_2
inputs_5/rel_2
inputs_5/e2_2
inputs_5/y_2
inputs_6/e1_2
inputs_6/rel_2
inputs_6/e2_2
inputs_6/y_2
inputs_7/e1_2
inputs_7/rel_2
inputs_7/e2_2
inputs_7/y_2
inputs_8/e1_2
inputs_8/rel_2
inputs_8/e2_2
inputs_8/y_2
inputs_9/e1_2
inputs_9/rel_2
inputs_9/e2_2
inputs_9/y_2
inputs_10/e1_2
inputs_10/rel_2
inputs_10/e2_2
inputs_10/y_2
inputs_11/e1_2
inputs_11/rel_2
inputs_11/e2_2
inputs_11/y_2
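Incidentally, the suffixing itself is reproducible with a minimal, self-contained snippet that has nothing to do with my model: entering the same name scope a second time in the same default graph makes TensorFlow uniquify it with a numeric suffix. I don't know whether that is what's happening in my case:

import tensorflow as tf

# Entering the same name scope twice in one default graph:
with tf.name_scope('inputs'):
    a = tf.placeholder(tf.float32, name='a')
with tf.name_scope('inputs'):
    b = tf.placeholder(tf.float32, name='b')

print(a.name)  # inputs/a:0
print(b.name)  # inputs_1/b:0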