Attempting to use uninitialized value output_softmax_w

Time: 2018-10-21 04:52:34

Tags: tensorflow

Here is my LSTM model.py:

import tensorflow as tf


class Model(object):

    def __init__(self, is_training, batch_size, seq_max_length, hidden_size, vocab_size, 
                 num_layers, learning_rate, dropout=0.5, init_scale=0.05):
        self.is_training = is_training
        self.batch_size = batch_size
        self.seq_max_length = seq_max_length
        self.hidden_size = hidden_size
        self.learning_rate = learning_rate

        self.inputs = tf.placeholder(name='inputs', shape=[batch_size, seq_max_length], dtype=tf.int32)
        self.targets = tf.placeholder(name='targets', shape=[batch_size, seq_max_length], dtype=tf.int32)

        embedding = tf.get_variable('embedding', shape=[vocab_size, self.hidden_size], initializer=tf.contrib.layers.variance_scaling_initializer())
        inputs = tf.nn.embedding_lookup(embedding, self.inputs)

        cell = tf.contrib.rnn.LSTMCell(self.hidden_size)

        output, self.state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
        output = tf.reshape(output, [-1, hidden_size])

        self.softmax_w = tf.get_variable('output_softmax_w', shape=[self.hidden_size, vocab_size], 
                                    dtype=tf.float32)
        self.softmax_b = tf.get_variable('output_softmax_b', shape=[vocab_size], 
                                    dtype=tf.float32)
        logits = tf.nn.xw_plus_b(output, self.softmax_w, self.softmax_b)
        logits = tf.reshape(logits, [self.batch_size, self.seq_max_length, vocab_size])

        loss = tf.contrib.seq2seq.sequence_loss(
            logits,
            self.targets,
            tf.ones([self.batch_size, self.seq_max_length], dtype=tf.float32),
            average_across_timesteps=False,
            average_across_batch=True
        )
        self.cost = tf.reduce_sum(loss)

        if not is_training:
            return

        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), 5)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
        self.train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())

When I run my training code,

tf.reset_default_graph()

init_op = tf.global_variables_initializer()
model = Model(is_training=True, batch_size=2, seq_max_length=10, hidden_size=100, vocab_size=72, num_layers=1, learning_rate=1)

with tf.Session() as sess:

    sess.run(init_op)

    batch_x = np.array(train_X[0:2])
    batch_y = np.array(train_y[0:2])
    inputs_value = sess.run([model.cost, model.train_op], feed_dict={model.inputs: batch_x, model.targets: batch_y})

I get the following error:

FailedPreconditionError: Attempting to use uninitialized value output_softmax_w
     [[Node: output_softmax_w/read = Identity[T=DT_FLOAT, _class=["loc:@output_softmax_w"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](output_softmax_w)]]
     [[Node: GradientDescent/update/_18 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_384_GradientDescent/update", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]

I don't understand why I get this error, since my code looks the same as everyone else's.

1 Answer:

Answer 0 (score: 1)

Move your initialization op so it comes after the graph-construction code, e.g.:

model = Model(is_training=True, batch_size=2, seq_max_length=10, hidden_size=100, vocab_size=72, num_layers=1, learning_rate=1)
init_op = tf.global_variables_initializer()

tf.global_variables_initializer() creates an initializer only for the variables that have already been declared before it is called. In your code you build the Model (and thus create output_softmax_w) after creating init_op, so running init_op never initializes those variables.
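
For completeness, here is a minimal sketch of the corrected training snippet (assuming train_X and train_y are defined as in the question): the graph is built first, and the initializer is created only once all variables exist.

import numpy as np
import tensorflow as tf

tf.reset_default_graph()

# Build the graph first so that all variables (including output_softmax_w) exist.
model = Model(is_training=True, batch_size=2, seq_max_length=10, hidden_size=100,
              vocab_size=72, num_layers=1, learning_rate=1)

# Create the initializer only after the whole graph has been constructed.
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)

    batch_x = np.array(train_X[0:2])
    batch_y = np.array(train_y[0:2])
    cost, _ = sess.run([model.cost, model.train_op],
                       feed_dict={model.inputs: batch_x, model.targets: batch_y})

Any variable created after tf.global_variables_initializer() is called would again be left uninitialized, so keep the initializer as the last step of graph construction.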