I have built the following neural network:
import tensorflow as tf
class LM:
    def __init__(self, vocab_size, embedding_size, max_sentence_size, hidden_size, batch_size):
        self.input_x = tf.placeholder(tf.int32, [batch_size, max_sentence_size], name="input_x")
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.max_sentence_size = max_sentence_size

        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -0.1, 0.1), name="W")
            embedded_words = tf.nn.embedding_lookup(self.W, self.input_x)  # [None, past_words, embedding_size]

        with tf.name_scope("loop"):
            hidden_state = tf.zeros((batch_size, hidden_size))
            self.logits = tf.zeros((batch_size, max_sentence_size-1, embedding_size))
            cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size)
            for i in range(max_sentence_size-1):
                self.logits[:,i,:], hidden_state = cell.__call__(embedded_words[:,i,:], hidden_state)

        with tf.name_scope("output_layer"):
            self.predictions = tf.argmax(self.logits, 1, name="predictions")
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_x[1:])
            self.loss = tf.reduce_mean(losses)
When I try to train it, the line
self.logits[:,i,:], hidden_state = cell.__call__(embedded_words[:,i,:], hidden_state)
gives me the error "TypeError: Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn."

Can someone explain what is going wrong?
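For reference, below is a minimal, self-contained sketch of one way such an unrolling loop might be written in TF 1.x graph mode: collecting the per-step outputs in a Python list and stacking them with tf.stack, and initializing the LSTM state with cell.zero_state instead of a plain zeros tensor. The sizes and names are placeholders, not my actual model, and this is only an assumption about an alternative pattern, not a confirmed fix:

import tensorflow as tf

# Placeholder sizes; the real values would come from the model above.
batch_size, max_sentence_size, embedding_size, hidden_size = 4, 6, 8, 16

inputs = tf.placeholder(tf.float32, [batch_size, max_sentence_size, embedding_size])
cell = tf.nn.rnn_cell.LSTMCell(hidden_size)
state = cell.zero_state(batch_size, tf.float32)  # proper (c, h) LSTM state tuple

outputs = []
for i in range(max_sentence_size - 1):
    # Calling the same cell object each step reuses its weights.
    output, state = cell(inputs[:, i, :], state)
    outputs.append(output)

# Stack the collected per-step outputs into [batch_size, max_sentence_size-1, hidden_size].
logits = tf.stack(outputs, axis=1)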