我指的是Kaggle notebook。看起来下面的代码应该可以与Tensorflow 1.1一起使用。在使用任何最新版本的Tensorflow运行时,我收到以下错误:
"AttributeError: module 'tensorflow.contrib.seq2seq' has no attribute 'DynamicAttentionWrapper'"
所以我将 DynamicAttentionWrapper 修改为 AttentionWrapper。现在我收到另一个错误: "__new__() missing 3 required positional arguments: 'time', 'alignments' and 'alignment_history'"
# TF >= 1.2 fix: DynamicAttentionWrapper was renamed to AttentionWrapper, and
# DynamicAttentionWrapperState no longer exists. AttentionWrapperState has extra
# fields ('time', 'alignments', 'alignment_history' -- the 3 "missing positional
# arguments" in the error), so the state must be built with zero_state() and
# then clone()d with the encoder's final state instead of constructed directly.
dec_cell = tf.contrib.seq2seq.AttentionWrapper(dec_cell,
                                               attn_mech,
                                               attention_layer_size=rnn_size)
# Start from a correctly-shaped zero state, then inject the encoder state.
initial_state = dec_cell.zero_state(batch_size=batch_size, dtype=tf.float32)
initial_state = initial_state.clone(cell_state=enc_state[0])
有人可以帮助我吗?
所以我按如下方式修改了图的构建代码:
# Build the training graph: wire model inputs, the seq2seq model, the
# sequence loss, and a gradient-clipped Adam training op.
# NOTE(review): indentation was lost in the original paste; restored here so
# the statements actually fall inside their `with` blocks.
train_graph = tf.Graph()
# Set the graph to default to ensure that it is ready for training
with train_graph.as_default():
    # Load the model inputs (placeholders).
    input_data, targets, lr, keep_prob, summary_length, max_summary_length, text_length = model_inputs()

    # Create the training and inference logits. The input is reversed along
    # its last axis (a common seq2seq trick to shorten source-target paths).
    training_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
                                                      targets,
                                                      keep_prob,
                                                      text_length,
                                                      summary_length,
                                                      max_summary_length,
                                                      len(vocab_to_int)+1,
                                                      rnn_size,
                                                      num_layers,
                                                      vocab_to_int,
                                                      batch_size)

    # Name the output tensors so they can be fetched by name at inference time.
    training_logits = tf.identity(training_logits.rnn_output, 'logits')
    inference_logits = tf.identity(inference_logits.sample_id, name='predictions')

    # Mask out padding positions so they do not contribute to the loss.
    masks = tf.sequence_mask(summary_length, max_summary_length, dtype=tf.float32, name='masks')

    with tf.name_scope("optimization"):
        # Loss function: average cross-entropy over the unmasked timesteps.
        cost = tf.contrib.seq2seq.sequence_loss(
            training_logits,
            targets,
            masks)

        # Optimizer.
        # NOTE(review): this uses the Python constant `learning_rate`, not the
        # `lr` placeholder returned by model_inputs() -- confirm which is
        # intended (with the constant, the `lr` feed has no effect).
        optimizer = tf.train.AdamOptimizer(learning_rate)

        # Gradient clipping to [-5, 5]; skip params with no gradient.
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)

print("Graph is built.")