我编写了一个 Bi-LSTM 语言模型,并参考 https://github.com/yangsaiyong/tf-adaptive-softmax-lstm-lm/blob/master/train_lm.py 使用了 adaptive softmax。
def train(train_data, vocabulary_size, args):
    """Train the BiRNN language model over `train_data`.

    Logs the loss and writes a checkpoint every 100 global steps, and
    resumes from the latest checkpoint in ``args.model_path`` if one exists.

    Args:
        train_data: iterable of training examples consumed by `batch_iter`.
        vocabulary_size: size of the vocabulary, forwarded to the model.
        args: namespace with at least `model_path`, `keep_prob`,
            `batch_size`, `num_epochs`.
    """
    import os  # local import: path handling for the checkpoint prefix

    with tf.Session() as sess:
        model = BiRNNLanguageModel(vocabulary_size, args)
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        # Resume from the latest checkpoint if one exists. The global-step
        # variable is restored along with the other variables, so there is
        # no need to parse the step number out of the checkpoint filename.
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir=args.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # os.path.join works whether or not model_path ends with a separator
        # (the original `args.model_path + 'bilstm-model'` silently produced
        # a wrong path when the trailing '/' was missing).
        save_prefix = os.path.join(args.model_path, 'bilstm-model')

        def train_step(batch_x):
            """Run one optimization step; log/checkpoint every 100 steps."""
            feed_dict = {model.x: batch_x, model.keep_prob: args.keep_prob}
            # NOTE(review): the reported "Fetch argument 0.0 has invalid type
            # <class 'numpy.float32'>" means a *numpy value* (a previous
            # sess.run result) was handed to sess.run/Saver where a Tensor was
            # expected. Make sure nothing ever assigns a fetched value back
            # onto the model (e.g. `model.cost = sess.run(model.cost)`), and
            # that `model.global_steps` / `model.cost` still hold Tensors here.
            step, loss, _ = sess.run(
                [model.global_steps, model.cost, model.train_op],
                feed_dict=feed_dict)
            if step % 100 == 0:
                print("step {0}: loss = {1}".format(step, loss))
                # Coerce the fetched numpy scalar to a plain int so Saver
                # treats it as a step number, never as something to fetch.
                saver.save(sess, save_prefix, global_step=int(step))

        batches = batch_iter(train_data, args.batch_size, args.num_epochs)
        for batch_x in batches:
            train_step(batch_x)
但是当我调用 saver.save 保存模型时,报出了下面的错误:
TypeError: Fetch argument 0.0 has invalid type <class 'numpy.float32'>, must be a string or Tensor. (Can not convert a float32 into a Tensor or Operation.)