Tensorboard:运行摘要合并时出现 TypeError: Fetch argument None has invalid type &lt;class 'NoneType'&gt;

时间:2018-12-30 04:30:15

标签: tensorflow tensorboard tensorflow-datasets

我想测量正向传播中各个操作的耗时,因此参考了 TensorFlow 的官方教程。这在训练阶段工作正常,但在预测阶段出现了错误。

预测阶段,我构建测试集的管道

# Feedable-iterator pattern: a string-valued placeholder selects, at
# sess.run time, which concrete dataset iterator feeds the graph.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, predict_dataset.output_types, predict_dataset.output_shapes)
# NOTE(review): despite the `_init_op` name this is a one-shot *iterator*,
# not an init op; its string_handle() is resolved and fed into `handle`.
predict_init_op = predict_dataset.make_one_shot_iterator()
next_element = iterator.get_next()

在会话中运行:

# tf.summary.merge_all() returns None when the current graph defines no
# summary ops. Fetching None in sess.run raises exactly the reported
# "TypeError: Fetch argument None has invalid type <class 'NoneType'>",
# so the fetch below is guarded on `merged is not None`.
merged = tf.summary.merge_all()

predict_result = []  # accumulates the pred_ids batch from every step

with tf.Session() as sess:
    global_step = 0
    saver.restore(sess, './output/msra_24_crf_result_dir/model.ckpt-26999')
    # sess.run(tf.global_variables_initializer())
    predict_writer = tf.summary.FileWriter(FLAGS.output_dir+'/predict' , sess.graph)
    predict_init_handle = sess.run(predict_init_op.string_handle())
    while True:
        try:
            tf.logging.info("======================= the %d step starts ==================="%global_step)
            # Full tracing so per-op compute time shows up in TensorBoard.
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            if merged is not None:
                summary, label, result = sess.run(
                    [merged, label_ids, pred_ids],
                    feed_dict={handle: predict_init_handle},
                    options=run_options, run_metadata=run_metadata)
            else:
                # No summary ops in this graph: fetch only the predictions.
                summary = None
                label, result = sess.run(
                    [label_ids, pred_ids],
                    feed_dict={handle: predict_init_handle},
                    options=run_options, run_metadata=run_metadata)
            # BUG FIX: the original wrote to train_writer keyed by the
            # undefined variable `step` (leftover from the training loop);
            # the prediction trace belongs to predict_writer at global_step.
            predict_writer.add_run_metadata(run_metadata, 'step%03d' % global_step)
            if summary is not None:
                predict_writer.add_summary(summary, global_step=global_step)
            predict_result.append(result)
            print("the label is", label)
            print("the result is", result)
            # result_to_pair(predict_examples, result)
            global_step += 1
        except tf.errors.OutOfRangeError:
            # One-shot iterator exhausted: every test batch has been consumed.
            break
    predict_writer.close()

我在运行阶段也做同样的事情,但是没有任何问题。训练阶段的数据管道:

# Same feedable-iterator pattern as prediction: one placeholder handle
# switches between the training and dev iterators inside a single session.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, train_dataset.output_types, train_dataset.output_shapes)
# Training uses a one-shot iterator (cannot be re-initialized) ...
train_init_op = train_dataset.make_one_shot_iterator()
# ... while the dev iterator is initializable so it can be rewound
# at the start of every evaluation pass (see dev_init_op.initializer).
dev_init_op = dev_dataset.make_initializable_iterator()
next_element = iterator.get_next()

会话运行:

    merged = tf.summary.merge_all()

tf.logging.info("==================Entering Session Running=========================")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer()) 
    train_writer = tf.summary.FileWriter(FLAGS.output_dir , sess.graph)
    dev_writer = tf.summary.FileWriter(FLAGS.output_dir + '/eval')
    train_iterator_handle = sess.run(train_init_op.string_handle())
    dev_iterator_handle = sess.run(dev_init_op.string_handle())
    for step in range(num_train_steps):
        if step % 100 == 0:
            tf.logging.info("===============evaluate at %d step=============="%step)
            sess.run(running_vars_initializer)
            sess.run(dev_init_op.initializer)
            # while True:
            while True:
                try:
                    # print(sess.run([label_ids, pred_ids], feed_dict={handle: dev_iterator_handle}))
                    summary,_,_,_ = sess.run([merged, prec_op, recall_op, f1_op], feed_dict={handle: dev_iterator_handle})
                except tf.errors.OutOfRangeError:
                    break
            dev_writer.add_summary(summary, step)
            _precision, _recall, _f1 = sess.run([prec_scalar, recall_scalar, f1_scalar])
            print("At step {}, the precision is {:.2f}%,the recall is {:.2f}%,the f1 is {:.2f}%".format(step, _precision*100, _recall*100, _f1*100))
        else:
            if step % 1000 == 999:
                tf.logging.info("===============save model at %d step==============" % step)
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, _ , _total_loss= sess.run([merged, train_op, total_loss],
                                      feed_dict={handle: train_iterator_handle},
                                      options=run_options,
                                      run_metadata=run_metadata)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % step)
                train_writer.add_summary(summary, step)
                tf.logging.info("========== the total loss is %.5f ===============" %(_total_loss))
                print('Adding run metadata for', step)
                save_path = saver.save(sess, os.path.join(FLAGS.output_dir, "model.ckpt"), global_step=step)
                print("Model saved in path: %s" % save_path)
            else:
                # print(sess.run([pred_ids, label_ids], feed_dict={handle: train_iterator_handle}))
                summary, _ = sess.run([merged, train_op], feed_dict={handle: train_iterator_handle})
                train_writer.add_summary(summary, step)
    train_writer.close()
    dev_writer.close()

我想知道训练阶段的总计算时间。它指的是前向和反向传播的整体计算时间,还是只包含前向传播的计算时间?我正在尝试修复上述错误,如能提供帮助将不胜感激。

0 个答案:

没有答案