The plot in TensorBoard always closes back on itself, like a circle

Time: 2016-11-02 03:28:12

Tags: tensorboard

I am trying to plot a loss curve, but it always comes out abnormal (it closes back on itself like a circle; I really don't know how to describe it correctly in English). I found many topics about this problem but still could not solve it. My TensorFlow version is 0.10.0.
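I think what happens is that TensorBoard connects the scalar points in step order, so if one run directory contains events with overlapping step numbers (for example after restarting training from a checkpoint while the old event file is still there), the line doubles back on itself. A minimal sketch of that situation (the 'repro' directory name and the values are made up):

import tensorflow as tf

with tf.Session() as sess:
    x = tf.placeholder(tf.float32)
    summary_op = tf.scalar_summary("loss", x)
    # first "run": steps 0..9 written to the log directory
    writer = tf.train.SummaryWriter('repro')
    for step in range(10):
        writer.add_summary(sess.run(summary_op, {x: 100.0 - step}), step)
    writer.flush()
    # a second "run" into the SAME directory repeats steps 0..9
    writer2 = tf.train.SummaryWriter('repro')
    for step in range(10):
        writer2.add_summary(sess.run(summary_op, {x: 90.0 - step}), step)
    writer2.flush()
# TensorBoard would then draw one line through both sets of points,
# doubling back from step 9 to step 0 -- the "circle" I am seeing

Here is my actual script: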

import tensorflow as tf
from tensorflow.core.util.event_pb2 import SessionLog
import os
# initialize variables/model parameters
# define the training loop operations
def inputs():
    # read/generate input training data X and expected outputs Y
    weight_age = [[84,46],[73,20],[65,52],[70,30],[76,57],[69,25],[63,28],[72,36],[79,57],[75,44],[27,24]
                ,[89,31],[65,52],[57,23],[59,60],[69,48],[60,34],[79,51],[75,50],[82,34],[59,46],[67,23],
                  [85,37],[55,40],[63,30]]
    blood_fat_content = [354,190,405,263,451,302,288,385,402,365,209,290,346,
                         254,395,434,220,374,308,220,311,181,274,303,244]
    return tf.to_float(weight_age), tf.to_float(blood_fat_content)
def inference(X):
    # compute inference model over data X and return the result
    return tf.matmul(X, W) + b
def loss(X, Y):
    # compute loss over training data X and expected outputs Y
    Y_predicted = inference(X)
    return tf.reduce_sum(tf.squared_difference(Y, Y_predicted))
def train(total_loss):
    # train / adjust model parameters according to computed total loss
    learning_rate = 1e-7
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)
def evaluate(sess, X, Y):
    # evaluate the resulting trained model
    print (sess.run(inference([[80., 25.]])))
    print (sess.run(inference([[60., 25.]])))


g1 = tf.Graph()
with tf.Session(graph=g1) as sess:

    W = tf.Variable(tf.zeros([2,1]), name="weights")
    b = tf.Variable(0., name="bias")
    tf.initialize_all_variables().run()
    X, Y = inputs()
    print (sess.run(W))
    total_loss = loss(X, Y)
    train_op = train(total_loss)

    tf.scalar_summary("loss", total_loss)
    summaries = tf.merge_all_summaries()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    summary_writer = tf.train.SummaryWriter('linear', g1)
    summary_writer.add_session_log(session_log= SessionLog(status=SessionLog.START), global_step=1)
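    # NOTE: as far as I understand, this START session log is what makes
    # TensorBoard purge any events recorded after the given step; on a
    # restart it should probably be emitted with the restored initial_step
    # rather than the hard-coded 1 (which is what the commented-out line
    # further down tries to do)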
    # actual training loop
    training_steps = 100
    tolerance = 100
    total_loss_last = 0
    initial_step = 0

    # Create a saver.
    saver = tf.train.Saver()

    # check whether a checkpoint has already been saved (the prefix must match the save path below)
    ckpt = tf.train.get_checkpoint_state(os.path.dirname('my-model'))
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
        initial_step = int(ckpt.model_checkpoint_path.rsplit('-', 1)[1])
#         summary_writer.add_session_log(SessionLog(status=SessionLog.START), global_step=initial_step)
    for step in range(initial_step, training_steps):
        sess.run([train_op])
        if step % 20 == 0:
            saver.save(sess, 'my-model', global_step=step)

        # fetch the loss and the merged summaries in a single run call
        loss_value, summary_str = sess.run([total_loss, summaries])
        gap = abs(loss_value - total_loss_last)
        total_loss_last = loss_value
        summary_writer.add_summary(summary_str, step)
        # for debugging and learning purposes, see how the loss decreases through the training steps
        if step % 10 == 0:
            print("loss: ", loss_value)
        print("step: ", step)
        if gap < tolerance:
            break

    # evaluation...
    evaluate(sess, X, Y)
    coord.request_stop()
    coord.join(threads)

    saver.save(sess, 'my-model', global_step=training_steps)

    summary_writer.flush()

[Screenshot: the loss curve I plotted in TensorBoard, looping back on itself]
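From what I've read, the loop probably comes from two runs overlapping in step numbers. If that's right, one fix I'm considering (a rough sketch, not tested against my full script) is a single global_step variable shared by the optimizer, the saver and the summary writer, so a restored run keeps counting where it left off instead of starting from 0:

# sketch: one step counter shared by training, checkpoints and summaries
global_step = tf.Variable(0, trainable=False, name="global_step")
train_op = tf.train.GradientDescentOptimizer(1e-7).minimize(
    total_loss, global_step=global_step)  # incremented once per training step

# after restoring from a checkpoint, global_step is restored too
for _ in range(training_steps):
    _, loss_value, summary_str, step = sess.run(
        [train_op, total_loss, summaries, global_step])
    summary_writer.add_summary(summary_str, step)  # steps never repeat
    if step % 20 == 0:
        saver.save(sess, 'my-model', global_step=step)

Alternatively, I could point tf.train.SummaryWriter at a fresh subdirectory for every run (e.g. 'linear/run2'), so TensorBoard shows each run as its own curve.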

0 Answers:

No answers