我尝试了以下示例。我正在尝试通过Tensorboard可视化数据。我想使用投影机。但是我在投影机上看到了其他东西。看一下示例和输出:
# Build the TF1 graph: multi-layer LSTM -> per-step dense projection -> last-step output.
tf.reset_default_graph()
# Inputs: X is (batch, n_steps, n_inputs); y is (batch, n_outputs).
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None,n_outputs])
# n_layers identical LSTM cells: peephole connections, leaky-ReLU activation.
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,
activation=tf.nn.leaky_relu, use_peepholes = True)
for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
# rnn_outputs: (batch, n_steps, n_neurons); states: final per-layer LSTM state.
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# Flatten (batch, time) so a single dense layer maps n_neurons -> n_outputs per step,
# then reshape back and keep only the last time step for the loss.
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = outputs[:,n_steps-1,:] # keep only last output of sequence
loss = tf.reduce_mean(tf.squared_difference(outputs, y)) # loss function = mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
saver = tf.train.Saver()
# Summaries feed TensorBoard's Scalars/Histograms tabs only.
# NOTE(review): summaries do NOT feed the Projector tab. The Projector reads
# 2-D tf.Variable data out of saved checkpoints, which is why only the LSTM
# kernel/Adam variables appear there — these summary tensors never will.
tf.summary.scalar("loss",loss)
tf.summary.histogram("outputs",outputs)
tf.summary.histogram("y",y)
tf.summary.histogram("X",X)
tf.summary.histogram("rnn_outputs",rnn_outputs)
tf.summary.histogram("states",states)
tf.summary.histogram("stacked_outputs",stacked_outputs)
tf.summary.histogram("stacked_rnn_outputs",stacked_rnn_outputs)
# Run training, write summaries/checkpoints for TensorBoard.
# NOTE(review): the pasted snippet had lost its indentation; restored here to
# the obvious structure.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Create the log/checkpoint directory up front instead of discovering a
    # missing directory via a failed saver.save() wrapped in try/except.
    log_dir = "write"
    os.makedirs(log_dir, exist_ok=True)

    writer = tf.summary.FileWriter(log_dir, sess.graph)
    merged = tf.summary.merge_all()

    # NOTE(review): this ProjectorConfig has no embeddings registered, so
    # visualize_embeddings only writes an (empty) projector_config.pbtxt and
    # the Projector tab falls back to showing every 2-D tf.Variable found in
    # the checkpoints (the LSTM kernels). To project a specific tensor, copy
    # it into a tf.Variable and register it via config.embeddings.add().
    # Writing the config once before the loop is sufficient.
    config = projector.ProjectorConfig()
    projector.visualize_embeddings(writer, config)

    for iteration in range(int(n_epochs*train_set_size/batch_size)):
        x_batch, y_batch = get_next_batch(batch_size)  # fetch the next training batch

        # BUG FIX(review): the original never ran training_op, so the fetched
        # batch was unused and no training happened at all.
        sess.run(training_op, feed_dict={X: x_batch, y: y_batch})

        # Log once per epoch.
        if iteration % int(1*train_set_size/batch_size) == 0:
            mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
            out_train, merge = sess.run([outputs, merged],
                                        feed_dict={X: x_train, y: y_train})
            writer.add_summary(merge, iteration)
            # NOTE(review): mse_valid / mse_test are printed but never computed
            # in this snippet — presumably evaluated elsewhere; verify.
            print('%.2f epochs: MSE train/valid/test = %.10f/%.10f/%.10f'%(
                iteration*batch_size/train_set_size, mse_train, mse_valid, mse_test))
            # os.path.join replaces the Windows-only "write\\..." literal so the
            # checkpoint path also works on POSIX systems.
            save_path = saver.save(
                sess, os.path.join(log_dir, "model" + str(iteration) + ".ckpt"))
而投影机（Projector）上实际显示的输出是以下变量：
rnn/multi_rnn_cell/cell_1/lstm_cell/kernel
200x400
rnn/multi_rnn_cell/cell_1/lstm_cell/kernel/Adam
200x400
rnn/multi_rnn_cell/cell_1/lstm_cell/kernel/Adam_1
200x400
rnn/multi_rnn_cell/cell_0/lstm_cell/kernel
102x400
rnn/multi_rnn_cell/cell_0/lstm_cell/kernel/Adam
102x400
rnn/multi_rnn_cell/cell_0/lstm_cell/kernel/Adam_1
102x400
我原本期望：凡是我通过标量（scalar）和直方图（histogram）可视化的变量，也应该能够通过投影机可视化，即这些变量：loss、outputs、y、X、rnn_outputs、states、stacked_outputs、stacked_rnn_outputs。
我很愿意看到我的LSTM如何朝着我希望获得的期望输出迈进。
请帮助我。如果您有任何建议，请与我分享。
我正在使用最新的Tensorflow 1.12
Python 版本为 3.5。