Can I export TensorFlow summaries to CSV?

Time: 2017-02-20 22:10:34

Tags: csv tensorflow

Is there a way to extract scalar summaries to CSV from tfevents files (preferably from within TensorBoard)?

Sample code

The following code generates tfevents files in a summary_dir inside the same directory. Suppose you let it run and you notice something interesting. You want to get at the raw data for further investigation. How would you do that?

#!/usr/bin/env python
"""A very simple MNIST classifier."""
import argparse
import sys

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf
ce_with_logits = tf.nn.softmax_cross_entropy_with_logits

FLAGS = None


def inference(x):
    """
    Build the inference graph.

    Parameters
    ----------
    x : placeholder

    Returns
    -------
    Output tensor with the computed logits.
    """
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b
    return y


def loss(logits, labels):
    """
    Calculate the loss from the logits and the labels.

    Parameters
    ----------
    logits : Logits tensor, float - [batch_size, NUM_CLASSES].
    labels : Labels tensor, int32 - [batch_size]
    """
    cross_entropy = tf.reduce_mean(ce_with_logits(labels=labels,
                                                  logits=logits))
    return cross_entropy


def training(loss, learning_rate=0.5):
    """
    Set up the training Ops.

    Parameters
    ----------
    loss : Loss tensor, from loss().
    learning_rate : The learning rate to use for gradient descent.

    Returns
    -------
    train_op: The Op for training.
    """
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_step = optimizer.minimize(loss)
    return train_step


def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    y = inference(x)

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])
    loss_ = loss(logits=y, labels=y_)
    train_step = training(loss_)

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.name_scope('accuracy'):
        tf.summary.scalar('accuracy', accuracy)
    merged = tf.summary.merge_all()

    sess = tf.InteractiveSession()
    train_writer = tf.summary.FileWriter('summary_dir/train', sess.graph)
    test_writer = tf.summary.FileWriter('summary_dir/test', sess.graph)
    tf.global_variables_initializer().run()

    for train_step_i in range(100000):
        if train_step_i % 100 == 0:
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict={x: mnist.test.images,
                                               y_: mnist.test.labels})
            test_writer.add_summary(summary, train_step_i)
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict={x: mnist.train.images,
                                               y_: mnist.train.labels})
            train_writer.add_summary(summary, train_step_i)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir',
                        type=str,
                        default='/tmp/tensorflow/mnist/input_data',
                        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)

4 Answers:

Answer 0 (score: 13)

Just enable the "Data download links" option in the upper-left corner of TensorBoard, then click the "CSV" button that appears below the scalar chart.

[screenshot: TensorBoard scalar dashboard with the CSV/JSON download links visible]
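
If you would rather script the same download, TensorBoard also serves these CSVs over HTTP. A minimal sketch, assuming TensorBoard is already running on localhost:6006 for the question's summary_dir; the scalars-plugin route and the run/tag names below are assumptions and may differ between TensorBoard versions and your summary names:

import urllib.request

# Assumed route of the scalars plugin; run and tag are taken from the
# question's setup ('train' run, 'accuracy/accuracy' tag) and may differ.
URL = ('http://localhost:6006/data/plugin/scalars/scalars'
       '?run=train&tag=accuracy%2Faccuracy&format=csv')

with urllib.request.urlopen(URL) as response:
    csv_rows = response.read().decode('utf-8')  # columns: wall_time,step,value

with open('accuracy_train.csv', 'w') as f:
    f.write(csv_rows)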

Answer 1 (score: 11)

Even though the accepted answer works within TensorBoard, as asked, it only lets you download a CSV for a single run of a single tag. If you have, say, 10 tags and 20 runs (which is not many at all), you would have to repeat the step above 200 times (that alone would probably take you more than an hour). And if for some reason you then wanted to actually process the data of all runs for a single tag, you would need to write some odd CSV-accumulation script or copy everything by hand (which would probably cost you more than a day).

So I want to add a solution that extracts one CSV file per tag, containing all runs. The column headers are the run path names and the row index is the run step number.

import os
import numpy as np
import pandas as pd

from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def tabulate_events(dpath):
    # Load every run directory under dpath into an EventAccumulator.
    summary_iterators = [EventAccumulator(os.path.join(dpath, dname)).Reload() for dname in os.listdir(dpath)]

    # All runs must have logged exactly the same scalar tags.
    tags = summary_iterators[0].Tags()['scalars']

    for it in summary_iterators:
        assert it.Tags()['scalars'] == tags

    out = defaultdict(list)
    steps = []

    for tag in tags:
        steps = [e.step for e in summary_iterators[0].Scalars(tag)]

        # Walk the runs in lockstep; every run must have logged the same steps.
        for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
            assert len(set(e.step for e in events)) == 1

            out[tag].append([e.value for e in events])

    return out, steps


def to_csv(dpath):
    dirs = os.listdir(dpath)

    d, steps = tabulate_events(dpath)
    tags, values = zip(*d.items())
    np_values = np.array(values)  # shape: (n_tags, n_steps, n_runs)

    # One CSV per tag: rows are steps, columns are run directories.
    for index, tag in enumerate(tags):
        df = pd.DataFrame(np_values[index], index=steps, columns=dirs)
        df.to_csv(get_file_path(dpath, tag))


def get_file_path(dpath, tag):
    # Tags may contain '/', which is not valid in a file name here.
    file_name = tag.replace("/", "_") + '.csv'
    folder_path = os.path.join(dpath, 'csv')
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    return os.path.join(folder_path, file_name)


if __name__ == '__main__':
    path = "path_to_your_summaries"
    to_csv(path)

My solution is based on: https://stackoverflow.com/a/48774926/2230045
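
For completeness: if you only want to dump a single event file and avoid EventAccumulator altogether, the raw events can also be read with tf.train.summary_iterator. A minimal sketch, assuming the TF 1.x API used in the question; the event-file path is a placeholder you need to adjust:

import csv
import tensorflow as tf

EVENT_FILE = 'summary_dir/train/events.out.tfevents.XXXX'  # placeholder path

with open('scalars.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['wall_time', 'step', 'tag', 'value'])
    for event in tf.train.summary_iterator(EVENT_FILE):
        for value in event.summary.value:
            # Scalar summaries carry their payload in simple_value.
            if value.HasField('simple_value'):
                writer.writerow([event.wall_time, event.step,
                                 value.tag, value.simple_value])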


EDIT:

I created a more sophisticated version and released it on GitHub: https://github.com/Spenhouet/tensorboard-aggregator

This version aggregates multiple TensorBoard runs and can save the aggregates to a new TensorBoard summary or to .csv files.

Answer 2 (score: 2)

Here is my solution, which builds on the previous one but can scale up.

import os
import numpy as np
import pandas as pd

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def tabulate_events(dpath):
    final_out = {}
    for dname in os.listdir(dpath):
        print(f"Converting run {dname}", end="")
        ea = EventAccumulator(os.path.join(dpath, dname)).Reload()
        tags = ea.Tags()['scalars']

        out = {}

        for tag in tags:
            tag_values = []
            wall_time = []
            steps = []

            for event in ea.Scalars(tag):
                tag_values.append(event.value)
                wall_time.append(event.wall_time)
                steps.append(event.step)

            # Two rows ('value', 'wall_time'), one column per step.
            out[tag] = pd.DataFrame(
                data=dict(zip(steps, np.array([tag_values, wall_time]).transpose())),
                columns=steps, index=['value', 'wall_time'])

        if len(tags) > 0:
            df = pd.concat(out.values(), keys=out.keys())
            df.to_csv(f'{dname}.csv')
            print(" - Done")
            # Only keep runs that actually had scalars to write.
            final_out[dname] = df
        else:
            print(" - No scalars to write")

    return final_out


if __name__ == '__main__':
    path = "your/path/here"
    results = tabulate_events(path)
    pd.concat(results.values(), keys=results.keys()).to_csv('all_result.csv')

Answer 3 (score: 0)

Just adding to @Spen's answer:

This exports the data even when the number of steps differs between runs, and produces one big CSV file. You may need to change the keys for it to work for your setup.

import glob

import numpy as np
import pandas as pd

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Every subdirectory of the current working directory is treated as one run.
listOutput = glob.glob("*/")

listDF = []

for tb_output_folder in listOutput:
    print(tb_output_folder)
    x = EventAccumulator(path=tb_output_folder)
    x.Reload()

    # Adjust these keys to the scalar tags your runs actually log.
    keys = ['loss', 'mean_absolute_error', 'val_loss', 'val_mean_absolute_error']

    steps = [e.step for e in x.Scalars(keys[0])]
    n_steps = len(steps)
    listRun = [tb_output_folder] * n_steps

    # One row per step, one column per key.
    data = np.zeros((n_steps, len(keys)))
    for i in range(len(keys)):
        data[:, i] = [e.value for e in x.Scalars(keys[i])]

    printOutDict = {key: data[:, i] for i, key in enumerate(keys)}
    printOutDict['Name'] = listRun

    DF = pd.DataFrame(data=printOutDict)

    listDF.append(DF)

df = pd.concat(listDF)
df.to_csv('Output.csv')
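
Note that the script globs "*/" relative to the current working directory, so run it from the directory that contains the individual run folders; Output.csv is written to that same directory.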