Interpreting a TensorFlow timeline profile

Date: 2017-05-17 16:31:21

Tags: tensorflow

I wrote some TensorFlow code and generated a timeline plot using tensorflow.python.client.timeline. However, I am not sure how to interpret the timeline and identify the bottlenecks in the code. At the moment it is only a basic CPU-based data-augmentation script, but some help on how to read the resulting plot would be very useful.

TensorFlow version used: v1.1 (built from source)

My code is as follows:

    import tensorflow as tf
    from tensorflow.python.client import timeline
    import matplotlib.pyplot as plt

    def coloraugment(image):
        # Randomly jitter brightness, saturation and contrast,
        # clipping back to [0, 1] after each step.
        output = tf.image.random_brightness(image, max_delta=100./255.)
        output = tf.clip_by_value(output, 0.0, 1.0)
        output = tf.image.random_saturation(output, lower=0.2, upper=2)
        output = tf.clip_by_value(output, 0.0, 1.0)
        output = tf.image.random_contrast(output, lower=0.2, upper=2)
        output = tf.clip_by_value(output, 0.0, 1.0)
        return output


    def augmentbody(image, sz):
        # Build a batch of 10 images: 5 random crops (the first one is left
        # colour-unaugmented), each paired with its horizontal flip.
        for i in range(5):
            if i == 0:
                cropped = tf.random_crop(value=image, size=sz)
                croppedflipped = tf.image.flip_left_right(cropped)
                out = tf.stack([cropped, croppedflipped], axis=0)
            else:
                cropimg = tf.random_crop(value=image, size=sz)
                augcolor = coloraugment(cropimg)
                augflipped = tf.image.flip_left_right(augcolor)
                coll = tf.stack([augcolor, augflipped], axis=0)
                out = tf.concat([coll, out], axis=0)

        out = tf.random_shuffle(out)
        return out


    def aspect1(aspectratio):
        # Target size when height <= width: the height (shorter side) becomes 256.
        jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
        with jit_scope():
            newheight = tf.constant(256, dtype=tf.float32)
            newwidth = tf.divide(newheight, aspectratio)
            newsize = tf.stack([newheight, newwidth], axis=0)
            newsize = tf.cast(newsize, dtype=tf.int32)
        return newsize


    def aspect2(aspectratio):
        # Target size when height > width: the width (shorter side) becomes 256.
        jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
        with jit_scope():
            newwidth = tf.constant(256, dtype=tf.float32)
            newheight = tf.multiply(newwidth, aspectratio)
            newsize = tf.stack([newheight, newwidth], axis=0)
            newsize = tf.cast(newsize, dtype=tf.int32)
        return newsize


    def resize_image(image):
        # Resize so that the shorter side becomes 256 pixels, preserving the aspect ratio.
        imageshape = tf.shape(image)
        imageheight = tf.cast(tf.gather(imageshape, tf.constant(0, dtype=tf.int32)),
                              dtype=tf.float32)
        imagewidth = tf.cast(tf.gather(imageshape, tf.constant(1, dtype=tf.int32)),
                             dtype=tf.float32)

        aspectratio = tf.divide(imageheight, imagewidth)
        newsize = tf.cond(tf.less_equal(imageheight, imagewidth),
                          lambda: aspect1(aspectratio),
                          lambda: aspect2(aspectratio))
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = tf.image.resize_images(image, newsize)
        return image

    def readimage(input_queue):
        # Read one file from the input queue and decode it as a JPEG.
        reader = tf.WholeFileReader()
        key, value = reader.read(input_queue)
        image = tf.image.decode_jpeg(value)
        image = resize_image(image)
        return image


    if __name__ == "__main__":
        jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
        with jit_scope():
            queue = tf.train.string_input_producer(["Lena.jpg"])
            image = readimage(queue)
            augmented = augmentbody(image, [221,221,3])
            init_op = tf.global_variables_initializer()
            config = tf.ConfigProto()
            config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
            with tf.Session(config=config) as sess:
                # Ask the session to trace per-op execution times for this run.
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                sess.run(init_op)
                [tense] = sess.run([augmented], options=run_options, run_metadata=run_metadata)
                coord.request_stop()
                coord.join(threads)
                # Convert the collected step stats into Chrome trace format,
                # which can be opened in chrome://tracing.
                tl = timeline.Timeline(run_metadata.step_stats)
                ctf = tl.generate_chrome_trace_format()
                with open('timeline.json', 'w') as f:
                    f.write(ctf)

        print("The tensor size is {}".format(tense.shape))
        numcols = tense.shape[0] // 2  # integer division so subplot gets an int
        for i in range(tense.shape[0]):
            plt.subplot(2, numcols, i + 1)
            plt.imshow(tense[i, :, :, :])

        plt.show()
        plt.close()
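
For reference, the profiling boilerplate in the script above boils down to roughly the following pattern (a minimal sketch using a placeholder op, `some_op`, rather than my actual augmentation graph); the resulting timeline.json is what I open in chrome://tracing to get the timeline plot:

    import tensorflow as tf
    from tensorflow.python.client import timeline

    # Placeholder op standing in for the augmentation graph above.
    some_op = tf.reduce_sum(tf.random_normal([1000, 1000]))

    with tf.Session() as sess:
        # FULL_TRACE asks the runtime to record per-op start times and durations.
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        sess.run(some_op, options=run_options, run_metadata=run_metadata)

        # step_stats holds per-device stats; each device appears as its own
        # pid row when the trace is loaded in chrome://tracing.
        tl = timeline.Timeline(run_metadata.step_stats)
        with open('timeline.json', 'w') as f:
            f.write(tl.generate_chrome_trace_format())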

My main questions are as follows:

  1. How do I interpret this plot? (If a closer look at the timeline is needed, it can be found here.)
  2. How do I understand the overhead of the XLA compiler, and what does _XLA_LAUNCH mean in the plot?
  3. How can I work out the order in which the operations are executed?
  4. What do the different pids in the plot represent?

[Image: Timeline plot generated using TensorFlow]

0 Answers:

There are no answers