How can I visualize the mean edit distance in TensorBoard using a Keras callback?

Asked: 2019-05-22 21:00:43

Tags: python tensorflow keras callback tensorboard

So far I have been experimenting with TensorFlow and Keras. I took the code from image_ocr.py, which lets me train an OCR model for printed text. I wanted to see how the training progresses, and I have successfully visualized the accuracy and loss of the model being trained. However, as far as I know, the OCR RNN does not use accuracy for validation; instead it validates the words using the mean edit distance. So I have been trying to get the variables named mean_ed and mean_norm_ed out of the class VizCallback and visualize them in TensorBoard. I have already tried the approach from this link, but it still does not work. Can anyone help me visualize the mean edit distance variables? Here is the snippet from my code:

class VizCallback(keras.callbacks.Callback):

    def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
        self.test_func = test_func
        self.output_dir = os.path.join(
            OUTPUT_DIR, run_name)
        self.text_img_gen = text_img_gen
        self.num_display_words = num_display_words
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

    def on_train_begin(self, logs={}):
        self.med = []
        self.nmed = []

    def show_edit_distance(self, num, logs={}):
        num_left = num
        mean_norm_ed = 0.0
        mean_ed = 0.0
        while num_left > 0:
            word_batch = next(self.text_img_gen)[0]
            num_proc = min(word_batch['the_input'].shape[0], num_left)
            decoded_res = decode_batch(self.test_func, word_batch['the_input'][0:num_proc])
            for j in range(num_proc):
                edit_dist = editdistance.eval(decoded_res[j], word_batch['source_str'][j])
                mean_ed += float(edit_dist)
                mean_norm_ed += float(edit_dist) / len(word_batch['source_str'][j])
            num_left -= num_proc
        mean_norm_ed = mean_norm_ed / num
        mean_ed = mean_ed / num
        #Create scalar summaries for both mean edit distance and normalized mean edit distance
        tf_med_ph = tf.placeholder(tf.float32,shape=None,name='med_summary')
        tf_nmed_ph = tf.placeholder(tf.float32,shape=None,name='nmed_summary')
        tf_med = tf.summary.scalar('med', tf_med_ph)
        tf_nmed = tf.summary.scalar('nmed', tf_nmed_ph)
        performance_summaries = tf.summary.merge([tf_med,tf_nmed])

        #Create a session for displaying the summary
        config = tf.ConfigProto(allow_soft_placement=True)
        session = tf.InteractiveSession(config=config)
        summ_writer = tf.summary.FileWriter(os.path.join('summaries','first'), session.graph)

        # Execute the summaries defined above
        summ = session.run(performance_summaries, feed_dict={tf_med_ph:mean_ed, tf_nmed_ph:mean_norm_ed})

        # Write the obtained summaries to the file, so it can be displayed in the TensorBoard
        summ_writer.add_summary(summ, epoch)

        session.close()
        print('\nOut of %d samples:  Mean edit distance: %.3f Mean normalized edit distance: %0.3f'
              % (num, mean_ed, mean_norm_ed))

    def on_epoch_end(self, epoch, logs={}):
        self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
        self.show_edit_distance(256)
        word_batch = next(self.text_img_gen)[0]
        res = decode_batch(self.test_func, word_batch['the_input'][0:self.num_display_words])
        if word_batch['the_input'][0].shape[0] < 256:
            cols = 2
        else:
            cols = 1
        for i in range(self.num_display_words):
            plt.subplot(self.num_display_words // cols, cols, i + 1)
            if K.image_data_format() == 'channels_first':
                the_input = word_batch['the_input'][i, 0, :, :]
            else:
                the_input = word_batch['the_input'][i, :, :, 0]
            plt.imshow(the_input.T, cmap='Greys_r')
            plt.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' % (word_batch['source_str'][i], res[i]))
        fig = plt.gcf()
        fig.set_size_inches(10, 13)
        plt.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
        plt.close()

def train(run_name, start_epoch, stop_epoch, img_w):
    # Input Parameters
    img_h = 64
    words_per_epoch = 16000
    val_split = 0.2
    val_words = int(words_per_epoch * (val_split))

    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512
    minibatch_size = 32

    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)

    fdir = os.path.dirname(get_file('wordlists.tgz',
                                    origin='http://test.com/wordlist.tgz', untar=True))

    img_gen = TextImageGenerator(monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
                                 bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
                                 minibatch_size=minibatch_size,
                                 img_w=img_w,
                                 img_h=img_h,
                                 downsample_factor=(pool_size ** 2),
                                 val_split=words_per_epoch - val_words
                                 )
    act = 'relu'
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    # cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)

    # Two layers of bidirectional GRUs
    # GRU seems to work as well, if not better than LSTM:

    gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
    gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)

    # transforms RNN output to character activations:
    inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()

    labels = Input(name='the_labels', shape=[img_gen.absolute_max_string_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])

    # clipnorm seems to speeds up convergence
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)

    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)

    #Make tensorboard instance
    init_op = tf.initialize_all_variables()

    sess = tf.Session()
    sess.run(init_op)
    tbname="tensorboard-of-{}".format(int(time.time()))
    tensorboard = keras.callbacks.TensorBoard(
        log_dir="logs/{}".format(tbname),
        histogram_freq=0,
        write_images=True)

    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd,
                  metrics=['accuracy'])
    if start_epoch > 0:
        weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
        model.load_weights(weight_file)
    # captures output of softmax so we can decode the output during visualization
    test_func = K.function([input_data], [y_pred])

    viz_cb = VizCallback(run_name, test_func, img_gen.next_val())

    model.fit_generator(generator=img_gen.next_train(),
                        steps_per_epoch=(words_per_epoch - val_words) // minibatch_size,
                        epochs=stop_epoch,
                        validation_data=img_gen.next_val(),
                        validation_steps=val_words // minibatch_size,
                        callbacks=[tensorboard,viz_cb, img_gen],
                        initial_epoch=start_epoch)

Any help would be greatly appreciated. Thanks!

Update: What about just passing the performance_summaries variable from the VizCallback class to the metrics in the train function? Would that help here?
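For context, this is the signature I understand Keras expects for entries in metrics= (a minimal, purely hypothetical sketch; mean_pred is not a real metric of mine), which is why I am unsure whether a merged summary op like performance_summaries can be passed there at all:

from keras import backend as K

def mean_pred(y_true, y_pred):
    # Hypothetical metric, only to show the required (y_true, y_pred) -> tensor signature
    return K.mean(y_pred)

# model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd,
#               metrics=[mean_pred])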

1 Answer:

Answer 0 (score: 0)

You can modify show_edit_distance to add the summaries every time this function is called:

def show_edit_distance(self, num, epoch):
    ...
    tf.summary.scalar('mean_ed', data=mean_ed, step=epoch)
    tf.summary.scalar('mean_norm_ed', data=mean_norm_ed, step=epoch)

Note that you will need an extra parameter epoch:

def on_epoch_end(self, epoch, logs={}):
    ...
    self.show_edit_distance(256, epoch)
    ...

The TensorBoard callback should then automatically pick up these summaries and add them to its summary collection.

Note: unfortunately, I was not able to test this solution. Please let me know if I am missing something.
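For what it's worth, here is a minimal sketch of the same idea written against the TensorFlow 1.x API that the question's code already uses (tf.placeholder, tf.Session): create a single tf.summary.FileWriter up front and write Summary protos directly, instead of rebuilding placeholders and opening a new session on every call. The class name EditDistanceLogger and the log_dir argument are illustrative, not taken from the original code, and this is untested in the asker's exact setup.

import tensorflow as tf
import keras

class EditDistanceLogger(keras.callbacks.Callback):
    # Illustrative helper: owns one FileWriter for the whole training run.
    def __init__(self, log_dir):
        self.writer = tf.summary.FileWriter(log_dir)

    def write_edit_distance(self, mean_ed, mean_norm_ed, epoch):
        # Build the Summary proto by hand; no placeholder/session round trip needed.
        summary = tf.Summary(value=[
            tf.Summary.Value(tag='mean_ed', simple_value=mean_ed),
            tf.Summary.Value(tag='mean_norm_ed', simple_value=mean_norm_ed),
        ])
        self.writer.add_summary(summary, epoch)
        self.writer.flush()

The same two calls (tf.Summary plus add_summary) could equally go at the end of show_edit_distance in VizCallback, with the writer pointed at the same logs/ directory as the existing TensorBoard callback so both sets of scalars appear side by side in TensorBoard.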