Custom learning rate scheduler for TF2 and Keras

Date: 2020-10-20 13:26:13

Tags: python tensorflow keras

I am trying to write a custom learning rate schedule: cosine annealing with warm-up. But I cannot get it to work in either Keras or TensorFlow. Here is the code:

import tensorflow as tf
import numpy as np


def make_linear_lr(min_lr, max_lr, number_of_steps):
    def gen_lr(step):
        return (max_lr - min_lr) / number_of_steps * step + min_lr
    return gen_lr

def make_cosine_anneal_lr(learning_rate, alpha, decay_steps):
    def gen_lr(global_step):
        global_step = min(global_step, decay_steps)
        cosine_decay = 0.5 * (1 + np.cos(np.pi * global_step / decay_steps))
        decayed = (1 - alpha) * cosine_decay + alpha
        decayed_learning_rate = learning_rate * decayed
        return decayed_learning_rate
    return gen_lr

def make_cosine_annealing_with_warmup(min_lr, max_lr, number_of_steps, alpha, decay_steps):
    gen_lr_1 = make_linear_lr(min_lr, max_lr, number_of_steps)
    gen_lr_2 = make_cosine_anneal_lr(max_lr, alpha, decay_steps)
    def gen_lr(global_step):
        if global_step < number_of_steps:
            return gen_lr_1(global_step)
        else:
            return gen_lr_2(global_step - number_of_steps)
        
    return gen_lr

class CosineAnnealingWithWarmUP(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, min_lr, max_lr, number_of_steps, alpha, decay_steps):
        super(CosineAnnealingWithWarmUP, self).__init__()
        self.gen_lr_ca = make_cosine_annealing_with_warmup(
            min_lr, max_lr, number_of_steps, alpha, decay_steps)

    def __call__(self, step):
        return tf.cast(self.gen_lr_ca(step), tf.float32)


learning_rate_fn = CosineAnnealingWithWarmUP(.0000001, 0.01, 10_000, 0, 150_000)
optimizer = tf.keras.optimizers.SGD(
    learning_rate=learning_rate_fn,
    momentum=0.95)

I train my model in TensorFlow with this function:

def get_model_train_step_function(model, optimizer, vars_to_fine_tune, batch_size):
  @tf.function
  def train_step_fn(image_tensors,
                    groundtruth_boxes_list,
                    groundtruth_classes_list):
    shapes = tf.constant(batch_size * [[640, 640, 3]], dtype=tf.int32)
    model.provide_groundtruth(
        groundtruth_boxes_list=groundtruth_boxes_list,
        groundtruth_classes_list=groundtruth_classes_list)
    with tf.GradientTape() as tape:
      preprocessed_images = tf.concat(
          [model.preprocess(
              image_tensor
           )[0]
           for image_tensor in image_tensors], axis=0)
      prediction_dict = model.predict(preprocessed_images, shapes)
      losses_dict = model.loss(prediction_dict, shapes)
      total_loss = losses_dict['Loss/localization_loss'] + losses_dict['Loss/classification_loss']
      gradients = tape.gradient(total_loss, vars_to_fine_tune)
      optimizer.apply_gradients(zip(gradients, vars_to_fine_tune))
    return total_loss

  return train_step_fn 

When I try to use this with TensorFlow, passing the optimizer into get_model_train_step_function, it works if I remove the @tf.function decorator. With @tf.function it fails, and the error says: OperatorNotAllowedInGraphError: using a tf.Tensor as a Python bool is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.
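For reference, the error comes from a Python operation that needs a concrete boolean value while the function is being traced. A minimal sketch that reproduces the same class of error (the function name and constants are illustrative, assuming TF 2.x; this is not code from the post):

import tensorflow as tf

@tf.function
def broken_schedule(step):
    # Python's built-in min() truth-tests the tensor comparison (step < 10),
    # and calling bool() on a symbolic tensor inside a traced function raises
    # OperatorNotAllowedInGraphError.
    return min(step, 10)

broken_schedule(tf.constant(5))  # fails during tracing

Both the min() call and the plain Python "if global_step < number_of_steps:" in the schedule above hit this restriction, which is what the answer below works around.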

How should I write a custom learning rate scheduler? I would also like to use this schedule with Keras, but there it does not work at all.

1 Answer:

Answer 0 (score: 1)

You need to get rid of the numpy calls and replace the Python conditionals ("if", "min") with TensorFlow operators:

def make_cosine_anneal_lr(learning_rate, alpha, decay_steps):
    def gen_lr(global_step):

        #global_step = min(global_step, decay_steps)

        global_step = tf.minimum(global_step, decay_steps)
        cosine_decay = 0.5 * (1 + tf.cos(3.1415926 * global_step / decay_steps))  # np.cos/np.pi replaced with tf.cos and a float literal
        decayed = (1 - alpha) * cosine_decay + alpha
        decayed_learning_rate = learning_rate * decayed
        return decayed_learning_rate
    return gen_lr

def make_cosine_annealing_with_warmup(min_lr, max_lr, number_of_steps, alpha, decay_steps):
    gen_lr_1 = make_linear_lr(min_lr, max_lr, number_of_steps)
    gen_lr_2 = make_cosine_anneal_lr(max_lr, alpha, decay_steps)
    def gen_lr(global_step):

        #if global_step < number_of_steps:
        #    return gen_lr_1(global_step)
        #else:
        #    return gen_lr_2(global_step - number_of_steps)

        a = global_step < number_of_steps
        a = tf.cast(a, tf.float32)
        b = 1. - a
        return a * gen_lr_1(global_step) + b * gen_lr_2(global_step - number_of_steps)

    return gen_lr
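The cast trick keeps the branch selection as tensor arithmetic: a is 1.0 during warm-up and 0.0 afterwards, so exactly one of the two terms contributes and no Python bool is ever evaluated. An equivalent formulation of the inner function (a sketch, not part of the original answer) uses tf.where:

    def gen_lr(global_step):
        # tf.where selects between the warm-up and cosine branches as a graph op,
        # so it traces cleanly under @tf.function; note both branches are computed.
        return tf.where(global_step < number_of_steps,
                        gen_lr_1(global_step),
                        gen_lr_2(global_step - number_of_steps))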

A schedule like this works well with Keras.
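For completeness, here is a minimal sketch of plugging the fixed schedule into a plain Keras model via model.compile (the Sequential model, the loss, and the commented-out x_train/y_train are placeholders, not from the original post):

learning_rate_fn = CosineAnnealingWithWarmUP(.0000001, 0.01, 10_000, 0, 150_000)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10),
])
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate_fn, momentum=0.95),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
# model.fit(x_train, y_train, epochs=5)  # the schedule is evaluated once per optimizer step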