How to fine-tune Inception V3 with Keras for multi-class classification?

Time: 2017-08-24 03:23:49

Tags: deep-learning classification keras

I want to do two-class image classification with Keras, using the Cats vs. Dogs dataset from Kaggle.com. But I have a problem with the "class_mode" parameter, as shown in the code below. If I use "binary" mode, the accuracy is about 95%, but if I use "categorical" the accuracy is abnormally low, only slightly above 50%.

Binary mode means the last layer has a single output with a sigmoid activation for classification, and each sample's label is a single integer.

Categorical mode means the last layer has two outputs with a softmax activation, and each sample's label is in one-hot format, e.g. (1, 0) or (0, 1).

I think these two setups should produce similar results. Does anyone know the reason for the difference? Thanks a lot!
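For reference, the two output heads should indeed be mathematically equivalent for two classes: a sigmoid on a single logit z gives the same probability as a softmax over the pair (0, z). A minimal numpy sketch (not part of my script below) demonstrating the equivalence:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def softmax(logits):
    e = np.exp(logits - np.max(logits))  # subtract max for numerical stability
    return e / e.sum()

z = 1.7  # arbitrary logit value
print(sigmoid(z))                      # 0.84553...
print(softmax(np.array([0.0, z]))[1])  # 0.84553..., identical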

import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt

from keras import __version__
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD

Set some parameters here:

IM_WIDTH, IM_HEIGHT = 299, 299 #fixed size for InceptionV3
NB_EPOCHS = 1
BAT_SIZE = 32
FC_SIZE = 1024
NB_IV3_LAYERS_TO_FREEZE = 172
loss_mode = "binary_crossentropy"

def get_nb_files(directory):
  """Get number of files by searching directory recursively"""
  if not os.path.exists(directory):
    return 0
  cnt = 0
  for r, dirs, files in os.walk(directory):
    for dr in dirs:
      cnt += len(glob.glob(os.path.join(r, dr + "/*")))
  return cnt

Transfer learning: keep the pretrained Inception V3 weights frozen.

def setup_to_transfer_learn(model, base_model):
  """Freeze all layers and compile the model"""
  for layer in base_model.layers:
    layer.trainable = False

  model.compile(optimizer='rmsprop', loss=loss_mode, metrics=['accuracy'])

Add a new last layer for two-class classification.

def add_new_last_layer(base_model, nb_classes):
  """Add last layer to the convnet
  Args:
    base_model: keras model excluding top
    nb_classes: # of classes
  Returns:
    new keras model with last layer
  """
  x = base_model.output
  x = GlobalAveragePooling2D()(x)
  x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init
  if args.class_mode == "binary":
    predictions = Dense(1, activation='sigmoid')(x) #new sigmoid layer for binary mode
  else:
    predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer for categorical mode
  model = Model(inputs=base_model.input, outputs=predictions)
  return model

Freeze the bottom NB_IV3_LAYERS, retrain the remaining top layers, and fine-tune the weights.

def setup_to_finetune(model):
  """Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.
  note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch
  Args:
    model: keras model
  """
  for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
     layer.trainable = False
  for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
     layer.trainable = True
  model.compile(optimizer="rmsprop", loss=loss_mode, metrics=['accuracy'])
  #model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])


def train(args):
  """Use transfer learning and fine-tuning to train a network on a new dataset"""
  nb_train_samples = get_nb_files(args.train_dir)
  nb_classes = len(glob.glob(args.train_dir + "/*"))
  nb_val_samples = get_nb_files(args.val_dir)
  nb_epoch = int(args.nb_epoch)
  batch_size = int(args.batch_size)
  print("nb_classes:{}".format(nb_classes))

Data preparation:

  train_datagen =  ImageDataGenerator(
      preprocessing_function=preprocess_input,
      rotation_range=30,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True
  )
  test_datagen = ImageDataGenerator(
      preprocessing_function=preprocess_input,
      rotation_range=30,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True
  )

  train_generator = train_datagen.flow_from_directory(
    args.train_dir,
    target_size=(IM_WIDTH, IM_HEIGHT),
    batch_size=batch_size,
    #class_mode='binary'
    class_mode=args.class_mode
  )

  validation_generator = test_datagen.flow_from_directory(
    args.val_dir,
    target_size=(IM_WIDTH, IM_HEIGHT),
    batch_size=batch_size,
    #class_mode='binary'
    class_mode=args.class_mode
  )

Set up the model:

  base_model = InceptionV3(weights='imagenet', include_top=False) #include_top=False excludes final FC layer
  model = add_new_last_layer(base_model, nb_classes)

Transfer learning:

  setup_to_transfer_learn(model, base_model)

  #model.summary()

  history_tl = model.fit_generator(
    train_generator,
    epochs=nb_epoch,
    steps_per_epoch=nb_train_samples//batch_size,
    validation_data=validation_generator,
    validation_steps=nb_val_samples//batch_size)

Fine-tuning:

  setup_to_finetune(model)

  history_ft = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples//batch_size,
    epochs=nb_epoch,
    validation_data=validation_generator,
    validation_steps=nb_val_samples//batch_size)

  model.save(args.output_model_file)

  if args.plot:
    plot_training(history_ft)


def plot_training(history):
  acc = history.history['acc']
  val_acc = history.history['val_acc']
  loss = history.history['loss']
  val_loss = history.history['val_loss']
  epochs = range(len(acc))

  plt.plot(epochs, acc, 'r.')
  plt.plot(epochs, val_acc, 'r')
  plt.title('Training and validation accuracy')

  plt.figure()
  plt.plot(epochs, loss, 'r.')
  plt.plot(epochs, val_loss, 'r-')
  plt.title('Training and validation loss')
  plt.show()

Main function:

if __name__=="__main__":
  a = argparse.ArgumentParser()
  a.add_argument("--train_dir", default="train2")
  a.add_argument("--val_dir", default="test2")
  a.add_argument("--nb_epoch", default=NB_EPOCHS)
  a.add_argument("--batch_size", default=BAT_SIZE)
  a.add_argument("--output_model_file", default="inceptionv3-ft.model")
  a.add_argument("--plot", action="store_true")
  a.add_argument("--class_mode", default="binary")


  args = a.parse_args()
  if args.train_dir is None or args.val_dir is None:
    a.print_help()
    sys.exit(1)

  if args.class_mode != "binary" and args.class_mode != "categorical":
    print("set class_mode as 'binary' or 'categorical'")
    sys.exit(1)

  if args.class_mode == "categorical":
    loss_mode = "categorical_crossentropy"

  #set class_mode
  print("class_mode:{}, loss_mode:{}".format(args.class_mode, loss_mode))

  if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):
    print("directories do not exist")
    sys.exit(1)

  train(args)

2 Answers:

Answer 0 (score: 0)

I found that if I use the SGD or Adam optimizer, the accuracy improves normally. So is there a problem with the RMSprop optimizer at its default learning rate of 0.001?
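As a sketch, the recompile step with either optimizer looks like this (the learning-rate values are illustrative assumptions, not carefully tuned):

from keras.optimizers import SGD, Adam

# illustrative settings; adjust the learning rate for your own data
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
              loss=loss_mode, metrics=['accuracy'])
# or
model.compile(optimizer=Adam(lr=0.0001),
              loss=loss_mode, metrics=['accuracy'])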

Answer 1 (score: 0)

I have run into this problem on several tasks when the learning rate was too high. Try 0.0001 or even less.

According to the Keras documentation, the default rate is 0.001:

keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)

See https://keras.io/optimizers/#rmsprop
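So instead of passing the string 'rmsprop' (which uses the default rate of 0.001), a sketch of compiling with an explicit lower rate:

from keras.optimizers import RMSprop

# 0.0001 per the suggestion above; treat it as a starting point
model.compile(optimizer=RMSprop(lr=0.0001),
              loss='categorical_crossentropy', metrics=['accuracy'])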