I have been searching for similar solutions, but none of them fits the problem I am facing. I am trying to apply transfer learning with the following code:
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt
#from keras import backend as K
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
IM_WIDTH, IM_HEIGHT = 299, 299 #fixed size for InceptionV3
NB_EPOCHS = 3.0
BAT_SIZE = 32.0
FC_SIZE = 1024
NB_IV3_LAYERS_TO_FREEZE = 172
def get_nb_files(directory):
    """Get number of files by searching directory recursively"""
    if not os.path.exists(directory):
        return 0
    cnt = 0
    for r, dirs, files in os.walk(directory):
        for dr in dirs:
            cnt += len(glob.glob(os.path.join(r, dr + "/*")))
    return cnt
def setup_to_transfer_learn(model, base_model):
    """Freeze all layers and compile the model"""
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
def add_new_last_layer(base_model, nb_classes):
    """Add last layer to the convnet
    Args:
        base_model: keras model excluding top
        nb_classes: # of classes
    Returns:
        new keras model with last layer
    """
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(FC_SIZE, activation='relu')(x)  # new FC layer, random init
    predictions = Dense(nb_classes, activation='softmax')(x)  # new softmax layer
    model = Model(inputs=base_model.input, outputs=predictions)
    return model
def setup_to_finetune(model):
    """Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.
    note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch
    Args:
        model: keras model
    """
    for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
        layer.trainable = False
    for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
def train(args):
    """Use transfer learning and fine-tuning to train a network on a new dataset"""
    nb_train_samples = get_nb_files(args.train_dir)
    nb_classes = len(glob.glob(args.train_dir + "/*"))
    nb_val_samples = get_nb_files(args.val_dir)
    epochs = args.nb_epoch
    batch_size = args.batch_size
    #batch_size = K.cast_to_floatx(batch_size)
    print('nb_train_samples', type(nb_train_samples))
    print('nb_train_samples', type(batch_size))
    steps_per_epoch = nb_train_samples / batch_size
    validation_steps = nb_val_samples / batch_size
    # data prep
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True
    )
    test_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True
    )
    train_generator = train_datagen.flow_from_directory(
        args.train_dir,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=batch_size,
    )
    validation_generator = test_datagen.flow_from_directory(
        args.val_dir,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=batch_size,
    )
    # setup model
    base_model = InceptionV3(weights='imagenet', include_top=False)  # include_top=False excludes final FC layer
    model = add_new_last_layer(base_model, nb_classes)
    # transfer learning
    setup_to_transfer_learn(model, base_model)
    history_tl = model.fit_generator(
        train_generator,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        validation_data=validation_generator,
        validation_steps=validation_steps,
        class_weight=None)
    # fine-tuning
    setup_to_finetune(model)
    history_ft = model.fit_generator(
        train_generator,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        validation_data=validation_generator,
        validation_steps=validation_steps,
        class_weight=None)
    model.save(args.output_model_file)
    if args.plot:
        plot_training(history_ft)
def plot_training(history):
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    plt.plot(epochs, acc, 'r.')
    plt.plot(epochs, val_acc, 'r')
    plt.title('Training and validation accuracy')
    plt.figure()
    plt.plot(epochs, loss, 'r.')
    plt.plot(epochs, val_loss, 'r-')
    plt.title('Training and validation loss')
    plt.show()
if __name__ == "__main__":
    a = argparse.ArgumentParser()
    a.add_argument("--train_dir")
    a.add_argument("--val_dir")
    a.add_argument("--nb_epoch", default=NB_EPOCHS)
    a.add_argument("--batch_size", default=BAT_SIZE)
    a.add_argument("--output_model_file", default="inceptionv3-ft.model")
    a.add_argument("--plot", action="store_true")
    args = a.parse_args()
    if args.train_dir is None or args.val_dir is None:
        a.print_help()
        sys.exit(1)
    if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):
        print("directories do not exist")
        sys.exit(1)
    train(args)
After a series of conversions from an earlier Keras version (2.1.4), I updated the argument names in fit_generator accordingly. I usually run the code from an Ubuntu terminal as follows:
python fine-tune.py --train_dir train_dir --val_dir val_dir --nb_epoch NB_EPOCH --batch_size BATCH_SIZE --output_model_file inceptionv3-ft.model --plot
and I get the following error:
/home/raed/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
nb_train_samples <class 'int'>
nb_train_samples <class 'str'>
Traceback (most recent call last):
File "fine-tune.py", line 183, in <module>
train(args)
File "fine-tune.py", line 80, in train
steps_per_epoch = nb_train_samples / batch_size
TypeError: unsupported operand type(s) for /: 'int' and 'str'
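From what I can tell, argparse returns every command-line value as a plain string unless a type= converter is supplied, which would explain why batch_size arrives as <class 'str'>. A minimal standalone snippet (separate from my script, just to illustrate my understanding):

import argparse

p = argparse.ArgumentParser()
p.add_argument("--batch_size", default=32.0)         # no type=, so CLI values stay strings
args = p.parse_args(["--batch_size", "BATCH_SIZE"])  # the literal text I typed on the command line
print(type(args.batch_size))                         # <class 'str'>
#100 / args.batch_size  # would raise the same TypeError: 'int' and 'str'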
I tried converting batch_size with int(), and then tried casting it with Keras's TensorFlow backend (K.cast_to_floatx), and got the following error instead:
python fine-tune.py --train_dir train_dir --val_dir val_dir --nb_epoch NB_EPOCH --batch_size BATCH_SIZE --output_model_file inceptionv3-ft.model --plot
/home/raed/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
Traceback (most recent call last):
File "fine-tune.py", line 183, in <module>
train(args)
File "fine-tune.py", line 76, in train
batch_size = K.cast_to_floatx(batch_size)
File "/home/raed/anaconda3/lib/python3.6/site-packages/keras/backend/common.py", line 110, in cast_to_floatx
return np.asarray(x, dtype=_FLOATX)
File "/home/raed/anaconda3/lib/python3.6/site-packages/numpy/core/numeric.py", line 492, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: could not convert string to float: 'BATCH_SIZE'
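For what it is worth, here is a minimal, unverified sketch of the fix I am now considering: give the numeric options a type=int converter, pass real numbers on the command line instead of the placeholder names NB_EPOCH and BATCH_SIZE, and use integer division for the step counts (the 2000 below is just a stand-in for nb_train_samples):

import argparse

a = argparse.ArgumentParser()
a.add_argument("--nb_epoch", type=int, default=3)     # type=int converts "3" -> 3
a.add_argument("--batch_size", type=int, default=32)  # type=int converts "32" -> 32
args = a.parse_args(["--nb_epoch", "3", "--batch_size", "32"])

steps_per_epoch = 2000 // args.batch_size  # integer division keeps the step count an int
print(args.nb_epoch, args.batch_size, steps_per_epoch)  # -> 3 32 62

so the invocation would become, for example:

python fine-tune.py --train_dir train_dir --val_dir val_dir --nb_epoch 3 --batch_size 32 --output_model_file inceptionv3-ft.model --plot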
Am I on the right track with this fix? Can anyone help me resolve this issue? Thanks!