I have already asked a similar question here, but now the problem is slightly different, hence this new question.

Instead of the train-then-fine-tune approach proposed in the answers to the referenced question, I decided to use a slightly different approach.

Update: I have replaced the old question that used to be here with a more suitable version.
Here is my sequence of actions:

1. Build the VGG16 model and load the pretrained ImageNet weights into it.
2. Pop the fully connected (top) layers and use the truncated model to generate bottleneck features for the training and validation samples.
3. Train a small top model on these bottleneck features.
4. Build VGG16 again without the top, add new top layers and load the weights trained in the previous step into them.
5. Freeze most of the convolutional layers and fine-tune the combined model on the dogs/cats data sample.

Here is the code I use to implement the sequence of actions described above:
from __future__ import print_function

import warnings
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', DeprecationWarning)

from itertools import izip_longest as zip_longest
from pprint import pformat as pf
from pprint import pprint as pp
import os

from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Dropout, Flatten, Dense, InputLayer, Lambda
from keras.models import Sequential, Model, load_model
from keras.utils.data_utils import get_file
from keras.optimizers import SGD
import keras.backend as K

import numpy as np

RANDOM_STATE = 1
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
BATCH_SIZE = 4
VGG_MEAN = np.array([123.68, 116.779, 103.939]).reshape((3, 1, 1))
VGG16_WEIGHTS_PATH = 'http://www.platform.ai/models/vgg16.h5'
DATA_ROOT = os.path.join(os.path.expanduser('~'), 'data', 'dogscats')
TRAIN_DIR = os.path.join(DATA_ROOT, 'train')
VALID_DIR = os.path.join(DATA_ROOT, 'valid')
SAMPLES_DIR = os.path.expanduser('~/dogscats_samples')

np.random.seed(RANDOM_STATE)
K.set_image_dim_ordering('th')
def get_batches(dirname, gen=ImageDataGenerator(), shuffle=True,
                batch_size=BATCH_SIZE, class_mode='categorical'):
    return gen.flow_from_directory(
        os.path.join(SAMPLES_DIR, dirname),
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        class_mode=class_mode,
        shuffle=shuffle,
        batch_size=batch_size)


def vgg_preprocess(x):
    # subtract the VGG channel means and reverse the channel order (RGB -> BGR)
    x = x - VGG_MEAN
    return x[:, ::-1]
def conv_block(model, n_layers, n_filters, name='block'):
    for i in range(n_layers):
        model.add(ZeroPadding2D((1, 1), name='%s_padding_%s' % (name, i)))
        model.add(Conv2D(n_filters, (3, 3), activation='relu', name='%s_conv2d_%s' % (name, i)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='%s_maxpool' % name))


def fc_block(model, name='block'):
    model.add(Dense(4096, activation='relu', name=name + '_dense'))
    model.add(Dropout(0.5))


def build_vgg_16():
    model = Sequential()
    input_shape = (3, IMAGE_WIDTH, IMAGE_HEIGHT)
    model.add(InputLayer(input_shape=input_shape))
    model.add(Lambda(vgg_preprocess))
    conv_block(model, n_layers=2, n_filters=64, name='block1')
    conv_block(model, n_layers=2, n_filters=128, name='block2')
    conv_block(model, n_layers=3, n_filters=256, name='block3')
    conv_block(model, n_layers=3, n_filters=512, name='block4')
    conv_block(model, n_layers=3, n_filters=512, name='block5')
    model.add(Flatten())
    fc_block(model)
    fc_block(model)
    model.add(Dense(1000, activation='softmax'))
    return model
def train_finetuned_model():
    file_path = get_file('vgg16.h5', VGG16_WEIGHTS_PATH, cache_subdir='models')

    print('Building VGG16 (no-top) model to generate bottleneck features')
    vgg16_notop = build_vgg_16()
    vgg16_notop.load_weights(file_path)
    # drop the two FC blocks and the 1000-way classifier (6 layers in total)
    for _ in range(6):
        vgg16_notop.pop()
    vgg16_notop.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    train_batches = get_batches('train', shuffle=False, class_mode=None)
    train_labels = np.array([0]*1000 + [1]*1000)
    bottleneck_train = vgg16_notop.predict_generator(train_batches, steps=2000 // BATCH_SIZE)

    valid_batches = get_batches('valid', shuffle=False, class_mode=None)
    valid_labels = np.array([0]*400 + [1]*400)
    bottleneck_valid = vgg16_notop.predict_generator(valid_batches, steps=800 // BATCH_SIZE)

    print('Training top model on bottleneck features')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=bottleneck_train.shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))
    top_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    top_model.fit(bottleneck_train, train_labels,
                  batch_size=32, epochs=50,
                  validation_data=(bottleneck_valid, valid_labels))

    print('Concatenate new VGG16 (without top layer) with pretrained top model')
    vgg16_fine = build_vgg_16()
    vgg16_fine.load_weights(file_path)
    for _ in range(6):
        vgg16_fine.pop()
    vgg16_fine.add(Flatten(name='top_flatten'))
    vgg16_fine.add(Dense(256, activation='relu', name='top_dense'))
    vgg16_fine.add(Dropout(0.5, name='top_dropout'))
    vgg16_fine.add(Dense(1, activation='sigmoid', name='top_sigmoid'))

    # copy the weights trained on bottleneck features into the new top layers
    for i, layer in enumerate(reversed(top_model.layers), 1):
        pretrained_weights = layer.get_weights()
        vgg16_fine.layers[-i].set_weights(pretrained_weights)

    # freeze the lower layers, leave the rest trainable
    for layer in vgg16_fine.layers[:26]:
        layer.trainable = False
    vgg16_fine.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    print('Train concatenated model on dogs/cats dataset sample')
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_batches = get_batches('train', gen=train_datagen, class_mode='binary')
    valid_batches = get_batches('valid', gen=test_datagen, class_mode='binary')
    vgg16_fine.fit_generator(train_batches,
                             steps_per_epoch=2000 // BATCH_SIZE,
                             epochs=50,
                             validation_data=valid_batches,
                             validation_steps=800 // BATCH_SIZE)
    return vgg16_fine


final_model = train_finetuned_model()
But the problem is that the model's accuracy is drastically low: after 50 epochs it is only around 50%, so I am probably doing something wrong. Could it be an issue with the parameters, i.e. the learning rate, batch size, etc.?
Answer 0 (score: 1)
Your fully connected layers look completely different from the original VGG architecture.
# yours
Flatten()
Dense(256, activation='relu')
Dense(1, activation='sigmoid')
# original
Flatten()
Dense(4096, activation='relu')
Dense(4096, activation='relu')
Dense(2, activation='softmax')
Two points:

1. The last layer should be a 2-unit softmax rather than a sigmoid. If you use a sigmoid, I guess your accuracy is not computed the way you expect.
2. The complexity (number of neurons and layers) seems too low.

A rough sketch of a head closer to the original is shown below.
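As a minimal sketch only (it is not part of the original answer and simply reuses bottleneck_train, bottleneck_valid, train_labels and valid_labels from the question's code), a top model that mirrors the original VGG head could look like this:

from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout
from keras.utils.np_utils import to_categorical

# Hypothetical top model with the original-style head: two 4096-unit
# ReLU layers with dropout and a 2-way softmax instead of the single
# 256-unit layer with a sigmoid output.
top_model = Sequential()
top_model.add(Flatten(input_shape=bottleneck_train.shape[1:]))
top_model.add(Dense(4096, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(4096, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(2, activation='softmax'))

# With a softmax head the integer labels need to be one-hot encoded.
top_model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
top_model.fit(bottleneck_train, to_categorical(train_labels),
              batch_size=32, epochs=50,
              validation_data=(bottleneck_valid, to_categorical(valid_labels)))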
Answer 1 (score: 0)
Well, I am not sure whether it is the right solution, but with this code I was able to raise the accuracy to at least 70% (probably the main reasons are the lower learning rate and more epochs):
# In addition to the imports from the question, this version also needs:
from keras.optimizers import RMSprop
from keras.utils.np_utils import to_categorical


def train_finetuned_model(lr=1e-5, verbose=True):
    file_path = get_file('vgg16.h5', VGG16_WEIGHTS_PATH, cache_subdir='models')

    if verbose:
        print('Building VGG16 (no-top) model to generate bottleneck features.')
    vgg16_notop = build_vgg_16()
    vgg16_notop.load_weights(file_path)
    for _ in range(6):
        vgg16_notop.pop()
    vgg16_notop.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])

    if verbose:
        print('Bottleneck features generation.')
    train_batches = get_batches('train', shuffle=False, class_mode=None, batch_size=BATCH_SIZE)
    train_labels = np.array([0]*1000 + [1]*1000)
    train_bottleneck = vgg16_notop.predict_generator(train_batches, steps=2000 // BATCH_SIZE)
    valid_batches = get_batches('valid', shuffle=False, class_mode=None, batch_size=BATCH_SIZE)
    valid_labels = np.array([0]*400 + [1]*400)
    valid_bottleneck = vgg16_notop.predict_generator(valid_batches, steps=800 // BATCH_SIZE)

    if verbose:
        print('Training top model on bottleneck features.')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=train_bottleneck.shape[1:]))
    top_model.add(Dense(4096, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(4096, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(2, activation='softmax'))
    top_model.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    top_model.fit(train_bottleneck, to_categorical(train_labels),
                  batch_size=32, epochs=10,
                  validation_data=(valid_bottleneck, to_categorical(valid_labels)))

    if verbose:
        print('Concatenate new VGG16 (without top layer) with pretrained top model.')
    vgg16_fine = build_vgg_16()
    vgg16_fine.load_weights(file_path)
    for _ in range(6):
        vgg16_fine.pop()
    vgg16_fine.add(Flatten(name='top_flatten'))
    vgg16_fine.add(Dense(4096, activation='relu'))
    vgg16_fine.add(Dropout(0.5))
    vgg16_fine.add(Dense(4096, activation='relu'))
    vgg16_fine.add(Dropout(0.5))
    vgg16_fine.add(Dense(2, activation='softmax'))
    vgg16_fine.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])

    if verbose:
        print('Loading pre-trained weights into concatenated model')
    # copy the weights trained on bottleneck features into the new top layers
    for i, layer in enumerate(reversed(top_model.layers), 1):
        pretrained_weights = layer.get_weights()
        vgg16_fine.layers[-i].set_weights(pretrained_weights)

    for layer in vgg16_fine.layers[:26]:
        layer.trainable = False

    if verbose:
        print('Layers training status:')
        for layer in vgg16_fine.layers:
            print('[%6s] %s' % ('' if layer.trainable else 'FROZEN', layer.name))

    # re-compile with an even lower learning rate for the fine-tuning stage
    vgg16_fine.compile(optimizer=RMSprop(lr=1e-6), loss='binary_crossentropy', metrics=['accuracy'])

    if verbose:
        print('Train concatenated model on dogs/cats dataset sample.')
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_batches = get_batches('train', gen=train_datagen, class_mode='categorical', batch_size=BATCH_SIZE)
    valid_batches = get_batches('valid', gen=test_datagen, class_mode='categorical', batch_size=BATCH_SIZE)
    vgg16_fine.fit_generator(train_batches, epochs=100,
                             steps_per_epoch=2000 // BATCH_SIZE,
                             validation_data=valid_batches,
                             validation_steps=800 // BATCH_SIZE)
    return vgg16_fine
I guess there is a way to achieve much better results with fine-tuning (up to 98%), but I wasn't able to achieve it with the code provided.
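That said, here is a purely hypothetical sketch of what a further fine-tuning stage could look like (I have not verified it on this dataset): after running the code above, unfreeze one more convolutional block and keep training with SGD at a very small learning rate. It reuses get_batches, BATCH_SIZE and train_finetuned_model from above; index 19 is where block4 starts in the model built by build_vgg_16, so adjust it if the architecture changes.

from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator

model = train_finetuned_model()

# Unfreeze block4 and everything above it (block5 and the new top layers);
# layers up to and including block3 stay frozen.
for layer in model.layers[:19]:
    layer.trainable = False
for layer in model.layers[19:]:
    layer.trainable = True

model.compile(optimizer=SGD(lr=1e-5, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

datagen = ImageDataGenerator(rescale=1./255)
train_batches = get_batches('train', gen=datagen, class_mode='categorical', batch_size=BATCH_SIZE)
valid_batches = get_batches('valid', gen=datagen, class_mode='categorical', batch_size=BATCH_SIZE)
model.fit_generator(train_batches, epochs=20,
                    steps_per_epoch=2000 // BATCH_SIZE,
                    validation_data=valid_batches,
                    validation_steps=800 // BATCH_SIZE)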