I trained a binary classification model on roughly 8,000 images (about 4,000 per class), using transfer learning on ResNet50 with all layers frozen, and got these results: val_loss: 0.0340 - val_acc: 0.9890.
But when I test the model, the predictions look almost completely random...
I don't understand how that is possible. The model never saw the validation images during training either, so why is there such a large gap between the validation curves and the results in production?
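For reference, this is the sanity check I plan to run right after training (a minimal sketch; it reuses data_generator, image_size, BATCH_SIZE_VALIDATION and the trained model from the listing below, and assumes the test directory keeps the same one-subfolder-per-class layout as train/validation):
# Evaluate on the test set exactly the way validation was evaluated:
# same preprocess_input pipeline, labeled categorical batches.
eval_generator = data_generator.flow_from_directory(
    './datasets/Helmet_verification_v2/test',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_VALIDATION,
    class_mode='categorical',
    shuffle=False)
loss, acc = model.evaluate_generator(eval_generator, steps=len(eval_generator))
print('test loss: {:.4f}, test acc: {:.4f}'.format(loss, acc))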
model.summary():
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
resnet50 (Model)             (None, 2048)              23587712
_________________________________________________________________
dense (Dense)                (None, 2)                 4098
=================================================================
Total params: 23,591,810
Trainable params: 4,098
Non-trainable params: 23,587,712
_________________________________________________________________
(the Dense layer uses a softmax activation)
Here is the full code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import cv2
import os
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras import optimizers
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
NUM_CLASSES = 2
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
LOSS_METRICS = ['accuracy']
NUM_EPOCHS = 10
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
# Using 1 to easily manage mapping between test_generator & prediction for submission preparation
BATCH_SIZE_TESTING = 1
model = Sequential()
# ResNet50 with ImageNet weights as a global-average-pooled feature extractor
model.add(ResNet50(include_top=False, pooling=RESNET50_POOLING_AVERAGE, weights='imagenet'))
model.add(Dense(NUM_CLASSES, activation=DENSE_LAYER_ACTIVATION))
model.layers[0].trainable = False  # freeze the base; only the 4,098 Dense params train
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=OBJECTIVE_FUNCTION, metrics=LOSS_METRICS)
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = data_generator.flow_from_directory(
    './datasets/Helmet_verification_v2/train',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
    './datasets/Helmet_verification_v2/validation',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_VALIDATION,
    class_mode='categorical')
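flow_from_directory assigns class indices alphabetically by subfolder name, so to rule out a label mismatch between the splits, the mapping can be printed (the folder names in the comment are hypothetical, not confirmed output):
print(train_generator.class_indices)       # e.g. {'helmet': 0, 'no_helmet': 1}
print(validation_generator.class_indices)  # must be identical to the training mapping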
output_dir = "./models/working"
output_file = output_dir + "/best.hdf5"
if not os.path.exists(output_dir):
    print("create folder: {}".format(output_dir))
    os.makedirs(output_dir)
cb_early_stopper = EarlyStopping(monitor='val_loss', patience=EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath=output_file, monitor='val_loss', save_best_only=True, mode='auto')
fit_history = model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
    epochs=NUM_EPOCHS,
    validation_data=validation_generator,
    validation_steps=STEPS_PER_EPOCH_VALIDATION,
    callbacks=[cb_checkpointer, cb_early_stopper]
)
model.load_weights(output_file)
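Note that with STEPS_PER_EPOCH_TRAINING = 10 and BATCH_SIZE_TRAINING = 100, each epoch draws only 1,000 of the ~8,000 images. If the steps are instead meant to cover the whole set once per epoch, they can be derived from the generators (a sketch, using the samples attribute of the iterators above):
import math
# one full pass over each split per epoch
steps_train = math.ceil(train_generator.samples / BATCH_SIZE_TRAINING)
steps_val = math.ceil(validation_generator.samples / BATCH_SIZE_VALIDATION)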
Testing:
test_generator = data_generator.flow_from_directory(
    directory='./datasets/Helmet_verification_v2/test',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TESTING,
    class_mode=None,
    shuffle=False,
    seed=123
)
test_generator.reset()
pred = model.predict_generator(test_generator, steps = len(test_generator), verbose = 1)
predicted_class_indices = np.argmax(pred, axis = 1)
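This is how I turn the predicted indices into class names and into an overall test accuracy (a minimal sketch; it relies on the test directory having the same class subfolders, which flow_from_directory still scans for ground-truth indices even with class_mode=None):
# Invert class_indices to recover folder names from predicted indices.
labels = {v: k for k, v in test_generator.class_indices.items()}
predictions = [labels[i] for i in predicted_class_indices]
# shuffle=False keeps test_generator.classes aligned with the predictions.
test_acc = np.mean(predicted_class_indices == test_generator.classes)
print('test accuracy: {:.4f}'.format(test_acc))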