I have been working for a few days on building my own face recognition model in Python with Keras. I'll walk you through my setup and give you my code; I am sure there are things I'm not doing well, but I don't know what they are. My dataset consists of 97 people, with an average of 10 photos per person and 1106 photos in total. The dataset lives in a "faces" folder, and each person's pictures are in a subfolder named 0 through 96. Below is the code I use to preprocess the data, split it into training, validation and test sets, and build my CNN with Keras. Any help or suggestions are greatly appreciated, thanks!
import os
import cv2
import numpy as np
from imutils import paths
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, BatchNormalization, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import RMSprop
from keras import regularizers

# Image and dataset parameters
channels = 3
rows = 50
cols = 50
classes = 97

# Load every image, resize it to 50x50, flatten it, and record its label (the parent folder name)
data = []
labels = []
images = sorted(list(paths.list_images("faces")))
for image in images:
    img = cv2.imread(image)
    img = cv2.resize(img, (rows, cols)).flatten()
    data.append(img)
    label = image.split(os.path.sep)[-2]
    labels.append(label)
# Scale pixel values to [0, 1] and convert the folder names into integer labels
data = np.array(data, dtype="float32") / 255.0
labels = [int(i) for i in labels]
labels = np.array(labels)

# 70/30 split into train+validation and test, then 80/20 split of the remainder into train and validation
xtrain, testX, ytrain, testY = train_test_split(data, labels, test_size=0.3, random_state=13)
trainX, validX, trainY, validY = train_test_split(xtrain, ytrain, test_size=0.2, random_state=14)
# One-hot encode the integer labels into 97-dimensional vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
validY = lb.transform(validY)
testY = lb.transform(testY)
trainX = trainX.astype("float32")
validX = validX.astype("float32")
testX = testX.astype("float32")
trainY = trainY.astype("float32")
validY = validY.astype("float32")
testY = testY.astype("float32")
# Reshape the flattened images back to (rows, cols, channels) for the convolutional layers
trainX = trainX.reshape([trainX.shape[0], cols, rows, channels])
validX = validX.reshape([validX.shape[0], cols, rows, channels])
testX = testX.reshape([testX.shape[0], cols, rows, channels])
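In case it helps, here is a quick sanity check I can run right after the reshape (not part of the pipeline itself) to confirm that the shapes line up:

# Optional sanity check: images should be (n, 50, 50, 3) and labels (n, 97) after one-hot encoding
print(trainX.shape, validX.shape, testX.shape)
print(trainY.shape, validY.shape, testY.shape)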
# CNN: three conv blocks (32 -> 64 -> 128 filters) with batch norm, ELU and dropout, then softmax over 97 classes
weight_decay = 1e-4
model = Sequential()
model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=trainX.shape[1:]))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(classes))
model.add(Activation('softmax'))
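For reference, I can call model.summary() right after building the model to print the per-layer output shapes and parameter counts, which makes it easier to sanity-check the architecture:

# Optional: print the layer-by-layer architecture and parameter counts
model.summary()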
# Data augmentation: random horizontal flips, rotations and shifts
datagen = ImageDataGenerator(horizontal_flip=True,
                             featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False,
                             rotation_range=20,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             vertical_flip=False)
datagen.fit(trainX)
optim = RMSprop(lr=0.001, decay=1e-6)
#sgd = SGD(lr=0.01, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy'])
model.fit_generator(datagen.flow(trainX, trainY, batch_size=64),
                    steps_per_epoch=trainX.shape[0] // 64,  # steps per epoch = number of batches, not number of samples
                    epochs=50,
                    validation_data=(validX, validY),
                    verbose=1)
score = model.evaluate(testX, testY, batch_size = 64, verbose = 1)
print("Test score: ", score[0])
print("Test accuracy: ", score[1])
# Save the architecture as JSON and the trained weights as HDF5
model_json = model.to_json()
with open('face_architecture.json', 'w') as f:
    f.write(model_json)
model.save_weights('face_weights.h5', overwrite=True)
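For completeness, this is a minimal sketch of how I load the saved architecture and weights back for a single prediction (the image path is just an example; the preprocessing mirrors the training pipeline above):

from keras.models import model_from_json

# Rebuild the model from the saved JSON architecture and load the trained weights
with open('face_architecture.json') as f:
    loaded = model_from_json(f.read())
loaded.load_weights('face_weights.h5')

# Preprocess one image the same way as the training data and predict the person id
img = cv2.imread('some_face.jpg')  # example path
img = cv2.resize(img, (rows, cols)).astype("float32") / 255.0
pred = loaded.predict(img.reshape(1, rows, cols, channels))
print("Predicted person:", np.argmax(pred))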