I'm new to computer vision and ML, and I'm trying to train an image recognition model on a large dataset (50,000 images, ~18 GB) on a laptop: an Acer Predator with 16 GB of RAM, a 7th-generation Core i7 CPU, and an Nvidia GTX 1060. Even with TensorFlow running on the GPU, I run out of memory.
If I train the model on only a few images (say 2,000 to 4,000), it works fine. It seems I'm training this model the wrong way, but I can't figure out the right approach. I think loading the images in small batches and training on them one batch at a time might help, but I don't know how to implement that.
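For example, I picture something like a keras.utils.Sequence subclass that keeps only file paths in memory and reads one batch of images from disk per step. This is only a sketch of the idea, not code from my project (the class name BatchSequence and its arguments are my own invention):

import math
import numpy as np
from keras.utils import Sequence
from keras.preprocessing.image import img_to_array, load_img

class BatchSequence(Sequence):
    """Yields one batch of images read from disk, instead of preloading all of them."""

    def __init__(self, image_paths, labels, batch_size, image_dims):
        self.image_paths = image_paths  # file paths only, not pixel data
        self.labels = labels            # binarized label matrix, one row per path
        self.batch_size = batch_size
        self.image_dims = image_dims    # (height, width, channels)

    def __len__(self):
        # number of batches per epoch
        return int(math.ceil(len(self.image_paths) / float(self.batch_size)))

    def __getitem__(self, idx):
        # load only this batch's images from disk
        start = idx * self.batch_size
        end = start + self.batch_size
        images = []
        for path in self.image_paths[start:end]:
            img = load_img(path, target_size=(self.image_dims[0], self.image_dims[1]))
            images.append(img_to_array(img) / 255.0)  # scale to [0, 1] per batch
        return np.array(images, dtype="float32"), np.array(self.labels[start:end])

Is that the right approach, and how would I plug it into my existing code?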
Below is the relevant snippet of my training code.
My train.py is as follows:
import matplotlib
matplotlib.use("Agg")
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
from pyimagesearch.smallervggnet import SmallerVGGNet
import matplotlib.pyplot as plt
from imutils import paths
import numpy as np
import random
import pickle
import os
import json
import tensorflow as tf
with tf.device('/device:CPU:0'):
    # quick sanity check that TF can place ops, with device placement logged
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)
session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
print(session.run(c))
with open('conf/conf.json') as f:
    config = json.load(f)
# config variables
dataset = config["dataset"]
model_path = config["model"]
labelbin = config["labelbin"]
plot = config["plot"]
test_image = config["test_image"]
print('dataset: %s' % dataset)
EPOCHS = 75
INIT_LR = 1e-3
BS = 32
IMAGE_DIMS = (96, 96, 3)
# grab the image paths and randomly shuffle them
print("[INFO] loading images...")
imagePaths = sorted(paths.list_images(dataset))
random.seed(42)
random.shuffle(imagePaths)
# initialize the data and labels
data = []
labels = []
for imagePath in imagePaths:
    img = image.load_img(imagePath, target_size=(IMAGE_DIMS[1], IMAGE_DIMS[0]))
    data.append(img_to_array(img))
    # the label set is encoded in the directory name, split on "_"
    label = imagePath.split(os.path.sep)[-2].split("_")
    labels.append(label)
print("[INFO] images loading done...")
# scale the raw pixel intensities to the range [0, 1]
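# NOTE: np.array(data, dtype="float") defaults to float64, so 50,000 images at
# 96x96x3 come to about 50000 * 96 * 96 * 3 * 8 bytes ≈ 11 GB, on top of the
# float32 list already held above (~5.5 GB) -- I suspect this is where the
# memory actually runs out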
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
print("[INFO] data matrix: {} images ({:.2f}MB)".format(
len(imagePaths), data.nbytes / (1024 * 1000.0)))
# binarize the labels using scikit-learn's special multi-label
# binarizer implementation
print("[INFO] class labels:")
mlb = MultiLabelBinarizer()
labels = mlb.fit_transform(labels)
# loop over each of the possible class labels and show them
for (i, label) in enumerate(mlb.classes_):
    print("{}. {}".format(i + 1, label))
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
    labels, test_size=0.2, random_state=42)
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
    height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    horizontal_flip=True, fill_mode="nearest")
# initialize the model using a sigmoid activation as the final layer
# in the network so we can perform multi-label classification
print("[INFO] compiling model...")
model = SmallerVGGNet.build(
    width=IMAGE_DIMS[1], height=IMAGE_DIMS[0],
    depth=IMAGE_DIMS[2], classes=len(mlb.classes_),
    finalAct="sigmoid")
# initialize the optimizer
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# compile the model using binary cross-entropy rather than
# categorical cross-entropy -- this may seem counterintuitive for
# multi-label classification, but keep in mind that the goal here
# is to treat each output label as an independent Bernoulli
# distribution
model.compile(loss="binary_crossentropy", optimizer=opt,
    metrics=["accuracy"])
# train the network
print("[INFO] training network...")
H = model.fit_generator(
    aug.flow(trainX, trainY, batch_size=BS),
    validation_data=(testX, testY), use_multiprocessing=True,
    steps_per_epoch=len(trainX) // BS,
    epochs=EPOCHS, verbose=1)
# save the model to disk
print("[INFO] serializing network...")
model.save(model_path)
# save the multi-label binarizer to disk
print("[INFO] serializing label binarizer...")
with open(labelbin, "wb") as f:
    f.write(pickle.dumps(mlb))
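If the Sequence idea above is the right direction, I guess the end of train.py would change roughly like this: split the file paths instead of the preloaded arrays, and pass the Sequence objects straight to fit_generator (again only a sketch; BatchSequence is the hypothetical class from the snippet above):

# build the label matrix from the paths alone, then split the *paths*
labels = [p.split(os.path.sep)[-2].split("_") for p in imagePaths]
labels = mlb.fit_transform(labels)
(trainPaths, testPaths, trainY, testY) = train_test_split(
    imagePaths, labels, test_size=0.2, random_state=42)

# each epoch now streams batches from disk instead of holding ~18 GB in RAM
train_seq = BatchSequence(trainPaths, trainY, BS, IMAGE_DIMS)
test_seq = BatchSequence(testPaths, testY, BS, IMAGE_DIMS)
H = model.fit_generator(train_seq, validation_data=test_seq,
    epochs=EPOCHS, verbose=1)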
My conf.json is:
{
    "dataset": "data-images",
    "model": "output/fashion.model",
    "labelbin": "output/mlb.pickle",
    "plot": "output/plot.png",
    "test_image": "examples/example_01.jpg"
}