读取庞大的图像数据以进行分类训练

时间:2019-02-11 17:13:42

标签: python machine-learning label training-data

我是python和机器学习的新手。我有一个庞大的汽车图像数据集,其中包含27000多个图像和标签。我试图创建一个数据集,以便可以在我的训练分类器中使用它,但是处理如此大量的数据当然会给内存带来极大的压力,这就是我卡住的地方。起初,我试图做这样的事情。

import os
import matplotlib.pyplot as plt
import matplotlib.image as mpg
import cv2
import gc
import numpy as np
from sklearn.preprocessing import normalize
import gc
import resource
import h5py

bbox = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/labels"
imagepath = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/image"


def _index_images(image_dir):
    """Map image basename (without extension) -> absolute .jpg path.

    Built once so label matching is O(labels + images) instead of the
    original nested os.walk scan, which was O(labels * images).
    """
    index = {}
    for img_root, _, img_files in os.walk(image_dir):
        img_dir_abs = os.path.abspath(img_root)
        for img_name in img_files:
            stem, ext = os.path.splitext(img_name)
            if ext == ".jpg":
                index[stem] = os.path.join(img_dir_abs, img_name)
    return index


def _read_bbox(label_path):
    """Return (x1, y1, x2, y2) from line 3 of a label file, or None.

    The dataset stores the box as four space-separated ints on the third
    line; every other line is ignored.  Uses `with` so the file handle is
    always closed (the original left it open on early exceptions).
    """
    with open(label_path, "r") as fh:
        for lineno, line in enumerate(fh, start=1):
            if lineno == 3:
                parts = line.split()  # parse once, not 4x rsplit calls
                if len(parts) < 4:
                    return None
                return tuple(int(v) for v in parts[:4])
    return None


def build_dataset(label_dir, image_dir, size=(300, 300), label_index=8):
    """Crop every labelled image to its bounding box.

    Returns (training_data, training_labels) as parallel lists.
    `label_index` is the '/'-separated path component holding the class
    id (presumably the class sub-directory — TODO confirm against the
    real directory layout).
    """
    training_data = []
    training_labels = []
    images = _index_images(image_dir)
    count = 0
    for label_root, _, label_files in os.walk(label_dir):
        # The original called os.path.abspath(r) here; `r` was undefined
        # (NameError) — it meant the walk root.
        label_dir_abs = os.path.abspath(label_root)
        for label_name in label_files:
            stem, ext = os.path.splitext(label_name)
            if ext != ".txt" or stem not in images:
                continue
            label_path = os.path.join(label_dir_abs, label_name)
            image_path = images[stem]
            box = _read_bbox(label_path)
            if box is None:
                continue
            x1, y1, x2, y2 = box
            try:
                img = mpg.imread(image_path).astype('float32')
                crop = img[y1:y2, x1:x2, :]
                resized = cv2.resize(crop, size)  # was cv2.cv2.resize
                resized /= 255.0
                # Append label and pixels together *after* every step
                # succeeded, so the two lists can never get out of sync.
                # (The original appended the label twice — once before
                # and once after the image — silently corrupting the
                # label list.)
                training_labels.append(
                    int(image_path.split('/')[label_index]))
                training_data.append(resized)
            except Exception as e:
                print("Error", str(e), label_path)
            count += 1
            print(count)
    return training_data, training_labels


if __name__ == "__main__":
    data, labels = build_dataset(bbox, imagepath)
    print("len Of Training_data", len(data))
    print("len Of Training Labels", len(labels))
    np.save('/run/media/fdai5182/LAMA MADAN/Training_Data_4000Samples', data)
    np.save('/run/media/fdai5182/LAMA MADAN/Training_Labels_4000Samples', labels)
    print("DONE")

但是即使在32gb RAM上读取图像后,它总是给我带来巨大的内存错误。

因此,为此,我想执行一些其他步骤,这些步骤可能会有用,占用更少的内存并使此工作正常。 我要执行的步骤如下。

  1. 分配一个形状为 (N,150,150,3) 或 (N,300,300,3)、dtype 为 float32 的 np 数组 X(在创建时直接指定 dtype,而不是事后用 astype 转换)
  2. 遍历图像,并用150,150,3个图像像素填充数组X的每一行
  3. 就地归一化:X / = 255
  4. 写入文件(.npy格式)

我到目前为止所做的是

import os  # was missing: os.walk / os.path are used below
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpg  # was mis-spelled 'matplotlib.iamge'
import numpy as np

bbox = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/labels"
imagepath = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/image"

# Walk the label tree and, for each label file, find the image with the
# same basename.
for root, _, files in os.walk(bbox):
    cdp = os.path.abspath(root)
    for rootImage, _, fileImage in os.walk(imagepath):
        cdpimg = os.path.abspath(rootImage)
        for f in files:
            name, ext = os.path.splitext(f)
            for fI in fileImage:
                n, e = os.path.splitext(fI)
                if name == n and ext == ".txt" and e == ".jpg":
                    # Allocate the buffer directly as float32.  The
                    # original np.zeros(...).view('float32') does NOT
                    # convert the dtype: it reinterprets the float64
                    # bytes, producing shape (150, 150, 6) of garbage.
                    nparrayX = np.zeros((150, 150, 3), dtype='float32')
                    cip = os.path.join(cdp, f)
                    cipImg = os.path.join(cdpimg, fI)
                    # Read the *image* path; the original passed the
                    # label .txt path ('cip') to imread.
                    read_image = mpg.imread(cipImg)
                    resize_image = cv2.resize(read_image, (150, 150))

我走的路对吗?另外,如何用 150×150×3 的图像像素填充数组 X 的每一行?我不想再使用列表,因为它们占用更多的内存和时间。请帮助我。

此外,作为新成员,如果问题不遵守StackOverflow的规则和规定,请告诉我,我将对其进行更多编辑。

谢谢

2 个答案:

答案 0 :(得分:0)

tensorflow / keras和pytorch都提供了数据集/生成器类,您可以使用它们来构造内存有效的数据加载器。

对于tensorflow / keras,有一个Stanford's Shervine Amidi创建的优秀教程。

对于pytorch,您可以在project's man page上找到一个不错的教程。

我强烈建议您在实现中使用这些框架,因为它们可以避免编写样板代码并使训练可扩展。

答案 1 :(得分:0)

谢谢您的帮助。但是我想手动进行以检查如何在不使用其他生成器的情况下做到这一点。下面是我的代码。

import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpg
import numpy as np
import os

# First pass: count the .jpg files so the big pixel array can be
# allocated once, up front, instead of growing a Python list.
N = 0
training_labels = []

bbox = "D:/Morethan4000samples/data/labels"
imagepath = "D:/Morethan4000samples/data/image/"
for root, _, files in os.walk(imagepath):
    cdp = os.path.abspath(root)
    for f in files:
        if os.path.splitext(f)[1] == ".jpg":
            N += 1

print(N)

# One pre-sized float32 buffer: N rows of 227x227 RGB pixels.
imageX = np.zeros((N, 227, 227, 3), dtype='float32')

i = 0  # next row of imageX to fill

for root, _, files in os.walk(imagepath):
    cdp = os.path.abspath(root)
    print(cdp)
    for f in files:
        name, ext = os.path.splitext(f)
        if ext != ".jpg":
            continue
        cip = os.path.join(cdp, f)
        read = mpg.imread(cip)
        # The matching label file lives under .../labels with the same
        # basename and a .txt extension.
        cipLabel = cip.replace('image', 'labels').replace('.jpg', '.txt')
        box = None
        with open(cipLabel, 'r') as boxes:  # was left unclosed
            for ct, q in enumerate(boxes, start=1):
                if ct == 3:
                    parts = q.split()  # parse once, not 4x rsplit calls
                    box = (int(parts[0]), int(parts[1]),
                           int(parts[2]), int(parts[3]))
        if box is None:
            # No bounding box on line 3: skip this image rather than
            # re-using the previous iteration's stale `resize` value
            # (the original's broken indentation made that possible).
            continue
        x1, y1, x2, y2 = box
        crop = read[y1:y2, x1:x2]
        resize = cv2.resize(crop, (227, 227))        # was cv2.cv2.resize
        resize = cv2.GaussianBlur(resize, (5, 5), 0)  # was cv2.cv2.Gaussi…
        imageX[i] = resize
        # NOTE(review): the class id is taken from path component 8;
        # with the 'D:/...' paths above that index looks too deep —
        # confirm against the real directory layout.
        training_labels.append(int(cip.split('/')[8]))
        print(len(training_labels), i + 1)  # original printed len(imageX)==N
        i += 1
        print(i)

# Drop trailing all-zero rows left by skipped images, then normalize
# in place.
imageX = imageX[:i]
imageX /= 255.0
plt.imshow(imageX[10])
plt.show()

print(imageX.shape)
print(len(training_labels))

np.save("/run/media/fdai5182/LAMA MADAN/Morethan4000samples/227227/training_images", imageX)
np.save("/run/media/fdai5182/LAMA MADAN/Morethan4000samples/227227/trainin_labels", training_labels)

将每个图像保存在相同尺寸的矩阵行中是最有效的方法。