I am trying to implement a custom YOLO object detector. In Colab I downloaded 1000 images as training data and 100 images as validation data. The code runs fine, except that the Colab notebook takes almost a whole day to finish a single epoch. Below is the sample code. Also, how can I use a custom Human hand dataset from Google Open Images (the way Kaggle lets you use datasets) without downloading it on Colab?
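For reference, this is the folder layout I am trying to end up with. As far as I understand from the ImageAI custom detection docs, DetectionModelTrainer expects the data_directory to look like this (the paths below are just the ones used in my notebook):

# Layout expected by ImageAI's DetectionModelTrainer (data_directory = ".../Human hand"):
#   Human hand/
#     train/
#       images/        -> training .jpg files
#       annotations/   -> matching Pascal VOC .xml files
#     validation/
#       images/
#       annotations/
import os
base = '/content/OID/Dataset/train/Human hand'
for split in ('train', 'validation'):
    for sub in ('images', 'annotations'):
        os.makedirs(os.path.join(base, split, sub), exist_ok=True)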
# -*- coding: utf-8 -*-
"""Hnad_detector.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1083eIr_O3Dge1pnMgqliZz9zzMqRtaZC
"""
!git clone https://github.com/EscVM/OIDv4_ToolKit.git
!pip install awscli
!pip install urllib3
!pip3 install -r '/content/OIDv4_ToolKit/requirements.txt'
!python '/content/OIDv4_ToolKit/main.py' downloader --classes '/content/OIDv4_ToolKit/classes.txt' --type_csv train --limit 1000
import os
dir = '/content/OID/Dataset/train/'
for folder in os.listdir(dir):
    print('Number of files in ' + folder + ' =', len(os.listdir(dir + folder)))
import os
dir = '/content/OID/Dataset/train/'
for folder in os.listdir(dir):
    for file in os.listdir(dir + folder + '/Label')[:5]:
        print(dir + folder + '/Label' + '/' + file)
!git clone https://github.com/thehetpandya/OIDv4_annotation_tool.git
!pip install -r OIDv4_annotation_tool/requirements.txt
from subprocess import check_output
source = '/content/OID/Dataset/train'
for folder in os.listdir(source):
    target = f'{source}/{folder}'
    output = check_output(["python", '/content/OIDv4_annotation_tool/OIDv4_to_VOC.py', "--sourcepath", f"{source}/{folder}", "--dest_path", f"{target}"])
import shutil
for folder in os.listdir('/content/OID/Dataset/train'):
    print(folder, len(os.listdir('/content/OID/Dataset/train/' + folder)))
for folder in os.listdir(dir):
    shutil.rmtree(f'{dir}{folder}/Label')
#import shutil
#shutil.rmtree('/content/OID/Dataset/train/Apple')
#shutil.rmtree('/content/OID/Dataset/train/Light switch')
#shutil.rmtree('/content/OID/Dataset/train/Orange')
import os
print(len(os.listdir('/content/OID/Dataset/train/Human hand')))
!pip install annotated-images
import os
images = []
xml = []
# the target folders must exist first, otherwise shutil.move just renames the first file
os.makedirs('/content/OID/Dataset/train/Human hand/images', exist_ok=True)
os.makedirs('/content/OID/Dataset/train/Human hand/annotations', exist_ok=True)
for file in os.listdir('/content/OID/Dataset/train/Human hand/'):
    if file.endswith(".jpg"):
        imagepath = '/content/OID/Dataset/train/Human hand/{}'.format(file)
        shutil.move(imagepath, '/content/OID/Dataset/train/Human hand/images')
    if file.endswith(".xml"):
        imagepath = '/content/OID/Dataset/train/Human hand/{}'.format(file)
        shutil.move(imagepath, '/content/OID/Dataset/train/Human hand/annotations')
for images in os.listdir('/content/OID/Dataset/train/Human hand/images'):
    print(images[0:17])
os.makedirs('/content/OID/Dataset/train/Human hand/train', exist_ok=True)  # create train/ first so both folders move *into* it
shutil.move('/content/OID/Dataset/train/Human hand/images', '/content/OID/Dataset/train/Human hand/train')
shutil.move('/content/OID/Dataset/train/Human hand/annotations', '/content/OID/Dataset/train/Human hand/train')
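As a quick sanity check (not strictly necessary), I compare the image and annotation file stems so the trainer does not hit images without a matching .xml:

import os
train_dir = '/content/OID/Dataset/train/Human hand/train'
imgs = {os.path.splitext(f)[0] for f in os.listdir(train_dir + '/images')}
anns = {os.path.splitext(f)[0] for f in os.listdir(train_dir + '/annotations')}
print('images without annotations:', len(imgs - anns))
print('annotations without images:', len(anns - imgs))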
!python '/content/OIDv4_ToolKit/main.py' downloader --classes '/content/OIDv4_ToolKit/classes.txt' --type_csv train --limit 300
from subprocess import check_output
source = '/content/OID/Dataset/train'
for folder in os.listdir(source):
    target = f'{source}/{folder}'
    output = check_output(["python", '/content/OIDv4_annotation_tool/OIDv4_to_VOC.py', "--sourcepath", f"{source}/{folder}", "--dest_path", f"{target}"])
import shutil
for folder in os.listdir('/content/OID/Dataset/train'):
    print(folder, len(os.listdir('/content/OID/Dataset/train/' + folder)))
for folder in os.listdir(dir):
    shutil.rmtree(f'{dir}{folder}/Label')
import os
images = []
xml = []
# the validation sub-folders need to exist before moving files into them
os.makedirs('/content/OID/Dataset/train/Human hand/validation/images', exist_ok=True)
os.makedirs('/content/OID/Dataset/train/Human hand/validation/annotations', exist_ok=True)
for file in os.listdir('/content/OID/Dataset/train/Human hand/'):
    if file.endswith(".jpg"):
        imagepath = '/content/OID/Dataset/train/Human hand/{}'.format(file)
        shutil.move(imagepath, '/content/OID/Dataset/train/Human hand/validation/images')
    if file.endswith(".xml"):
        imagepath = '/content/OID/Dataset/train/Human hand/{}'.format(file)
        shutil.move(imagepath, '/content/OID/Dataset/train/Human hand/validation/annotations')
!wget 'https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/pretrained-yolov3.h5'
#!pip install keras==2.2.4
#!pip install tensorflow==1.13.1
!pip install imageai==2.1.5
#!wget 'https://github.com/OlafenwaMoses/ImageAI/releases/download/1.0/resnet50_coco_best_v2.0.1.h5'
import tensorflow as tf
import imageai as imageai
from imageai.Detection.Custom import DetectionModelTrainer
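Since one epoch taking nearly a day usually points to a CPU-only runtime, I first check that Colab actually sees a GPU (Runtime -> Change runtime type -> GPU):

import tensorflow as tf
# an empty string means no GPU is visible to TensorFlow
print('GPU device:', tf.test.gpu_device_name() or 'none')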
trainer = DetectionModelTrainer()
trainer.setModelTypeAsYOLOv3()
trainer.setDataDirectory(data_directory="/content/OID/Dataset/train/Human hand/")
trainer.setTrainConfig(object_names_array=["Human hand"], batch_size=8, num_experiments=100, train_from_pretrained_model="/content/pretrained-yolov3.h5")
trainer.trainModel()
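If I understand the ImageAI docs correctly, the trainer saves checkpoint models and a detection_config.json inside the data directory, so something like this should list them once an epoch or two has finished (the "models" and "json" folder names are my assumption from the docs):

import os
data_dir = '/content/OID/Dataset/train/Human hand'
print(os.listdir(os.path.join(data_dir, 'models')))  # checkpoint .h5 files
print(os.listdir(os.path.join(data_dir, 'json')))    # detection_config.json for later inference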