So I have a main folder which contains sub-folders, which in turn contain the images of the dataset, like this:
-main_db
--- CLASS_1
----- img_1
----- img_2
----- img_3
----- img_4
--- CLASS_2
----- img_1
----- img_2
----- img_3
----- img_4
--- CLASS_3
----- img_1
----- img_2
----- img_3
----- img_4
I need to split this dataset into two parts, i.e. training data (70%) and testing data (30%). Below is the hierarchy I want to achieve:
-main_db
--- training_data
----- CLASS_1
------- img_1
------- img_2
------- img_3
------- img_4
----- CLASS_2
------- img_1
------- img_2
------- img_3
------- img_4
--- testing_data
----- CLASS_1
------- img_5
------- img_6
------- img_7
------- img_8
----- CLASS_2
------- img_5
------- img_6
------- img_7
------- img_8
Any help is appreciated. Thanks.
I have already tried this module, but it doesn't work for me; it doesn't import at all.
https://github.com/jfilter/split-folders
It does exactly what I want.
Answer 0 (score: 1)
This should do it. It counts how many images each class folder contains, splits them accordingly, and saves the test data in a separate folder with the same structure.
Save the code in a main.py file and run:
python3 main.py --data_path=/path1 --test_data_path_to_save=/path2 --train_ratio=0.7
import shutil
import os
import numpy as np
import argparse

def get_files_from_folder(path):
    files = os.listdir(path)
    return np.asarray(files)

def main(path_to_data, path_to_test_data, train_ratio):
    # get the class directories
    _, dirs, _ = next(os.walk(path_to_data))

    # count the files per class, then derive how many test files each class gets
    data_counter_per_class = np.zeros((len(dirs)))
    for i in range(len(dirs)):
        path = os.path.join(path_to_data, dirs[i])
        files = get_files_from_folder(path)
        data_counter_per_class[i] = len(files)
    test_counter = np.round(data_counter_per_class * (1 - train_ratio))

    # transfer the files
    for i in range(len(dirs)):
        path_to_original = os.path.join(path_to_data, dirs[i])
        path_to_save = os.path.join(path_to_test_data, dirs[i])

        # create the destination dir
        if not os.path.exists(path_to_save):
            os.makedirs(path_to_save)
        files = get_files_from_folder(path_to_original)

        # move the test portion of the data
        for j in range(int(test_counter[i])):
            dst = os.path.join(path_to_save, files[j])
            src = os.path.join(path_to_original, files[j])
            shutil.move(src, dst)

def parse_args():
    parser = argparse.ArgumentParser(description="Dataset divider")
    parser.add_argument("--data_path", required=True,
                        help="Path to data")
    parser.add_argument("--test_data_path_to_save", required=True,
                        help="Path to test data where to save")
    parser.add_argument("--train_ratio", required=True,
                        help="Train ratio - 0.7 means splitting data in 70% train and 30% test")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    main(args.data_path, args.test_data_path_to_save, float(args.train_ratio))
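Note that the script above only moves the test files out; whatever remains under --data_path is effectively your training set. If you want an explicit training folder as in the question's target hierarchy, a minimal follow-up sketch could look like this (my addition, with a hypothetical path_to_train_data argument; it assumes the train folder lies outside path_to_data):

def move_remaining_to_train(path_to_data, path_to_train_data):
    # after the test files are gone, relocate each class's leftovers
    _, dirs, _ = next(os.walk(path_to_data))
    for d in dirs:
        src_dir = os.path.join(path_to_data, d)
        dst_dir = os.path.join(path_to_train_data, d)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        for f in os.listdir(src_dir):
            shutil.move(os.path.join(src_dir, f), os.path.join(dst_dir, f))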
Answer 1 (score: 1)
If you're not too keen on coding, there is a Python package called split-folders that does this. It's very easy to use and can be found here. This is how to use it.
pip install split_folders

import split_folders

input_folder = "input_path"
output = "output_path"  # where the split datasets are saved; created if it doesn't exist

# The ratio values are in train/val/test order; change them to whatever you want.
# For train/val sets only, you could use (.75, .25), for example.
split_folders.ratio(input_folder, output=output, seed=42, ratio=(.8, .1, .1))
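The question asks for a plain 70/30 train/test split; if I read the package's docs correctly, passing a 2-tuple does exactly that (treat this as an assumption about your installed version):

# hypothetical two-way 70/30 split (train/test only), per the question
split_folders.ratio(input_folder, output=output, seed=42, ratio=(.7, .3))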
However, I highly recommend working through the coded answers above, since they will help you learn.
Answer 2 (score: 1)
Credit to the "saravanansaminathan" comment on Kaggle at https://www.kaggle.com/questions-and-answers/102677. I had the same problem for a dataset with the following folder structure, and used the above link as a reference.
/TTSplit
  /0
    /001_01.jpg
    .......
  /1
    /001_04.jpg
    .......
import os
import numpy as np
import shutil

root_dir = '/home/dipak/Desktop/TTSplit/'
classes_dir = ['0', '1']
test_ratio = 0.20

for cls in classes_dir:
    os.makedirs(root_dir + 'train/' + cls)
    os.makedirs(root_dir + 'test/' + cls)

    # shuffle the class's files, then split them at the train/test boundary
    src = root_dir + cls
    allFileNames = os.listdir(src)
    np.random.shuffle(allFileNames)
    train_FileNames, test_FileNames = np.split(np.array(allFileNames),
                                               [int(len(allFileNames) * (1 - test_ratio))])

    train_FileNames = [src + '/' + name for name in train_FileNames.tolist()]
    test_FileNames = [src + '/' + name for name in test_FileNames.tolist()]

    print("*****************************")
    print('Total images: ', len(allFileNames))
    print('Training: ', len(train_FileNames))
    print('Testing: ', len(test_FileNames))
    print("*****************************")

    # copy each file into the train/test folder of its own class
    for name in train_FileNames:
        shutil.copy(name, root_dir + 'train/' + cls)
    for name in test_FileNames:
        shutil.copy(name, root_dir + 'test/' + cls)

print("Copying Done!")
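To adapt this to the question's layout, only the configuration variables should need to change; the values below are my assumptions based on the question, not part of the original answer:

# hypothetical settings for the question's main_db layout
root_dir = 'main_db/'
classes_dir = ['CLASS_1', 'CLASS_2', 'CLASS_3']
test_ratio = 0.30  # 70/30 train/test split as requested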
Answer 3 (score: 0)
If you check their documentation here, they have updated the syntax. I ran into a similar problem and found that the following new syntax works, as per the update there:
import splitfolders  # or import split_folders

# Split with a ratio.
# To only split into training and validation sets, pass a 2-tuple to
# `ratio`, i.e. `(.8, .2)`.
splitfolders.ratio("input_folder", output="output", seed=1337, ratio=(.8, .1, .1),
                   group_prefix=None)  # default values

# Split val/test with a fixed number of items, e.g. 100 for each set.
# To only split off a fixed validation set, pass a single number to
# `fixed`, i.e. `100`.
splitfolders.fixed("input_folder", output="output", seed=1337, fixed=(100, 100),
                   oversample=False, group_prefix=None)  # default values
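If I read the project README correctly, the package also ships a command-line entry point, so the same split can be run without writing any Python (verify the exact flags against your installed version):

splitfolders --output output --ratio .8 .1 .1 --seed 1337 -- input_folder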
Answer 4 (score: 0)
import os
from sklearn.model_selection import train_test_split

# image_directory is your dataset folder
data = os.listdir(image_directory)
train, valid = train_test_split(data, test_size=0.2, random_state=1)
Then you can use shutil to copy the images into the folders you want; a minimal sketch follows.
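A sketch of that copy step, assuming `data` held the file names of a single class folder (the output folder names here are my choice, not from the answer), run once per class directory:

import os
import shutil

os.makedirs('training_data', exist_ok=True)
os.makedirs('testing_data', exist_ok=True)
for name in train:
    shutil.copy(os.path.join(image_directory, name), 'training_data')
for name in valid:
    shutil.copy(os.path.join(image_directory, name), 'testing_data')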
Answer 5 (score: 0)
How about this?
from pathlib import Path
from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np
import shutil

def image_train_test_split(path, fmt, train_size):
    train_folder = Path('train')
    test_folder = Path('test')
    train_folder.mkdir(exist_ok=True)
    test_folder.mkdir(exist_ok=True)

    # collect (file path, class label) pairs; the class is the parent folder name
    data_path = Path(path)
    data = []
    for d in data_path.glob('*'):
        for f in d.glob(f'*.{fmt}'):
            data.append([f, d.stem])
    data = np.array(data)

    # a stratified split keeps the class proportions equal in both sets
    ss = StratifiedShuffleSplit(1, train_size=train_size)
    train_ix, test_ix = next(ss.split(data[:, 0], data[:, 1]))
    train_set, test_set = data[train_ix], data[test_ix]

    # move each file into train/<class> or test/<class>
    for p, c in train_set:
        (train_folder / c).mkdir(exist_ok=True)
        shutil.move(p, train_folder.joinpath(*p.parts[-2:]))
    for p, c in test_set:
        (test_folder / c).mkdir(exist_ok=True)
        shutil.move(p, test_folder.joinpath(*p.parts[-2:]))
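A hypothetical call for the question's layout (folder name and image extension assumed from the question):

# splits main_db's class folders 70/30 into ./train and ./test
image_train_test_split('main_db', 'jpg', 0.7)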