I wrote a custom dataset class in PyTorch, but it fails when iterating over all the batches in an epoch. For example, suppose I have 100 data examples and my batch size is 9: it fails on the last iteration because the batch size is different there, with the final batch containing 1 example instead of 9. My custom dataset class is below, along with how I pull data out of the loader inside a for loop.
import os

import librosa
import torch
import torch.utils.data as tdata


class FlatDirectoryAudioDataset(tdata.Dataset):  # customized dataset
    def __init__(self, data_dir, transform=None):
        self.data_dir = data_dir
        self.transform = transform
        self.files = self.__setup_files()

    def __len__(self):
        """
        compute the length of the dataset
        :return: len => length of dataset
        """
        return len(self.files)

    def __setup_files(self):
        # collect all .wav / .mp3 files in the data directory
        file_names = os.listdir(self.data_dir)
        files = []  # initialize to empty list
        for file_name in file_names:
            possible_file = os.path.join(self.data_dir, file_name)
            if os.path.isfile(possible_file) and file_name.lower().endswith(('.wav', '.mp3')):
                files.append(possible_file)
        # return the files list
        return files

    def __getitem__(self, index):
        # load the audio at 16 kHz and return it as a tensor
        sample, _ = librosa.load(self.files[index], sr=16000)
        if self.transform:
            sample = self.transform(sample)
        sample = torch.from_numpy(sample)
        return sample
from torch.utils.data import DataLoader

my_dataset = FlatDirectoryAudioDataset(source_directory, transform=None)

dataloader_my = DataLoader(
    my_dataset,
    batch_size=batch_size,
    num_workers=0,
    shuffle=True)

for i, batch in enumerate(dataloader_my, 0):
    print(i)
    if batch.shape[0] != batch_size:
        print(batch.shape)
    assert batch.shape[0] == batch_size, "Something wrong with the batch size"
Answer 0 (score: 2)
Set drop_last=True to drop the last incomplete batch.

Below is a stripped-down version of a DataLoader built from your code; it runs without any batch-size error. With 9 as the batch_size and 100 items, the last batch has only one item. Just run the code below to see it. With drop_last=True the incomplete final batch is skipped; setting drop_last=False keeps it and prints the last two lines of the output (the 1-item batch and the "Different batch size" message).
0 <class 'torch.Tensor'> torch.Size([9, 1])
1 <class 'torch.Tensor'> torch.Size([9, 1])
2 <class 'torch.Tensor'> torch.Size([9, 1])
3 <class 'torch.Tensor'> torch.Size([9, 1])
4 <class 'torch.Tensor'> torch.Size([9, 1])
5 <class 'torch.Tensor'> torch.Size([9, 1])
6 <class 'torch.Tensor'> torch.Size([9, 1])
7 <class 'torch.Tensor'> torch.Size([9, 1])
8 <class 'torch.Tensor'> torch.Size([9, 1])
9 <class 'torch.Tensor'> torch.Size([9, 1])
10 <class 'torch.Tensor'> torch.Size([9, 1])
# depends on drop_last=True|False
11 <class 'torch.Tensor'> torch.Size([1, 1])
Different batch size (last batch) torch.Size([1, 1])
So the loader produces 11 full batches of 9 items (99 in total) plus one final batch of 1 item, which brings the total to 100.
from torch.utils.data import DataLoader
import numpy as np
import torch
import torch.utils.data.dataset as tdata


class FlatDirectoryAudioDataset(tdata.Dataset):  # customized dataset
    def __init__(self):
        self.files = self.__setup_files()

    def __len__(self):
        return len(self.files)

    def __setup_files(self):
        # stand-in for the real file list: 100 dummy "samples"
        return np.array(range(100))

    def __getitem__(self, index):
        file = self.files[index]
        sample = np.array([file])
        sample = torch.from_numpy(sample)
        return sample


my_dataset = FlatDirectoryAudioDataset()

batch_size = 9

dataloader_my = DataLoader(
    my_dataset,
    batch_size=batch_size,
    num_workers=0,
    shuffle=True,
    drop_last=True)

for i, sample in enumerate(dataloader_my, 0):
    print(i, type(sample), sample.shape)
    if sample.shape[0] != batch_size:
        print("Different batch size (last batch)", sample.shape)
Answer 1 (score: 2)
Use drop_last=True:

utils.DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
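For instance, applied to the loader from the question (a minimal sketch, assuming my_dataset and batch_size are defined as above), this becomes:

from torch.utils.data import DataLoader

# drop_last=True discards the final, smaller batch, so every batch
# yielded by the loader has exactly batch_size items
dataloader_my = DataLoader(
    my_dataset,
    batch_size=batch_size,
    num_workers=0,
    shuffle=True,
    drop_last=True)

for i, batch in enumerate(dataloader_my, 0):
    assert batch.shape[0] == batch_size  # now holds on every iteration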
Answer 2 (score: 0)
I wrote a library called nonechucks to do exactly this (in case your batches come up short not because the batch size doesn't divide the dataset evenly, but because some samples are bad). It lets you handle bad samples in your dataset dynamically (including adjusting the batch size automatically). You can simply wrap your existing PyTorch Dataset in a SafeDataset as follows:
bad_dataset = Dataset(...)
import nonechucks as nc
dataset = nc.SafeDataset(bad_dataset)
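As a follow-up, here is a hedged sketch of how the wrapped dataset might then be consumed; the bad_dataset name and the loader settings are assumptions carried over from the snippets above, and SafeDataset is used here simply as a drop-in Dataset for a standard DataLoader.

import nonechucks as nc
from torch.utils.data import DataLoader

# wrap the existing dataset so that samples which fail to load are skipped
safe_dataset = nc.SafeDataset(bad_dataset)  # bad_dataset: your original Dataset

# a regular DataLoader can iterate over the wrapped dataset; drop_last=True
# still guards against a smaller final batch if the usable sample count
# does not divide evenly by the batch size
loader = DataLoader(safe_dataset, batch_size=9, shuffle=True, drop_last=True)

for batch in loader:
    pass  # training / processing step goes here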