BrokenPipeError: [Errno 32] Broken pipe with PyTorch

Date: 2019-11-14 05:32:32

Tags: python deep-learning jupyter-notebook pytorch broken-pipe

I am trying to do classification with deep learning, but the line `samples, labels = iter(dataloader).next()` raises `BrokenPipeError: [Errno 32] Broken pipe`.

Here is the code:

    import os
    import numpy as np
    import matplotlib.pyplot as plt
    import torchvision
    from PIL import Image
    from torch.utils.data import Dataset, DataLoader, ConcatDataset
    from torchvision import transforms

    train_dir = 'train'
    test_dir = 'test1'
    train_files = os.listdir(train_dir)
    test_files = os.listdir(test_dir)

    class CatDogDataset(Dataset):
        def __init__(self, file_list, dir, mode='train', transform=None):
            self.file_list = file_list
            self.dir = dir
            self.mode = mode
            self.transform = transform
            if self.mode == 'train':
                # all files passed to one dataset share a single label (dog=1, cat=0)
                if 'dog' in self.file_list[0]:
                    self.label = 1
                else:
                    self.label = 0

        def __len__(self):
            return len(self.file_list)

        def __getitem__(self, idx):
            img = Image.open(os.path.join(self.dir, self.file_list[idx]))
            if self.transform:
                img = self.transform(img)
            if self.mode == 'train':
                img = img.numpy()
                return img.astype('float32'), self.label
            else:
                img = img.numpy()
                return img.astype('float32'), self.file_list[idx]

    data_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.ColorJitter(),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.Resize(128),
        transforms.ToTensor()
    ])

    cat_files = [tf for tf in train_files if 'cat' in tf]
    dog_files = [tf for tf in train_files if 'dog' in tf]

    cats = CatDogDataset(cat_files, 'train', transform=data_transform)
    dogs = CatDogDataset(dog_files, 'train', transform=data_transform)

    catdogs = ConcatDataset([cats, dogs])

    dataloader = DataLoader(catdogs, batch_size=32, shuffle=True, num_workers=4)
    # for samples, labels in dataloader
    samples, labels = iter(dataloader).next()
    plt.figure(figsize=(16, 24))
    grid_imgs = torchvision.utils.make_grid(samples[:24])
    np_grid_imgs = grid_imgs.numpy()
    # the image tensor is (channels, height, width); transpose to (height, width, channels) for imshow
    plt.imshow(np.transpose(np_grid_imgs, (1, 2, 0)))
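
A note on the loader above: with `num_workers=4`, the first call to `iter(dataloader)` starts four worker processes, and on Windows the dataset object is pickled and sent to each worker over a pipe. Below is a minimal sketch (reusing `catdogs` from the code above and the standard `torch.utils.data.DataLoader` API) of the same first-batch check with `num_workers=0`, which loads data in the main process and spawns no workers:

    # Sanity check sketch: num_workers=0 avoids worker processes entirely,
    # so any failure here points at the Dataset/transforms rather than multiprocessing.
    debug_loader = DataLoader(catdogs, batch_size=32, shuffle=True, num_workers=0)
    samples, labels = next(iter(debug_loader))
    print(samples.shape, labels.shape)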

ERROR:

---------------------------------------------------------------------------
BrokenPipeError                           Traceback (most recent call last)
<ipython-input-19-b0818fceee5a> in <module>
      1 #for samples,labels in dataloader
----> 2 samples,labels = iter(dataloader).next()
      3 plt.figure(figsize=(16,24))
      4 grid_imgs = torchvision.utils.make_grid(samples[:24])
      5 np_grid_imgs = grid_imgs.numpy()

~\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in __iter__(self)
    499 
    500     def __iter__(self):
--> 501         return _DataLoaderIter(self)
    502 
    503     def __len__(self):

~\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in __init__(self, loader)
    287             for w in self.workers:
    288                 w.daemon = True  # ensure that the worker exits on process exit
--> 289                 w.start()
    290 
    291             _update_worker_pids(id(self), tuple(w.pid for w in self.workers))

~\Anaconda3\lib\multiprocessing\process.py in start(self)
    110                'daemonic processes are not allowed to have children'
    111         _cleanup()
--> 112         self._popen = self._Popen(self)
    113         self._sentinel = self._popen.sentinel
    114         # Avoid a refcycle if the target function holds an indirect

~\Anaconda3\lib\multiprocessing\context.py in _Popen(process_obj)
    221     @staticmethod
    222     def _Popen(process_obj):
--> 223         return _default_context.get_context().Process._Popen(process_obj)
    224 
    225 class DefaultContext(BaseContext):

~\Anaconda3\lib\multiprocessing\context.py in _Popen(process_obj)
    320         def _Popen(process_obj):
    321             from .popen_spawn_win32 import Popen
--> 322             return Popen(process_obj)
    323 
    324     class SpawnContext(BaseContext):

~\Anaconda3\lib\multiprocessing\popen_spawn_win32.py in __init__(self, process_obj)
     63             try:
     64                 reduction.dump(prep_data, to_child)
---> 65                 reduction.dump(process_obj, to_child)
     66             finally:
     67                 set_spawning_popen(None)

~\Anaconda3\lib\multiprocessing\reduction.py in dump(obj, file, protocol)
     58 def dump(obj, file, protocol=None):
     59     '''Replacement for pickle.dump() using ForkingPickler.'''
---> 60     ForkingPickler(file, protocol).dump(obj)
     61 
     62 #

BrokenPipeError: [Errno 32] Broken pipe

All of the code runs fine, but the final `iter` call raises this error. I have also looked at the other questions about broken pipes, but they give different solutions in different contexts. Please help me resolve this.
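
For context on the traceback: the failure occurs while `multiprocessing` (via `popen_spawn_win32` and `reduction.dump`) pickles the dataset to a newly spawned worker, which is how `num_workers > 0` behaves on Windows. A commonly suggested arrangement in this situation, shown here as a sketch rather than a confirmed fix, is to put the `DataLoader` iteration behind an `if __name__ == '__main__':` guard (or to fall back to `num_workers=0`); the names reuse those from the code above, and `show_batch` is only an illustrative wrapper:

    # Sketch: keep the multiprocessing entry point behind a main guard so that
    # spawned workers can re-import the module without re-running this code.
    def show_batch():
        dataloader = DataLoader(catdogs, batch_size=32, shuffle=True, num_workers=4)
        samples, labels = next(iter(dataloader))
        grid_imgs = torchvision.utils.make_grid(samples[:24])
        plt.imshow(np.transpose(grid_imgs.numpy(), (1, 2, 0)))
        plt.show()

    if __name__ == '__main__':
        show_batch()

In a Jupyter notebook the dataset class is defined in the notebook itself, so spawned workers may still fail to load it; in that case `num_workers=0` is the simplest workaround.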

0 Answers:

No answers