Locking a queue shared between processes

Posted: 2019-07-09 08:16:05

Tags: python process locking

Basically, I rewrote my code from Thread to Process, and there were a few "surprises".

How do I lock a queue that is shared by multiple processes? (Currently I am using threading.Lock.)

import multiprocessing
import queue
from threading import Lock

import cv2
import numpy as np

# get_image_filepaths is assumed to be a user-defined helper that lists the image files in dataset_dir.


class DataProviderProcess:
    def __init__(self, dataset_dir, n_images, batch_size, use_queue_lock,
                 input_img_w=299, input_img_h=299):
        img_filepaths = sorted(get_image_filepaths(dataset_dir))
        img_filepaths = img_filepaths[:n_images]
        self.img_filepath_queue = multiprocessing.Manager().Queue()
        for img_filepath in img_filepaths:
            self.img_filepath_queue.put_nowait(img_filepath)
        self.input_img_w = input_img_w
        self.input_img_h = input_img_h
        self.batch_size = batch_size
        self.use_queue_lock = use_queue_lock
        self.queue_lock = Lock()

    def get_batch(self, thread_id):    
        img_batch = []
        try:
            if self.use_queue_lock:
                self.queue_lock.acquire()

            for _ in range(self.batch_size):
                img_filepath = self.img_filepath_queue.get(block=False)
                print('DEBUG: self.img_filepath_queue.qsize()', self.img_filepath_queue.qsize(),
                      'thread_id:', thread_id)
                img = cv2.imread(img_filepath)
                img = cv2.resize(img, (self.input_img_w, self.input_img_h), interpolation=cv2.INTER_LINEAR)
                img_batch.append(img)
            img_batch = np.array(img_batch)

            if self.use_queue_lock:
                self.queue_lock.release()

            return img_batch
        except queue.Empty:
            # The queue ran out of file paths; return the partial batch if any images were read.
            if len(img_batch) > 0:
                img_batch = np.array(img_batch)

                if self.use_queue_lock:
                    self.queue_lock.release()

                return img_batch
            else:

                if self.use_queue_lock:
                    self.queue_lock.release()

                return None

Is that why I cannot pass self._process to Process?

And why should multiprocessing.Manager().Queue() be used together with Process instead of queue.Queue()? (A sketch contrasting the two follows the BatchLoaderProcess class below.)

import multiprocessing
import queue
from multiprocessing import Process


class BatchLoaderProcess:
    def __init__(self, data_provider: DataProviderProcess, n_threads=8):
        #self.batch_queue = Queue(data_provider.batch_size * 4) # v1
        self.batch_queue = multiprocessing.Manager().Queue(data_provider.batch_size * 4)
        self.data_provider = data_provider

        self.thread_list = []
        for thread_id in range(n_threads):
            #self.thread_list.append(Process(target=self._process)) # v1
            #self.thread_list.append(Process(target=self._process, args=(self,))) # v2
            self.thread_list.append(Process(target=BatchLoaderProcess._process, args=(self, thread_id))) # v3

        for t in self.thread_list:
            t.start()

    def _process(self, thread_id):
        while True:
            img_batch = self.data_provider.get_batch(thread_id)
            if img_batch is None:
                break
            self.batch_queue.put(img_batch)

    def get_batch(self):
        try:
            img_batch = self.batch_queue.get(block=True, timeout=1)
            return img_batch
        except queue.Empty:
            return None
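
Regarding the queue.Queue() question above, here is a minimal sketch (the fill worker function is made up for illustration): a plain queue.Queue lives only in one process's memory, so an item put by a child never reaches the parent (under the fork start method the child works on its own copy, and under spawn the queue cannot even be pickled into the child), while multiprocessing.Manager().Queue() returns a proxy to a queue held by a manager process that every worker talks to.

import multiprocessing

def fill(q):
    # Puts an item into whatever queue object the child process sees.
    q.put('hello from the child')

if __name__ == '__main__':
    manager_q = multiprocessing.Manager().Queue()
    p = multiprocessing.Process(target=fill, args=(manager_q,))
    p.start()
    p.join()
    # The item crossed the process boundary via the manager's proxy.
    print(manager_q.get())

    # Doing the same with queue.Queue() would either fail to pickle (spawn)
    # or leave this parent's queue empty (fork), because each process would
    # only ever touch its own in-memory copy.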

1 Answer:

Answer 0: (score: 0)

When using multiprocessing you should use its own queue and locking mechanisms, see https://docs.python.org/3.5/library/multiprocessing.html. I suspect this has a lot to do with the GIL, see Multiprocessing vs Threading Python.

A multiprocessing queue has its own built-in locking mechanism, so you do not need to lock the queue again yourself.
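
As a minimal sketch of that point (the consume_paths worker and the file names are made up for illustration): several processes drain one shared Manager().Queue() with get(block=False) and no explicit lock, and each item is handed out exactly once because the proxied queue serializes access itself.

import multiprocessing
import queue

def consume_paths(shared_q, worker_id):
    # No lock around get(): the managed queue serializes concurrent access internally.
    while True:
        try:
            path = shared_q.get(block=False)
        except queue.Empty:
            break  # queue drained; this worker is done
        print('worker', worker_id, 'got', path)

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared_q = manager.Queue()
    for i in range(20):
        shared_q.put('img_%03d.jpg' % i)

    workers = [multiprocessing.Process(target=consume_paths, args=(shared_q, w))
               for w in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()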

Is that why I cannot pass self._process to Process?

Because self is an argument of the "static" method: when you pass BatchLoaderProcess._process as the target you are handing over a plain function, so the instance has to be supplied explicitly as the first element of args.
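
A minimal sketch of that pattern, with a made-up Loader class: the target is the function looked up on the class, and the instance plus a worker id are passed explicitly through args, mirroring Process(target=BatchLoaderProcess._process, args=(self, thread_id)). Whether the bound-method form target=loader._work also works depends on the start method and on whether the instance pickles cleanly.

from multiprocessing import Process

class Loader:
    def __init__(self, name):
        self.name = name

    def _work(self, worker_id):
        print(self.name, 'worker', worker_id, 'running')

if __name__ == '__main__':
    loader = Loader('demo')
    # The function is taken from the class, so the instance goes in args.
    workers = [Process(target=Loader._work, args=(loader, w)) for w in range(2)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()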