Control the maximum recursion depth of a function

Date: 2014-08-26 19:27:51

Tags: python python-3.x parallel-processing

I wrote an object to manage Python processes. The manager keeps the processes alive and communicates with them through pipes. To do this, a function calls itself recursively for as long as the process has work to do. But after X calls, Python naturally raises RuntimeError: maximum recursion depth exceeded while calling a Python object.

I could change it with sys.setrecursionlimit(x), but that is not clean (it applies to the whole program)... How can I control the maximum recursion depth of this function?
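For illustration, here is the kind of scoped wrapper I have in mind (the recursion_limit context manager is just a sketch, not a standard helper): it restores the old limit afterwards, but while it is active the limit is still changed for the whole interpreter, not only for this one function.

import sys
from contextlib import contextmanager

@contextmanager
def recursion_limit(limit):
  # Temporarily raise the interpreter-wide recursion limit.
  old_limit = sys.getrecursionlimit()
  sys.setrecursionlimit(limit)
  try:
    yield
  finally:
    sys.setrecursionlimit(old_limit)

# Usage: the previous limit is restored on exit, but it is global while active.
with recursion_limit(5000):
  pass  # call the recursive function here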

My program:

processmanager.py

import sys

if sys.version_info < (3, 3):
  sys.stdout.write("Python 3.3 required\n")
  sys.exit(1)

from multiprocessing import Process, Pipe
from multiprocessing.connection import wait

def chunk(seq,m):
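   # Splits seq into m nearly equal slices, e.g. chunk([0, 1, 2, 3], 2) -> [[0, 1], [2, 3]]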
   i,j,x=len(seq),0,[]
   for k in range(m):
     a, j = j, j + (i+k)//m
     x.append(seq[a:j])
   return x

class KeepedAliveProcessManager(object):

  def __init__(self, nb_process, target):
    self.processs = []
    self.target = target
    self.nb_process = nb_process
    self.readers_pipes = []
    self.writers_pipes = []

  def _start(self, chunked_things):
    for i in range(self.nb_process):
      local_read_pipe, local_write_pipe = Pipe(duplex=False)
      process_read_pipe, process_write_pipe = Pipe(duplex=False)
      self.readers_pipes.append(local_read_pipe)
      self.writers_pipes.append(process_write_pipe)
      p = Process(target=run_keeped_process, args=(self.target, local_write_pipe, process_read_pipe, chunked_things[i]))
      p.start()
      self.processs.append(p)
      local_write_pipe.close()
      process_read_pipe.close()

  def stop(self):
    for writer_pipe in self.writers_pipes:
      writer_pipe.send('stop')

  def get_their_work(self, things_to_do):
    chunked_things = chunk(things_to_do, self.nb_process)
    if not self.processs:
      self._start(chunked_things)
    else:
      for i in range(self.nb_process):
        #print('send things')
        self.writers_pipes[i].send(chunked_things[i])
    things_done_collection = []
    reader_useds = []
    while self.readers_pipes:
      for r in wait(self.readers_pipes):
        try:
          things_dones = r.recv()
        except EOFError:
          reader_useds.append(r)
          self.readers_pipes.remove(r)
        else:
          reader_useds.append(r)
          self.readers_pipes.remove(r)
          things_done_collection.append(things_dones)
    self.readers_pipes = reader_useds
    return things_done_collection

def run_keeped_process(target, main_write_pipe, process_read_pipe, things):
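  # Process one batch of work, send the result back to the manager, then block
  # until the next batch (or 'stop') arrives; the recursive call at the end of
  # this function is what eventually exceeds the recursion limit.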
  things_dones = target(things)
  main_write_pipe.send(things_dones)
  del things_dones
  del things

  new_things = None
  readers = [process_read_pipe]
  readers_used = []
  while readers:
    for r in wait(readers):
      try:
        new_things = r.recv()
        #print('p: things received')
      except EOFError:
        pass
      finally:
        readers.remove(r)
  #print('p: continue')
  if new_things != 'stop':
    run_keeped_process(target, main_write_pipe, process_read_pipe, new_things)

main.py

from processmanager import KeepedAliveProcessManager

def do_things_in_process(things_to_do = []):
  return [i ** 12 for i in things_to_do] 

process_manager = KeepedAliveProcessManager(2, do_things_in_process)
for i in range(1000):
  print(process_manager.get_their_work([0,1,2,3]))
process_manager.stop()

The maximum recursion depth is reached here:

[...]
File "/home/bux/Projets/simtermites/sandbox/parallel/processmanager.py", line 118, in run_keeped_process
    run_keeped_process(target, main_write_pipe, process_read_pipe, new_things)
  File "/home/bux/Projets/simtermites/sandbox/parallel/processmanager.py", line 118, in run_keeped_process
    run_keeped_process(target, main_write_pipe, process_read_pipe, new_things)
  File "/home/bux/Projets/simtermites/sandbox/parallel/processmanager.py", line 100, in run_keeped_process
    main_write_pipe.send(things_dones)
  File "/usr/lib/python3.3/multiprocessing/connection.py", line 206, in send
    ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
  File "/usr/lib/python3.3/multiprocessing/forking.py", line 40, in __init__
    Pickler.__init__(self, *args)
RuntimeError: maximum recursion depth exceeded while calling a Python object

2 Answers:

Answer 0 (score: 1)

def run_keeped_process(target, main_write_pipe, process_read_pipe, things):
  #do some stuff here
  if new_things != 'stop':
    run_keeped_process(target, main_write_pipe, process_read_pipe, new_things)

It looks like this function can be changed so that it does not recurse at all:

def run_keeped_process(target, main_write_pipe, process_read_pipe, things):
  while True:
    #do some stuff here
    if new_things == 'stop':
      break
    things = new_things

Now you will never hit the maximum recursion depth.
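As a concrete sketch, the question's run_keeped_process could be rewritten along these lines (assuming the same pipe protocol and 'stop' sentinel as in the question; this is an illustration, not a tested drop-in replacement):

from multiprocessing.connection import wait

def run_keeped_process(target, main_write_pipe, process_read_pipe, things):
  # Loop instead of recursing: each iteration handles one batch of work.
  while things != 'stop':
    things_dones = target(things)
    main_write_pipe.send(things_dones)

    # Block until the next batch (or the 'stop' sentinel) arrives.
    new_things = None
    readers = [process_read_pipe]
    while readers:
      for r in wait(readers):
        try:
          new_things = r.recv()
        except EOFError:
          pass
        finally:
          readers.remove(r)
    things = new_things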

Answer 1 (score: 0)

Pass a depthCount parameter and decrement it on each recursive call. If the parameter is < 0, do not make the call.
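A minimal sketch of that idea (the countdown function and the depth_count name are illustrative, not taken from the question's code):

def countdown(n, depth_count=10):
  print(n)
  if depth_count < 0:  # budget exhausted: stop recursing
    return
  if n > 0:
    countdown(n - 1, depth_count - 1)  # decrement the budget on every recursive call

countdown(100, depth_count=5)  # prints 100 down to 94, then stops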