Running a DoE in parallel with distributed components in OpenMDAO

Asked: 2016-04-12 14:49:41

Tags: openmdao

I'm trying to run a DoE in parallel on top of a distributed component, but it doesn't seem to work. Below is a simplified example that raises the same error as my actual code.

from __future__ import print_function

import numpy as np

from openmdao.api import IndepVarComp, Group, Problem, Component
from openmdao.core.mpi_wrap import MPI
from openmdao.drivers.latinhypercube_driver import LatinHypercubeDriver

if MPI: 
    from openmdao.core.petsc_impl import PetscImpl as impl 
    rank = MPI.COMM_WORLD.rank
else:
    from openmdao.api import BasicImpl as impl 
    rank = 0



class DistribCompSimple(Component):
    """Uses 2 procs but takes full input vars"""

    def __init__(self, arr_size=2):
        super(DistribCompSimple, self).__init__()

        self._arr_size = arr_size
        self.add_param('invar', 0.)
        self.add_output('outvec', np.ones(arr_size, float))

    def solve_nonlinear(self, params, unknowns, resids):
        if rank == 0:
            unknowns['outvec'] = params['invar'] * np.ones(self._arr_size) * 0.25 
        elif rank == 1:
            unknowns['outvec'] = params['invar'] * np.ones(self._arr_size) * 0.5

        print('hello from rank', rank, unknowns['outvec'])

    def get_req_procs(self):
        # this distributed component needs exactly 2 processes (min, max)
        return (2, 2)


if __name__ == '__main__':

    N_PROCS = 4

    prob = Problem(impl=impl)
    root = prob.root = Group()

    root.add('p1', IndepVarComp('invar', 0.), promotes=['*'])
    root.add('comp', DistribCompSimple(2), promotes=['*'])

    prob.driver = LatinHypercubeDriver(4, num_par_doe=N_PROCS // 2)  # 2 cases in parallel, 2 procs each

    prob.driver.add_desvar('invar', lower=-5.0, upper=5.0)

    prob.driver.add_objective('outvec')

    prob.setup(check=False)
    prob.run()

I run this with

mpirun -np 4 python lhc_driver.py

and get this error:

Traceback (most recent call last):
  File "lhc_driver.py", line 60, in <module>
    prob.run()
  File "/Users/frza/git/OpenMDAO/openmdao/core/problem.py", line 1064, in run
    self.driver.run(self)
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/predeterminedruns_driver.py", line 157, in run
    self._run_par_doe(problem.root)
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/predeterminedruns_driver.py", line 221, in _run_par_doe
    for case in self._get_case_w_nones(self._distrib_build_runlist()):
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/predeterminedruns_driver.py", line 283, in _get_case_w_nones
    case = next(it)
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/latinhypercube_driver.py", line 119, in _distrib_build_runlist
    run_list = comm.scatter(job_list, root=0)
  File "MPI/Comm.pyx", line 1286, in mpi4py.MPI.Comm.scatter (src/mpi4py.MPI.c:109079)
  File "MPI/msgpickle.pxi", line 707, in mpi4py.MPI.PyMPI_scatter (src/mpi4py.MPI.c:48114)
  File "MPI/msgpickle.pxi", line 161, in mpi4py.MPI.Pickle.dumpv (src/mpi4py.MPI.c:41605)
ValueError: expecting 4 items, got 2
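
As far as I can tell, the ValueError comes from mpi4py itself rather than from my model: when a Python list is scattered, the root rank has to supply exactly comm.size items, one per process, and with num_par_doe=2 the driver apparently builds a job list with only 2 entries while the communicator has 4 ranks. The following standalone sketch (my own simplification with made-up case names, not OpenMDAO code) reproduces the same error:

# Hypothetical standalone reproduction of the scatter constraint:
# comm.scatter on the root expects a sequence with exactly comm.size
# entries, one per rank.
from mpi4py import MPI

comm = MPI.COMM_WORLD

if comm.rank == 0:
    # 2 chunks of cases, but the communicator has 4 ranks
    job_list = [['case0', 'case1'], ['case2', 'case3']]
else:
    job_list = None

# with "mpirun -np 4", rank 0 raises:
#   ValueError: expecting 4 items, got 2
my_jobs = comm.scatter(job_list, root=0)
print('rank', comm.rank, 'got', my_jobs)

So it looks like the parallel-DoE scatter does not account for the extra ranks that the distributed component requires.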

I don't see a test for this use case in the latest version, so does that mean it isn't supported yet, or is this a bug?

1 Answer:

Answer 0 (score: 0)

Thanks for submitting a simple test case for this. I added the parallel DOE support recently and forgot to test it with distributed components. I'll add a story to our bug tracker and hopefully we'll get a fix in soon.