Segmentation fault with MPI_Isend and MPI_Irecv

Date: 2017-03-27 11:13:00

Tags: c++ asynchronous mpi

I am using MPI to parallelize my C++ interplanetary trajectory optimization program. A large part of it is being able to distribute the workload to multiple worker nodes, have them perform some computation on the distributed data, and return the results to the master node. I believe I am using the asynchronous communication routines MPI_Isend and MPI_Irecv, together with MPI_Wait, correctly in my program. However, I am running into abrupt program termination with EXIT CODE: 11, which I believe indicates a segmentation fault. I have searched Stack Overflow thoroughly on this topic and made sure I am not repeating the mistakes others have made in their code. Still, my code does not work. Here is the code:

    mat GeneticAlgorithm::mpi_pool_fitness(mat pool, int flyby_limit, int source, int target, bool isSolar, vec mu_system, vec rp_system, cube ephemerides, IPMGAConfig config)
{
    int poolsize = size(pool,0);
    int chromsize = size(pool,1);
    double* poolptr = NULL;
    mat rPool = zeros(poolsize,chromsize+1);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    MPI_Request* rq_status = (MPI_Request*)malloc(world_size*sizeof(MPI_Request));
    MPI_Status* status = (MPI_Status*)malloc(world_size*sizeof(MPI_Status));

    int k = 0;
    if ( world_rank == 0 )
    {
        //pool.print();
        //initialize poolptr with input pool elements, since mat is stored in memory column by column, it's not possible to use memptr() function
        poolptr = (double *) malloc(sizeof(double)*poolsize*chromsize);
        for(int i=0;i<poolsize;i++)
        {
            for (int j=0;j<chromsize;j++)
            {
                poolptr[k++] = pool(i,j);
                //cout << poolptr[k-1] << " " ;
            }
            //cout << endl;
        }
    }

    double perproc = poolsize/(world_size-1);
    int elems_per_proc = (int)perproc;
    if (elems_per_proc*(world_size-1) < poolsize)
    {
        elems_per_proc = elems_per_proc + 1;
    }
    //cout << world_rank << " Elements per processor : " << elems_per_proc << endl;
    if ( world_rank == 0 )
    {
        //cout << "poolptr size: " << k << endl;
        //cout << "expected poolsize: " << (world_size-1)*elems_per_proc*chromsize << endl;
        //MPI_Scatter(poolptr,elems_per_proc*chromsize,MPI_DOUBLE,row,elems_per_proc*chromsize,MPI_DOUBLE,0,MPI_COMM_WORLD);
        for (int i=1;i<world_size;i++)
        {
            cout << "0 Scattering chromosomes to processor: " << i << endl;
            MPI_Isend(&poolptr[(i-1)*elems_per_proc*chromsize],elems_per_proc*chromsize,MPI_DOUBLE,i,i,MPI_COMM_WORLD,&rq_status[i]);
        }
        /*
        for (int i=1;i<world_size;i++)
        {
            MPI_Wait(&rq_status[i],&status[i]);
        }
        */
        cout << "0 successfully sent off chromosomes for fitness evaluation....." << endl;
        free(poolptr);
    }

    double *row[100];
    double *iResults[100];
    mat iPool = zeros(poolsize,chromsize+1);
    if ( world_rank != 0 )
    {
        row[world_rank] = (double*)malloc(sizeof(double)*elems_per_proc*chromsize);
        cout << world_rank << " Starting to receive chromosomes from processor 0" << endl;
        MPI_Irecv(&row[world_rank],elems_per_proc*chromsize,MPI_DOUBLE,0,world_rank,MPI_COMM_WORLD,&rq_status[0]);
        MPI_Wait(&rq_status[0],&status[0]);
        cout << world_rank << " Received chromosomes from processor 0" << endl;
        //Convert MPI data back to arma matrix
        for (int i=0;i<elems_per_proc;i++)
        {
            cout << "Composing " << i << "th element at the given processor " << world_rank << endl;
            k = 1;
            for (int j=0;j<chromsize;j++,k++)
            {
                iPool(((world_rank-1)*elems_per_proc)+i,k)=row[world_rank][(i*chromsize)+j];
            }
        }
        //iPool.print();
        //Compute the fitness of each chromosome in intermediate pool
        cout << world_rank << " Attempting fitness calculations....." << endl;
        for (int i=0;i<elems_per_proc;i++)
        {
            iPool(((world_rank-1)*elems_per_proc)+i,span(0,chromsize)) = fitness_multi_rev_lambert(iPool(((world_rank-1)*elems_per_proc)+i,span(1,chromsize)),flyby_limit,source,target,isSolar,mu_system,rp_system,ephemerides,config);
        }
        cout << world_rank << " Successfully finished fitness calculations....." << endl;
        //iPool.print();
        //Convert the results back to MPI data type
        iResults[world_rank]=(double *) malloc(sizeof(double)*elems_per_proc*(chromsize+1));// = iPool.memptr();
        k=0;
        for(int i=0;i<elems_per_proc;i++)
        {
            for (int j=0;j<chromsize+1;j++)
            {
                iResults[world_rank][k++] = iPool(((world_rank-1)*elems_per_proc)+i,j);
            }
        }
        //cout << world_rank << " Starting to send processed chromosomes to processor 0" << endl;
        MPI_Isend(&iResults[world_rank],elems_per_proc*(chromsize+1),MPI_DOUBLE,0,world_rank,MPI_COMM_WORLD,&rq_status[0]);
        //cout << world_rank << " Sent processed chromosomes to processor 0" << endl;
        MPI_Wait(&rq_status[0],&status[0]);
    }
    //Declare a variable holder for global results 
    if ( world_rank == 0)
    {
        double* gResults = (double*)malloc(sizeof(double)*poolsize*(chromsize+1));

        //cout << "0 Gathering chromosomes with fitness evaluated from all processors...." << endl;
        //MPI_Gather(iResults,elems_per_proc*(chromsize+1),MPI_DOUBLE,gResults,poolsize*(chromsize+1),MPI_DOUBLE,0,MPI_COMM_WORLD);
        k=0;
        for (int i=1;i<world_size;i++)
        {
            MPI_Irecv(&gResults[(i-1)*elems_per_proc*(chromsize+1)],elems_per_proc*(chromsize+1),MPI_DOUBLE,i,i,MPI_COMM_WORLD,&rq_status[i]);
        }
        cout << "0 waiting to hear back from all the worker nodes...." << endl;
        for(int i=1;i<world_size;i++)
        {
            MPI_Wait(&rq_status[i],&status[i]);
        }
        cout << "Populating return pool...." << endl;
        for (int i=0;i<poolsize;i++)
        {
            for(int j=0;j<chromsize+1;j++)
            {
                rPool(i,j) = gResults[(i*(chromsize+1))+j];
            }
        }
        //cout << "Finished populating return pool...." << endl;
    }

    free(rq_status);
    free(status);
    return rPool;
}

The program shows several of the symptoms I found while searching Stack Overflow. For example, MPI_Isend on the master node only works when I specify '-n 11' or '-n 26' in my mpiexec command; for any other number of processes, the master node hits a segmentation fault. And when the MPI_Isend from the master does work, the worker nodes then hit a segmentation fault, which I believe happens during or after MPI_Irecv.

Here is the complete log of one run of the program when I launch mpiexec with 11 processes:

    10 Starting to receive chromosomes from processor 0
Best results are in : best_results_20160217T1902.mat
Generational chromosomes are in : chromosomes_20160217T1902.mat
0 Starting the GA.....
0 Processing generation : 1
6 Starting to receive chromosomes from processor 0
9 Starting to receive chromosomes from processor 0
4 Starting to receive chromosomes from processor 0
7 Starting to receive chromosomes from processor 0
5 Starting to receive chromosomes from processor 0
3 Starting to receive chromosomes from processor 0
8 Starting to receive chromosomes from processor 0
2 Starting to receive chromosomes from processor 0
1 Starting to receive chromosomes from processor 0
0 Scattering chromosomes to processor: 1
0 Scattering chromosomes to processor: 2
0 Scattering chromosomes to processor: 3
0 Scattering chromosomes to processor: 4
0 Scattering chromosomes to processor: 5
0 Scattering chromosomes to processor: 6
0 Scattering chromosomes to processor: 7
0 Scattering chromosomes to processor: 8
0 Scattering chromosomes to processor: 9
0 Scattering chromosomes to processor: 10
0 successfully sent off chromosomes for fitness evaluation.....
0 waiting to hear back from all the worker nodes....

===================================================================================
=   BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
=   PID 12223 RUNNING AT 192.168.0.101
=   EXIT CODE: 11
=   CLEANING UP REMAINING PROCESSES
=   YOU CAN IGNORE THE BELOW CLEANUP MESSAGES
===================================================================================
[proxy:0:2@odroid3] HYD_pmcd_pmip_control_cmd_cb (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip_cb.c:885): assert (!closed) failed
[proxy:0:2@odroid3] HYDT_dmxu_poll_wait_for_event (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/demux/demux_poll.c:76): callback returned error status
[proxy:0:2@odroid3] main (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip.c:206): demux engine error waiting for event
[proxy:0:3@odroid4] HYD_pmcd_pmip_control_cmd_cb (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip_cb.c:885): assert (!closed) failed
[proxy:0:3@odroid4] HYDT_dmxu_poll_wait_for_event (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/demux/demux_poll.c:76): callback returned error status
[proxy:0:3@odroid4] main (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip.c:206): demux engine error waiting for event
[proxy:0:5@odroid6] HYD_pmcd_pmip_control_cmd_cb (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip_cb.c:885): assert (!closed) failed
[proxy:0:5@odroid6] HYDT_dmxu_poll_wait_for_event (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/demux/demux_poll.c:76): callback returned error status
[proxy:0:5@odroid6] main (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip.c:206): demux engine error waiting for event
[proxy:0:4@odroid5] HYD_pmcd_pmip_control_cmd_cb (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip_cb.c:885): assert (!closed) failed
[proxy:0:4@odroid5] HYDT_dmxu_poll_wait_for_event (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/demux/demux_poll.c:76): callback returned error status
[proxy:0:4@odroid5] main (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip.c:206): demux engine error waiting for event
[proxy:0:6@odroid7] HYD_pmcd_pmip_control_cmd_cb (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip_cb.c:885): assert (!closed) failed
[proxy:0:6@odroid7] HYDT_dmxu_poll_wait_for_event (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/demux/demux_poll.c:76): callback returned error status
[proxy:0:6@odroid7] main (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip.c:206): demux engine error waiting for event
[proxy:0:1@odroid2] HYD_pmcd_pmip_control_cmd_cb (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip_cb.c:885): assert (!closed) failed
[proxy:0:1@odroid2] HYDT_dmxu_poll_wait_for_event (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/demux/demux_poll.c:76): callback returned error status
[proxy:0:1@odroid2] main (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmip.c:206): demux engine error waiting for event
[mpiexec@odroid1] HYDT_bscu_wait_for_completion (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/bootstrap/utils/bscu_wait.c:76): one of the processes terminated badly; aborting
[mpiexec@odroid1] HYDT_bsci_wait_for_completion (/home/odroid/installers/mpich-3.2/src/pm/hydra/tools/bootstrap/src/bsci_wait.c:23): launcher returned error waiting for completion
[mpiexec@odroid1] HYD_pmci_wait_for_completion (/home/odroid/installers/mpich-3.2/src/pm/hydra/pm/pmiserv/pmiserv_pmci.c:218): launcher returned error waiting for completion
[mpiexec@odroid1] main (/home/odroid/installers/mpich-3.2/src/pm/hydra/ui/mpich/mpiexec.c:344): process manager error waiting for completion

I would appreciate any help with this; I really need to get this program running before my thesis deadline!

2 Answers:

Answer 0 (score: 0)

At least one critical MPI_Wait is commented out:

    for (int i=1;i<world_size;i++)
    {
        cout << "0 Scattering chromosomes to processor: " << i << endl;
        MPI_Isend(&poolptr[(i-1)*elems_per_proc*chromsize],elems_per_proc*chromsize,MPI_DOUBLE,i,i,MPI_COMM_WORLD,&rq_status[i]);
    }
    /*
    for (int i=1;i<world_size;i++)
    {
        MPI_Wait(&rq_status[i],&status[i]);
    }
    */
    cout << "0 successfully sent off chromosomes for fitness evaluation....." << endl;
    free(poolptr);

You must not free poolptr, or write to it, before all the send operations that use it have completed.
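
A minimal sketch of the fix on the root side, reusing the variable names from the question (poolptr, rq_status, elems_per_proc, chromsize): complete all outstanding sends, for example with MPI_Waitall, and only then free the buffer.

    if (world_rank == 0)
    {
        for (int i = 1; i < world_size; i++)
        {
            MPI_Isend(&poolptr[(i-1)*elems_per_proc*chromsize], elems_per_proc*chromsize,
                      MPI_DOUBLE, i, i, MPI_COMM_WORLD, &rq_status[i]);
        }
        // Complete every send before the buffer is reused or released.
        // The requests live in slots 1..world_size-1 of rq_status, so wait on that range.
        MPI_Waitall(world_size - 1, &rq_status[1], MPI_STATUSES_IGNORE);
        free(poolptr);   // safe now: no send is still reading from this buffer
    }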

In general, you are overusing non-blocking communication:

  1. Any non-blocking request that you MPI_Wait on immediately is pointless. Use blocking calls instead (see the sketch after this list).
  2. Use collective communication calls whenever possible, in particular MPI_Scatter / MPI_Gather. As a rule of thumb: use collectives to overlap multiple communications with each other, and non-blocking communication to overlap communication with computation.
  3. If you wait on multiple requests, use MPI_Waitall.
  4. For a better discussion, please add a Minimal, Complete, and Verifiable example, and make sure to clean up the commented-out parts.
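
As an illustration of point 1, here is a sketch of the worker side with the Irecv/Isend-plus-immediate-Wait pairs replaced by plain blocking calls. It reuses the variable names from the question; note that MPI expects the data buffer itself, e.g. row[world_rank], rather than the address of that pointer.

    // Blocking receive of this worker's slice of the pool (tag = world_rank).
    MPI_Recv(row[world_rank], elems_per_proc*chromsize, MPI_DOUBLE,
             0, world_rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    // ... fitness evaluation, filling iResults[world_rank] ...

    // Blocking send of the evaluated chromosomes back to the root.
    MPI_Send(iResults[world_rank], elems_per_proc*(chromsize+1), MPI_DOUBLE,
             0, world_rank, MPI_COMM_WORLD);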

Answer 1 (score: 0)

Thank you for pointing out my oversight! I ended up using MPI_Scatter / MPI_Gather for my problem, as you suggested, instead of the asynchronous communication routines, which were overused anyway.

I found the examples at this stack overflow link very helpful for getting MPI_Scatter / MPI_Gather to work successfully.
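
For reference, here is a rough sketch of what the collective version can look like. It is only an outline: the names per_rank, my_slice, out_per_rank, and my_results are made up for illustration, and it assumes the flattened pool buffer is padded so that every rank (including the root) gets an equal-sized slice, whereas the original code distributes work to world_size-1 workers only.

    // Number of doubles each rank works on (the root flattens the pool into poolptr).
    int per_rank = elems_per_proc * chromsize;
    double* my_slice = (double*)malloc(sizeof(double) * per_rank);

    // Every rank receives one contiguous slice of the chromosome pool.
    MPI_Scatter(poolptr, per_rank, MPI_DOUBLE,
                my_slice, per_rank, MPI_DOUBLE,
                0, MPI_COMM_WORLD);

    // ... each rank evaluates fitness on its slice, writing
    //     elems_per_proc * (chromsize + 1) doubles into my_results ...
    int out_per_rank = elems_per_proc * (chromsize + 1);
    double* my_results = (double*)malloc(sizeof(double) * out_per_rank);

    // The root collects all evaluated chromosomes back into gResults.
    MPI_Gather(my_results, out_per_rank, MPI_DOUBLE,
               gResults, out_per_rank, MPI_DOUBLE,
               0, MPI_COMM_WORLD);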