How to use MPI_Scatter and MPI_Gather in a loop

Asked: 2016-10-17 09:54:21

Tags: c parallel-processing mpi

I am trying to learn how to use MPI_Scatter and MPI_Gather repeatedly, printing out the result once both MPI calls have completed. At the top of the program, on process 0, I want a while loop that calls Scatter and Gather; once each round of calculation is done, I want to feed the resulting array back into these functions for further calculation. The comments in /*.....*/ in the code below explain what I am trying to accomplish. The following code is run with 4 processors:
:$ mpicc test.c -o test
:$ mpirun -np 4 ./test

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    int size, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int globaldata[8];
    int localdata[2];
    int counter, i;
    if (rank == 0) 
    {
        for (i=0; i<size*2; i++)//initializing array to all zeros, one time
            globaldata[i] = 0;

        /*counter=0;
        do
        {
            counter++;  */
            printf("Sending at Processor %d has data: ", rank);
            for (i=0; i<size*2; i++)
                printf("%d ", globaldata[i]);
            printf("\n");

            /*After MPI_Gather is done, I want to get the newly assigned array here.
            Now the globaldata array should hold values: 0 0 1 1 2 2 3 3
            Therefore, in the next iteration of this while loop, these array values need
            to be sent for a new calculation with Scatter & Gather
        }while(counter<2);*/

        //The following needs to be executed after all the scatters and gathers have completed
        printf("Finally at Processor %d has data: ", rank);
        for (i=0; i<size*2; i++)//Here the result should be: 0 0 2 2 3 3 4 4
            printf("%d ", globaldata[i]);
        printf("\n");
    }


    MPI_Scatter(globaldata, 2, MPI_INT, &localdata, 2, MPI_INT, 0, MPI_COMM_WORLD); 

    localdata[0]= localdata[0]+rank;
    localdata[1]= localdata[1]+rank;

    MPI_Gather(&localdata, 2, MPI_INT, globaldata, 2, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {//Currently I can only see the newly assigned array values if I print out the result at the bottom
        printf("At the bottom, Processor %d has data: ", rank);
        for (i=0; i<size*2; i++)
            printf("%d ", globaldata[i]);
        printf("\n");
    }


    MPI_Finalize();
    return 0;
}

Some more explanation of what I am trying to do above: I want to send the globaldata array to all processors and then get the updated globaldata array back. Once I have the updated array, I want to send it out again to all the other processes for more calculation. I wrote the following code, which does a similar job using MPI_Send and MPI_Recv. Here I use MPI_Send to send my array to all processors; they change it and send it back to the root / process 0. Once I get the modified array, the do-while loop runs again and performs more calculation. What I want is to do the same thing with MPI_Scatter and MPI_Gather: get the updated globaldata array and feed it back into MPI_Scatter and MPI_Gather so that the array is changed again.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    int size, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int globaldata[8];
    int counter, i;
    if (rank == 0) 
    {
        for (i=0; i<size*2; i++)
            globaldata[i] = 0;

        counter=0;
        do
        {   /*because of this do while loop the "globaldata" array will always be updated and resent for more calculations*/
            counter++;  
            printf("Sending at Processor %d has data: ", rank);
            for (i=0; i<size*2; i++)
                printf("%d ", globaldata[i]);
            printf("\n");

            for(i = 0; i < 4; i++)
            {
                MPI_Send(&globaldata, 8, MPI_INT, i, 0, MPI_COMM_WORLD);
            }
            for(i = 1; i < 4; i++)
            {         
                MPI_Recv(&globaldata, 8, MPI_INT, i, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }

        }while(counter<2);

        /*The following executes after all the above calculations have completed*/
        printf("Finally at Processor %d has data: ", rank);
        for (i=0; i<size*2; i++)
            printf("%d ", globaldata[i]);
        printf("\n");
    }

    counter=0;
    do
    {
        counter++; 
        MPI_Recv(&globaldata, 8, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); 
        globaldata[rank]=globaldata[rank]+rank;
        globaldata[rank+1]=globaldata[rank+1]+rank;
        MPI_Send(&globaldata, 8, MPI_INT, 0, 99, MPI_COMM_WORLD);
    }while(counter<2);

    MPI_Finalize();
    return 0;
}

1 Answer:

Answer 0 (score: 1)

Put the scatter and the gather, together with the local processing, inside the loop:

if (rank == 0)
{
   for (i = 0; i < size*2; i++)
      globaldata[i] = 0;
}

for (counter = 0; counter < 2; counter++)
{
   // if (rank == 0)
   // {
   //    pre-process globaldata
   // }

   MPI_Scatter(globaldata, 2, MPI_INT, localdata, 2, MPI_INT, 0, MPI_COMM_WORLD);

   localdata[0] += rank;
   localdata[1] += rank;

   MPI_Gather(localdata, 2, MPI_INT, globaldata, 2, MPI_INT, 0, MPI_COMM_WORLD);

   // if (rank == 0)
   // {
   //    post-process globaldata
   // }
}

if (rank == 0)
{
   printf("Finally at Processor %d has data: ", rank);
      for (i=0; i<size*2; i++)
         printf("%d ", globaldata[i]);
   printf("\n");
}
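As a quick sanity check of this version (my own arithmetic, assuming the two iterations and the all-zero initialisation from the question): after the first Scatter/compute/Gather round, rank 0 holds 0 0 1 1 2 2 3 3; after the second round each rank has added its rank once more, so the final print shows 0 0 2 2 4 4 6 6.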

Or, if you prefer to keep the logic of the "master" process separate:

if (rank == 0)
{
   for (i = 0; i < size*2; i++)
      globaldata[i] = 0;

   for (counter = 0; counter < 2; counter++)
   {
      // pre-process globaldata

      MPI_Scatter(globaldata, 2, MPI_INT, localdata, 2, MPI_INT, 0, MPI_COMM_WORLD);

      // Not really useful as rank == 0 and it changes nothing
      localdata[0] += rank;
      localdata[1] += rank;

      MPI_Gather(localdata, 2, MPI_INT, globaldata, 2, MPI_INT, 0, MPI_COMM_WORLD);

      // post-process globaldata
   }

   printf("Finally at Processor %d has data: ", rank);
      for (i=0; i<size*2; i++)
         printf("%d ", globaldata[i]);
   printf("\n");
}
else
{
   for (counter = 0; counter < 2; counter++)
   {
      MPI_Scatter(globaldata /* or NULL */, 2, MPI_INT, localdata, 2, MPI_INT,
                  0, MPI_COMM_WORLD);

      localdata[0] += rank;
      localdata[1] += rank;

      MPI_Gather(localdata, 2, MPI_INT, globaldata /* or NULL */, 2, MPI_INT,
                 0, MPI_COMM_WORLD);
   }
}

Make sure that the loops in both branches of the code perform the same number of iterations. Also note that MPI_Scatter sends a chunk of globaldata to the root rank as well, and MPI_Gather likewise collects a chunk from the root rank, so the master process also has to take part in the data processing.
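If you additionally want the root rank to work directly on its own chunk of globaldata rather than going through localdata, MPI_IN_PLACE can be used on the root side of both collectives. This is only an illustrative sketch of that option (not part of the code above); the non-root ranks behave exactly as before:

for (counter = 0; counter < 2; counter++)
{
   if (rank == 0)
   {
      /* Root keeps its own chunk in globaldata[0..1]; only the other chunks
         are copied out. The receive buffer arguments are ignored at the root. */
      MPI_Scatter(globaldata, 2, MPI_INT, MPI_IN_PLACE, 2, MPI_INT, 0, MPI_COMM_WORLD);

      globaldata[0] += rank;   /* rank is 0 here, kept only for symmetry */
      globaldata[1] += rank;

      /* Root's contribution is already at the right offset in globaldata. */
      MPI_Gather(MPI_IN_PLACE, 2, MPI_INT, globaldata, 2, MPI_INT, 0, MPI_COMM_WORLD);
   }
   else
   {
      /* Send buffer arguments are ignored on non-root ranks. */
      MPI_Scatter(NULL, 2, MPI_INT, localdata, 2, MPI_INT, 0, MPI_COMM_WORLD);

      localdata[0] += rank;
      localdata[1] += rank;

      /* Receive buffer arguments are ignored on non-root ranks. */
      MPI_Gather(localdata, 2, MPI_INT, NULL, 2, MPI_INT, 0, MPI_COMM_WORLD);
   }
}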