Matrix multiplication with MPI

Time: 2013-07-25 19:42:38

Tags: c mpi

I'm trying to run an MPI matrix multiplication example, except I modified it to read the matrices from files, and of course things blow up.

Specifically, I get this error:

Entering first MPI_Recv in p0 and recieving data from slave processor 1
Fatal error in MPI_Recv: Invalid count, error stack:
MPI_Recv(186): MPI_Recv(buf=0xbfd30930, count=-1807265191, MPI_FLOAT, src=0, tag=1, MPI_COMM_WORLD, status=0x804b080) failed
MPI_Recv(104): Negative count, value is -1807265191

Here is the modified code:

 MPI_Init(&argc, &argv);  
 MPI_Comm_rank(MPI_COMM_WORLD, &id);  
 MPI_Comm_size(MPI_COMM_WORLD, &p);  
 slaves = p-1;  //slaves=numworkers
 /*---------------------------- master ----------------------------*/  
 if(id == 0) 
   {  
  /* check the number of arguments */

    if(argc!=4)
    {
        printf("Invalid number of aguements!\n./program matrix_file1 matrix_file2 result_matrix_file\n");
        return -1;
    }

         /* read matrix A */
    printf("read matrix A from %s\n", argv[1]);
    read_matrix( argv[1],&a, &sa, &i, &j);

    if(i != j) 
    { 
        printf("ERROR: matrix A not square\n"); 
        return -1;
    }

    n = i;

  /* read matrix B */
     printf("read matrix B from %s\n", argv[2]);
     read_matrix(argv[2],&b, &sb, &i, &j);

    if(i != j)
    {     
          printf("ERROR: matrix B not square\n"); 
          return -1; 
    }   

    if(n != i) 
    {   printf("ERROR: matrix A and B incompatible\n"); 
        return -1; 
    }

    if((n%p)!=0)
    {
        printf("ERROR: %d processor(s) cannot divide matrices %d x %d! \n", p,n,n); 
        return -1;
    }

    rows = n/slaves;
    offset = 0;
    remainPart = n%slaves;

    for(dest=1;dest<=slaves;dest++)
    {


        if(remainPart>0)
        {
            originalRows=rows;
            ++rows;
            remainPart--;
             printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset);
            MPI_Send(&offset, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&rows, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&a[offset][0], rows*n, MPI_FLOAT,dest,1, MPI_COMM_WORLD);  
            MPI_Send(&b, n*n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD);  
            offset = offset + rows;   
            rows = originalRows;  

        }
        else
        {
             printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset);
            MPI_Send(&offset, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&rows, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);  
            MPI_Send(&a[offset][0], rows*n, MPI_FLOAT,dest,1, MPI_COMM_WORLD);  
            MPI_Send(&b, n*n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD);  
            offset = offset + rows; 
        }
    }
    /* initialize matrix C */

    sc = (float*)malloc(n*n*sizeof(float));
    memset(sc, 0, n*n*sizeof(float));
    c = (float**)malloc(n*sizeof(float*));
    for(i=0; i<n; i++) c[i] = &sc[i*n];

    /* wait for results from all worker tasks */  
   for (k=1; k<=slaves; k++)      
   {              
    source = k;  
    printf("Entering first MPI_Recv in p0 and recieving data from slave processor %d\n", source);
    MPI_Recv(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering second MPI_Recv in p0\n"); 
    MPI_Recv(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status);
    printf("Entering third MPI_Recv in p0\n");  
    MPI_Recv(&c[offset][0], rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD, &status);  
   }     


   write_matrix(argv[3], sc, i, j);

    free(sc);
    free(c);
  }

if(id>0)
{
      source = 0; 
        //printf("Entered first MPI_Recv for process %d\n", id); 
       MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        //printf("Entered second MPI_Recv for process %d\n", id);
       MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        //printf("Entered third MPI_Recv for process %d\n", id);
       MPI_Recv(&a, rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
        //printf("Entered fourth MPI_Recv for process %d\n", id);  
       MPI_Recv(&b, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);  
       /* Matrix multiplication */  
       for (k=0; k<n; k++)  
        for (l=0; l<rows; l++) {   
         for (m=0; m<n; m++)  
          c[l][k] = c[l][k] + a[l][m] * b[m][k];  
        }  


        //printf("Entered first MPI_send for process %d\n", id);
       MPI_Send(&offset, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);  
        //printf("Entered second MPI_send for process %d\n", id);
       MPI_Send(&rows, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);  
        //printf("Entered third MPI_send for process %d\n", id);
       MPI_Send(&c, rows*n, MPI_FLOAT, 0, 2, MPI_COMM_WORLD);  


}

MPI_Finalize();
}

Earlier I had mistakenly been making every process do the work instead of just the workers; I've fixed that, but I have no idea where the random negative count is coming from. Especially since the calls after the print statements

printf("Entering first MPI_Recv in p0 and recieving data from slave processor %d\n", source);
    MPI_Recv(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering second MPI_Recv in p0\n"); 
    MPI_Recv(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status);
    printf("Entering third MPI_Recv in p0\n");  
    MPI_Recv(&c[offset][0], rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD, &status);  

only pass counts of one, or of the original dimension n times the number of rows given to the slave. Thanks in advance.

Update: OK, so part of the problem seems to be that while my arrays get space allocated for them in the master, that isn't the case for the slave processes.

Once I realized that, I added buffers for the matrices being transferred, before the check for whether the process is a worker. It didn't go as planned, though, since the print statements apparently show the workers aren't getting the matrices.

float buffA[n][n];
float buffB[n][n];
float buffC[n][n];

for(l=0;l<n;l++)
    for(m=0;m<n;m++)
    {
        buffA[l][m]=a[l][m];
        buffB[l][m]=b[l][m];

                        //buffA[l][m]=sa[(i*n) + j];
                        //buffB[l][m]=sb[(i*n) + j];
        printf("buffA[%d][%d] =%f\n",l,m, buffA[l][m]);
        printf("buffB[%d][%d] =%f\n",l,m,buffB[l][m]);
    }

if(id>0)
{
        /*read_matrix( argv[1],&a, &sa, &i, &j);
        read_matrix(argv[2],&b, &sb, &i, &j);*/



        source = 0; 
        printf("Entered first MPI_Recv for process %d\n", id); 
       MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        printf ("offset =%d\n", offset);
       MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        printf ("row =%d\n", rows);
       MPI_Recv(&buffA[offset][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
        printf("buffA[offset][0] =%f\n", buffA[offset][0]); //they're not getting the matrices 
       MPI_Recv(&buffB, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);  
        //printf ("b=\n");

       /* Matrix multiplication */  
       for (k=0; k<n; k++)  
        for (l=0; l<rows; l++) {   
            //c[l][k]=0.0;
         for (m=0; m<n; m++)  
          buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k];  
            //printf("c[%d][%d]= %f\n", l,k, c[l][k]);
        }  


        //printf("Entered first MPI_send for process %d\n", id);
       MPI_Send(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered second MPI_send for process %d\n", id);
       MPI_Send(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered third MPI_send for process %d\n", id);
       MPI_Send(&buffC, rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD);  

        printf("Exit via MPI_send for process %d\n", id);
}

The error count has changed as well, though I'm not sure whether that means anything.

Fatal error in MPI_Recv: Invalid count, error stack:
MPI_Recv(186): MPI_Recv(buf=0xbf8e642c, count=-8, MPI_FLOAT, src=0, tag=1,MPI_COMM_WORLD, status=0x804c088) failed
MPI_Recv(104): Negative count, value is -8

OK, I've now found that the dimension n was never being transferred, which is what caused the initial random negative count. So I added a send and recv for n. Now the last problem seems to be how to transfer dynamically allocated arrays with MPI. Still working on it.
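One way that seems like it should work (just a rough, untested sketch, reusing the variable names from the code above) is to back the matrix with a single contiguous block plus a row-pointer table, the same layout the master already builds for c/sc, so that any run of whole rows can go out in a single send or recv:

    /* sketch: one contiguous block plus a row pointer table,
       the same layout the master already builds for c/sc above */
    float *storage = (float*)malloc(n * n * sizeof(float));
    float **mat    = (float**)malloc(n * sizeof(float*));
    int r;
    for (r = 0; r < n; r++)
        mat[r] = &storage[r * n];

    /* master side: 'rows' consecutive rows starting at 'offset' are contiguous */
    MPI_Send(&mat[offset][0], rows * n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD);

    /* worker side: receive into the start of its own block, not at 'offset' */
    MPI_Recv(&mat[0][0], rows * n, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);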

Update

It works. The current worker code is below. The multiplication output is all over the place (a couple of guesses about why follow the results), but baby steps, I suppose. XP

if(id>0)
{
        source = 0;
        printf("Entered first MPI_Recv for process %d\n", id); 
       MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);  
        printf ("offset =%d\n", offset);
       MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
         MPI_Recv(&n, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
        printf ("row =%d\nn=%d\n", rows,n);

        float buffA[rows][n];
        float buffB[n][n];
        float buffC[rows][n];


       MPI_Recv(&buffA[offset][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
        printf("buffA[offset][0] =%f\n", buffA[offset][0]); //they're not getting the matrices 
       MPI_Recv(&buffB, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);  
        //printf ("b=\n");

       /* Matrix multiplication */  
       for (k=0; k<n; k++)  
        for (l=0; l<rows; l++) {   
            //c[l][k]=0.0;
         for (m=0; m<n; m++)  
          //buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k];  
            //printf("c[%d][%d]= %f\n", l,k, c[l][k]);
            buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k];  

        }  


        //printf("Entered first MPI_send for process %d\n", id);
       MPI_Send(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered second MPI_send for process %d\n", id);
       MPI_Send(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD);  
        //printf("Entered third MPI_send for process %d\n", id);
       MPI_Send(&buffC, rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD);  

        printf("Exit via MPI_send for process %d\n", id);
}

Results

0.00 -0.00 -0.00 -0.00 -0.00 -0.00 0.00 0.00 
0.00 -0.00 -0.00 -0.00 -1.26 -1.26 -0.00 -1.26 
-0.00 -1.26 -0.00 0.00 -0.00 0.00 0.00 0.00 
-0.00 0.00 -0.00 -0.00 0.00 -0.00 -0.00 0.00 
0.00 0.00 0.00 0.00 -0.00 -1.26 -0.00 0.00 
-0.00 -0.00 0.00 35833769696167556769392596671120015360.00 0.00 0.00 -0.00 0.00 
-0.00 -0.00 0.00 -0.00 -0.00 0.00 0.00 0.00 
0.00 -nan -0.00 -0.00 -0.00 -0.00 -0.00 -0.00 
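Two things I suspect here but haven't verified yet: buffC is a VLA that never gets zeroed before the += accumulation, and buffA is received starting at &buffA[offset][0] even though it only has 'rows' rows and the multiplication reads it from row 0. Something along these lines might behave better (untested sketch):

    /* receive the block at the start of the local buffer, not at 'offset' */
    MPI_Recv(&buffA[0][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);

    /* clear the accumulator before the multiply */
    for (l = 0; l < rows; l++)
        for (k = 0; k < n; k++)
            buffC[l][k] = 0.0f;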

1 Answer:

Answer 0 (score: 2)

(From the comments, so this question has an answer)

Print statements are very unreliable in a distributed environment. There's no guarantee that they arrive in order relative to one another. GDB really isn't that bad. You don't need to attach to all of the processes, just pick one. You can take a look at my answer here (stackoverflow.com/questions/17347778/...) to see how to do it.
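Roughly the pattern described there (sketched from memory, adapt as needed): park every rank in a loop at startup, attach gdb to the one PID you care about with gdb -p <pid>, then set the flag from the debugger and continue.

    #include <stdio.h>
    #include <unistd.h>

    /* drop this near the top of main(), after MPI_Init */
    {
        volatile int flag = 0;
        char hostname[256];
        gethostname(hostname, sizeof(hostname));
        printf("PID %d on %s ready for attach\n", getpid(), hostname);
        fflush(stdout);
        while (flag == 0)      /* in gdb: set var flag = 1, then continue */
            sleep(5);
    }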