Trouble in MPI -> Failing at address: (nil)

Date: 2013-04-07 00:40:17

Tags: c matrix mpi null openmpi

I'm a beginner with C and MPI, and I'm trying to write a program that multiplies 2 matrices in MPI, but I don't know what's wrong with my code. I try to 'slice' matrix M1 into rows and send them to the other processes to do the multiplication, and I broadcast matrix M2; afterwards I do a Gather to build the final matrix M3. I run it with:

mpirun -n 2 matrix

but I get this error in the terminal:

[adiel-VirtualBox:07921] *** Process received signal ***
[adiel-VirtualBox:07921] Signal: Segmentation fault (11)
[adiel-VirtualBox:07921] Signal code:  (128)
[adiel-VirtualBox:07921] Failing at address: (nil)
--------------------------------------------------------------------------
mpirun noticed that process rank 0 with PID 7921 on node adiel-VirtualBox exited on signal 0 (Unknown signal 0).
--------------------------------------------------------------------------
2 total processes killed (some possibly by mpirun during cleanup)
mpirun: clean termination accomplished

Can anyone help me?

Here's my code:

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
//#include "mpe.h"
#include <math.h>


void printMatrix(double *M, int m, int n) {
   int lin, col;
   for (lin=0; lin<m; lin++) {
      for (col=0; col<n; col++)
        printf("%.2f \t", M[(lin*n+col)]);
      printf("\n"); 
   }
}

double* allocateMatrix(int m, int n){
    double* M;
    M = (double *)malloc(m*n*sizeof(double));
    return M;
}

int main( int argc, char *argv[] )
{

    int rank, size;
    int m1,n1,m2,n2;
    int row, col,ctrl,i,k,lines,proc;
    double *M1, *M2, *M3, **vp, *v;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    m1 = m2 = n1 = n2 = 3;

    lines = (int)ceil(n1/size);
    v = (double *)malloc(lines*n1*sizeof(double));


    M2 = allocateMatrix(m2,n2);
    M3 = allocateMatrix(m1,n2);
    if(rank==0)
        M1 = allocateMatrix(m1,n1);

    //starting matrix
    for (col = 0; col < n1; col++){
        for (row = 0; row < m1; row++) {
            if(rank==0)
                M1[(row*m1+col)] = 0;
            M2[(row*m2+col)] = 0;
            M3[(row*m1+col)] = 0;
        }
    }
    //starting pointers with 0
    for(i=0;i<lines*n1;i++)
        v[i] = 0;

    //populate
    if(rank == 0){
        for (col = 0; col < n1; col++){
            for (row = 0; row < m1; row++) {
                M1[row*m1+col] = row*3+(col+1);
                M2[(row*m2+col)] = 1;
            }
        }
    }

//---------------------sharing and multiply---------------//


    //slicing M1 and sending to the other processes
    if(rank == 0){
        proc = size-1;
        //for each line
        for(row = 0;row<m1;row++){
            ctrl = floor(row/lines);
            //on each column
            for(col=0;col<n1;col++){
                v[(ctrl*n1)+col] = M1[(row*n1)+col];
            }
            if(row%lines == (lines - 1)){
                if(proc!=0){
                    MPI_Send(v,lines*n1,MPI_DOUBLE,proc,1, MPI_COMM_WORLD);
                    proc--;
                    //clearing pointers
                    for(i=0;i<lines*n1;i++)
                        v[i] = 0;
                }
            }
        }
    }

    //MPI_Bcast(m1, m*n, MPI_DOUBLE, 0, MPI_COMM_WORLD); 
    MPI_Bcast(M2, m2*n2, MPI_DOUBLE, 0, MPI_COMM_WORLD); 


    //receiving process
    if(rank!=0)
        MPI_Recv(v,lines*n1,MPI_DOUBLE,0,1,MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    for(row=0;row<lines;row++){
        if(v[row*n1]!=0){
            for (col = 0; col < n1; col++){
                double val = 0.0;
                for(k=0;k<m1;k++){
                    val += v[(row*n1)+k] * M2[(k*n1)+col];
                }
                M3[((size-1-rank)*size*n1)+(row*n1)+col] = val;
            }
        }
    }


    if(rank!=0){    
        for(row = 0; row < lines; row++){
            MPI_Gather(&M3[((size-1-rank)*size*n1)+(row*n1)], n1, MPI_DOUBLE, &M3[((size-1-rank)*size*n1)+(row*n1)], n1, MPI_DOUBLE, 0, MPI_COMM_WORLD); 
        }   
    }

    if(rank == 0){
        printf("matrix 1------------------------\n");
        printMatrix(M1,m1,n1);
        printf("matrix 2------------------------\n");
        printMatrix(M2,m2,n2);
        printf("matrix 3------------------------\n");
        printMatrix(M3,m1,n2);
    }

    MPI_Finalize();
    return 0;
}

1 Answer:

Answer 0 (score: 1):

For one thing, all of the sends are completed before the broadcast, and all of the receives are issued after it. I could easily see that failing with MPI resource exhaustion or a deadlock. It shouldn't show up on an input this small, but you should fix it regardless. I'll take another look.
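
Not from the original answer, but as a minimal sketch of the kind of restructuring being suggested: let every rank take part in the same collectives in the same order, using MPI_Scatter for the row slices, MPI_Bcast for M2, and MPI_Gather for the result. The dimension N, the helper names (rowsPerRank, localA, localC), and the assumption that N divides evenly by the number of ranks are illustrative choices, not part of the question's code.

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

#define N 4   /* matrix dimension; assumed here to divide evenly by the number of ranks */

int main(int argc, char *argv[])
{
    int rank, size, i, row, col, k;
    double *A = NULL, *B, *C = NULL, *localA, *localC;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int rowsPerRank = N / size;
    B = (double *)malloc(N*N*sizeof(double));
    localA = (double *)malloc(rowsPerRank*N*sizeof(double));
    localC = (double *)malloc(rowsPerRank*N*sizeof(double));

    //only the root holds the full input and output matrices
    if (rank == 0) {
        A = (double *)malloc(N*N*sizeof(double));
        C = (double *)malloc(N*N*sizeof(double));
        for (i = 0; i < N*N; i++) { A[i] = i + 1; B[i] = 1.0; }
    }

    //every rank calls the same collectives in the same order
    MPI_Scatter(A, rowsPerRank*N, MPI_DOUBLE,
                localA, rowsPerRank*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(B, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    //each rank multiplies only its own block of rows
    for (row = 0; row < rowsPerRank; row++)
        for (col = 0; col < N; col++) {
            double val = 0.0;
            for (k = 0; k < N; k++)
                val += localA[row*N+k] * B[k*N+col];
            localC[row*N+col] = val;
        }

    MPI_Gather(localC, rowsPerRank*N, MPI_DOUBLE,
               C, rowsPerRank*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        for (row = 0; row < N; row++) {
            for (col = 0; col < N; col++)
                printf("%.2f \t", C[row*N+col]);
            printf("\n");
        }
        free(A); free(C);
    }
    free(B); free(localA); free(localC);
    MPI_Finalize();
    return 0;
}

As an aside, note that lines = (int)ceil(n1/size) in the original code truncates before ceil is ever applied, because n1/size is integer division between two ints; lines = (n1 + size - 1) / size rounds up as intended.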