MPI hangs on MPI_Send for large messages

Date: 2013-04-05 12:25:46

Tags: mpi

I have a simple program in C++/MPI (MPICH2) that sends an array of doubles. If the size of the array exceeds 9000, my program hangs during the call to MPI_Send. If the array is smaller than 9000 (8000, for example), the program works fine. The source code is below:

main.cpp

#include <mpi.h>
#include <iostream>
#include <cstdlib>

using namespace std;

// Cube is declared in the second listing below.
Cube** cubes;
int cubesLen;

double* InitVector(int N) {
   double* x = new double[N];
   for (int i = 0; i < N; i++) {
       x[i] = i + 1;
   }
   return x;
}

void CreateCubes() {
    cubes = new Cube*[12];
    cubesLen = 12;
    for (int i = 0; i < 12; i++) {
       cubes[i] = new Cube(9000);
    }
}

void SendSimpleData(int size, int rank) {
    Cube* cube = cubes[0];
    int nodeDest = rank + 1;
    if (nodeDest > size - 1) {
        nodeDest = 1;
    }

    double* coefImOut = (double *) malloc(sizeof (double)*cube->coefficentsImLength);
    cout << "Before send" << endl;
    int count = cube->coefficentsImLength;
    MPI_Send(coefImOut, count, MPI_DOUBLE, nodeDest, 0, MPI_COMM_WORLD);
    cout << "After send" << endl;
    free(coefImOut);

    MPI_Status status;
    double *coefIm = (double *) malloc(sizeof(double)*count);

    int nodeFrom = rank - 1;
    if (nodeFrom < 1) {
        nodeFrom = size - 1;
    }

    MPI_Recv(coefIm, count, MPI_DOUBLE, nodeFrom, 0, MPI_COMM_WORLD, &status);
    free(coefIm);
}

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    CreateCubes();

    if (rank != root) {
         SendSimpleData(size, rank);
    }

    MPI_Finalize();
    return 0;
}

class Cube

class Cube {
public:
    Cube(int size);
    Cube(const Cube& orig);
    virtual ~Cube();

    int Id() { return id; } 
    void Id(int id) { this->id = id; }

    int coefficentsImLength;
    double* coefficentsIm;

private:
    int id;
};

Cube::Cube(int size) {
    this->coefficentsImLength = size;

    coefficentsIm = new double[size];
    for (int i = 0; i < size; i++) {
        coefficentsIm[i] = 1;
    }
}

Cube::Cube(const Cube& orig) {
}

Cube::~Cube() {
    delete[] coefficentsIm;
}

The program is run on 4 processes:

mpiexec -n 4 ./myApp1

Any ideas?

1 Answer:

Answer 0 (score: 15):

The details of the Cube class are not relevant here; consider this simpler version:

#include <mpi.h>
#include <iostream>
#include <cstdlib>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    int datasize = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != root) {
        int nodeDest = rank + 1;
        if (nodeDest > size - 1) {
            nodeDest = 1;
        }
        int nodeFrom = rank - 1;
        if (nodeFrom < 1) {
            nodeFrom = size - 1;
        }

        MPI_Status status;
        int *data = new int[datasize];
        for (int i=0; i<datasize; i++)
            data[i] = rank;

        cout << "Before send" << endl;
        MPI_Send(data, datasize, MPI_INT, nodeDest, 0, MPI_COMM_WORLD);
        cout << "After send" << endl;
        MPI_Recv(data, datasize, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &status);

        delete [] data;

    }

    MPI_Finalize();
    return 0;
}

Running this gives:

$ mpirun -np 4 ./send 1
Before send
After send
Before send
After send
Before send
After send
$ mpirun -np 4 ./send 65000
Before send
Before send
Before send

If you look at the message queue window in DDT, you will see that every rank is sending and no one is receiving: a classic deadlock.

MPI_Send's semantics are, admittedly, not well defined, but it is allowed to block until "the receive has been posted". MPI_Ssend is clearer in this regard: it always blocks until the matching receive has been posted. Details about the different send modes can be found here.

The reason it works for smaller messages is an accident of the implementation: for messages that are "small enough" (in your case, apparently under 64 kB), your MPI_Send implementation uses an "eager send" protocol and does not block on the receive. For larger messages, where it is not necessarily safe to keep a buffered copy of the message around in memory, the Send waits for the matching receive (which it is always allowed to do anyway).
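One way to convince yourself that the small-message case only works because of the eager protocol is to swap MPI_Send for MPI_Ssend in the test program above. This experiment is my addition, not part of the original answer; since a synchronous send always waits for the matching receive to be posted, the ring should hang even for a single int:

#include <mpi.h>
#include <iostream>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != 0) {
        // Same ring of ranks 1..size-1 as in the answer's test program.
        int nodeDest = (rank == size - 1) ? 1 : rank + 1;
        int nodeFrom = (rank == 1) ? size - 1 : rank - 1;

        int payload = rank;   // one int: far below any plausible eager threshold
        MPI_Status status;

        cout << "Before ssend" << endl;
        // MPI_Ssend does not complete until the receive is posted, so every
        // rank blocks here and the deadlock shows up regardless of message size.
        MPI_Ssend(&payload, 1, MPI_INT, nodeDest, 0, MPI_COMM_WORLD);
        cout << "After ssend" << endl;
        MPI_Recv(&payload, 1, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &status);
    }

    MPI_Finalize();
    return 0;
}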

There are a few things you could do to avoid this; all you have to do is make sure that not everyone calls a blocking MPI_Send at the same time. You could (say) have even-ranked processes send first and then receive, and odd-ranked processes receive first and then send. You could use non-blocking communication (Isend/Irecv/Waitall); a sketch of that variant is given at the end of this answer. But the simplest solution in this case is to use MPI_Sendrecv, which is a blocking (send + receive) rather than a blocking send plus a blocking receive: the send and the receive execute concurrently, and the function blocks until both are complete. So this works:

#include <mpi.h>
#include <cstdlib>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    int datasize = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != root) {
        int nodeDest = rank + 1;
        if (nodeDest > size - 1) {
            nodeDest = 1;
        }
        int nodeFrom = rank - 1;
        if (nodeFrom < 1) {
            nodeFrom = size - 1;
        }

        MPI_Status status;
        int *outdata = new int[datasize];
        int *indata  = new int[datasize];
        for (int i=0; i<datasize; i++)
            outdata[i] = rank;

        cout << "Before sendrecv" << endl;
        MPI_Sendrecv(outdata, datasize, MPI_INT, nodeDest, 0,
                     indata, datasize, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &status);
        cout << "After sendrecv" << endl;

        delete [] outdata;
        delete [] indata;
    }

    MPI_Finalize();
    return 0;
}

Running it gives:

$ mpirun -np 4 ./send 65000
Before sendrecv
Before sendrecv
Before sendrecv
After sendrecv
After sendrecv
After sendrecv
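
For completeness, here is a hedged sketch of the non-blocking alternative mentioned above (Isend/Irecv/Waitall). It is not from the original answer, but it uses the same ring pattern; because both operations are only posted and then waited on together, no rank blocks before its receive exists, so the 65000-element case completes as well:

#include <mpi.h>
#include <iostream>
#include <cstdlib>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;

    int datasize = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != 0) {
        int nodeDest = (rank == size - 1) ? 1 : rank + 1;
        int nodeFrom = (rank == 1) ? size - 1 : rank - 1;

        int *outdata = new int[datasize];
        int *indata  = new int[datasize];
        for (int i = 0; i < datasize; i++)
            outdata[i] = rank;

        // Post the send and the receive without blocking, then wait for both.
        MPI_Request reqs[2];
        cout << "Before isend/irecv" << endl;
        MPI_Isend(outdata, datasize, MPI_INT, nodeDest, 0, MPI_COMM_WORLD, &reqs[0]);
        MPI_Irecv(indata,  datasize, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &reqs[1]);
        MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
        cout << "After waitall" << endl;

        delete [] outdata;
        delete [] indata;
    }

    MPI_Finalize();
    return 0;
}

The even-ranks-send-first ordering mentioned above would also avoid the deadlock; the non-blocking version is sketched here simply because it generalizes most easily.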