A simple MPI program

Asked: 2011-08-15 19:30:58

Tags: mpi parallel-processing sendmessage

I would appreciate it if someone could tell me why this simple MPI send-and-receive code does not run on two processors when n = 40 (line 20), but works for n <= 30. In other words, MPI deadlocks once the message size exceeds a certain threshold (which is not that large, roughly a 1-D array of size 8100).

#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
#include "iostream"
#include "math.h"
using namespace std;

int main(int argc, char *argv[])
{
    int processor_count, processor_rank;
    double *buff_H, *buff_send_H;
    int N_pa_prim1, l, n, N_p0;
    MPI_Status status;

    MPI_Init (&argc, &argv);
    MPI_Comm_size (MPI_COMM_WORLD, &processor_count);
    MPI_Comm_rank (MPI_COMM_WORLD, &processor_rank);

    N_pa_prim1=14; l=7; n=40; N_p0=7;
    buff_H = new double [n*n*N_p0+1];          //Receive buffer allocation

    buff_send_H = new double [n*n*N_p0+1];     //Send buffer allocation

    for (int j = 0; j < n*n*N_p0+1; j++)
        buff_send_H[j] = 1e-8*rand();

    if (processor_rank == 0)
        MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD);
    else if (processor_rank == 1)
        MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD);

    MPI_Recv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &status);
    cout << "Received successfully by " << processor_rank << endl;

    MPI_Finalize();
    return 0;
}

2 Answers:

Answer 0 (score: 4)

The deadlock is correct behavior; your code contains a deadlock.

The MPI specification allows MPI_Send to behave like MPI_Ssend - that is, to block. A blocking communication primitive does not return until the communication is "complete" in some sense, which (in the case of a blocking send) may mean that the matching receive has started.

Your code looks like this:

If Processor 0:
   Send to processor 1

If Processor 1:
   Send to processor 0

Receive

That is - the receive does not start until the sends have completed. You are sending, but the sends never return, because no one is receiving! (The fact that this works for small messages is an implementation artifact - most MPI implementations use a so-called "eager protocol" for messages that are "small enough"; but you generally cannot count on that.)
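As a diagnostic (a suggestion added here, not part of the original answer), you can replace MPI_Send with MPI_Ssend, which never completes before the matching receive has been posted: a correct program keeps working, while the send-then-receive ordering above hangs even for a single double. A minimal sketch:

#include <mpi.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    double send_val = 42.0, recv_val = 0.0;   // one double: well under any eager limit
    MPI_Status status;

    if (rank == 0 || rank == 1) {
        int other = 1 - rank;
        // Both ranks send synchronously before receiving, so neither MPI_Ssend
        // can complete -- the same deadlock that MPI_Send only exposes above
        // the eager threshold.
        MPI_Ssend(&send_val, 1, MPI_DOUBLE, other, 163, MPI_COMM_WORLD);
        MPI_Recv(&recv_val, 1, MPI_DOUBLE, other, 163, MPI_COMM_WORLD, &status);
    }

    MPI_Finalize();
    return 0;
}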

Note that there are other logic errors here as well - this program will also deadlock with more than 2 processors, because processors with rank >= 2 will wait for a message that never arrives.

You can fix your program by alternating the sends and receives by rank:

if (processor_rank == 0) {
    MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD);  
    MPI_Recv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &status);
} else if (processor_rank == 1) {
    MPI_Recv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &status);
    MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD);
}

Or use MPI_Sendrecv (a blocking (send + receive), rather than a blocking send plus a blocking receive):

int sendto;
if (processor_rank == 0)
    sendto = 1;
else if (processor_rank == 1)
    sendto = 0;

if (processor_rank == 0 || processor_rank == 1) {
    MPI_Sendrecv(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, sendto, 163,
                 buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163,
                 MPI_COMM_WORLD, &status);
}

Or use non-blocking sends and receives:

MPI_Request reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};  // null requests let ranks > 1 pass MPI_Waitall safely
MPI_Status  statuses[2];
if (processor_rank == 0) {
    MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD, &reqs[0]);
} else if (processor_rank == 1) {
    MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD, &reqs[0]);
}

if (processor_rank == 0 || processor_rank == 1)
    MPI_Irecv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &reqs[1]);

MPI_Waitall(2, reqs, statuses);

Answer 1 (score: 0)

Thanks for your help, Jonathan. Here I went with the third solution and wrote similar code, except that I added a "for" loop to send a number of messages. This time there is no deadlock, but the processors keep receiving only the last message. (Since the messages are long, I only print their last element to check consistency.)

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
using namespace std;

int main(int argc, char *argv[])
{
    int processor_count, processor_rank;

    // Initialize MPI
    MPI_Init (&argc, &argv);
    MPI_Comm_size (MPI_COMM_WORLD, &processor_count);
    MPI_Comm_rank (MPI_COMM_WORLD, &processor_rank);

    double **buff_H, *buff_send_H;
    int N_pa_prim1, l, n, N_p0, count, temp;
    N_pa_prim1=5; l=7; n=50; N_p0=7;

    MPI_Request reqs[2*N_pa_prim1];        // one request per send plus one per receive
    MPI_Status  statuses[2*N_pa_prim1];
    buff_H = new double *[N_pa_prim1];                 // Receive buffer allocation
    for (int i = 0; i < N_pa_prim1; i++)
        buff_H[i] = new double [n*n*N_p0+1];
    buff_send_H = new double [n*n*N_p0+1];             // Send buffer allocation

    if (processor_rank == 0) {
        for (int i = 0; i < N_pa_prim1; i++){
            for (int j = 0; j < n*n*N_p0+1; j++)
                buff_send_H[j] = 2.0325e-8*rand();

            cout << processor_rank << "\t" << buff_send_H[n*n*N_p0] << "\t" << "Send" << "\t" << endl;
            MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD, &reqs[i]);
        }
    }
    else if (processor_rank == 1) {
        for (int i = 0; i < N_pa_prim1; i++){
            for (int j = 0; j < n*n*N_p0+1; j++)
                buff_send_H[j] = 3.5871e-8*rand();

            cout << processor_rank << "\t" << buff_send_H[n*n*N_p0] << "\t" << "Send" << "\t" << endl;
            MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD, &reqs[i]);
        }
    }

    for (int i = 0; i < N_pa_prim1; i++)
        MPI_Irecv(buff_H[i], n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &reqs[N_pa_prim1+i]);

    MPI_Waitall(2*N_pa_prim1, reqs, statuses);

    for (int i = 0; i < N_pa_prim1; i++)
        cout << processor_rank << "\t" << buff_H[i][n*n*N_p0] << "\t" << "Receive" << endl;

    MPI_Finalize();
    return 0;
}
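A note added here (not part of the original exchange): the same buff_send_H array is passed to every MPI_Isend and then overwritten on the next loop iteration, but MPI requires that a send buffer be left untouched until its request has completed, so all in-flight messages can end up carrying the last iteration's contents. Under that assumption, here is a sketch of the same loop with one send buffer per outstanding message (the send_bufs name is illustrative; it reuses the declarations from the program above and, like the original, assumes a two-rank run):

// Replaces the send/receive section above; assumes n, N_p0, N_pa_prim1,
// processor_rank, and buff_H are declared as in the original program.
MPI_Request reqs[2*N_pa_prim1];
MPI_Status  statuses[2*N_pa_prim1];
double **send_bufs = new double *[N_pa_prim1];   // one buffer per outstanding Isend

int other = (processor_rank == 0) ? 1 : 0;       // two-rank exchange, as above
for (int i = 0; i < N_pa_prim1; i++) {
    send_bufs[i] = new double [n*n*N_p0+1];
    for (int j = 0; j < n*n*N_p0+1; j++)
        send_bufs[i][j] = 1e-8*rand();

    // Each message keeps its own buffer, which stays untouched until
    // MPI_Waitall reports the corresponding request complete.
    MPI_Isend(send_bufs[i], n*n*N_p0+1, MPI_DOUBLE, other, 163, MPI_COMM_WORLD, &reqs[i]);
    MPI_Irecv(buff_H[i],    n*n*N_p0+1, MPI_DOUBLE, other, 163, MPI_COMM_WORLD, &reqs[N_pa_prim1+i]);
}

MPI_Waitall(2*N_pa_prim1, reqs, statuses);

for (int i = 0; i < N_pa_prim1; i++)
    delete [] send_bufs[i];
delete [] send_bufs;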