Suppose I use PMPI to write a wrapper for MPI_Wait, which waits for an MPI send or receive to complete.
/* ================== C Wrappers for MPI_Wait ================== */
_EXTERN_C_ int PMPI_Wait(MPI_Request *request, MPI_Status *status);
_EXTERN_C_ int MPI_Wait(MPI_Request *request, MPI_Status *status) {
int _wrap_py_return_val = 0;
_wrap_py_return_val = PMPI_Wait(request, status);
return _wrap_py_return_val;
}
The wrapper is generated by this.
What I want to do is:
/* ================== C Wrappers for MPI_Wait ================== */
_EXTERN_C_ int PMPI_Wait(MPI_Request *request, MPI_Status *status);
_EXTERN_C_ int MPI_Wait(MPI_Request *request, MPI_Status *status) {
int _wrap_py_return_val = 0;
if(is a send request)
printf("send\n");
else // is a recv request
printf("recv\n");
_wrap_py_return_val = PMPI_Wait(request, status);
return _wrap_py_return_val;
}
How can I distinguish a send from a recv in Open MPI? Assume I am using Open MPI 3.0.0.
Answer 0 (score: 0)
I think that since MPI_Request is opaque (I believe in several distributions it is just an int), your only chance is to monitor the MPI_Request objects you create yourself.
Here is a proposal (it is C++ oriented, because that is the way I like it):
#include <mpi.h>
#include <cstring> // for memcpy
#include <string>
#include <iostream>
#include <map>
// Gives the opaque MPI_Request an ordering so it can be used as a std::map key
struct RequestConverter
{
char data[sizeof(MPI_Request)];
RequestConverter(MPI_Request * mpi_request)
{
memcpy(data, mpi_request, sizeof(MPI_Request));
}
RequestConverter()
{ }
RequestConverter(const RequestConverter & req)
{
memcpy(data, req.data, sizeof(MPI_Request));
}
RequestConverter & operator=(const RequestConverter & req)
{
memcpy(data, req.data, sizeof(MPI_Request));
return *this;
}
bool operator<(const RequestConverter & request) const
{
for(size_t i=0; i<sizeof(MPI_Request); i++)
{
if(data[i]!=request.data[i])
{
return data[i]<request.data[i];
}
}
return false;
}
};
// Global map storing, for each created MPI_Request, the operation that created it
std::map<RequestConverter, std::string> request_holder;
extern "C"
{
// Note: since MPI-3 the send buffer is const-qualified, so the wrapper must match mpi.h
int MPI_Isend(
const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request
)
{
int ier = PMPI_Isend(buf, count, datatype, dest, tag, comm, request);
request_holder[RequestConverter(request)]="sending";
return ier;
}
int MPI_Irecv(
void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request
)
{
int ier = PMPI_Irecv(buf, count, datatype, source, tag, comm, request);
request_holder[RequestConverter(request)]="receiving";
return ier;
}
int MPI_Wait(
MPI_Request *request,
MPI_Status * status
)
{
int myid;
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
std::cout << "waiting("<<myid<<")-> "<<request_holder[RequestConverter(request)]<<std::endl;
request_holder.erase(RequestConverter(request));
return PMPI_Wait(request, status);
}
}
RequestConverter is just a way to make the opaque MPI_Request usable as a std::map key. MPI_Isend and MPI_Irecv store the request in the global map, and MPI_Wait looks the request up and removes it from the std::map.
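Note that the same bookkeeping would have to be applied to every call that can complete a request (MPI_Test, MPI_Waitall, MPI_Waitany, ...), otherwise entries linger in the map or lookups come back empty. As a minimal sketch of the pattern, not part of the original answer, an MPI_Test wrapper placed inside the same extern "C" block could look like this:
int MPI_Test(MPI_Request *request, int *flag, MPI_Status *status)
{
    // Copy the request bytes before completing it: PMPI_Test resets the
    // handle to MPI_REQUEST_NULL once the operation has finished.
    RequestConverter key(request);
    int ier = PMPI_Test(request, flag, status);
    if(*flag)
    {
        int myid;
        MPI_Comm_rank(MPI_COMM_WORLD, &myid);
        std::cout << "tested("<<myid<<")-> " << request_holder[key] << std::endl;
        request_holder.erase(key);
    }
    return ier;
}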
A simple test gives:
int main(int argc, char ** argv)
{
int myid, numprocs;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
int i=123456789;
MPI_Request request;
MPI_Status status;
if(myid==0)
{
MPI_Isend(&i, 1, MPI_INT, 1, 44444, MPI_COMM_WORLD, &request);
MPI_Wait(&request, &status);
std::cout << myid <<' '<<i << std::endl;
}
else if(myid==1)
{
MPI_Irecv(&i, 1, MPI_INT, 0, 44444, MPI_COMM_WORLD, &request);
MPI_Wait(&request, &status);
std::cout << myid <<' '<<i << std::endl;
}
int * sb = new int[numprocs];
for(int i=0; i<numprocs; i++){sb[i]=(myid+1)*(i+1);}
int * rb = new int[numprocs];
MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD );
delete [] sb;
delete [] rb;
MPI_Finalize();
}
Output:
waiting(0)-> sending
0 123456789
waiting(1)-> receiving
1 123456789
That said, I only added the MPI_Alltoall test to check whether only the PMPI functions get called, so there is no miracle there.
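One more caveat the answer does not touch: request_holder is a plain global std::map, so if the program initializes MPI with MPI_THREAD_MULTIPLE and posts requests from several threads, the wrappers themselves have to serialize access to the map. A minimal sketch (assuming C++11; the mutex name is mine) showing how the MPI_Isend wrapper could guard the map:
#include <mutex>

std::mutex request_holder_mutex; // protects request_holder

int MPI_Isend(
    const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request
)
{
    int ier = PMPI_Isend(buf, count, datatype, dest, tag, comm, request);
    {
        // Guard only the shared map; the MPI call itself stays outside the lock.
        std::lock_guard<std::mutex> lock(request_holder_mutex);
        request_holder[RequestConverter(request)] = "sending";
    }
    return ier;
}
The same guard would go around the map accesses in MPI_Irecv and MPI_Wait.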