I have an array that stores arrays (call it sendbuff), and I want to send those inner arrays to the other tasks using MPI::Scatter.
sendbuff
    #####    ###############################
 p  # 0 # -> # -1 # -1 # -1 # -1 # -1 # -1 #   (values)
 o  #####    ###############################
 s  # 1 # -> # -1 # -1 # -1 # -1 # -1 # -1 #   (values)
    #####    ###############################
As you can see, sendbuff[0] holds an array of size 6 containing six values (all -1), and sendbuff[1] has the same contents. I want to send these arrays of -1s to the other tasks and store them in an array called recvbuff, which starts out filled with 0s:
recvbuff
#########################
# 0 # 0 # 0 # 0 # 0 # 0 #
#########################
I looked at answers here on SO and found a few, but they use MPI_Datatype, which I would like to avoid. To achieve this I wrote the following code, which does not work:
#include <iostream>
#include <cstdlib>
#include <mpi.h>
using namespace std;

int main(int argc, char *argv[]){
    // variable initialization
    int taskid, ntasks, buffsize, **sendbuff, *recvbuff;
    MPI::Init(argc, argv);
    taskid = MPI::COMM_WORLD.Get_rank();
    ntasks = MPI::COMM_WORLD.Get_size();
    buffsize = 6;

    // memory initialization
    recvbuff = new int[buffsize];
    sendbuff = new int*[ntasks];
    for(int i = 0; i < ntasks; i++){
        sendbuff[i] = new int[buffsize];
    }

    // array initialization
    for(int i = 0; i < buffsize; i++){
        recvbuff[i] = 0;
    }
    for(int i = 0; i < ntasks; i++){
        for(int j = 0; j < buffsize; j++){
            sendbuff[i][j] = -1;
        }
    }

    // communication
    MPI::COMM_WORLD.Scatter(sendbuff[0], buffsize, MPI::INT, recvbuff, buffsize,
                            MPI::INT, 0);

    // output
    for(int i = 0; i < buffsize; i++){
        cout << "Task" << taskid << " recvbuff[" << i << "] = " << recvbuff[i] << endl;
    }

    // cleaning
    for(int i = 0; i < ntasks; i++){
        delete[] sendbuff[i];
    }
    delete[] sendbuff;
    delete[] recvbuff;

    MPI::Finalize();
    return EXIT_SUCCESS;
}
After the scatter I expected recvbuff to be filled with -1 values, but instead I get a mix of -1s and garbage:
$ mpirun -np 3 a.out
Task0 recvbuff[0] = -1
Task0 recvbuff[1] = -1
Task0 recvbuff[2] = -1
Task0 recvbuff[3] = -1
Task0 recvbuff[4] = -1
Task0 recvbuff[5] = -1
Task1 recvbuff[0] = 33
Task1 recvbuff[1] = 0
Task1 recvbuff[2] = -1
Task1 recvbuff[3] = -1
Task1 recvbuff[4] = -1
Task1 recvbuff[5] = -1
Task2 recvbuff[0] = -1
Task2 recvbuff[1] = -1
Task2 recvbuff[2] = 33
Task2 recvbuff[3] = 0
Task2 recvbuff[4] = 1768975727
Task2 recvbuff[5] = 7496543
What am I doing wrong? Thanks in advance, Pedro.
Answer 0 (score: 1)
Scatter and gather are described in some detail in this answer. Scatter partitions the data and sends the pieces out to the other tasks, but the data has to be stored in contiguous memory; MPI_Scatter has no way of knowing that it needs to follow pointers, or how many there would be. With the way you are allocating sendbuff:
sendbuff = new int*[ntasks];
for(int i = 0; i < ntasks; i++){
    sendbuff[i] = new int[buffsize];
}
the different rows of sendbuff can end up scattered all over system memory. You are almost there if you allocate the data contiguously:
sendbuff = new int*[ntasks];
sendbuff[0] = new int[ntasks * 6];
for(int i = 1; i < ntasks; i++){
    sendbuff[i] = &(sendbuff[0][i*6]);
}
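For reference, here is what that fix looks like as a complete program. This is only a minimal sketch of the same idea (it is not the code from the question or from the answer), using a single std::vector as the contiguous send buffer so that row i of the conceptual 2D array is the slice starting at index i*buffsize:

// Minimal sketch of the contiguous-allocation fix as a complete program
// (illustrative only): one flat std::vector holds every row back to back.
#include <iostream>
#include <vector>
#include <mpi.h>

int main(int argc, char **argv)
{
    const int buffsize = 6;
    int taskid, ntasks;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    // one flat buffer of ntasks rows; only the root's contents actually matter
    std::vector<int> sendflat(ntasks * buffsize, -1);
    std::vector<int> recvbuff(buffsize, 0);

    // rank r receives elements [r*buffsize, (r+1)*buffsize) of sendflat
    MPI_Scatter(sendflat.data(), buffsize, MPI_INT,
                recvbuff.data(), buffsize, MPI_INT,
                0, MPI_COMM_WORLD);

    for (int i = 0; i < buffsize; i++)
        std::cout << "Task" << taskid << " recvbuff[" << i << "] = "
                  << recvbuff[i] << std::endl;

    MPI_Finalize();
    return 0;
}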
Now you should be able to scatter, but be aware that row 0 will go to rank 0; that is, Scatter sends a piece to every process in the communicator. If you only want to send to the non-rank-0 tasks, the simplest option is to keep a dummy row in sendbuff for rank 0 so that an ordinary scatter works:
#include <iostream>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size;
    const int nelem = 6;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int **sendbuff = new int*[size];
    int *recvbuff = new int[nelem];

    if (rank == 0) {
        // one contiguous block; sendbuff[i] just points into it
        sendbuff[0] = new int[nelem * size];
        for (int i=0; i<size; i++)
            sendbuff[i] = &(sendbuff[0][nelem*i]);

        // row i holds the value i-1; row 0 is the dummy data rank 0 keeps for itself
        for (int i=0; i<size; i++)
            for (int j=0; j<nelem; j++)
                sendbuff[i][j] = i-1;
    }

    // every rank (including 0) receives one row of nelem ints
    MPI_Scatter(sendbuff[0], nelem, MPI_INT, recvbuff, nelem, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank != 0) {
        std::cout << "Scatter: [ " << rank << "]: ";
        for (int i=0; i<nelem; i++)
            std::cout << recvbuff[i] << " ";
        std::cout << std::endl;

        // the workers do their "work": square the values
        for (int i=0; i<nelem; i++)
            recvbuff[i] *= recvbuff[i];
    }

    // collect the results back into the contiguous send buffer on rank 0
    MPI_Gather(recvbuff, nelem, MPI_INT, sendbuff[0], nelem, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        for (int j=1; j<size; j++) {
            std::cout << "Gather: [ " << j << "]: ";
            for (int i=0; i<nelem; i++)
                std::cout << sendbuff[j][i] << " ";
            std::cout << std::endl;
        }
    }

    delete [] recvbuff;
    if (rank == 0)
        delete [] sendbuff[0];
    delete [] sendbuff;

    MPI_Finalize();
}
Note that we scatter the data, the workers square the numbers, and the master gathers the results back up. Compiling and running gives:
$ mpic++ -o intercomm intercomm.cxx
$ mpirun -np 4 ./intercomm
Scatter: [ 2]: 1 1 1 1 1 1
Scatter: [ 1]: 0 0 0 0 0 0
Scatter: [ 3]: 2 2 2 2 2 2
Gather: [ 1]: 0 0 0 0 0 0
Gather: [ 2]: 1 1 1 1 1 1
Gather: [ 3]: 4 4 4 4 4 4
If you would rather avoid having the dummy data on rank 0 (it might be large), you can split the tasks into two groups, master and worker tasks, and set up an intercommunicator that allows collective communication between them. Here is a simple program that does exactly that:
#include <iostream>
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm localComm;   /* intra-communicator of local sub-group */
    MPI_Comm interComm;   /* inter-communicator */
    int masterworker;
    int rank, size;
    const int nelem = 6;
    int rootrank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // split into a master group (rank 0) and a worker group (everyone else)
    masterworker = (rank == 0 ? 0 : 1);
    MPI_Comm_split(MPI_COMM_WORLD, masterworker, rank, &localComm);

    if (masterworker == 0)
    {
        // master group: the remote leader is rank 1 of MPI_COMM_WORLD
        MPI_Intercomm_create( localComm, 0, MPI_COMM_WORLD, 1, 1, &interComm);
        rootrank = ( rank == 0 ? MPI_ROOT : MPI_PROC_NULL );
    }
    else {
        // worker group: the remote leader is rank 0 of MPI_COMM_WORLD
        MPI_Intercomm_create( localComm, 0, MPI_COMM_WORLD, 0, 1, &interComm);
        rootrank = 0;
    }

    int **sendbuff = new int*[size-1];
    int *recvbuff = new int[nelem];

    if (rank == 0) {
        // one contiguous block with one row per worker; no dummy row needed
        sendbuff[0] = new int[nelem * (size-1)];
        for (int i=1; i<size-1; i++)
            sendbuff[i] = &(sendbuff[0][nelem*i]);

        for (int i=0; i<size-1; i++)
            for (int j=0; j<nelem; j++)
                sendbuff[i][j] = i;
    }

    // collective over the inter-communicator: the master's rows go only to the workers
    MPI_Scatter(sendbuff[0], nelem, MPI_INT, recvbuff, nelem, MPI_INT, rootrank, interComm);

    if (masterworker == 1) {
        std::cout << "Scatter: [ " << rank << "]: ";
        for (int i=0; i<nelem; i++)
            std::cout << recvbuff[i] << " ";
        std::cout << std::endl;

        // the workers square their values
        for (int i=0; i<nelem; i++)
            recvbuff[i] *= recvbuff[i];
    }

    MPI_Gather(recvbuff, nelem, MPI_INT, sendbuff[0], nelem, MPI_INT, rootrank, interComm);

    if (masterworker == 0) {
        for (int j=0; j<size-1; j++) {
            std::cout << "Gather: [ " << j << "]: ";
            for (int i=0; i<nelem; i++)
                std::cout << sendbuff[j][i] << " ";
            std::cout << std::endl;
        }
    }

    MPI_Comm_free(&interComm);
    MPI_Comm_free(&localComm);

    delete [] recvbuff;
    if (rank == 0)
        delete [] sendbuff[0];
    delete [] sendbuff;

    MPI_Finalize();
}
Again, compiling and running gives:
$ mpic++ -o intercomm intercomm.cxx
$ mpirun -np 4 ./intercomm
Scatter: [ 1]: 0 0 0 0 0 0
Scatter: [ 2]: 1 1 1 1 1 1
Scatter: [ 3]: 2 2 2 2 2 2
Gather: [ 0]: 0 0 0 0 0 0
Gather: [ 1]: 1 1 1 1 1 1
Gather: [ 2]: 4 4 4 4 4 4
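A third option, not shown in the answer above and offered here only as an illustrative sketch, is MPI_Scatterv: the root advertises a per-rank count, including a count of 0 for itself, so you need neither the dummy row nor the intercommunicator. The buffer layout and values below just mirror the earlier example (row i-1 goes to rank i):

// Sketch only (assumptions: same nelem=6 rows and i-1 values as above).
// MPI_Scatterv with sendcounts[0] = 0 skips the root entirely.
#include <iostream>
#include <vector>
#include <mpi.h>

int main(int argc, char **argv)
{
    const int nelem = 6;
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    std::vector<int> sendflat;                       // contiguous worker rows, root only
    std::vector<int> sendcounts(size), displs(size);

    if (rank == 0) {
        sendflat.assign(nelem * (size - 1), 0);
        for (int i = 1; i < size; i++)
            for (int j = 0; j < nelem; j++)
                sendflat[(i - 1) * nelem + j] = i - 1;   // same values as before
    }
    // rank 0 gets nothing; rank i > 0 gets the (i-1)-th row
    for (int i = 0; i < size; i++) {
        sendcounts[i] = (i == 0 ? 0 : nelem);
        displs[i]     = (i == 0 ? 0 : (i - 1) * nelem);
    }

    std::vector<int> recvbuff(nelem, 0);
    int recvcount = (rank == 0 ? 0 : nelem);   // must match what the root sends to us

    MPI_Scatterv(sendflat.data(), sendcounts.data(), displs.data(), MPI_INT,
                 recvbuff.data(), recvcount, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank != 0) {
        std::cout << "Scatterv: [ " << rank << "]: ";
        for (int i = 0; i < nelem; i++)
            std::cout << recvbuff[i] << " ";
        std::cout << std::endl;
    }

    MPI_Finalize();
}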