I have a situation like the code below: worker processes each handle a subset of the data and must send an unknown number of values back to the master. Is it possible to have the master wait for and receive an unknown number of sends from the workers? Is there a way to do this with one-sided communication? Thanks in advance!
#include <errno.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/*
sample run/output:
$mpirun -np 5 practice.exe
@[1]: i=30
@[2]: i=0
@[2]: i=75
@[4]: i=40
@[4]: i=55
@[3]: i=85
@[3]: i=65
*/
int main(int argc, char *argv[])
{
    int i, rank, size, np, nw, num;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    nw = np - 1;

    srand(time(NULL)*rank);

    if (rank > 0)
    {
        for (i=(rank-1); i<(nw*10); i+=nw)
        {
            num = rand() % 100;
            if (num % 5 == 0)
            {
                printf("@[%d]: i=%d\n", rank, num);
                // SEND num TO MASTER
            }
        }
    }
    else
    {
        // RECEIVE num FROM WORKER
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
Answer (score: 1)
Sure, there are lots of ways to do this, but it doesn't really have anything to do with asynchronous communication. You can do it via one-sided communication, but even that has its own problems (you still have to be able to guess an upper bound on how much total memory the data will need).
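For reference only, here is a minimal sketch (my illustration, not part of the original answer) of what that one-sided route might look like. It assumes a fixed per-worker cap MAXPERWORKER, which is exactly the kind of guess mentioned above: rank 0 exposes one slot per worker in an MPI window, and each worker writes its count plus its data into its own slot with MPI_Put between two MPI_Win_fence calls.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MAXPERWORKER 10   /* assumed upper bound on values per worker */

int main(int argc, char *argv[])
{
    int i, rank, np, nw, num;
    int mynums[MAXPERWORKER + 1];   /* slot layout: [count, data...] */
    int *board = NULL;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    nw = np - 1;
    srand(time(NULL)*rank);

    /* rank 0 exposes one slot of MAXPERWORKER+1 ints per worker */
    if (rank == 0)
        MPI_Alloc_mem(nw*(MAXPERWORKER+1)*sizeof(int), MPI_INFO_NULL, &board);
    MPI_Win_create(board, rank == 0 ? nw*(MAXPERWORKER+1)*sizeof(int) : 0,
                   sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_fence(0, win);
    if (rank > 0) {
        int count = 0;
        for (i=(rank-1); i<(nw*10); i+=nw) {
            num = rand() % 100;
            if (num % 3 == 0 && count < MAXPERWORKER) {
                mynums[1 + count] = num;
                count++;
            }
        }
        mynums[0] = count;
        /* write count + data into this worker's slot on rank 0 */
        MPI_Put(mynums, count + 1, MPI_INT, 0,
                (MPI_Aint)(rank-1)*(MAXPERWORKER+1), count + 1, MPI_INT, win);
    }
    MPI_Win_fence(0, win);

    if (rank == 0) {
        for (i=0; i<nw; i++) {
            int *slot = &board[i*(MAXPERWORKER+1)];
            int j;
            printf("From [%2d]:", i+1);
            for (j=0; j<slot[0]; j++)
                printf("%3d ", slot[1+j]);
            printf("\n");
        }
    }

    MPI_Win_free(&win);
    if (rank == 0) MPI_Free_mem(board);
    MPI_Finalize();
    return EXIT_SUCCESS;
}

With that caveat out of the way, the plain point-to-point approaches below are usually simpler.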
One way to do it is simply to figure out how much data you have, send that count to the master first so it knows how many messages to expect, and then send the data one item at a time:
#include <errno.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MAXPERWORKER     10
#define TAG_NUM_INCOMING  1
#define TAG_DATA          2

int main(int argc, char *argv[])
{
    int i, rank, size, np, nw, num;
    int mynums[MAXPERWORKER], numcount, total;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    nw = np - 1;

    srand(time(NULL)*rank);

    if (rank > 0)
    {
        numcount = 0;
        total    = 0;
        for (i=(rank-1); i<(nw*10); i+=nw)
        {
            num = rand() % 100;
            if (num % 3 == 0)
            {
                printf("@[%d]: i=%d\n", rank, num);
                mynums[numcount] = num;
                numcount++;
                total += num;
            }
        }
        /* of course, in this case we could just
         * do this in one message, but..
         */
        MPI_Send(&numcount, 1, MPI_INT, 0, TAG_NUM_INCOMING, MPI_COMM_WORLD);
        for (i=0; i<numcount; i++)
            MPI_Send(&(mynums[i]), 1, MPI_INT, 0, TAG_DATA, MPI_COMM_WORLD);

        printf("@[%d]: Total of all nums is %d\n", rank, total);
    }
    else
    {
        int *totals = malloc(sizeof(int)*nw);
        int *counts = malloc(sizeof(int)*nw);
        int *sofar  = malloc(sizeof(int)*nw);
        int **data  = malloc(sizeof(int *)*nw);
        int rcv;
        int totalcounts = 0;   /* must be initialized before accumulating */
        int j;
        int workernum;
        MPI_Status status;

        for (i=0; i<nw; i++) {
            sofar[i]  = 0;
            totals[i] = 0;
        }

        /* get number of incoming messages */
        for (i=0; i<nw; i++) {
            MPI_Recv(&rcv, 1, MPI_INT, MPI_ANY_SOURCE, TAG_NUM_INCOMING, MPI_COMM_WORLD, &status);

            workernum = status.MPI_SOURCE-1;
            counts[workernum] = rcv;
            totalcounts += rcv;
            data[workernum] = malloc(sizeof(int)*rcv);
        }

        /* get real data */
        for (i=0; i<totalcounts; i++) {
            MPI_Recv(&rcv, 1, MPI_INT, MPI_ANY_SOURCE, TAG_DATA, MPI_COMM_WORLD, &status);
            workernum = status.MPI_SOURCE-1;
            data[ workernum ][ sofar[workernum]++ ] = rcv;
            totals[ workernum ] += rcv;
        }

        /* print results */
        for (i=0; i<nw; i++) {
            printf("From [%2d]:", i+1);
            for (j=0; j<counts[i]; j++)
                printf("%3d ", data[i][j]);
            printf("| %3d\n", totals[i]);
        }

        for (i=0; i<nw; i++)
            free(data[i]);
        free(data);
        free(totals);
        free(counts);
        free(sofar);
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
Running this on 4 processes, I get:
$ mpirun -np 4 ./masterworker1
@[1]: i=39
@[1]: i=81
@[3]: i=9
@[3]: i=45
@[3]: i=0
@[3]: i=57
@[3]: Total of all nums is 111
@[1]: Total of all nums is 120
From [ 1]: 39 81 | 120
From [ 2]: 24 6 39 | 69
From [ 3]: 9 45 0 57 | 111
@[2]: i=24
@[2]: i=6
@[2]: i=39
@[2]: Total of all nums is 69
However, that may not be feasible -- you probably don't want to buffer all of your data like this (and if you could, you could just send it all in one message).
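As an aside (my addition, not part of the original answer): if buffering per worker is acceptable, the standard idiom for receiving one variable-length message per worker is to MPI_Probe for it, query its length with MPI_Get_count, and only then MPI_Recv into a buffer of exactly that size. A rough sketch, reusing the mynums/numcount names from above:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MAXPERWORKER 10
#define TAG_DATA      2

int main(int argc, char *argv[])
{
    int i, rank, np, nw, num;
    int mynums[MAXPERWORKER], numcount = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    nw = np - 1;
    srand(time(NULL)*rank);

    if (rank > 0) {
        for (i=(rank-1); i<(nw*10); i+=nw) {
            num = rand() % 100;
            if (num % 3 == 0 && numcount < MAXPERWORKER)
                mynums[numcount++] = num;
        }
        /* one send per worker, whatever its length turned out to be */
        MPI_Send(mynums, numcount, MPI_INT, 0, TAG_DATA, MPI_COMM_WORLD);
    } else {
        MPI_Status status;
        for (i=0; i<nw; i++) {
            int count, j, *buf;
            /* probe first so we can size the receive buffer exactly */
            MPI_Probe(MPI_ANY_SOURCE, TAG_DATA, MPI_COMM_WORLD, &status);
            MPI_Get_count(&status, MPI_INT, &count);
            buf = malloc(sizeof(int)*count);
            MPI_Recv(buf, count, MPI_INT, status.MPI_SOURCE, TAG_DATA,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("From [%2d]:", status.MPI_SOURCE);
            for (j=0; j<count; j++)
                printf("%3d ", buf[j]);
            printf("\n");
            free(buf);
        }
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}

Note that this still assumes each worker sends exactly one message; when the number of sends per worker is itself unknown, the "done" sentinel approach below is the simpler fit.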
Another approach is to send the data and then, when you're finished sending, send a special "done" message; the master just keeps receiving until it has heard one of those "done" messages from each worker:
#include <errno.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MAXPERWORKER 10
#define TAG_DATA      2
#define TAG_DONE      1

int main(int argc, char *argv[])
{
    int i, rank, size, np, nw, num;
    int mynums[MAXPERWORKER], numcount, total;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    nw = np - 1;

    srand(time(NULL)*rank);

    if (rank > 0)
    {
        numcount = 0;
        total    = 0;
        for (i=(rank-1); i<(nw*10); i+=nw)
        {
            num = rand() % 100;
            if (num % 3 == 0)
            {
                printf("@[%d]: i=%d\n", rank, num);
                total += num;
                MPI_Send(&num, 1, MPI_INT, 0, TAG_DATA, MPI_COMM_WORLD);
            }
        }
        /* the payload here is ignored; the TAG_DONE tag is what signals completion */
        MPI_Send(&num, 1, MPI_INT, 0, TAG_DONE, MPI_COMM_WORLD);
        printf("@[%d]: Total of all nums is %d\n", rank, total);
    }
    else
    {
        int *totals = malloc(sizeof(int)*nw);
        int *counts = malloc(sizeof(int)*nw);
        int **data  = malloc(sizeof(int *)*nw);
        int rcv;
        int j;
        int workernum;
        int stillsending;
        MPI_Status status;

        for (i=0; i<nw; i++) {
            totals[i] = 0;
            counts[i] = 0;
            data[i]   = malloc(sizeof(int)*MAXPERWORKER);
        }
        stillsending = nw;

        /* get data */
        while (stillsending > 0) {
            MPI_Recv(&rcv, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

            workernum = status.MPI_SOURCE-1;
            if (status.MPI_TAG == TAG_DONE) {
                stillsending--;
            } else if (status.MPI_TAG == TAG_DATA) {
                data[workernum][counts[workernum]] = rcv;
                totals[workernum] += rcv;
                counts[workernum]++;
            }
        }

        /* print results */
        for (i=0; i<nw; i++) {
            printf("From [%2d]:", i+1);
            for (j=0; j<counts[i]; j++)
                printf("%3d ", data[i][j]);
            printf("| %3d\n", totals[i]);
        }

        for (i=0; i<nw; i++)
            free(data[i]);
        free(data);
        free(totals);
        free(counts);
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
Again with 4 tasks, I get:
$ mpirun -np 4 ./masterworker2
@[1]: i=63
@[1]: i=99
@[1]: i=60
@[1]: i=69
@[1]: i=21
@[1]: i=48
@[1]: i=24
@[1]: Total of all nums is 384
@[2]: i=39
@[2]: i=84
@[2]: i=63
@[2]: Total of all nums is 186
@[3]: i=3
@[3]: i=51
@[3]: i=36
@[3]: Total of all nums is 90
From [ 1]: 63 99 60 69 21 48 24 | 384
From [ 2]: 39 84 63 | 186
From [ 3]: 3 51 36 | 90
Note that in both of these cases I've relied on MAXPERWORKER-sized arrays to preallocate things; you don't really need that, though -- you could malloc an array and realloc it as needed, or use std::vector if you're willing to use C++.
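For what it's worth, here is a minimal sketch of that grow-as-you-go idea (my illustration, not part of the original answer): a tiny appendable int buffer that doubles its capacity with realloc, which the master could use in place of the fixed MAXPERWORKER-sized data[workernum] arrays.

#include <stdio.h>
#include <stdlib.h>

/* tiny growable int buffer: doubles its capacity as needed */
typedef struct {
    int *vals;
    int  len;
    int  cap;
} intbuf;

static void intbuf_push(intbuf *b, int v)
{
    if (b->len == b->cap) {
        int newcap = (b->cap == 0) ? 4 : 2*b->cap;
        int *p = realloc(b->vals, sizeof(int)*newcap);
        if (p == NULL) { perror("realloc"); exit(EXIT_FAILURE); }
        b->vals = p;
        b->cap  = newcap;
    }
    b->vals[b->len++] = v;
}

int main(void)
{
    intbuf b = { NULL, 0, 0 };
    int i;

    /* in the master's receive loop you would call
     * intbuf_push(&data[workernum], rcv) instead of
     * indexing into a fixed-size array */
    for (i = 0; i < 25; i++)
        intbuf_push(&b, i*i);

    for (i = 0; i < b.len; i++)
        printf("%d ", b.vals[i]);
    printf("\n");

    free(b.vals);
    return EXIT_SUCCESS;
}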