This is the output, and it shows that as I increase the number of processes the time increases too. Is there a problem with the logic of this code? I found it on the internet and only changed the name to matrixMulti and its printf. When I connect to the grid lab and increase the number of processes, it seems to me it should get faster, but it does not work that way. What do you think?
/**********************************************************************************************
* Matrix Multiplication Program using MPI.
*
* Viraj Brian Wijesuriya - University of Colombo School of Computing, Sri Lanka.
*
* Works with any two matrices [A], [B] that can be multiplied to produce a matrix [C].
*
* Master process initializes the multiplication operands, distributes the multiplication
* operation to worker processes and reduces the worker results to construct the final output.
*
************************************************************************************************/
#include<stdio.h>
#include<mpi.h>
#define NUM_ROWS_A 5000 //rows of input [A]
#define NUM_COLUMNS_A 5000 //columns of input [A]
#define NUM_ROWS_B 5000 //rows of input [B]
#define NUM_COLUMNS_B 5000 //columns of input [B]
#define MASTER_TO_SLAVE_TAG 1 //tag for messages sent from master to slaves
#define SLAVE_TO_MASTER_TAG 4 //tag for messages sent from slaves to master
void makeAB(); //makes the [A] and [B] matrixes
void printArray(); //print the content of output matrix [C];
int rank; //process rank
int size; //number of processes
int i, j, k; //helper variables
double mat_a[NUM_ROWS_A][NUM_COLUMNS_A]; //declare input [A]
double mat_b[NUM_ROWS_B][NUM_COLUMNS_B]; //declare input [B]
double mat_result[NUM_ROWS_A][NUM_COLUMNS_B]; //declare output [C]
double start_time; //hold start time
double end_time; // hold end time
int low_bound; //low bound of the number of rows of [A] allocated to a slave
int upper_bound; //upper bound of the number of rows of [A] allocated to a slave
int portion; //portion of the number of rows of [A] allocated to a slave
MPI_Status status; // store status of a MPI_Recv
MPI_Request request; //capture request of a MPI_Isend
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv); //initialize MPI operations
MPI_Comm_rank(MPI_COMM_WORLD, &rank); //get the rank
MPI_Comm_size(MPI_COMM_WORLD, &size); //get number of processes
/* master initializes work*/
if (rank == 0) {
makeAB();
start_time = MPI_Wtime();
for (i = 1; i < size; i++) {//for each slave other than the master
portion = (NUM_ROWS_A / (size - 1)); // calculate portion without master
low_bound = (i - 1) * portion;
if (((i + 1) == size) && ((NUM_ROWS_A % (size - 1)) != 0)) {//if rows of [A] cannot be equally divided among slaves
upper_bound = NUM_ROWS_A; //last slave gets all the remaining rows
} else {
upper_bound = low_bound + portion; //rows of [A] are equally divisible among slaves
}
//send the low bound first without blocking, to the intended slave
MPI_Isend(&low_bound, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &request);
//next send the upper bound without blocking, to the intended slave
MPI_Isend(&upper_bound, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG + 1, MPI_COMM_WORLD, &request);
//finally send the allocated row portion of [A] without blocking, to the intended slave
MPI_Isend(&mat_a[low_bound][0], (upper_bound - low_bound) * NUM_COLUMNS_A, MPI_DOUBLE, i, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD, &request);
}
}
//broadcast [B] to all the slaves
MPI_Bcast(&mat_b, NUM_ROWS_B*NUM_COLUMNS_B, MPI_DOUBLE, 0, MPI_COMM_WORLD);
/* work done by slaves*/
if (rank > 0) {
//receive low bound from the master
MPI_Recv(&low_bound, 1, MPI_INT, 0, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &status);
//next receive upper bound from the master
MPI_Recv(&upper_bound, 1, MPI_INT, 0, MASTER_TO_SLAVE_TAG + 1, MPI_COMM_WORLD, &status);
//finally receive row portion of [A] to be processed from the master
MPI_Recv(&mat_a[low_bound][0], (upper_bound - low_bound) * NUM_COLUMNS_A, MPI_DOUBLE, 0, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD, &status);
for (i = low_bound; i < upper_bound; i++) {//iterate through a given set of rows of [A]
for (j = 0; j < NUM_COLUMNS_B; j++) {//iterate through columns of [B]
for (k = 0; k < NUM_ROWS_B; k++) {//iterate through rows of [B]
mat_result[i][j] += (mat_a[i][k] * mat_b[k][j]);
}
}
}
//send back the low bound first without blocking, to the master
MPI_Isend(&low_bound, 1, MPI_INT, 0, SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD, &request);
//send the upper bound next without blocking, to the master
MPI_Isend(&upper_bound, 1, MPI_INT, 0, SLAVE_TO_MASTER_TAG + 1, MPI_COMM_WORLD, &request);
//finally send the processed portion of data without blocking, to the master
MPI_Isend(&mat_result[low_bound][0], (upper_bound - low_bound) * NUM_COLUMNS_B, MPI_DOUBLE, 0, SLAVE_TO_MASTER_TAG + 2, MPI_COMM_WORLD, &request);
}
/* master gathers processed work*/
if (rank == 0) {
for (i = 1; i < size; i++) {// until all slaves have handed back the processed data
//receive low bound from a slave
MPI_Recv(&low_bound, 1, MPI_INT, i, SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD, &status);
//receive upper bound from a slave
MPI_Recv(&upper_bound, 1, MPI_INT, i, SLAVE_TO_MASTER_TAG + 1, MPI_COMM_WORLD, &status);
//receive processed data from a slave
MPI_Recv(&mat_result[low_bound][0], (upper_bound - low_bound) * NUM_COLUMNS_B, MPI_DOUBLE, i, SLAVE_TO_MASTER_TAG + 2, MPI_COMM_WORLD, &status);
}
end_time = MPI_Wtime();
printf("\nRunning Time = %f\n\n", end_time - start_time);
printArray();
}
MPI_Finalize(); //finalize MPI operations
return 0;
}
void makeAB()
{
for (i = 0; i < NUM_ROWS_A; i++) {
for (j = 0; j < NUM_COLUMNS_A; j++) {
mat_a[i][j] = i + j;
}
}
for (i = 0; i < NUM_ROWS_B; i++) {
for (j = 0; j < NUM_COLUMNS_B; j++) {
mat_b[i][j] = i*j;
}
}
}
void printArray()
{
for (i = 0; i < NUM_ROWS_A; i++) {
printf("\n");
for (j = 0; j < NUM_COLUMNS_A; j++)
printf("%8.2f ", mat_a[i][j]);
}
printf("\n\n\n");
for (i = 0; i < NUM_ROWS_B; i++) {
printf("\n");
for (j = 0; j < NUM_COLUMNS_B; j++)
printf("%8.2f ", mat_b[i][j]);
}
printf("\n\n\n");
for (i = 0; i < NUM_ROWS_A; i++) {
printf("\n");
for (j = 0; j < NUM_COLUMNS_B; j++)
printf("%8.2f ", mat_result[i][j]);
}
printf("\n\n");
}
Answer 0 (score: 5)
This actually isn't surprising. The more workers you have, the more communication overhead you have (dividing up the work, gathering the results back in), so there is usually a sweet spot where you have enough workers to benefit from the parallelisation, but not so many that the communication overhead starts to become a problem. As you add cores, each core does less work and the communication overhead grows. This is why, when writing parallel applications, a lot of effort goes into measuring how many workers give the best performance, and into designing network structures that minimise the overhead.
Answer 1 (score: 4)
The posted code has some real correctness problems. Let's take a look at the send loop on rank 0:
for (i = 1; i < size; i++) {
//...
low_bound = (i - 1) * portion;
upper_bound = low_bound + portion;
MPI_Isend(&low_bound, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &request);
MPI_Isend(&upper_bound, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG + 1, MPI_COMM_WORLD, &request);
MPI_Isend(&mat_a[low_bound][0], (upper_bound - low_bound) * NUM_COLUMNS_A, MPI_DOUBLE, i, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD, &request);
}
You can't do this. If you're going to use nonblocking requests, you eventually have to MPI_Wait() or MPI_Test() on the requests so that you (and the MPI library) know they have completed. You need to do that to avoid leaking resources, but more importantly in this case, you repeatedly overwrite low_bound and upper_bound before you even know the sends have taken place, so who knows what data your worker tasks are getting. In addition, by overwriting the request handle every time, you absolutely guarantee resource leaks.
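To illustrate the rule (a minimal sketch, not the fix recommended below): every nonblocking call needs a matching completion call before its buffer or its request handle is reused.
MPI_Request req;
MPI_Isend(&low_bound, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &req);
/* only after the wait completes is it safe to modify low_bound or reuse req */
MPI_Wait(&req, MPI_STATUS_IGNORE);
Of course, waiting immediately like this makes the Isend behave just like a plain MPI_Send; the point of nonblocking sends is to keep several in flight and complete them together, which is what the version below does.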
There are several ways to fix this; the easiest is to create simple arrays of the upper and lower bounds, plus an array of requests:
if (rank == 0) {
makeAB();
MPI_Request *requests = malloc(size * 3 * sizeof(MPI_Request));  /* needs <stdlib.h> for malloc */
int *low_bounds = malloc(size * sizeof(int));
int *upper_bounds = malloc(size * sizeof(int));
start_time = MPI_Wtime();
for (i = 1; i < size; i++) {
portion = (NUM_ROWS_A / (size - 1));
low_bounds[i] = (i - 1) * portion;
if (((i + 1) == size) && ((NUM_ROWS_A % (size - 1)) != 0)) {
upper_bounds[i] = NUM_ROWS_A;
} else {
upper_bounds[i] = low_bounds[i] + portion;
}
MPI_Isend(&(low_bounds[i]), 1, MPI_INT, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &(requests[3*i]));
MPI_Isend(&(upper_bounds[i]), 1, MPI_INT, i, MASTER_TO_SLAVE_TAG + 1, MPI_COMM_WORLD, &(requests[3*i+1]));
MPI_Isend(&mat_a[low_bounds[i]][0], (upper_bounds[i] - low_bounds[i]) * NUM_COLUMNS_A, MPI_DOUBLE, i, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD, &(requests[3*i+2]));
}
MPI_Waitall(3*(size-1), &(requests[3]), MPI_STATUSES_IGNORE);
free(requests);
The nice thing about this is that, because rank 0 saves this information, the workers don't need to send it back when they're done, and rank 0 can receive directly into the right place:
//...
for (i = low_bound; i < upper_bound; i++) {
for (j = 0; j < NUM_COLUMNS_B; j++) {
for (k = 0; k < NUM_ROWS_B; k++) {
mat_result[i][j] += (mat_a[i][k] * mat_b[k][j]);
}
}
}
MPI_Send(&mat_result[low_bound][0], (upper_bound - low_bound) * NUM_COLUMNS_B, MPI_DOUBLE, 0, SLAVE_TO_MASTER_TAG + 2, MPI_COMM_WORLD);
//...
if (rank == 0) {
for (i = 1; i < size; i++) {
MPI_Recv(&mat_result[low_bounds[i]][0], (upper_bounds[i] - low_bounds[i]) * NUM_COLUMNS_B, MPI_DOUBLE, i, SLAVE_TO_MASTER_TAG + 2, MPI_COMM_WORLD, &status);
}
But as long as you have an array of these values that has to be distributed to all the processors, you can use an MPI_Scatter operation, which is generally more efficient than your loop over sends:
for (i = 1; i < size; i++) {
low_bounds[i] = ...
upper_bounds[i] = ...
}
MPI_Scatter(low_bounds, 1, MPI_INT, &low_bound, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Scatter(upper_bounds, 1, MPI_INT, &upper_bound, 1, MPI_INT, 0, MPI_COMM_WORLD);
Ideally, you would use scatter or one of its variants to distribute the [A] array as well.
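For instance, a sketch of how distributing the rows of [A] with MPI_Scatterv might look (purely illustrative; send_counts and displs are made-up helper names, and it assumes the low_bounds/upper_bounds arrays from the snippet above, that the bounds have already been scattered so every rank knows its low_bound and upper_bound, and that rank 0 keeps no rows for itself, matching the original layout):
int *send_counts = NULL, *displs = NULL;       /* significant on rank 0 only */
int recv_count = (rank == 0) ? 0 : (upper_bound - low_bound) * NUM_COLUMNS_A;
if (rank == 0) {
    send_counts = malloc(size * sizeof(int));
    displs = malloc(size * sizeof(int));
    send_counts[0] = 0;                        /* master sends itself nothing */
    displs[0] = 0;
    for (i = 1; i < size; i++) {
        send_counts[i] = (upper_bounds[i] - low_bounds[i]) * NUM_COLUMNS_A;
        displs[i] = low_bounds[i] * NUM_COLUMNS_A;
    }
}
/* each rank receives its own rows of [A] directly into place;
   MPI_IN_PLACE on the root means rank 0's (empty) share stays where it is */
MPI_Scatterv(&mat_a[0][0], send_counts, displs, MPI_DOUBLE,
             (rank == 0) ? MPI_IN_PLACE : (void *)&mat_a[low_bound][0],
             recv_count, MPI_DOUBLE, 0, MPI_COMM_WORLD);
if (rank == 0) { free(send_counts); free(displs); }
If rank 0 did its fair share of the work as suggested later, send_counts[0] and displs[0] would simply describe rank 0's own rows instead of being zero.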
MPI_Scatter() is a collective operation, like MPI_Bcast(), which brings us to your next problem. In your original code you have:
//rank 0:
for (i = 1; i < size; i++ ) {
//...
MPI_Isend();
MPI_Isend();
MPI_Isend();
}
MPI_Bcast();
// other ranks:
MPI_Bcast();
MPI_Recv();
MPI_Recv();
MPI_Recv();
This interleaving of collective and point-to-point communication can be very dangerous and can lead to deadlock. It's unnecessary here; you should move the Bcast to after the Scatters and the Recv() (which is now just one receive). That makes your worker-task code look like this:
MPI_Scatter(NULL, 1, MPI_INT, &low_bound, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Scatter(NULL, 1, MPI_INT, &upper_bound, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Recv(&mat_a[low_bound][0], (upper_bound - low_bound) * NUM_COLUMNS_A, MPI_DOUBLE, 0, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD, &status);
MPI_Bcast(&mat_b, NUM_ROWS_B*NUM_COLUMNS_B, MPI_DOUBLE, 0, MPI_COMM_WORLD);
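The matching rank 0 side then has to issue the same collectives in the same order relative to its point-to-point sends of the [A] rows; a sketch of that side (just an illustration assembled from the earlier snippets, not code from the answer, and reusing the requests/low_bounds/upper_bounds arrays allocated above):
low_bounds[0] = upper_bounds[0] = 0;   /* rank 0 keeps no rows in this scheme */
MPI_Scatter(low_bounds, 1, MPI_INT, &low_bound, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Scatter(upper_bounds, 1, MPI_INT, &upper_bound, 1, MPI_INT, 0, MPI_COMM_WORLD);
for (i = 1; i < size; i++) {
    MPI_Isend(&mat_a[low_bounds[i]][0], (upper_bounds[i] - low_bounds[i]) * NUM_COLUMNS_A,
              MPI_DOUBLE, i, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD, &(requests[i]));
}
MPI_Waitall(size - 1, &(requests[1]), MPI_STATUSES_IGNORE);
MPI_Bcast(&mat_b, NUM_ROWS_B * NUM_COLUMNS_B, MPI_DOUBLE, 0, MPI_COMM_WORLD);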
That gets rid of most of the correctness problems, although I'd still suggest using scatter to distribute the [A] array and then having rank 0 do its "fair share" of the computation while it waits for the worker tasks. (This has the added advantage that your program works for size = 1.) So now let's look at the performance issues.
For a fixed problem size, your program has to:
- distribute the bounds and the row blocks of [A] to the workers,
- broadcast all of [B], and
- collect the rows of [C] back on rank 0;
and each worker task has to:
- multiply its roughly NUM_ROWS_A/(P-1) rows of [A] by all of [B] and send its rows of [C] back.
It's easy to see that the amount of actual computational work done per rank falls off as 1/(P-1) as you increase the number of processors, but the amount of communication work rises (as P or lg P, depending on the operation). At some point those cross over, and running on more processors just slows things down. So where is that point?
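As a rough back-of-the-envelope model (purely illustrative): with N = 5000 and P - 1 workers, each worker does about N^3/(P-1) multiply-adds, while the communication per run is on the order of N^2 values for the rows of [A] and [C] plus the broadcast of all of [B], so the total time behaves roughly like a*N^3/(P-1) + b*N^2*f(P), where f(P) grows with the number of processes (linearly for a loop of sends, more like lg P for a tree-based broadcast). The first term shrinks as you add workers while the second grows, which is exactly the crossover described above.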
Doing a quick scaling test on a single 8-core Nehalem node, and using IPM to get a simple count of where the time is being spent, we have:
worker  | running  |          | MPI
tasks   | time     | Speedup  | time
--------+----------+----------+--------
   1    |  90.85s  |    -     | 45.5s
   2    |  45.75s  |  1.99x   | 15.4s
   4    |  23.42s  |  3.88x   | 4.93s
   6    |  15.75s  |  5.76x   | 2.51s
That's actually not too bad; the MPI time is in fact being spent almost entirely in MPI_Recv(), which on-node represents the cost of copying the matrix chunks and, for the rank 0 process, waiting for the results to start coming back from the worker tasks. It suggests that having rank 0 do some of the work, and replacing the linear loop over receives with a gather operation, would be useful optimisations.
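A sketch of that gather (illustrative only; recv_counts and row_displs are made-up helper arrays, built like the bounds arrays above but counted in NUM_COLUMNS_B doubles):
int *recv_counts = NULL, *row_displs = NULL;   /* significant on rank 0 only */
if (rank == 0) {
    recv_counts = malloc(size * sizeof(int));
    row_displs = malloc(size * sizeof(int));
    recv_counts[0] = 0;                        /* rank 0 computed no rows in this scheme */
    row_displs[0] = 0;
    for (i = 1; i < size; i++) {
        recv_counts[i] = (upper_bounds[i] - low_bounds[i]) * NUM_COLUMNS_B;
        row_displs[i] = low_bounds[i] * NUM_COLUMNS_B;
    }
}
/* collect every worker's rows of [C] in a single call instead of a loop of MPI_Recv */
MPI_Gatherv((rank == 0) ? MPI_IN_PLACE : (void *)&mat_result[low_bound][0],
            (rank == 0) ? 0 : (upper_bound - low_bound) * NUM_COLUMNS_B, MPI_DOUBLE,
            &mat_result[0][0], recv_counts, row_displs, MPI_DOUBLE,
            0, MPI_COMM_WORLD);
if (rank == 0) { free(recv_counts); free(row_displs); }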
Naturally, as you move off the node or go to larger numbers of processors, the communication cost will keep climbing and the scaling will deteriorate.
A few more minor points:
First, master-slave is generally a pretty poor way to tackle tightly coupled numerical problems with simple load balancing, such as matrix multiplication. But I'll assume this is just a learning-MPI exercise and leave it at that. Note, of course, that the right way to do an MPI-based matrix multiplication is to use an existing matrix multiplication library such as SCALAPACK, Eigen, etc.
Second, the heavy use of global variables is generally unhelpful, but that's beyond the scope of this question. I'd also note that NUM_COLUMNS_A must equal NUM_ROWS_B, so you don't need both.
Answer 2 (score: 1)
When splitting work off to other processes, you need to balance the time spent sending data and results against the savings you gain. In your case, I'd guess that the calculations you send out take longer to transmit than they take to compute locally.
Try sharing larger chunks of work with the other processes.