Matrix multiplication in MPI

Time: 2017-05-12 16:32:34

Tags: c mpi matrix-multiplication

I am trying to write a simple matrix multiplication program with MPI, running on 1, 2, 4, or 8 processors. My code works with 1 (in that case it just does a normal matrix multiplication; with only one rank there is no work to split up). It also works with 2 and 4 processors. However, when I try to run it on 8 processors (-n 8 on the command line), I do not get the correct value in every position of matrix c.

Here is an example: if SIZE = 8 (that is, a, b, and c are all 8x8 matrices), the resulting matrix looks like this:

   8.00   8.00   8.00   8.00   8.00   8.00   8.00   8.00
   8.00   8.00   8.00   8.00   8.00   8.00   8.00   8.00
   8.00   8.00   8.00   8.00   8.00   8.00   8.00   8.00
   8.00   8.00   8.00   8.00   8.00   8.00   8.00   8.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
   0.00   0.00  16.00  16.00  16.00  16.00  16.00  16.00

If SIZE = 16:

  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00  16.00
  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00
  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00
  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00
  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00
  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00
  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00
   0.00   0.00   0.00   0.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00
  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00  32.00

As you can see, zeros show up in the lower-left corner. Something rank 7 is doing causes those positions to become 0.

I have been staring at my code for a while now and I feel like I just need another pair of eyes on it. As far as I can tell, all the sends and receives work correctly, and all the different tasks get the values they are supposed to. From the testing I have done, no task ever assigns a value of 0 to any position in the c matrix (see the sanity-check sketch after the code below). I don't know why it happens, how it happens, or what I can do to fix it.

Here is the code:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define SIZE 16        /* assumption: SIZE is a multiple of the number of nodes */
#define FROM_MASTER 1  /* setting a message type */
#define FROM_WORKER 2  /* setting a message type */
#define DEBUG 1        /* 1 = debug on, 0 = debug off */

MPI_Status status;

static double a[SIZE][SIZE];
static double b[SIZE][SIZE];
static double c[SIZE][SIZE];
static double b_to_trans[SIZE][SIZE];
static void init_matrix(void)
{
    int i, j;
    for (i = 0; i < SIZE; i++)
    {
        for (j = 0; j < SIZE; j++) {
            a[i][j] = 1.0;
            if(i >= SIZE/2) a[i][j] = 2.0;
            b_to_trans[i][j] = 1.0;
            if(j >= SIZE/2) b[i][j] = 2.0;
//          c[i][j] = 1.0;
        }
    }
}

static void print_matrix(void)
{
    int i, j;
    for(i = 0; i < SIZE; i++) {
        for(j = 0; j < SIZE; j++) {
            printf("%7.2f", c[i][j]);
        }
    printf("\n");
    }
}

static void transpose_matrix()
{
    int i, j;
    for(i = 0; i<SIZE; i++)
        for(j = 0; j<SIZE;j++)
            b[i][j] = b_to_trans[j][i];
}

int main(int argc, char **argv)
{
    int myrank, nproc;
    int rows; /*amount of work per node (rows per worker)*/
    int mtype; /*message type: send/recv between master and workers*/
    int dest, src, offseta, offsetb;
    int runthrough, runmod;
    double start_time, end_time;
    int i, j, k, l;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    rows = SIZE/nproc;
    mtype = FROM_MASTER;

    if (myrank == 0) {
        /*Initialization*/
        printf("SIZE = %d, number of nodes = %d\n", SIZE, nproc);
        init_matrix();
        transpose_matrix();
        start_time = MPI_Wtime();

        if(nproc == 1) { /*In case we only run on one processor, the master will simply do a regular matrix-matrix multiplication.*/
            for(i = 0; i < SIZE; i++) {
                for(j = 0; j < SIZE; j++) {
                    for(k = 0; k < SIZE; k++)
                        c[i][j] = c[i][j] + a[i][k]*b[j][k];
                }
            }
            end_time = MPI_Wtime();
            if(DEBUG) /*Prints the resulting matrix c*/
                print_matrix();
            printf("Execution time on %2d nodes: %f\n", nproc, end_time-start_time);
        }
        else {

            for(l = 0; l < nproc; l++){
                offsetb = rows*l;
                offseta = rows;
                mtype = FROM_MASTER;

                for(dest = 1; dest < nproc; dest++){
                    MPI_Send(&offseta, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
                    MPI_Send(&offsetb, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
                    MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
                    MPI_Send(&a[offseta][0], rows*SIZE, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
                    MPI_Send(&b[offsetb][0], rows*SIZE, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
                    offseta += rows;
                    offsetb = (offsetb+rows)%SIZE;
                }

                offseta = rows;
                offsetb = rows*l;
                //printf("Rank: %d, offseta: %d, offsetb: %d\n", myrank, offseta, offsetb);
                //printf("Offseta: %d\n", offseta);
                //printf("Offsetb: %d\n", offsetb);
                for(i = 0; i < offseta; i++) {
                    for(j = offsetb; j < offsetb+rows; j++) {
                        for(k = 0; k < SIZE; k++){
                            c[i][j] = c[i][j] + a[i][k]*b[j][k];
                        }
                    }
                }
                mtype = FROM_WORKER;
                for(src = 1; src < nproc; src++){
                    MPI_Recv(&offseta, 1, MPI_INT, src, mtype, MPI_COMM_WORLD, &status);
                    MPI_Recv(&offsetb, 1, MPI_INT, src, mtype, MPI_COMM_WORLD, &status);
                    MPI_Recv(&rows, 1, MPI_INT, src, mtype, MPI_COMM_WORLD, &status);
                    for(i = 0; i < rows; i++) {
                        MPI_Recv(&c[offseta+i][offsetb], offseta, MPI_DOUBLE, src, mtype, MPI_COMM_WORLD, &status); /*returns answer c(1,1)*/
                    }
                }
            }


            end_time = MPI_Wtime();
            if(DEBUG) /*Prints the resulting matrix c*/
                print_matrix();
            printf("Execution time on %2d nodes: %f\n", nproc, end_time-start_time);
        }
    }
    else{
        if(nproc > 1) {
            for(l = 0; l < nproc; l++){
                mtype = FROM_MASTER;
                MPI_Recv(&offseta, 1, MPI_INT, 0, mtype, MPI_COMM_WORLD, &status);
                MPI_Recv(&offsetb, 1, MPI_INT, 0, mtype, MPI_COMM_WORLD, &status);
                MPI_Recv(&rows, 1, MPI_INT, 0, mtype, MPI_COMM_WORLD, &status);
                MPI_Recv(&a[offseta][0], rows*SIZE, MPI_DOUBLE, 0, mtype, MPI_COMM_WORLD, &status);
                MPI_Recv(&b[offsetb][0], rows*SIZE, MPI_DOUBLE, 0, mtype, MPI_COMM_WORLD, &status);

                for(i = offseta; i < offseta+rows; i++) {
                    for(j = offsetb; j < offsetb+rows; j++) {
                        for(k = 0; k < SIZE; k++){
                            c[i][j] = c[i][j] + a[i][k]*b[j][k];
                        }
                    }
                }

                mtype = FROM_WORKER;
                MPI_Send(&offseta, 1, MPI_INT, 0, mtype, MPI_COMM_WORLD);
                MPI_Send(&offsetb, 1, MPI_INT, 0, mtype, MPI_COMM_WORLD);
                MPI_Send(&rows, 1, MPI_INT, 0, mtype, MPI_COMM_WORLD);
                for(i = 0; i < rows; i++){
                    MPI_Send(&c[offseta+i][offsetb], offseta, MPI_DOUBLE, 0, mtype, MPI_COMM_WORLD);
                }
            }
        }
    }
    MPI_Finalize();
    return 0;
}
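
As a side note, this is roughly the kind of check I ran when I say that no task assigns 0 to any position of c. It is only a sketch of the test, not part of the program above; the printf is a hypothetical debug aid dropped into the worker's compute loop:

for(i = offseta; i < offseta+rows; i++) {
    for(j = offsetb; j < offsetb+rows; j++) {
        for(k = 0; k < SIZE; k++){
            c[i][j] = c[i][j] + a[i][k]*b[j][k];
        }
        /* Debug aid: report if this task just left a 0 in its block of c */
        if(c[i][j] == 0.0)
            printf("rank %d computed 0 at c[%d][%d]\n", myrank, i, j);
    }
}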

Any suggestions would be helpful. Thank you.

1 Answer:

Answer 0 (score: 1):

This is not a definitive answer, but it should definitely help you debug.

I tested this by adding the following code right after the point where the master receives the final data from the workers. Out of a pile of output, I am only showing the important parts. Notice that j+count never exceeds SIZE, except in the case of 8 processors. That matters, because it means the receive is writing past the end of the intended row, into memory you did not mean to touch.

for(i = 0; i < rows; i++) {
    MPI_Recv(&c[offseta+i][offsetb], offseta, MPI_DOUBLE, src, mtype, MPI_COMM_WORLD, &status);
    // I added the following for debugging.            
    if (src == nproc-1)
    {
        printf("src = %i\n", src);
        printf("i = %i\n", offseta+i);
        printf("j = %i\n", offsetb);
        printf("count = %i\n", offseta);
    }
}

np = 2

src = 1
i = 15
j = 8
count = 8

np = 4

src = 3
i = 15
j = 4
count = 12

np = 8

src = 7
i = 15
j = 10
count = 14
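
Doing the arithmetic for np = 8: offsetb + count = 10 + 14 = 24, which is greater than SIZE = 16, so that MPI_Recv writes past the end of row offseta+i and spills into the next row of c. For np = 2 and np = 4 the sum is exactly 16, so nothing overflows and you happen to get away with it.

The count you pass is offseta, which grows with the rank, while each worker only computes a block that is rows columns wide. If the goal is to transfer exactly that block, one thing to try (a sketch only, not tested against your full program) is to use rows as the count on both ends:

/* Worker side: send back only the rows-wide block that was computed. */
for(i = 0; i < rows; i++){
    MPI_Send(&c[offseta+i][offsetb], rows, MPI_DOUBLE, 0, mtype, MPI_COMM_WORLD);
}

/* Master side: receive with the matching count. */
for(i = 0; i < rows; i++) {
    MPI_Recv(&c[offseta+i][offsetb], rows, MPI_DOUBLE, src, mtype, MPI_COMM_WORLD, &status);
}

Whether the rest of the distribution logic then produces the correct product is a separate question, but at least nothing will be written out of bounds any more.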