MPI_Scatter and Gather - 2D array, uneven blocks

Time: 2017-03-26 01:08:45

Tags: c arrays matrix mpi distributed-computing

I'm working with MPI and I'm trying to send uneven blocks of a 2D array to different processors. For example, if I have an image that is not square, say 333x225, I want to send blocks of different sizes to different processors.

I've seen @Jonathan Dursi's approach for evenly divided arrays: sending blocks of 2D array in C using MPI

I tried to adapt it to my problem. So far I've managed to send equal blocks of the data to two processes like this:

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"

int malloc2dchar(char ***array, int n, int m) {

    /* allocate the n*m contiguous items */
    char *p = (char *)malloc(n*m*sizeof(char));
    if (!p) return -1;

    /* allocate the row pointers into the memory */
    (*array) = (char **)malloc(n*sizeof(char*));
    if (!(*array)) {
       free(p);
       return -1;
    }

    /* set up the pointers into the contiguous memory */
    for (int i=0; i<n; i++)
       (*array)[i] = &(p[i*m]);

    return 0;
}

int free2dchar(char ***array) {
    /* free the memory - the first element of the array is at the start */
    free(&((*array)[0][0]));

    /* free the pointers into the memory */
    free(*array);

    return 0;
}

int main(int argc, char **argv) {
    char **global, **local;
    const int gridsize=10; // size of grid
    const int procgridsize=2;  // size of process grid
    int rank, size;        // rank of current process and no. of processes

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        /* fill in the array, and print it */
        malloc2dchar(&global, gridsize, gridsize);
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                global[i][j] = '0'+(3*i+j)%10;
        }


        printf("Global array is:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                putchar(global[i][j]);

            printf("\n");
        }
    }

    /* create the local array which we'll process */

    malloc2dchar(&local, 5, 10);

    /* create a datatype to describe the subarrays of the global array */

    int sizes[2]    = {gridsize, gridsize};         /* global size */
    int subsizes[2] = {5, 10};     /* local size */
    int starts[2]   = {0,0};                        /* where this one starts */
    MPI_Datatype type, subarrtype;
    MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_CHAR, &type);
    MPI_Type_create_resized(type, 0, 10*sizeof(char), &subarrtype);
    MPI_Type_commit(&subarrtype);

    char *globalptr=NULL;
    if (rank == 0) globalptr = &(global[0][0]);

    /* scatter the array to all processors */
    int sendcounts[2];
    int displs[2];

    if (rank == 0) {
        for (int i=0; i<2; i++) sendcounts[i] = 1;
        int disp = 0;
        displs[0]=0;
        displs[1]=5;

        //for (int i=0; i<procgridsize; i++) {
        //    for (int j=0; j<procgridsize; j++) {
        //        displs[i*procgridsize+j] = disp;
        //        disp += 1;
        //    }
        //    disp += ((gridsize/procgridsize)-1)*procgridsize;
        //}
    }


    MPI_Scatterv(globalptr, sendcounts, displs, subarrtype, &(local[0][0]),
                 gridsize*gridsize/2, MPI_CHAR,
                 0, MPI_COMM_WORLD);

    /* now all processors print their local data: */

    for (int p=0; p<size; p++) {
        if (rank == p) {
            printf("Local process on rank %d is:\n", rank);
            for (int i=0; i<5; i++) {
                putchar('|');
                for (int j=0; j<10; j++) {
                    putchar(local[i][j]);
                }
                printf("|\n");
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    /* now each processor has its local array, and can process it */
    for (int i=0; i<5; i++) {
        for (int j=0; j<10; j++) {
            local[i][j] = 'A' + rank;
        }
    }

    /* it all goes back to process 0 */
    MPI_Gatherv(&(local[0][0]), gridsize*gridsize/2,  MPI_CHAR,
                 globalptr, sendcounts, displs, subarrtype,
                 0, MPI_COMM_WORLD);

    /* don't need the local data anymore */
    free2dchar(&local);

    /* or the MPI data type */
    MPI_Type_free(&subarrtype);

    if (rank == 0) {
        printf("Processed grid:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++) {
                putchar(global[i][j]);
            }
            printf("\n");
        }

        free2dchar(&global);
    }


    MPI_Finalize();

    return 0;
}

So I get:

Global array is:
0123456789
3456789012
6789012345
9012345678
2345678901
5678901234
8901234567
1234567890
4567890123
7890123456

Local process on rank 0 is:
|0123456789|
|3456789012|
|6789012345|
|9012345678|
|2345678901|

Local process on rank 1 is:
|5678901234|
|8901234567|
|1234567890|
|4567890123|
|7890123456|

Processed grid:
AAAAAAAAAA
AAAAAAAAAA
AAAAAAAAAA
AAAAAAAAAA
AAAAAAAAAA
BBBBBBBBBB
BBBBBBBBBB
BBBBBBBBBB
BBBBBBBBBB
BBBBBBBBBB

But I want the data to be split like this (into uneven blocks):

    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    AAAAAAAAAA
    BBBBBBBBBB
    BBBBBBBBBB
    BBBBBBBBBB
    BBBBBBBBBB

UPDATE

I tried to set tab_size depending on the process rank, but it doesn't work correctly.

Here is the code:

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"

int malloc2dchar(char ***array, int n, int m) {

    /* allocate the n*m contiguous items */
    char *p = (char *)malloc(n*m*sizeof(char));
    if (!p) return -1;

    /* allocate the row pointers into the memory */
    (*array) = (char **)malloc(n*sizeof(char*));
    if (!(*array)) {
       free(p);
       return -1;
    }

    /* set up the pointers into the contiguous memory */
    for (int i=0; i<n; i++)
       (*array)[i] = &(p[i*m]);

    return 0;
}

int free2dchar(char ***array) {
    /* free the memory - the first element of the array is at the start */
    free(&((*array)[0][0]));

    /* free the pointers into the memory */
    free(*array);

    return 0;
}

int main(int argc, char **argv) {
    char **global, **local;
    const int gridsize=10; // size of grid
    const int procgridsize=2;  // size of process grid
    int rank, size;        // rank of current process and no. of processes

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);


    //if (size != procgridsize*procgridsize) {
    //    fprintf(stderr,"%s: Only works with np=%d for now\n", argv[0], procgridsize);
    //    MPI_Abort(MPI_COMM_WORLD,1);
    //}

    int tab_size;
    if (rank == 0) {
        /* fill in the array, and print it */
        malloc2dchar(&global, gridsize, gridsize);
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                global[i][j] = '0'+(3*i+j)%10;
        }


        printf("Global array is:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++)
                putchar(global[i][j]);

            printf("\n");
        }
        tab_size = 4;
    }
    if(rank == 1)
    {
        tab_size = 6;
    }

    /* create the local array which we'll process */

    malloc2dchar(&local, tab_size, 10);

    /* create a datatype to describe the subarrays of the global array */

    int sizes[2]    = {gridsize, gridsize};         /* global size */
    int subsizes[2] = {tab_size, 10};     /* local size */
    int starts[2]   = {0,0};                        /* where this one starts */
    MPI_Datatype type, subarrtype;
    MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_CHAR, &type);
    MPI_Type_create_resized(type, 0, 10*sizeof(char), &subarrtype);
    MPI_Type_commit(&subarrtype);

    char *globalptr=NULL;
    if (rank == 0) globalptr = &(global[0][0]);

    /* scatter the array to all processors */
    int sendcounts[2];
    int displs[2];

    int tabsize;
    if (rank == 0) {
        for (int i=0; i<2; i++) sendcounts[i] = 1;
        int disp = 0;
        displs[0]=0;
        displs[1]=tab_size;

        //for (int i=0; i<procgridsize; i++) {
        //    for (int j=0; j<procgridsize; j++) {
        //        displs[i*procgridsize+j] = disp;
        //        disp += 1;
        //    }
        //    disp += ((gridsize/procgridsize)-1)*procgridsize;
        //}
    }


    MPI_Scatterv(globalptr, sendcounts, displs, subarrtype, &(local[0][0]),
                 gridsize*gridsize/2, MPI_CHAR,
                 0, MPI_COMM_WORLD);

    /* now all processors print their local data: */

    for (int p=0; p<size; p++) {
        if (rank == p) {
            printf("Local process on rank %d is:\n", rank);
            for (int i=0; i<tab_size; i++) {
                putchar('|');
                for (int j=0; j<10; j++) {
                    putchar(local[i][j]);
                }
                printf("|\n");
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    /* now each processor has its local array, and can process it */
    for (int i=0; i<tab_size; i++) {
        for (int j=0; j<10; j++) {
            local[i][j] = 'A' + rank;
        }
    }

    /* it all goes back to process 0 */
    MPI_Gatherv(&(local[0][0]), gridsize*gridsize/2,  MPI_CHAR,
                 globalptr, sendcounts, displs, subarrtype,
                 0, MPI_COMM_WORLD);

    /* don't need the local data anymore */
    free2dchar(&local);

    /* or the MPI data type */
    MPI_Type_free(&subarrtype);

    if (rank == 0) {
        printf("Processed grid:\n");
        for (int i=0; i<gridsize; i++) {
            for (int j=0; j<gridsize; j++) {
                putchar(global[i][j]);
            }
            printf("\n");
        }

        free2dchar(&global);
    }


    MPI_Finalize();

    return 0;
}

The output is as follows:

Global array is:
0123456789
3456789012
6789012345
9012345678
2345678901
5678901234
8901234567
1234567890
4567890123
7890123456
Local process on rank 0 is:
|0123456789|
|3456789012|
|6789012345|
|9012345678|
Local process on rank 1 is:
|2345678901|
|5678901234|
|8901234567|
|1234567890|
||
||
[blade001:3727] *** An error occurred in MPI_Gatherv
[blade001:3727] *** reported by process [2497249281,0]
[blade001:3727] *** on communicator MPI_COMM_WORLD
[blade001:3727] *** MPI_ERR_TRUNCATE: message truncated
[blade001:3727] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[blade001:3727] ***    and potentially your MPI job)

1 Answer:

Answer (score: 2)

Why your code is wrong

You define datatypes that differ from rank to rank (they depend on tab_size), so the types used on the two sides of the collective no longer match. That is not how it's done.
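
Concretely, with the counts in your updated code: the root builds subarrtype from its own tab_size = 4, so each piece it scatters or gathers covers only 4*10 = 40 chars, while every rank passes gridsize*gridsize/2 = 50 MPI_CHAR on its side of the collective. The scatter merely under-fills the receive buffers (hence the two empty rows on rank 1), but in the gather the root's posted receive of 40 chars per rank is smaller than the 50 chars each rank sends, which is exactly the MPI_ERR_TRUNCATE you see:

MPI_Gatherv(&(local[0][0]), gridsize*gridsize/2, MPI_CHAR, /* each rank sends 50 chars     */
             globalptr, sendcounts, displs, subarrtype,     /* root expects 1 x 40 per rank */
             0, MPI_COMM_WORLD);                            /* 50 > 40 -> MPI_ERR_TRUNCATE  */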

How to do this correctly

As you indicate, a decomposition into uneven numbers of whole rows is much simpler. There is no need for complicated derived datatypes; in fact, you hardly need derived types at all. You can use a very simple datatype representing one row. The only remaining task is to set up the sizes/displacements for MPI_Scatterv correctly:

int local_rows[2] = {6, 4};   /* number of whole rows each rank gets */

malloc2dchar(&local, local_rows[rank], gridsize);

/* a very simple datatype: one row of the grid (gridsize contiguous chars) */
MPI_Datatype row_type;
MPI_Type_contiguous(gridsize, MPI_CHAR, &row_type);
MPI_Type_commit(&row_type);

int displs[2];

if (rank == 0) {
  /* displacement of each rank's block within the global array, counted in rows */
  displs[0] = 0;
  for (int r = 1; r < 2; r++) {
    displs[r] = displs[r - 1] + local_rows[r - 1];
  }
}

MPI_Scatterv(globalptr, local_rows, displs, row_type, &(local[0][0]),
             local_rows[rank], row_type, 0, MPI_COMM_WORLD);

...

MPI_Gatherv(&(local[0][0]), local_rows[rank], row_type, globalptr, local_rows,
            displs, row_type, 0, MPI_COMM_WORLD);

This assumes that all ranks know the expected sizes {6, 4}. You can either let every rank compute them deterministically, or compute them only on the root and scatter the result (a non-root rank only needs to know its own number of rows).
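
If you take the root-only route, a minimal sketch could look like this (the names all_rows/my_rows and the remainder-based rule are only illustrative; use whatever rule produces the split you actually want, e.g. {6, 4}):

int *all_rows = NULL;   /* per-rank row counts, only needed on the root */
int my_rows;            /* this rank's share of the rows */

if (rank == 0) {
    all_rows = (int *)malloc(size * sizeof(int));
    int base = gridsize / size, rem = gridsize % size;
    for (int r = 0; r < size; r++)
        all_rows[r] = base + (r < rem ? 1 : 0);   /* replace with your own rule */
}

/* each rank learns only its own row count */
MPI_Scatter(all_rows, 1, MPI_INT, &my_rows, 1, MPI_INT, 0, MPI_COMM_WORLD);

malloc2dchar(&local, my_rows, gridsize);
/* the root keeps all_rows, so it can still compute the displacements as above */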

A truly irregular 2D decomposition

If you really want to split the array into blocks that do not consist of whole rows, it gets much more complicated. There is already a very good answer on that, so I won't repeat it here. Make sure to read and understand it thoroughly.

Given the complexity, I would suggest doing that only if you are absolutely sure you need it.

Overlap

You cannot send overlapping data with a single scatter. If you need overlap, consider exchanging the data directly between the neighbouring processes that own it (halo exchange).
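
For a row decomposition, that amounts to a pair of MPI_Sendrecv calls with the upper and lower neighbour. A minimal sketch (it assumes the local block was allocated with one extra ghost row above and below, and reuses the my_rows/gridsize names from above):

/* interior rows are local[1] .. local[my_rows]; local[0] and local[my_rows+1] are ghost rows */
int up   = (rank > 0)        ? rank - 1 : MPI_PROC_NULL;
int down = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;

/* send my first interior row upwards, receive the lower ghost row from below */
MPI_Sendrecv(&(local[1][0]),         gridsize, MPI_CHAR, up,   0,
             &(local[my_rows+1][0]), gridsize, MPI_CHAR, down, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);

/* send my last interior row downwards, receive the upper ghost row from above */
MPI_Sendrecv(&(local[my_rows][0]),   gridsize, MPI_CHAR, down, 1,
             &(local[0][0]),         gridsize, MPI_CHAR, up,   1,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);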