OpenMPI不从数组发送数据

时间:2013-11-13 01:00:57

标签: c openmpi

我正在尝试并行化BMP图像的灰度滤镜,我的功能在尝试从像素阵列发送数据时卡住了。

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "mpi.h"

#define MASTER_TO_SLAVE_TAG 1 //tag for messages sent from master to slaves
#define SLAVE_TO_MASTER_TAG 10 //tag for messages sent from slaves to master

#pragma pack(1)

struct image {
       struct fileHeader fh;
       struct imageHeader ih;
       pixel *array;
};

struct fileHeader {
       //blablabla...      
};

struct imageHeader {
       //blablabla...
};

typedef struct
{
        unsigned char R;
        unsigned char G;
        unsigned char B;
}pixel;


void grayScale_Parallel(struct image *im, int size, int rank)
{
     int i,j,lum,aux,r;
     pixel tmp;

     int total_pixels = (*im).ih.width * (*im).ih.height;
     int qty = total_pixels/(size-1);
     int rest = total_pixels % (size-1);
     MPI_Status status;

     //printf("\n%d\n", rank);

     if(rank == 0)
     {
         for(i=1; i<size; i++){
         j = i*qty - qty;
         aux = j;

         if(rest != 0 && i==size-1) {qty=qty+rest;} //para distrubuir toda la carga
         printf("\nj: %d  qty: %d  rest: %d\n", j, qty, rest);

         //it gets stuck  here,it doesn't send the data
         MPI_Send(&(*im).array[j], qty*3, MPI_BYTE, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD);
         MPI_Send(&aux, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD);
         MPI_Send(&qty, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD);

         printf("\nSending to node=%d, sender node=%d\n", i, rank);
        }

     }
     else
     {
    MPI_Recv(&aux, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD,&status);
    MPI_Recv(&qty, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD,&status);

    pixel *arreglo = (pixel *)calloc(qty, sizeof(pixel));
    MPI_Recv(&arreglo[0], qty*3, MPI_BYTE, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD,&status);
        //PROCESS RECEIVED PIXELS...
            //SEND to ROOT PROCESS

     }


    if (rank==0){
        //RECEIVE DATA FROM ALL PROCESS
    }
}


int main(int argc, char *argv[])
{    
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Status status;

    int op=1;
    char filename_toload[50];
    int bright_number=0;
    struct image image2;

    if (rank==0)
    {
    printf("File to load: \n");
    scanf("%s", filename_toload);
    loadImage(&image2, filename_toload);
    }

    while(op != 0)
    {
        if (rank==0)
        {
        printf("Welcome to example program!\n\n");
        printf("\t1.- GrayScale Parallel Function\n");
        printf("\t2.- Call another Function\n");
        printf("\t0.- Exit\n\t");

        printf("\n\n\tEnter option:");
        scanf("%d", &op);
        }

        //Broadcast the user's choice to all other ranks
        MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);

        switch(op)
        {
            case 1:
                    grayScale_Parallel(&image2, size, rank);
                    MPI_Barrier(MPI_COMM_WORLD);
                    printf("GrayScale applied successfully!\n\n");
                    break;
            case 2:
                    function_blabla();
                    printf("Function called successfully\n\n");
                    break;
        }
    }

    MPI_Finalize();
    return 0;
}

我认为MPI_Send函数无法读取像素数组,但很奇怪,因为我可以打印像素。

有什么想法吗?

2 个答案:

答案 0 :(得分:1)

要详细说明Soravux的答案,您应该更改MPI_Send次来电的顺序(请注意已更改的MASTER_TO_SLAVE_TAG),以避免死锁:

MPI_Send(&aux, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD);
MPI_Send(&qty, 1, MPI_INT, i, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD);
MPI_Send(&(*im).array[j], qty*3, MPI_BYTE, i, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD);

这些调用需要通过以下MPI_Recv调用序列

进行匹配
MPI_Recv(&aux, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD,&status);
MPI_Recv(&qty, 1, MPI_INT, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+1, MPI_COMM_WORLD,&status);

pixel *arreglo = (pixel *)calloc(qty, sizeof(pixel));
MPI_Recv(&arreglo[0], qty*3, MPI_BYTE, MPI_ANY_SOURCE, MASTER_TO_SLAVE_TAG+2, MPI_COMM_WORLD,&status);

希望这能回答你的问题。

答案 1 :(得分:0)

您致电MPI_SendMPI_Recv的顺序非常重要。您必须确保您的呼叫始终处于相同的顺序,因为这些功能是阻止的。只要在目标上未执行相应的(相同标记)MPI_Send对应项,就不会返回对MPI_Recv的调用。否则可能会导致死锁。