Is there any way to get shared variables or create a similar functionality in parallel programming in C using MPI libraries?

Date: 2017-12-17 08:12:23

Tags: c parallel-processing mpi shared-variable

I am trying to implement a map where keys are numbers mapping to unique numbers. In other words, each process holds a set of numbers in an array that maps to another set of numbers in another array held by the same process. The mappings need to be unique across all the processes. I passed around a struct with the mappings to create mappings for each of the processes. However, this is not parallel, as I sequentially send information through the processes. I ask for help from all of you wonderful programmers of the internet: how can all processes look at a specific variable at the same time? The following is the code I am currently working with. Thanks in advance, and for all the support I have received so far.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct map{             //saves all the mappings
   int keys[25];        //lookup keys (fixed capacity: 25 entries)
   int values[25];      //values[i] is the value mapped to keys[i]
   int grow;            //number of entries currently stored (index of next free slot)
};

struct map rmap;    //global map shared by the helper functions below

//Add a key/value pair to the global map.
//Fix: guard against writing past the fixed 25-entry capacity, which
//previously caused an out-of-bounds write (undefined behavior) as soon
//as the 26th mapping was added.
void mapadd(int key, int value){            //adding values to map
   if(rmap.grow >= (int)(sizeof(rmap.keys)/sizeof(rmap.keys[0]))){
      return;   //map full: drop the new pair rather than corrupt memory
   }
   rmap.keys[rmap.grow] = key;
   rmap.values[rmap.grow] = value;
   rmap.grow++;
}

//Look up the value mapped to `key` in the global map.
//Returns 0 when the key is not present (callers only store labels in
//1..50, so 0 is a safe not-found sentinel here).
//Fix: scan only the rmap.grow entries actually populated instead of the
//full 25-slot array, so unused slots can never produce a spurious match
//and the search stops as early as possible.
int mapper(int key){           //get value from key
   for(int h=0; h<rmap.grow; h++){
       if(rmap.keys[h] == key){
          return rmap.values[h];
       }
   }
   return 0;
}

//Linear membership search: report whether `val` occurs among the first
//`mem` elements of `list`. Returns 1 when found, 0 otherwise.
int finder(int list[], int val, int mem){    //see if a value is in array
   int idx = 0;
   while(idx < mem){
       if(val == list[idx]){
           return 1;
       }
       idx++;
   }
   return 0;
}

int main(int argc, char** argv){
   // Initialize the MPI environment
   MPI_Init(NULL, NULL);
   // Find out rank, size
   int world_rank;
   MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
   int world_size;
   MPI_Comm_size(MPI_COMM_WORLD, &world_size);

   // NOTE(review): every rank seeds from time(0); ranks started within the
   // same second happen to get identical sequences, but that is not
   // guaranteed -- if seeds differ, each rank's dim[]/nums[] tables differ.
   srand(time(0));
   rmap.grow = 0;

   // Random per-rank problem sizes (1..10) and the largest of them,
   // used to dimension the 2-D work arrays below.
   int dim[world_size];
   int maxdim = 0;
   for(int s=0; s<world_size; s++){    
      dim[s] = (rand()%10) + 1;
      if(dim[s]>maxdim){
         maxdim = dim[s];
      }
   }

   int nums[world_size][maxdim];      // input keys per rank (VLA)
   int labels[world_size][maxdim];    // output labels per rank (VLA)

   // Zero the used prefix of each row; a 0 label means "not yet mapped".
   for(int u=0; u<world_size; u++){
       for(int d=0; d<dim[u]; d++){
           labels[u][d] = 0;
           nums[u][d] = 0;
       }
   }

   // Fill each rank's row with random keys in 1..26.
   for(int t=0; t<world_size; t++){
       for(int i=0; i<dim[t]; i++){
           nums[t][i] = rand()%26 + 1;
           //printf("%d\n", nums[t][i]);
       }
   }

   // Sequential pipeline: every rank except 0 first waits for the map
   // built so far by the previous rank. This serializes the ranks --
   // which is exactly the bottleneck the question asks about.
   // NOTE(review): rmap.grow is NOT transmitted, so it is still 0 here
   // after the receive; mapadd() below will overwrite the received
   // entries starting at slot 0 instead of appending after them -- verify.
   if(world_rank!=0){
      MPI_Recv(&rmap.keys, 25, MPI_INT, world_rank-1, 0, 
      MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Recv(&rmap.values, 25, MPI_INT, world_rank-1, 0, 
      MPI_COMM_WORLD, MPI_STATUS_IGNORE);
   }

      // Label this rank's keys: reuse an existing mapping when the key is
      // already in the map, otherwise invent a random label (1..50) and
      // record the new pair in the global map.
      for(int j=0; j<dim[world_rank]; j++){ 
         if(labels[world_rank][j] == 0){
            if(finder(rmap.keys, nums[world_rank][j], 25)==1){
                //printf("%s", "exist");
                labels[world_rank][j] = mapper(nums[world_rank][j]);
            }
            else{
               //printf("%s", "not");
               labels[world_rank][j] = (rand()%50) + 1;
               mapadd(nums[world_rank][j], labels[world_rank][j]);
               /*for(int o=0; o<25; o++){
                   printf("%d - %d", rmap.keys[o], rmap.values[o]);
               }*/
           }
        }
     }
     // Forward the (possibly extended) map to the next rank in the chain.
     if(world_rank<world_size-1){
         MPI_Send(&rmap.keys, 25, MPI_INT, world_rank+1, 0, MPI_COMM_WORLD);
         MPI_Send(&rmap.values, 25, MPI_INT, world_rank+1, 0, MPI_COMM_WORLD);
     }

     // Print this rank's results. NOTE(review): the outer loop runs
     // locally on every rank with no barrier or communication, so it
     // does not actually order the output across processes.
     for(int rank=0; rank<world_size; rank++){
        if(rank==world_rank){
            for(int k=0; k<dim[rank]; k++){
                printf("Process #%d: %d --> %d\n", rank, nums[rank][k], labels[rank][k]);
            }
        }
    }
   MPI_Finalize();
   return 0;

}

0 Answers:

No answers yet.