/*
Matrix file names:
small matrix A.bin of dimension 100 × 50
small matrix B.bin of dimension 50 × 100
large matrix A.bin of dimension 1000 × 500
large matrix B.bin of dimension 500 × 1000
An MPI program should be implemented such that it can
• accept two file names at run-time,
• let process 0 read the A and B matrices from the two data files,
• let process 0 distribute the pieces of A and B to all the other processes,
• involve all the processes to carry out the chosen parallel algorithm
for matrix multiplication C = A * B,
• let process 0 gather, from all the other processes, the different pieces
of C ,
• let process 0 write out the entire C matrix to a data file.
*/
int main(int argc, char *argv[]) {
    printf("Oblig 2 \n");
    double **matrixa;
    double **matrixb;
    int ma,na,my_ma,my_na;
    int mb,nb,my_mb,my_nb;
    int i,j,k;
    int myrank,numprocs;
    int konstanta,konstantb;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);

    if(myrank==0) {
        read_matrix_binaryformat ("small_matrix_A.bin", &matrixa, &ma, &na);
        read_matrix_binaryformat ("small_matrix_B.bin", &matrixb, &mb, &nb);
    }

    //mpi broadcast
    MPI_Bcast(&ma,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&mb,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&na,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&nb,1,MPI_INT,0,MPI_COMM_WORLD);
    fflush(stdout);

    int resta = ma % numprocs; // remainder: the number of processes that get the larger block
    //int restb = mb % numprocs;
    if (myrank == 0) {
        printf("ma : %d",ma);
        fflush(stdout);
        printf("mb : %d",mb);
        fflush(stdout);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (resta == 0) {
        my_ma = ma / numprocs;
        printf("null rest\n ");
        fflush(stdout);
    } else {
        if (myrank < resta) {
            my_ma = ma / numprocs + 1; // remember the + 1
        } else {
            my_ma = ma / numprocs; // integer division rounds down!
        }
    }

    my_na = na;
    my_nb = nb;

    double **myblock = malloc(my_ma*sizeof(double*));
    for(i=0;i<na;i++) {
        myblock[i] = malloc(my_na*sizeof(double));
    }

    //send_cnt for scatterv
    int* send_cnta = (int*)malloc(numprocs*sizeof(int)); // number of elements sent to each process: send_cnta[i] = count for process i
    int tot_elemsa = my_ma*my_na;
    MPI_Allgather(&tot_elemsa,1,MPI_INT,&send_cnta[0],1,MPI_INT,MPI_COMM_WORLD); // arrays in C must be passed as &array[0]

    //send_disp for scatterv
    int* send_dispa = (int*)malloc(numprocs*sizeof(int)); // why do we need disp?
    // int* send_dispb = (int*)malloc(numprocs*sizeof(int));
    // disp: where in imagechars each process's first element should go
    fflush(stdout);
    if(resta==0) {
        send_dispa[myrank]=myrank*my_ma*my_na;
    } else if(myrank<=resta) {
        if(myrank<resta) {
            send_dispa[myrank]=myrank*my_ma*my_na;
        } else { //my_rank == rest
            send_dispa[myrank]=myrank*(my_ma+1)*my_na;
            konstanta=myrank*(my_ma+1)*my_na;
        }
    }
    MPI_Bcast(&konstanta,1,MPI_INT,resta,MPI_COMM_WORLD);
    if (myrank>resta){
        send_dispa[myrank]=((myrank-resta)*(my_ma*my_na))+konstanta;
    }
    MPI_Allgather(&send_dispa[myrank],1,MPI_INT,&send_dispa[0],1,MPI_INT,MPI_COMM_WORLD);

    printf("print2: %d" , myrank);
    fflush(stdout);

    //recv_buffer for scatterv
    double *recv_buffera=malloc((my_ma*my_na)*sizeof(double));
    MPI_Scatterv(&matrixa[0], &send_cnta[0], &send_dispa[0], MPI_UNSIGNED_CHAR, &recv_buffera[0], my_ma*my_na, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);

    for(i=0; i<my_ma; i++) {
        for(j=0; j<my_na; j++) {
            myblock[i][j]=recv_buffera[i*my_na + j];
        }
    }

    MPI_Finalize();
    return 0;
}
OLD: I get three types of errors. I can get a Scatterv count error, a segmentation fault 11, or the processes simply hang. Which error I get seems random. I run the code with 2 procs every time. When it hangs, it hangs before the printf("print2: %d", myrank);. When my friend ran the code on his own computer, also with two procs, he did not get past the first MPI_Bcast; nothing was printed when he ran it. Here is a link to the errors I get: http://justpaste.it/zs0
UPDATED QUESTION: Now I only get a segmentation fault after the printf("print2: %d", myrank); that comes right before the Scatterv call. Even if I remove all the code after that printf statement, I still get the segmentation fault, but only if I run with more than two processes.
Answer (score: 0)
I'm having some trouble following what you are trying to do. I think your scatter call is more complicated than it needs to be. Here is a snippet from a similar assignment I did this year; hopefully it is a clearer example of how Scatterv works.
/*********************************************************************
* Scatter A to All Processes
* - Using Scatterv for versatility.
*********************************************************************/
int *send_counts; // Send Counts
int *displacements; // Send Offsets
int chunk; // Number of Rows per Process (- Root)
int chunk_size; // Number of Doubles per Chunk
int remainder; // Number of Rows for Root Process
double * rbuffer; // Receive Buffer
// Do Some Math
chunk = m / (p - 1);
remainder = m % (p - 1);
chunk_size = chunk * n;
// Setup Send Counts
send_counts = malloc(p * sizeof(int));
send_counts[0] = remainder * n;
for (i = 1; i < p; i++)
    send_counts[i] = chunk_size;
// Setup Displacements
displacements = malloc(p * sizeof(int));
displacements[0] = 0;
for (i = 1; i < p; i++)
    displacements[i] = (remainder * n) + ((i - 1) * chunk_size);
// Allocate Receive Buffer
rbuffer = malloc(send_counts[my_rank] * sizeof(double));
// Scatter A Over All Processes!
MPI_Scatterv(A,                    // A
             send_counts,          // Array of counts [int]
             displacements,        // Array of displacements [int]
             MPI_DOUBLE,           // Sent Data Type
             rbuffer,              // Receive Buffer
             send_counts[my_rank], // Receive Count - Per Process
             MPI_DOUBLE,           // Received Data Type
             root,                 // Root
             comm);                // Comm World
MPI_Barrier(comm);
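For the gather step in your assignment (process 0 collecting the pieces of C), the same counts/displacements layout can be mirrored with MPI_Gatherv. Here is a minimal sketch under the same row partition, assuming each rank has computed its rows of C into a flat buffer local_C, that the full result C is allocated on the root, and that q is the number of columns of C; local_C, C, and q are illustrative names, not from your code:
// Each rank owns the same rows of C as the rows of A it received,
// but a row of C holds q doubles (q = columns of B), not n.
int *recv_counts = malloc(p * sizeof(int));
int *recv_displs = malloc(p * sizeof(int));
recv_counts[0] = remainder * q;   // root's rows of C
recv_displs[0] = 0;
for (i = 1; i < p; i++) {
    recv_counts[i] = chunk * q;
    recv_displs[i] = (remainder * q) + ((i - 1) * chunk * q);
}

// Gather every rank's block of C rows back into the full C on the root
MPI_Gatherv(local_C, recv_counts[my_rank], MPI_DOUBLE,  // what this rank sends
            C, recv_counts, recv_displs, MPI_DOUBLE,    // where the root receives it
            root, comm);
This mirrors the Scatterv above: the counts say how much each rank contributes, and the displacements say where its block lands in the root's C.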
Also, this segfaults on my machine even without MPI... I'm pretty sure it is how myblock is allocated. You should do what @Hristo suggested in the comments: allocate both matrices and the result matrix as flat arrays. That eliminates the double pointers and makes your life a lot simpler.
#include <stdio.h>
#include <stdlib.h>

void main ()
{
    int na = 5;
    int my_ma = 5;
    int my_na = 5;
    int i;
    int j;

    double **myblock = malloc(my_ma*sizeof(double*));
    for(i=0;i<na;i++) {
        myblock = malloc(my_na*sizeof(double)); // note: this reassigns myblock itself instead of filling myblock[i]
    }

    unsigned char *recv_buffera=malloc((my_ma*my_na)*sizeof(unsigned char));

    for(i=0; i<my_ma; i++) {
        for(j=0; j<my_na; j++) {
            myblock[i][j]=(float)recv_buffera[i*my_na + j];
        }
    }
}
Try allocating more like this:
// Allocate A, b, and y. Generate random A and b
double *buff=0;
if (my_rank==0)
{
    int A_size = m*n, b_size = n, y_size = m;
    int size = (A_size+b_size+y_size)*sizeof(double);
    buff = (double*)malloc(size);
    if (buff==NULL)
    {
        printf("Process %d failed to allocate %d bytes\n", my_rank, size);
        MPI_Abort(comm,-1);
        return 1;
    }
    // Set pointers
    A = buff; b = A+m*n; y = b+n;
    // Generate matrix and vector
    genMatrix(m, n, A);
    genVector(n, b);
}