I am currently writing a program in C that uses MPI to perform matrix multiplication in parallel. I am fairly new to both C and MPI, so the code is quite rough. I can't seem to get it to work, so could someone read through it and help me understand what I need to do to fix it?
Here is the code:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>

// code adapted from source codes from
// http://www.programiz.com/c-programming/c-multi-dimensional-arrays
// http://www.cs.hofstra.edu/~cscccl/csc145/imul.c

// GENERAL VARIABLES
int **A, **B, **AB;
int i,j,k;
int rows_A, cols_A, rows_B, cols_B;
int dimensions[3];

// MATRIX MULTIPLICATION
void matrixMult(int start, int interval){
    for (i = start; i < start+interval; ++i){
        for (j = 0; j < cols_B; ++j){
            for (k = 0; k < cols_A; ++k)
                AB[i][j] += (A[i][k] * B[k][j]);
        }
    }
}

int main(int argc, char *argv[]){
    // MPI VARIABLES, INITIALIZE MPI
    int rank, size, interval, remainder;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0){
        // READ AND WRITE MATRICES ------------------------------------
        FILE *matrix1, *matrix2;
        matrix1 = fopen("matrix1", "r");
        fscanf(matrix1, "%d", &rows_A);
        fscanf(matrix1, "%d", &cols_A);
        matrix2 = fopen("matrix2", "r");
        fscanf(matrix2, "%d", &rows_B);
        fscanf(matrix2, "%d", &cols_B);

        int dimensions[3] = {rows_A, cols_A, cols_B};

        /*printf("\n\nRows A = %d",rows_A);
        printf("\nCols A = %d",cols_A);
        printf("\n\nRows B = %d",rows_B);
        printf("\nCols B = %d",cols_B);*/

        // Allocate memory for matrices
        int **A = malloc(rows_A * sizeof(int*));
        // The cast to size_t prevents integer overflow with big matrices
        A[0] = malloc((size_t)rows_A * (size_t)cols_A * sizeof(int));
        for(i = 1; i < rows_A; i++)
            A[i] = A[0] + i*cols_A;

        int **B = malloc(rows_B * sizeof(int*));
        // The cast to size_t prevents integer overflow with big matrices
        B[0] = malloc((size_t)rows_B * (size_t)cols_B * sizeof(int));
        for(i = 1; i < rows_A; i++)
            B[i] = B[0] + i*cols_B;

        int **AB = malloc(rows_A * sizeof(int*));
        // The cast to size_t prevents integer overflow with big matrices
        AB[0] = malloc((size_t)rows_A * (size_t)cols_B * sizeof(int));
        for(i = 1; i < rows_A; i++)
            AB[i] = AB[0] + i*cols_B;

        /*int **A = (int **)malloc(rows_A * sizeof(int*));
        for(i = 0; i < rows_A; i++)
            A[i] = (int *)malloc(cols_A * sizeof(int));
        int **B = (int **)malloc(rows_B * sizeof(int*));
        for(i = 0; i < rows_B; i++)
            B[i] = (int *)malloc(cols_B * sizeof(int));
        int **AB = (int **)malloc(rows_A * sizeof(int*));
        for(i = 0; i < rows_B; i++)
            AB[i] = (int *)malloc(cols_B * sizeof(int));*/

        // Write matrices
        while(!feof(matrix1)){
            for(i=0;i<rows_A;i++){
                for(j=0;j<cols_A;j++)
                    fscanf(matrix1,"%d",&A[i][j]);
            }
        }
        while(!feof(matrix2)){
            for(i=0;i<rows_B;i++){
                for(j=0;j<cols_B;j++)
                    fscanf(matrix2,"%d",&B[i][j]);
            }
        }

        /*
        // Print Matrices
        printf("\n\n");
        //print matrix 1
        printf("Matrix A:\n");
        for(i=0;i<rows_A;i++){
            for(j=0;j<cols_A;j++)
                printf("%d\t",A[i][j]);
            printf("\n");}
        printf("\n");
        //print matrix 2
        printf("Matrix B:\n");
        for(i=0;i<rows_B;i++){
            for(j=0;j<cols_B;j++)
                printf("%d\t",B[i][j]);
            printf("\n");} */

        // ------------------------------------------------------------------
        // MULTIPLICATION (Parallelize here)
        printf("begin rank 0\n");
        interval = rows_A / size;   // work per processor
        remainder = rows_A % size;

        // SEND B BROADCAST to all
        MPI_Bcast(B, rows_B * cols_B, MPI_INT, 0, MPI_COMM_WORLD);
        printf("1\n");

        // SEND A, ROWS, COLS, interval to each rank
        for(i=1;i<size;i++)
            MPI_Send(dimensions,3,MPI_INT,i,123,MPI_COMM_WORLD);
        printf("2\n");
        for(i=1;i<size;i++)
            MPI_Send(A[i*interval],interval*rows_A,MPI_INT,i,123,MPI_COMM_WORLD);
        printf("3\n");

        // ROOT MM
        matrixMult(0, interval);
        printf("3.5\n");
        matrixMult(size * interval, remainder);
        printf("4\n");

        // receive AB from workers, add to current AB
        for(i=1;i<size;i++)
            MPI_Recv(AB[i*interval],interval*rows_A,MPI_INT,i,123,MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("5\n");

        // PRINT MATRIX PRODUCT
        printf("\nSum Of Matrix:\n");
        for(i = 0; i < rows_A; ++i){
            for(j = 0; j < cols_B; ++j){
                printf("%d\t",AB[i][j]);
                if(j == cols_B - 1)   /* To display matrix sum in order. */
                    printf("\n");
            }
        }

        // CLOSE FILES
        fclose(matrix1);
        fclose(matrix2);
    }
    else{ // WORKER NODES
        printf("bring workers\n");
        // RECEIVE B BROADCAST
        MPI_Bcast(B, rows_B * cols_B, MPI_INT, 0, MPI_COMM_WORLD);
        printf("a\n");

        // RECEIVE A, INTERVAL
        MPI_Recv(dimensions,3,MPI_INT,0,123, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
        printf("b\n");
        rows_A = dimensions[0];
        cols_A = dimensions[1];
        cols_B = dimensions[2];
        printf("c\n");
        MPI_Recv(A[rank*interval],interval*rows_A,MPI_INT,0,123, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
        printf("d\n");

        // WORKER MM
        matrixMult(rank*interval, interval);
        printf("e\n");

        // send AB to root
        MPI_Send(AB[rank*interval],interval*rows_A,MPI_INT,0,123,MPI_COMM_WORLD);
        printf("f\n");
    }

    // FINALIZE MPI
    MPI_Finalize(); /* EXIT MPI */
}
I put in some prints to try to figure out where my code fails, and it looks like it is the actual matrix multiplication part, both in the workers and in the rank-0 root. Does that mean my receives are the problem? The input is a 2x3 matrix (1 2 3 4 5 6) and a 3x2 matrix (7 8 9 10 11 12). This is what the output looks like:
hjiang1@cook:~/cs287/PMatrixMultiply$ make
mpicc parallelMatrixMult.c -std=c99 -lm -o parallelMatrix.out
hjiang1@cook:~/cs287/PMatrixMultiply$ mpirun --hostfile QuaCS parallelMatrix.out
No protocol specified
No protocol specified
bring workers
a
bring workers
a
bring workers
a
begin rank 0
1
2
b
c
b
c
b
c
3
d
e
d
3.5
[cook:06730] *** Process received signal ***
[cook:06730] Signal: Segmentation fault (11)
[cook:06730] Signal code: Address not mapped (1)
[cook:06730] Failing at address: 0xffffffffbbc4d600
[cook:06728] *** Process received signal ***
[cook:06728] Signal: Segmentation fault (11)
[cook:06728] Signal code: Address not mapped (1)
[cook:06728] Failing at address: 0x5d99f200
[cook:06727] *** Process received signal ***
[cook:06730] [ 0] /lib/x86_64-linux-gnu/libpthread.so.0(+0xfcb0)[0x7fdaa80eccb0]
[cook:06730] [ 1] [cook:06728] [ 0] /lib/x86_64-linux-gnu/libc.so.6(+0x147b55)[0x7fdaa7e65b55]
[cook:06730] [ 2] /usr/local/lib/openmpi/mca_btl_vader.so(+0x23f9)[0x7fda9e70f3f9]
[cook:06730] [ 3] /usr/local/lib/openmpi/mca_pml_ob1.so(mca_pml_ob1_send_request_start_rndv+0x1d3)[0x7fda9e0df393]
[cook:06730] [ 4] /usr/local/lib/openmpi/mca_pml_ob1.so(mca_pml_ob1_send+0x754)[0x7fda9e0d5404]
[cook:06730] [ 5] /lib/x86_64-linux-gnu/libpthread.so.0(+0xfcb0)[0x7f910bef2cb0]
[cook:06728] [ 1] parallelMatrix.out[0x400bad]
[cook:06728] [ 2] parallelMatrix.out[0x401448]
[cook:06728] [ 3] /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xed)[0x7f910bb4576d]
[cook:06728] [ 4] parallelMatrix.out[0x400a79]
[cook:06728] *** End of error message ***
/usr/local/lib/libmpi.so.1(PMPI_Send+0xf2)[0x7fdaa8368332]
[cook:06730] [ 6] parallelMatrix.out[0x401492]
[cook:06730] [ 7] /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xed)[0x7fdaa7d3f76d]
[cook:06730] [ 8] parallelMatrix.out[0x400a79]
[cook:06730] *** End of error message ***
[cook:06727] Signal: Segmentation fault (11)
[cook:06727] Signal code: Address not mapped (1)
[cook:06727] Failing at address: (nil)
[cook:06727] [ 0] /lib/x86_64-linux-gnu/libpthread.so.0(+0xfcb0)[0x7f73e0d09cb0]
[cook:06727] [ 1] parallelMatrix.out[0x400bad]
[cook:06727] [ 2] [cook:6729] *** An error occurred in MPI_Recv
[cook:6729] *** reported by process [1864040449,2]
[cook:6729] *** on communicator MPI_COMM_WORLD
[cook:6729] *** MPI_ERR_COUNT: invalid count argument
[cook:6729] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[cook:6729] *** and potentially your MPI job)
Any help would be greatly appreciated. Again, I am new to C and MPI, so please bear with how rough the code is.
Answer 0 (score: 4)
I keep repeating this over and over: when using MPI, use flat arrays, i.e. allocate each matrix as one contiguous block of memory instead of allocating every row separately. That is, instead of:
int **A = (int **)malloc(rows_A * sizeof(int*));
for(i = 0; i < rows_A; i++)
    A[i] = (int *)malloc(cols_A * sizeof(int));
you should use:
int **A = malloc(rows_A * sizeof(int*));
// The cast to size_t prevents integer overflow with big matrices
A[0] = malloc((size_t)rows_A * (size_t)cols_A * sizeof(int));
for(i = 1; i < rows_A; i++)
    A[i] = A[0] + i*cols_A;
Freeing such a matrix is as simple as:
free(A[0]);
free(A);
That said, there is another kind of error in your code:
MPI_Recv(A+(i*interval), ...);
MPI_Send(A+(i*interval), ...);
A is an array of pointers to each row, and A + i is a pointer to the i-th element of that array. What you pass to MPI is therefore not the actual address of the row data in memory, but the address of a pointer to that data. The correct expression (assuming you have allocated the memory in a single block as outlined above) is:
MPI_Recv(A[i*interval], ...);
or
MPI_Recv(*(A + i*interval), ...);
In other words, array[index] is equivalent to *(array + index), not to array + index.
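To tie both points together, here is a minimal sketch (not from the question; alloc_matrix, nrows, ncols, row_start and send_rows are made-up names for illustration) showing a contiguous allocation and why A[row_start] is the right thing to pass when sending a block of consecutive rows with a single MPI_Send:

#include <stdlib.h>
#include <mpi.h>

// Allocate an nrows x ncols matrix as one contiguous block of ints,
// plus an array of row pointers so A[i][j] indexing still works.
int **alloc_matrix(int nrows, int ncols) {
    int **m = malloc(nrows * sizeof(int *));
    m[0] = malloc((size_t)nrows * (size_t)ncols * sizeof(int));
    for (int i = 1; i < nrows; i++)
        m[i] = m[0] + (size_t)i * ncols;
    return m;
}

// Send count_rows consecutive rows starting at row_start to rank dest.
// Because the storage is contiguous, A[row_start] is the address of the
// first element of that block, so a single send covers all the rows.
void send_rows(int **A, int row_start, int count_rows, int ncols,
               int dest, int tag, MPI_Comm comm) {
    MPI_Send(A[row_start], count_rows * ncols, MPI_INT, dest, tag, comm);
}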
Answer 1 (score: 0)
If you are familiar with gdb, keep in mind that you can still use it to debug MPI programs:
mpirun -np 4 xterm -e gdb my_mpi_application
This opens four terminals, giving you a gdb session for each of the four processes.
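If X forwarding is not available, so the xterm trick won't work, another common approach (just a sketch, not part of the original answer) is to have each rank print its PID and spin until you attach gdb and flip a flag:

#include <stdio.h>
#include <unistd.h>

// Call right after MPI_Init. Attach with `gdb -p <pid>`, then run
// `set var holding = 0` and `continue` in gdb to release the rank.
static void wait_for_debugger(int rank) {
    volatile int holding = 1;
    printf("rank %d: PID %d waiting for debugger\n", rank, (int)getpid());
    fflush(stdout);
    while (holding)
        sleep(1);
}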
Answer 2 (score: 0)
You appear to allocate memory only on the root process:
if (rank == 0){
    // READ AND WRITE MATRICES ------------------------------------
    // Allocate memory for matrices
    int **A = malloc(rows_A * sizeof(int*));
    // The cast to size_t prevents integer overflow with big matrices
    A[0] = malloc((size_t)rows_A * (size_t)cols_A * sizeof(int));
    int **B = malloc(rows_B * sizeof(int*));
    // The cast to size_t prevents integer overflow with big matrices
    B[0] = malloc((size_t)rows_B * (size_t)cols_B * sizeof(int));
    int **AB = malloc(rows_A * sizeof(int*));
    // The cast to size_t prevents integer overflow with big matrices
    AB[0] = malloc((size_t)rows_A * (size_t)cols_B * sizeof(int));
The first thing I would suggest is to separate reading the matrix dimensions from the allocation. Then, after reading the matrix sizes, broadcast them and allocate the matrices on every process.
Also, by declaring int **A inside the rank == 0 branch, you are shadowing the declaration of A at the top of the file.
Something like this:
if(rank == 0) {
    // Read rows_A, cols_A, rows_B, cols_B
    ....
}
MPI_Bcast(&rows_A, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&rows_B, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&cols_A, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&cols_B, 1, MPI_INT, 0, MPI_COMM_WORLD);
// allocate memory
....
if(rank == 0) {
    // read matrix
    ....
}
// broadcast matrices
....
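For what it's worth, here is one way the skeleton above could be filled in. It is only a sketch, not the asker's code; it assumes a contiguous-allocation helper such as the hypothetical alloc_matrix shown after Answer 0, and it packs the four sizes into one array so a single broadcast carries all the dimensions:

int dims[4];
if (rank == 0) {
    // rows_A, cols_A, rows_B, cols_B were already read from the input files here
    dims[0] = rows_A; dims[1] = cols_A; dims[2] = rows_B; dims[3] = cols_B;
}
// One broadcast carries all four dimensions to every rank
MPI_Bcast(dims, 4, MPI_INT, 0, MPI_COMM_WORLD);
rows_A = dims[0]; cols_A = dims[1]; rows_B = dims[2]; cols_B = dims[3];

// Every rank now knows the sizes and can allocate contiguous matrices
A  = alloc_matrix(rows_A, cols_A);
B  = alloc_matrix(rows_B, cols_B);
AB = alloc_matrix(rows_A, cols_B);   // note: AB should also be zeroed before use

if (rank == 0) {
    // rank 0 fills A and B from the input files here
}

// Broadcast the matrix data itself; A[0] and B[0] point at the contiguous blocks
MPI_Bcast(A[0], rows_A * cols_A, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(B[0], rows_B * cols_B, MPI_INT, 0, MPI_COMM_WORLD);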