MPI错误内存不足

时间:2016-08-23 17:38:17

标签: c visual-studio mpi

我有这个矩阵添加m程序。当我使用一个小的矩阵文件时,它工作正常,但当它与大矩阵文件(5300 * 4200)一起使用时,它会在处理器高于2时崩溃。错误是“内存不足”。

#include<mpi.h>
#include<stdio.h>
#include<stdlib.h>
#include<ctype.h>
#include<time.h>

int main(int argc, char** argv)
{
char buffer[256];
FILE* pIn = NULL;
FILE* pIn1 = NULL;
FILE* pOut = NULL;
char inputF[128];
char inputF1[128];
char outF[128];
int rowA = 0;
int columnA = 0;
int rowB = 0;
int columnB = 0;
int averow = 0;
int rows = 0;
int extra = 0,offset = 0;
double **mat;
double **mat2;
double **sum;
double **sum2;
double StartTime = 0;
double EndTime = 0;


int id,p;
int i = 0,j = 0,source =0,dest =0;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&id);
MPI_Comm_size(MPI_COMM_WORLD,&p);


if(id == 0 || p == 1)
{

    /////////OPEN FILE AND READ ROWS AND COLUMNS////////////////////
    fgets(buffer,sizeof(buffer),stdin);
    sscanf(buffer,"%s",inputF);

    fgets(buffer,sizeof(buffer),stdin);
    sscanf(buffer,"%s",inputF1);

    //printf("File A: %s File B: %s\n",inputF,inputF1);
    pIn = fopen_s(inputF,"r");
    if(pIn == NULL)
    {
        printf("Error opening File");
        exit(0);    
    }
    //printf("File 1 reading Success!\n");
    fgets(buffer,200,pIn);
    sscanf(buffer,"%d %d",&rowA,&columnA);

    pIn1 = fopen(inputF1,"r");
    if(pIn1 == NULL)
    {
        printf("Error opening File");
        exit(0);

    }
    //printf("File 2 reading Success!\n");
    fgets(buffer,200,pIn1);
    sscanf(buffer,"%d %d",&rowB,&columnB);

    //printf("Dimensions of two matrices A: %d %d B: %d %d \n",rowA,columnA,rowB,columnB);
    /////////OPEN FILE AND READ ROWS AND COLUMNS////////////////////



    ///////////ALLOCATING MEMORY FOR 3 MATRICES/////////////////////

    mat = (double **)calloc(rowA,sizeof(double*));
    for(i = 0; i < rowA; i++)
        mat[i] = (double*)calloc(columnA,sizeof(double));

    if(mat == NULL)
    {
        printf("Error allocating memory for 1st matrix.");
        exit(1);
    }



    mat2 = (double **)calloc(rowA,sizeof(double*));
    for(i = 0; i < rowA; i++)
        mat2[i] = (double*)calloc(columnA,sizeof(double));

    if(mat2 == NULL)
    {
        printf("Error allocating memory for 2nd matrix.");
        exit(1);
    }


    sum2 = (double **)calloc(rowA,sizeof(double*));
    for(i = 0; i < rowA; i++)
        sum2[i] = (double*)calloc(columnA,sizeof(double));

    if(sum2 == NULL)
    {
        printf("Error allocating memory for ANS matrix.");
        exit(1);
    }

    ///////////ALLOCATING MEMORY FOR 3 MATRICES/////////////////////


    //////READ MATRIX DATA///////////////////////////
    for (i = 0; i < rowA; i++)
    {
        for (j = 0; j < columnA; j++)
        {
            fscanf(pIn,"%lf",&mat[i][j]);
            fscanf(pIn1,"%lf",&mat2[i][j]);
        }
    }
    //////READ MATRIX DATA///////////////////////////






    ////////DIVIDE WORK AMONG SLAVES///////////////////
    if(p > 1)
    {
        StartTime = MPI_Wtime();
        averow = rowA/(p-1);
        extra = rowA%(p-1);
        offset = 0;

        for (dest=1; dest<=(p-1); dest++)
        {
            rows = (dest <= extra) ? averow+1 : averow; 
            //printf("Process %d:Sending %d rows to process %d offset=%d\n",id,rows,dest,offset);
            MPI_Send(&columnA, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);
            MPI_Send(&rowA, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);
            MPI_Send(&offset, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);
            MPI_Send(&rows, 1, MPI_INT, dest, 1, MPI_COMM_WORLD);

            for (i = offset; i < rows+offset; i++)
                for (j = 0; j < columnA; j++)
                {
                    MPI_Send(&mat[i][j],5, MPI_DOUBLE, dest, 1,MPI_COMM_WORLD);
                    MPI_Send(&mat2[i][j],5, MPI_DOUBLE, dest, 1, MPI_COMM_WORLD);
                }


            offset = offset + rows;

        }
    ////////DIVIDE WORK AMONG SLAVES///////////////////
    ////////RECIEVE BACK THE ADDED WORK///////////////////

        for (source = 1;source <=(p-1);source++)
        {
            MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&columnA, 1, MPI_INT, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            for (i = offset; i < rows+offset; i++)
                for (j = 0; j < columnA; j++)
                    MPI_Recv(&sum2[i][j], 5, MPI_DOUBLE, source, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

            printf("Process %d: Received results from process %d\n",id,source);
        }
    EndTime = MPI_Wtime();
    printf("Timings : %f Sec", EndTime - StartTime);
    ////////RECIEVE BACK THE ADDED WORK///////////////////
    }

    ////////ADD FOR 1 PROCESSOR///////////////////
    if(p == 1)
    {
    StartTime = MPI_Wtime();
        for (i = 0; i < rowA; i++)
            for (j = 0; j < columnA; j++)
                sum2[i][j] = mat[i][j]+mat2[i][j];
    EndTime = MPI_Wtime();
    printf("Timings : %f Sec", EndTime - StartTime);
    }
    ////////ADD FOR 1 PROCESSOR///////////////////


    /////////////////PRINT RESULTS////////////////////

    //for (int i = 0; i < rowA; i++)
        //for (int j = 0; j < columnA; j++)
            //printf(" %.1lf",sum[i][j]);

    /////////////////PRINT RESULTS////////////////////



    //////////////WRITE RESULTS TO FILE//////////////
    pOut = fopen("out.txt","w");
    if(pOut == NULL)
        {
        printf("Error opening File");
        exit(0);
        }

    fprintf(pOut,"%d %d\n",rowA,columnA);
    for (i = 0; i < rowA; i++)
        for (j = 0; j < columnA; j++)
            fprintf(pOut," %.1lf ",sum2[i][j]);

    fclose(pOut);
    fclose(pIn);
    fclose(pIn1);
    //////////////WRITE RESULTS TO FILE//////////////



    /////////////FREE MEMORY ALLOCATED///////////////
    /////////////FREE MEMORY ALLOCATED///////////////

}



if(id > 0)
{
    MPI_Recv(&columnA, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(&rowA, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    ///////////ALLOCATING MEMORY FOR 3 MATRICES/////////////////////

    mat = (double **)calloc(rowA,sizeof(double*));
    for(i = 0; i < rowA; i++)
        mat[i] = (double*)calloc(columnA,sizeof(double));

    if(mat == NULL)
    {
        printf("Process %d:Error allocating memory for 1st matrix.",id);
        exit(1);
    }

    mat2 = (double **)calloc(rowA,sizeof(double*));
    for(i = 0; i < rowA; i++)
        mat2[i] = (double*)calloc(columnA,sizeof(double));

    if(mat2 == NULL)
    {
        printf("Process %d: Error allocating memory for 2nd matrix.",id);
        exit(1);
    }


    sum = (double **)calloc(rowA,sizeof(double*));
    for(i = 0; i < rowA; i++)
        sum[i] = (double*)calloc(columnA,sizeof(double));

    if(sum == NULL)
    {
        printf("Process %d: Error allocating memory for ANS matrix.",id);
        exit(1);
    }


    ///////////ALLOCATING MEMORY FOR 3 MATRICES/////////////////////


    //////////RECIEVE AND ADD EACH PART AND SEND BACK FOR EACH SLAVES/////////////
    MPI_Recv(&offset, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(&rows, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    //printf("Process %d: Recieved this number of Rows: %d\n",id,rows);

    for (i = offset; i < rows+offset; i++)
        for (j = 0; j < columnA; j++)
        {
            MPI_Recv(&mat[i][j], 5, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&mat2[i][j],5, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            sum[i][j] = mat[i][j] + mat2[i][j];
            //printf(" %d:%.1lf",id,sum[i][j]);
        }

    //printf("send matrix %.1lf and %.1lf from Process\n",mat[0][0],mat[2][2],id );
    MPI_Send(&rows, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
    MPI_Send(&offset, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
    MPI_Send(&columnA, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
    for (i = offset; i < rows+offset; i++)
        for (j = 0; j < columnA; j++)
            MPI_Send(&sum[i][j],5, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);

    //////////RECIEVE AND ADD AND SEND BACK TO MASTER EACH PART FOR EACH SLAVES/////////////

}

MPI_Finalize();
}

0 个答案:

没有答案