这是一个MPI并行代码,但是发生了错误,错误信息如下:
BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
PID 4577 RUNNING AT gxi-w560-G10
EXIT CODE:139
CLEANING UP REMAINING PROCESSES
YOU CAN IGNOPRE THE BELOW CLEANUP MESSAGES
YOUR APPLICATION TERMINATION WITH THE EXIT STRING:segmentation fault (signal 11)
this typically refers to a problem with your application.
please see the FAQ page for debugging suggestion.
this picture中显示的命令行和代码如下。我不知道问题出在哪里。也许不能将MPI_INT和MPI_CHAR打包在一起?
ReadItem_t
的定义
typedef struct
{
int rlen;
char* header;
char* qual;
char* seq;
uint8_t* EncodeSeq;
// aln report
int mapq;
int score;
int sub_score;
int CanNum;
int iBestAlnCanIdx;
AlignmentReport_t* AlnReportArr;
} ReadItem_t;
函数GetNextChunk
int GetNextChunk(bool bSepLibrary, FILE *file, FILE *file2, ReadItem_t* ReadArr)
{
char* rseq;
int i, iCount = 0;
int myid,numprocs;
MPI_Status status;
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
char send_buffer[1000][1000],recv_buffer[250][1000];
for(int j=0;j<1000;j++)
{
ReadArr[j].header=new char[151];
ReadArr[j].seq=new char[151];
ReadArr[j].qual=new char[150];
}
if(myid==0)
{
while (true)
{
if ((ReadArr[iCount] = GetNextEntry(file)).rlen == 0) break;
iCount++;
if (bSepLibrary) ReadArr[iCount] = GetNextEntry(file2);
else ReadArr[iCount] = GetNextEntry(file);
if (ReadArr[iCount].rlen == 0) break;
if (bPairEnd)
{
rseq = new char[ReadArr[iCount].rlen];
GetComplementarySeq(ReadArr[iCount].rlen, ReadArr[iCount].seq, rseq);
copy(rseq, rseq + ReadArr[iCount].rlen, ReadArr[iCount].seq); delete[] rseq;
if (FastQFormat)
{
string rqual = ReadArr[iCount].qual; reverse(rqual.begin(), rqual.end());
copy(rqual.c_str(), rqual.c_str() + ReadArr[iCount].rlen, ReadArr[iCount].qual);
}
}
ReadArr[iCount].EncodeSeq = new uint8_t[ReadArr[iCount].rlen];
for(int i=0;i<1000;i++)
{
int position=0;
MPI_Pack(&ReadArr[i].rlen, 1, MPI_INT, send_buffer[i], 1000, &position, MPI_COMM_WORLD);//
position+=10;
MPI_Pack(ReadArr[i].seq, 151, MPI_CHAR, send_buffer[i], 1000, &position, MPI_COMM_WORLD);
position+=160;
MPI_Pack(ReadArr[i].qual, 150, MPI_CHAR, send_buffer[i], 1000, &position, MPI_COMM_WORLD);
position+=160;
MPI_Pack(ReadArr[i].header, 151, MPI_CHAR, send_buffer[i], 1000, &position, MPI_COMM_WORLD);
}
iCount++;
if (iCount == ReadChunkSize || (bPacBioData && iCount == 10)) break;
}
}
else
{
MPI_Scatter(send_buffer,250000,MPI_PACKED,recv_buffer,250000,MPI_PACKED,0,MPI_COMM_WORLD);
for(int x=0;x<250;x++)
{
int pos=0;
MPI_Unpack(recv_buffer[x], 1000, &pos, &ReadArr[x].rlen, 1, MPI_INT, MPI_COMM_WORLD);
pos+=10;
MPI_Unpack(recv_buffer[x], 1000, &pos, ReadArr[i].seq, 151, MPI_CHAR, MPI_COMM_WORLD);
pos+=160;
MPI_Unpack(recv_buffer[x], 1000, &pos, ReadArr[i].qual, 150, MPI_CHAR, MPI_COMM_WORLD);
pos+=160;
MPI_Unpack(recv_buffer[x], 1000, &pos, ReadArr[i].header, 151, MPI_CHAR, MPI_COMM_WORLD);
}
for(int n=0;n<2000;n++)
{
ReadArr[n].EncodeSeq = new uint8_t[ReadArr[n].rlen];
for (i = 0; i != ReadArr[n].rlen; i++) ReadArr[n].EncodeSeq[i] = nst_nt4_table[(int)ReadArr[n].seq[i]];
}
}
return iCount;
}