I am trying to perform matrix summation on the GPU using a 2D grid and 2D blocks, and I get different results across multiple runs of the program. Any explanation of, or fix for, this behavior would be appreciated. Thanks.
Most of the time the result on the CPU matches the result on the GPU, but sometimes (for example, right after the operating system boots) the program reports that the results do not match. All executions after that produce matching results (and appear to run faster). I have not found a guaranteed way to reproduce this behavior; I tried rebooting the OS, but the first execution after the reboot produced matching results.
The main function sums two 2^10-by-2^10 matrices on the CPU and on the GPU (using a 2^5-by-2^5 grid of 2^5-by-2^5 blocks) and compares the results.
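In other words, the launch is intended to provide 2^5 × 2^5 = 2^10 threads along each dimension, one thread per element of the 2^10 × 2^10 matrices.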
#include "stdio.h"
#define FALSE 0
#define TRUE !FALSE
double *mallocMatrix(const int row, const int column)
{
    return (double*)malloc(row*column*sizeof(double));
}
void matrixInit(double *matrix, const int row, const int column)
{
    ;
}
int matEqual(double *mat1, double *mat2, const int row, const int column)
{
    for(int i=0;i<row;i++)
    {
        for(int j=0;j<column;j++)
        {
            int k=i*column+j;
            if(mat1[k]!=mat2[k])
            {
                printf("Entry %d doesn't match.\n",k);
                return FALSE;
            }
        }
    }
    return TRUE;
}
void matrixSumCpu(double *m1, double *m2, double *n, const int row, const int column)
{
    for(int i=0; i<row; i++)
    {
        for(int j=0; j<column; j++)
        {
            int k = i * column + j;
            n[k]=m1[k]+m2[k];
        }
    }
}
__global__ void _2dGrid2dBlockMatSum(double *m1, double *m2, double *n, const int row, const int column)
{
    // each thread derives its (row, column) pair from the 2D block and thread indices
    int rowIndex=blockIdx.x*blockDim.x+threadIdx.x;
    int columnIndex=blockIdx.y*blockDim.y+threadIdx.y;
    if(rowIndex<row&&columnIndex<column)
    {
        int i=rowIndex*column+columnIndex;//flatten the 2D coordinates into a linear index
        n[i]=m1[i]+m2[i];
    }
}
void checkGpuMalloc(cudaError_t code)
{
    if(code != cudaSuccess)
    {
        printf("CUDA error occurred.\n");  // report the failure before exiting
        exit(-1);
    }
}
void printMatrix(double *mat, const int row, const int column)
{
    const int rowToPrint=3;
    const int columnToPrint=6;
    for(int i=0;i<rowToPrint;i++)
    {
        for(int j=0;j<columnToPrint;j++)
            printf("%lf ", mat[i*column+j]);
        if(column>columnToPrint)
            printf("...");
        printf("\n");
    }
    if(row>rowToPrint)
        printf("...\n");
}
int main()
{
    int row=1<<10, column=1<<10;
    double *h_m1=NULL, *h_m2=NULL, *h_n1=NULL, *h_n2=NULL;//n=m1+m2
    h_m1=mallocMatrix(row, column);
    h_m2=mallocMatrix(row, column);
    h_n1=mallocMatrix(row, column);
    h_n2=mallocMatrix(row, column);
    if(h_m1==NULL||h_m2==NULL||h_n1==NULL||h_n2==NULL)
    {
        printf("Unable to allocate enough memory on CPU\n");
        exit(-1);
    }
    matrixInit(h_m1,row,column);
    matrixInit(h_m2,row,column);
    printf("Summing matrices on CPU...\n");
    matrixSumCpu(h_m1,h_m2,h_n1,row,column);
    double *d_m1=NULL, *d_m2=NULL, *d_n=NULL;
    checkGpuMalloc(cudaMalloc((void**)&d_m1, row*column*sizeof(double)));
    checkGpuMalloc(cudaMalloc((void**)&d_m2, row*column*sizeof(double)));
    checkGpuMalloc(cudaMalloc((void**)&d_n, row*column*sizeof(double)));
    cudaMemcpy(d_m1, h_m1, row*column*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_m2, h_m2, row*column*sizeof(double), cudaMemcpyHostToDevice);
    printf("Summing matrices on GPU with 2D grid and 2D blocks.\n");
    _2dGrid2dBlockMatSum<<<(1<<5,1<<5),(1<<5, 1<<5)>>>(d_m1, d_m2, d_n, row, column);
    cudaDeviceSynchronize();
    cudaMemcpy(h_n2, d_n, row*column*sizeof(double), cudaMemcpyDeviceToHost);
    if(matEqual(h_n1, h_n2, row, column))
        printf("Matrices match.\n");
    else
    {
        printf("Matrices don't match.\nResult on CPU:\n");
        printMatrix(h_n1, row, column);
        printf("Result on GPU:");
        printMatrix(h_n2, row, column);
    }
    free(h_m1);
    free(h_m2);
    free(h_n1);
    free(h_n2);
    cudaFree(d_m1);
    cudaFree(d_m2);
    cudaFree(d_n);
    cudaDeviceReset();
    return 0;
}
Answer (score: 3)
This is not doing what you think it does. When I compile your code, the compiler issues a warning on this line:

_2dGrid2dBlockMatSum<<<(1<<5,1<<5),(1<<5, 1<<5)>>>(d_m1, d_m2, d_n, row, column);

You should be doing this:
_2dGrid2dBlockMatSum<<<dim3(1<<5,1<<5),dim3(1<<5, 1<<5)>>>(d_m1, d_m2, d_n, row, column);

This: (1<<5,1<<5) is not the same as this: dim3(1<<5,1<<5). The C++ compiler evaluates the former with the comma operator, producing something you do not expect: a scalar 32 rather than the 2D quantity (32,32).
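As a minimal standalone sketch of what that expression evaluates to (plain C++, separate from the code above):

#include <stdio.h>

int main()
{
    // the comma operator evaluates the left operand, discards it, and yields the right one,
    // so this is just the scalar 32, not a two-dimensional (32,32) configuration
    int config = (1<<5, 1<<5);
    printf("%d\n", config);   // prints 32
    return 0;
}

With that launch the kernel effectively ran as <<<32, 32>>>: a 1D grid of 32 blocks of 32 threads each, so only the 1024 entries of column 0 are ever written; the rest of d_n keeps whatever happened to be in that device memory, which is presumably why the mismatch only showed up intermittently.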
Also, why is your matrixInit function empty? If you want to force the code to fail consistently, add some matrix initialization:
void matrixInit(double *matrix, const int row, const int column)
{
    for (int i = 0; i < row; i++)
        for (int j = 0; j < column; j++)
            matrix[i*column+j] = 1;
}
and add this line before the kernel call:

cudaMemset(d_n, 0, row*column*sizeof(double));

Then compile and run it, and it will fail. After that, make the dim3 change I suggested, and it is fixed.

Here is the fixed example:
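In sketch form, assuming everything else stays exactly as in the question's listing, the fix amounts to filling in matrixInit as shown above and changing these lines in main:

// clear the device result so stale GPU memory cannot accidentally match the CPU result
cudaMemset(d_n, 0, row*column*sizeof(double));
// launch with explicit dim3 arguments so both the grid and the blocks are really 32 x 32
_2dGrid2dBlockMatSum<<<dim3(1<<5,1<<5),dim3(1<<5, 1<<5)>>>(d_m1, d_m2, d_n, row, column);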