Trying to eliminate cudaDeviceSynchronize() in a Conjugate Gradient kernel

Time: 2015-04-28 04:27:38

Tags: c cuda parallel-processing gpu gpgpu

I am implementing a Conjugate Gradient solver on a Tegra K1 board, which has unified memory. The problem I am facing is that inside the loop I have to call cudaDeviceSynchronize() twice in order to update scalar variables on the host, and this hurts my performance many times over compared to a TI Keystone-II, even though that device has lower compute capability and I only run naive code on it to process the data in parallel. I am using CUDA version 6.0.

....    
double *A, *b, *x, *x1, *r, *d, *q, deltaNew, deltaFirst, Alpha, deltaOld, Beta; // data for init processing

double *temp, *temp1, Alpha1;

b = (double *) malloc(sizeof(double)*N*1); // b original
x1 = (double *) malloc(sizeof(double)*N*1); // x1 

checkCudaErrors(cudaMallocManaged(&A, sizeof(double)*N*N)); // A original 
checkCudaErrors(cudaMallocManaged(&x, sizeof(double)*N*1)); // x original
checkCudaErrors(cudaMallocManaged(&r, sizeof(double)*N*1)); // r original 
checkCudaErrors(cudaMallocManaged(&d, sizeof(double)*N*1)); // d original
checkCudaErrors(cudaMallocManaged(&q, sizeof(double)*N*1)); // q original
checkCudaErrors(cudaMallocManaged(&temp, sizeof(double)*1*1)); //  temp of d'*q for temporary storage
checkCudaErrors(cudaMallocManaged(&temp1, sizeof(double)*1*1)); //  temp1 of r'*r for temporary storage

fprintf(stderr, "\nInitializing data\n");
// Initializing all the data
setup_data(&A[0], &b[0], &x[0], &r[0], &d[0], &deltaNew, &deltaFirst);

// Get handle to the CUBLAS context 
cublasHandle_t cublasHandle = 0;
cublasCreate(&cublasHandle);
fprintf(stderr, "\nData setup done.. Starting..\n");    
startTime_GPU = omp_get_wtime();

while(deltaNew > (EPSI)*deltaFirst)
{
    // cublasSgemm(handle, op, op, colof2, rowof1, colof1, scalar1, mat2, colof2, mat1, colof1, scalar2, result, colof2 );
    cublasDgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1, N, N, &alpha, d, 1, A, N, &beta, q, 1);        // q = A * d   
    cublasDgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1, 1, N, &alpha, q, 1, d, N, &beta, temp, 1);        // alpha = deltaNew/(d' * q);

    cudaDeviceSynchronize(); // POSSIBLY ELIMINATE THIS
    Alpha = deltaNew/temp[0]; // alpha = deltaNew/(d' * q);
    Alpha1 = (-1)*Alpha;   

    // cublasSaxpy(handle, N, scalar, scaledinput, stride1, inout, stride2);
    cublasDaxpy(cublasHandle, N, &Alpha, d, 1, x, 1); // x = x + alpha * d  
    cublasDaxpy(cublasHandle, N, &Alpha1, q, 1, r, 1); // r = r - alpha * q

    deltaOld = deltaNew; // deltaOld = deltaNew  

    cublasDgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1, 1, N, &alpha, r, 1, r, N, &beta, temp1, 1);        // deltaNew = r' * r

    cudaDeviceSynchronize(); // POSSIBLY ELIMINATE THIS
    deltaNew = temp1[0];
    Beta = deltaNew/deltaOld; // beta = deltaNew/deltaOld
    cublasDgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1, N, &alpha, r, 1, &Beta, d, 1, d, 1); // d = r + beta * d
}

endTime_GPU = omp_get_wtime();
...
...

Can anyone suggest any improvements or enhancements I could use to eliminate these synchronization calls and improve performance?

1 Answer:

Answer 0 (score: 1):

Kernel launches have always been asynchronous in CUDA (even back in CUDA 1.0). In those days, device memory had to be filled and read back with explicit memcpy calls for a CUDA kernel to operate on it. The CPU/GPU synchronization was hidden, because the device-to-host memcpy implicitly enforced the ordering: the device-to-host memcpy could not begin until the kernel had finished.
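For concreteness, here is a minimal sketch of that pattern (myKernel, grid, block, d_out, h_out and bytes are made-up names, not from the question). With explicitly allocated device memory, the blocking device-to-host copy cannot start until the preceding kernel in the same stream has finished, so no separate synchronization call is needed:

myKernel<<<grid, block>>>(d_out);                         // asynchronous kernel launch
cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);  // blocks, and is ordered after the kernel
// h_out can now be read safely on the CPU -- no cudaDeviceSynchronize() needed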

When the CPU and GPU have the same memory mapped, synchronization has to be done more explicitly. Without the cudaDeviceSynchronize(), your CPU code may read the output of the DGEMM before the GPU has finished writing it. Managed memory implements a lot of implicit policy behind kernel launches and CPU/GPU synchronization events (such as cudaDeviceSynchronize()) in order to make programming easier.

The way to get CPU/GPU concurrency and still synchronize correctly is through multi-buffering, with a CUDA event attached to each buffer: call cudaEventRecord() after each DGEMM, and wait on that buffer's event (cudaEventSynchronize()) before consuming it.
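A rough sketch of that scheme, under my own assumptions (cuBLAS bound to a user-created stream, the scalar copied into pinned host buffers with cudaMemcpyAsync; the names d_dot, h_dot, ev, stream and cur are illustrative, not the poster's code):

double *d_dot[2];                  // device scalars (cudaMalloc, not managed)
double *h_dot[2];                  // pinned host copies of those scalars
cudaEvent_t ev[2];
cudaStream_t stream;

for (int i = 0; i < 2; ++i) {
    cudaMalloc(&d_dot[i], sizeof(double));
    cudaMallocHost(&h_dot[i], sizeof(double));
    cudaEventCreate(&ev[i]);
}
cudaStreamCreate(&stream);
cublasSetStream(cublasHandle, stream);   // all cuBLAS work now goes into 'stream'

int cur = 0;
while (deltaNew > (EPSI)*deltaFirst) {
    // DGEMM producing the scalar d'*q into the current device buffer
    cublasDgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1, 1, N, &alpha, q, 1, d, N, &beta, d_dot[cur], 1);
    cudaMemcpyAsync(h_dot[cur], d_dot[cur], sizeof(double), cudaMemcpyDeviceToHost, stream);
    cudaEventRecord(ev[cur], stream);    // marks the point at which this buffer becomes valid

    // ... further GPU work that does not depend on the scalar can be queued here ...

    cudaEventSynchronize(ev[cur]);       // wait only for this buffer, not for the whole device
    Alpha = deltaNew / *h_dot[cur];      // now safe to read on the CPU
    cur ^= 1;                            // alternate buffers on the next iteration
    // ... rest of the CG update as in the question ...
}

In this particular loop the subsequent AXPY needs Alpha anyway, so how much concurrency this buys depends on what other work can be queued between the record and the wait.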

talonmies has already said it, but it bears repeating: if you want good performance, you may have to give up on managed memory.
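As a concrete illustration of that last point (my own sketch, not code from the answer; d_temp and h_temp are made-up names), moving just the two scalars off managed memory already lets a small blocking copy stand in for each cudaDeviceSynchronize(), because the copy is ordered after the DGEMM in the same stream:

double *d_temp, h_temp;                                // device scalar and its host copy
checkCudaErrors(cudaMalloc(&d_temp, sizeof(double)));  // instead of cudaMallocManaged(&temp, ...)

// inside the loop:
cublasDgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1, 1, N, &alpha, q, 1, d, N, &beta, d_temp, 1);  // d' * q
cudaMemcpy(&h_temp, d_temp, sizeof(double), cudaMemcpyDeviceToHost);  // replaces cudaDeviceSynchronize()
Alpha  = deltaNew / h_temp;
Alpha1 = (-1)*Alpha;
// ... same pattern for temp1 / deltaNew ...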