我正在编写一个简单的sgemm(square,alpha = 1,beta = 0),它应该利用本地内存,但它的执行速度只有天真版本的一半。
以下是内核:
const char* matrixMultiplySource =
"__kernel\n"
" void matrixMultiply(__global float* A, __global float* B, __global float* C)\n"
" {\n"
" int i = get_local_id(0);\n"
" int j = get_local_id(1);\n"
" int ig = get_global_id(0);\n"
" int jg = get_global_id(1);\n"
" int sizeG0 = get_global_size(0);\n"
" __local float localA[BLOCK_SIZE][BLOCK_SIZE];\n"
" __local float localB[BLOCK_SIZE][BLOCK_SIZE];\n"
" float val=0.0f;\n"
" for ( int index = 0; index < sizeG0; index += BLOCK_SIZE )\n"
" {\n"
" localA[j][i] = A[ig + sizeG0 * (index+j)];\n"
" localB[j][i] = B[index+i + sizeG0 * jg];\n"
" barrier(CLK_GLOBAL_MEM_FENCE);\n"
" #pragma unroll\n"
" for ( int kk = 0; kk < BLOCK_SIZE; ++kk)\n"
" {\n"
" val = val + localA[kk][i] * localB[j][kk];\n"
" }\n"
" barrier(CLK_GLOBAL_MEM_FENCE);\n"
" }\n"
" C[ig + sizeG0 * jg] = val;\n"
"}\n"
;
const char* matrixMultiplySource2 =
"__kernel\n"
" void matrixMultiply(__global float* A, __global float* B, __global float* C)\n"
" {\n"
" int ig = get_global_id(0);\n"
" int jg = get_global_id(1);\n"
" int sizeG0 = get_global_size(0);\n"
" float val=0;\n"
" for ( int k = 0; k < sizeG0; k++)\n"
" {\n"
" val = val + A[ig + k * sizeG0] * B[k + jg * sizeG0];\n"
" }\n"
" C[ig + sizeG0 * jg] = val;\n"
"}\n"
;
BLOCK_SIZE是16,我使用1024x1024矩阵以及预热。
// Create OpenCL context
context = mycl::myclCreateContext( NULL, ret_num_devices, devices, NULL, NULL, &ret);
// Create Command Queue
command_queue = mycl::myclCreateCommandQueue(context, devices[0], 0, &ret);
// Create Memory Buffer
memobjA = mycl::myclCreateBuffer(context, CL_MEM_READ_ONLY, widthA * heightA * sizeof(float), NULL, &ret);
memobjB = mycl::myclCreateBuffer(context, CL_MEM_READ_ONLY, widthB * heightB * sizeof(float), NULL, &ret);
memobjC = mycl::myclCreateBuffer(context, CL_MEM_READ_WRITE, widthC * heightC * sizeof(float), NULL, &ret);
// Copy the lists A and B to their respective memory buffers
ret = mycl::myclEnqueueWriteBuffer(command_queue,memobjA, CL_TRUE, 0,
widthA * heightA * sizeof(float), A, 0, NULL, NULL);
ret = mycl::myclEnqueueWriteBuffer(command_queue, memobjB, CL_TRUE, 0,
widthB * heightB * sizeof(float), B, 0, NULL, NULL);
// Create Kernel Program from the source
program = mycl::myclCreateProgramWithSource(context, 1, (const char **)&matrixMultiplySource,
NULL, &ret);
// Build Kernel Program
ret = mycl::myclBuildProgram(program, ret_num_devices, devices, "-D BLOCK_SIZE=16", NULL, NULL);
if(ret != CL_SUCCESS){cout << "PROBREM! " << ret << endl;return -1;}
// Create OpenCL Kernel
kernel = mycl::myclCreateKernel(program, "matrixMultiply", &ret);
size_t globalThreads[2] = {heightA, widthB};
size_t localThreads[2] = {BLOCK_SIZE, BLOCK_SIZE};
// Set OpenCL Kernel Arguments
ret = mycl::myclSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&memobjA);
ret = mycl::myclSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&memobjB);
ret = mycl::myclSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&memobjC);
// Time the kernel
struct timeval timev1, timev2;
float time_seconds = 0.0f;
mycl::myclEnqueueNDRangeKernel(command_queue, kernel, 2, NULL, globalThreads, localThreads, 0, 0, NULL);
mycl::myclFinish(command_queue);
gettimeofday(&timev1, NULL);
ret = mycl::myclEnqueueNDRangeKernel(command_queue, kernel, 2, NULL, globalThreads, localThreads, 0, 0, NULL);
if(ret != CL_SUCCESS){cout << "fail! " << ret << endl;}
ret = mycl::myclFinish(command_queue);
if(ret != CL_SUCCESS){cout << "fail! " << ret << endl;}
gettimeofday(&timev2,NULL);
time_seconds=(timev2.tv_sec-timev1.tv_sec)+0.000001*(timev2.tv_usec- timev1.tv_usec);
答案 0 :(得分:0)
您是否看过AMD APP KernelAnalyzer中的两个内核或同等工具?这些工具编译内核并显示其预测的性能特征
答案 1 :(得分:0)
您使用
barrier(CLK_GLOBAL_MEM_FENCE);
我希望看到
barrier(CLK_LOCAL_MEM_FENCE);
当你在循环中写入本地内存时。 此外,我怀疑localA的副本确实对你有所帮助 - 有一次,每个项目只能访问一次。