I am writing a simple OpenCL application that computes the maximum experimental FLOPS of a target GPU device. I decided to keep my CL kernel as simple as possible. Below are my OpenCL kernel and my host code. The kernel code is:
__kernel void flops(__global float *data) {
    int gid = get_global_id(0);
    double s = data[gid];
    data[gid] = s * 0.35;
}
The host code is:
#include <iostream>
#include <sstream>
#include <fstream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "support.h"
#include "Event.h"
#include "ResultDatabase.h"
#include "OptionParser.h"
#include "ProgressBar.h"
using namespace std;
std::string kernels_folder = "/home/users/saman/shoc/src/opencl/level3/FlopsFolder/";
std::string kernel_file = "flops.cl";
static const char *opts = "-cl-mad-enable -cl-no-signed-zeros "
"-cl-unsafe-math-optimizations -cl-finite-math-only";
cl_program createProgram (cl_context context,
                          cl_device_id device,
                          const char* fileName) {
    cl_int errNum;
    cl_program program;
    std::ifstream kernelFile (fileName, std::ios::in);
    if (!kernelFile.is_open()) {
        std::cerr << "Failed to open file for reading: " << fileName << std::endl;
        return NULL;
    }
    std::ostringstream oss;
    oss << kernelFile.rdbuf();
    std::string srcStdStr = oss.str();
    const char *srcStr = srcStdStr.c_str();
    program = clCreateProgramWithSource (context, 1, (const char **)&srcStr,
                                         NULL, &errNum);
    CL_CHECK_ERROR(errNum);
    errNum = clBuildProgram (program, 0, NULL, NULL, NULL, NULL);
    CL_CHECK_ERROR (errNum);
    return program;
}
bool createMemObjects (cl_context context, cl_command_queue queue,
                       cl_mem* memObject,
                       const int memFloatsSize, float *a) {
    cl_int err;
    *memObject = clCreateBuffer (context, CL_MEM_READ_WRITE,
                                 memFloatsSize * sizeof(float), NULL, &err);
    CL_CHECK_ERROR(err);
    if (*memObject == NULL) {
        std::cerr << "Error creating memory objects. " << std::endl;
        return false;
    }
    Event evWrite("write");
    err = clEnqueueWriteBuffer (queue, *memObject, CL_FALSE, 0, memFloatsSize * sizeof(float),
                                a, 0, NULL, &evWrite.CLEvent());
    CL_CHECK_ERROR(err);
    err = clWaitForEvents (1, &evWrite.CLEvent());
    CL_CHECK_ERROR(err);
    return true;
}
void cleanup (cl_context context, cl_command_queue commandQueue,
              cl_program program, cl_kernel kernel, cl_mem memObject) {
    if (memObject != NULL)
        clReleaseMemObject (memObject);
    if (kernel != NULL)
        clReleaseKernel (kernel);
    if (program != NULL)
        clReleaseProgram (program);
}
void addBenchmarkSpecOptions(OptionParser &op) {
}
void RunBenchmark(cl_device_id id,
                  cl_context ctx,
                  cl_command_queue queue,
                  ResultDatabase &resultDB,
                  OptionParser &op)
{
    for (float i = 0.1; i <= 0.2; i += 0.1) {
        std::cout << "Deploying " << 100*i << "%" << std::endl;
        bool verbose = false;
        cl_int errNum;
        cl_program program = 0;
        cl_kernel kernel;
        cl_mem memObject = 0;
        char maxFloatsStr[128];
        char testStr[128];
        program = createProgram (ctx, id, (kernels_folder + kernel_file).c_str());
        if (program == NULL) {
            exit (0);
        }
        if (verbose) std::cout << "Program created successfully!" << std::endl;
        kernel = clCreateKernel (program, "flops", &errNum);
        CL_CHECK_ERROR(errNum);
        if (verbose) std::cout << "Kernel created successfully!" << std::endl;
        // Identify maximum size of the global memory on the device side
        cl_long maxAllocSizeBytes = 0;
        cl_long maxComputeUnits = 0;
        cl_long maxWorkGroupSize = 0;
        clGetDeviceInfo (id, CL_DEVICE_MAX_MEM_ALLOC_SIZE,
                         sizeof(cl_long), &maxAllocSizeBytes, NULL);
        clGetDeviceInfo (id, CL_DEVICE_MAX_COMPUTE_UNITS,
                         sizeof(cl_long), &maxComputeUnits, NULL);
        clGetDeviceInfo (id, CL_DEVICE_MAX_WORK_GROUP_SIZE,
                         sizeof(cl_long), &maxWorkGroupSize, NULL);
        // Let's use 80% of this memory for transferring data
        cl_long maxFloatsUsageSize = ((maxAllocSizeBytes / 4) * 0.8);
        if (verbose) std::cout << "Max floats usage size is " << maxFloatsUsageSize << std::endl;
        if (verbose) std::cout << "Max compute unit is " << maxComputeUnits << std::endl;
        if (verbose) std::cout << "Max Work Group size is " << maxWorkGroupSize << std::endl;
        // Prepare buffer on the host side
        float *a = new float[maxFloatsUsageSize];
        for (int j = 0; j < maxFloatsUsageSize; j++) {
            a[j] = (float) (j % 77);
        }
        if (verbose) std::cout << "Host buffer has been prepared!" << std::endl;
        // Creating buffer on the device side
        if (!createMemObjects(ctx, queue, &memObject, maxFloatsUsageSize, a)) {
            exit (0);
        }
        errNum = clSetKernelArg (kernel, 0, sizeof(cl_mem), &memObject);
        CL_CHECK_ERROR(errNum);
        size_t wg_size, wg_multiple;
        cl_ulong local_mem, private_usage, local_usage;
        errNum = clGetKernelWorkGroupInfo (kernel, id,
                                           CL_KERNEL_WORK_GROUP_SIZE,
                                           sizeof (wg_size), &wg_size, NULL);
        CL_CHECK_ERROR (errNum);
        errNum = clGetKernelWorkGroupInfo (kernel, id,
                                           CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
                                           sizeof (wg_multiple), &wg_multiple, NULL);
        CL_CHECK_ERROR (errNum);
        errNum = clGetKernelWorkGroupInfo (kernel, id,
                                           CL_KERNEL_LOCAL_MEM_SIZE,
                                           sizeof (local_usage), &local_usage, NULL);
        CL_CHECK_ERROR (errNum);
        errNum = clGetKernelWorkGroupInfo (kernel, id,
                                           CL_KERNEL_PRIVATE_MEM_SIZE,
                                           sizeof (private_usage), &private_usage, NULL);
        CL_CHECK_ERROR (errNum);
        if (verbose) std::cout << "Work Group size is " << wg_size << std::endl;
        if (verbose) std::cout << "Preferred Work Group size is " << wg_multiple << std::endl;
        if (verbose) std::cout << "Local memory size is " << local_usage << std::endl;
        if (verbose) std::cout << "Private memory size is " << private_usage << std::endl;
        size_t globalWorkSize[1] = {(size_t)maxFloatsUsageSize};
        size_t localWorkSize[1] = {1};
        Event evKernel("flops");
        errNum = clEnqueueNDRangeKernel (queue, kernel, 1, NULL,
                                         globalWorkSize, localWorkSize,
                                         0, NULL, &evKernel.CLEvent());
        CL_CHECK_ERROR (errNum);
        if (verbose) cout << "Waiting for execution to finish ";
        errNum = clWaitForEvents(1, &evKernel.CLEvent());
        CL_CHECK_ERROR(errNum);
        evKernel.FillTimingInfo();
        if (verbose) cout << "Kernel execution terminated successfully!" << std::endl;
        delete[] a;
        sprintf (maxFloatsStr, "Size: %lld", (long long)maxFloatsUsageSize);
        sprintf (testStr, "Flops: %f%% Memory", 100*i);
        double flopCount = maxFloatsUsageSize * 16000;
        double gflop = flopCount / (double)(evKernel.SubmitEndRuntime());
        resultDB.AddResult (testStr, maxFloatsStr, "GFLOPS", gflop);
        // Now it's time to read back the data
        a = new float[maxFloatsUsageSize];
        errNum = clEnqueueReadBuffer(queue, memObject, CL_TRUE, 0, maxFloatsUsageSize*sizeof(float), a, 0, NULL, NULL);
        CL_CHECK_ERROR(errNum);
        if (verbose) {
            for (int j = 0; j < 10; j++) {
                std::cout << a[j] << " ";
            }
        }
        delete[] a;
        if (memObject != NULL)
            clReleaseMemObject (memObject);
        if (program != NULL)
            clReleaseProgram (program);
        if (kernel != NULL)
            clReleaseKernel (kernel);
    }
    std::cout << "Program executed successfully!" << std::endl;
}
To explain the code: in the kernel I actually perform a single floating-point operation, which means each work item does one FLOP. In the host code, I first retrieve the maximum global memory size of the GPU, allocate a portion of it (the for loop defines how much), and then push the data and the kernel execution onto the device. I measure the execution time of clEnqueueNDRangeKernel and then calculate the GFLOPS of the application. With my current implementation I get about 0.28 GFLOPS no matter how large the cl_mem is, which is far below the advertised peak. I assume I am doing something inefficient here, or that my method of measuring GPU performance is wrong in general. Can anyone tell me what changes I should make to the code?
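For reference, here is a minimal sketch of how the kernel time and a GFLOP/s figure can be derived directly from standard OpenCL event profiling, independent of SHOC's Event helper. It assumes the command queue was created with CL_QUEUE_PROFILING_ENABLE, and flopsPerWorkItem is a placeholder for whatever operation count you credit each work item with:

// Sketch: derive GFLOP/s from an OpenCL profiling event (assumes the
// queue was created with CL_QUEUE_PROFILING_ENABLE).
double gflopsFromEvent (cl_event ev, size_t workItems, double flopsPerWorkItem) {
    cl_ulong start = 0, end = 0;
    clGetEventProfilingInfo (ev, CL_PROFILING_COMMAND_START,
                             sizeof(start), &start, NULL);
    clGetEventProfilingInfo (ev, CL_PROFILING_COMMAND_END,
                             sizeof(end), &end, NULL);
    double seconds = (double)(end - start) * 1e-9;  // timestamps are in nanoseconds
    return ((double)workItems * flopsPerWorkItem) / seconds / 1e9;
}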
Answer 0 (score: 2)
If your local group size is 1, you are wasting 31/32 of the hardware resources (so you can reach at most 1/32 of peak performance). You need a local group size of at least 32 (and a multiple of 32) to fully use the compute resources, and 64 to reach 100% occupancy (although 100% occupancy is not strictly required).
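As a rough sketch of that change in the host code above (assuming the wg_multiple value already queried via CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE is used as the local size; because the global size is padded up, the kernel would then also need a bounds check on gid):

// Use the device's preferred work-group multiple (typically 32 or 64)
// instead of 1, and round the global size up to a multiple of it.
size_t localSize  = wg_multiple;
size_t globalSize = ((maxFloatsUsageSize + localSize - 1) / localSize) * localSize;
size_t globalWorkSize[1] = { globalSize };
size_t localWorkSize[1]  = { localSize };
errNum = clEnqueueNDRangeKernel (queue, kernel, 1, NULL,
                                 globalWorkSize, localWorkSize,
                                 0, NULL, &evKernel.CLEvent());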
Memory accesses have high latency and low bandwidth. Even if everything else is right, your kernel will always be waiting on the memory controller. You need to do far more arithmetic per memory access to keep the ALUs busy.
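For illustration only, a hypothetical kernel variant along these lines, which performs many multiply-adds per element loaded so that arithmetic rather than memory traffic dominates (the iteration count of 256 is an arbitrary example):

// Hypothetical high-arithmetic-intensity variant: one load, one store,
// and 256 * 2 = 512 FLOPs (mul + add) per work item.
__kernel void flops_heavy(__global float *data) {
    int gid = get_global_id(0);
    float s = data[gid];
    for (int i = 0; i < 256; i++) {
        s = s * 0.35f + 0.5f;
    }
    data[gid] = s;  // the result is stored, so the loop cannot be removed
}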
You should read the documentation first and use the Visual Profiler. In the previous two points I just wanted to show you that things are stranger than you might think. But even stranger things are still to come.
You can reach optimal performance on a CPU by using assembly language (by performing only independent arithmetic operations; if you write such code in C, it will simply be dropped by the compiler). NVIDIA only gives us an IL interface called PTX, and I am not sure whether the compiler will optimize it away. And you can only use PTX from CUDA.
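To make the "independent arithmetic operations" point concrete, here is a hypothetical OpenCL sketch (not PTX) that keeps several independent dependency chains in flight and combines them at the end so the compiler cannot discard any of them:

// Hypothetical sketch: four independent multiply-add chains per work item,
// giving the hardware instruction-level parallelism to hide ALU latency.
__kernel void flops_ilp(__global float *data) {
    int gid = get_global_id(0);
    float a = data[gid];
    float b = a + 1.0f, c = a + 2.0f, d = a + 3.0f;
    for (int i = 0; i < 64; i++) {
        a = a * 0.35f + 0.5f;  // chain 1
        b = b * 0.35f + 0.5f;  // chain 2, independent of chain 1
        c = c * 0.35f + 0.5f;  // chain 3
        d = d * 0.35f + 0.5f;  // chain 4
    }
    data[gid] = a + b + c + d;  // combine so no chain is dead code
}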
Edit: it seems the compiler will optimize away unused PTX code, at least in inline assembly.