Incorrect results - OpenCL on Intel HD 4000

Time: 2014-04-17 17:48:10

Tags: c macos opencl intel gpgpu

Apple ships the latest Intel OpenCL driver with Mavericks, which adds OpenCL support for the integrated GPU (yay!). CPU support was already there. Anyway, I figured I'd give it a try on my MacBook. I took the following simple vector-addition example:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <OpenCL/opencl.h>

// OpenCL kernel. Each work item takes care of one element of c
const char *kernelSource =                                       "\n" \
"#pragma OPENCL EXTENSION cl_khr_fp64 : enable                    \n" \
"__kernel void vecAdd(  __global double *a,                       \n" \
"                       __global double *b,                       \n" \
"                       __global double *c,                       \n" \
"                       const unsigned int n)                    \n" \
"{                                                               \n" \
"    //Get our global thread ID                                  \n" \
"    int id = get_global_id(0);                                  \n" \
"                                                                \n" \
"    //Make sure we do not go out of bounds                      \n" \
"    if (id < n)                                                 \n" \
"        c[id] = a[id] + b[id];                                  \n" \
"}                                                               \n" \
                                                                "\n" ;

int main( int argc, char* argv[] )
{
    // Length of vectors
    unsigned int n = 100000;

    // Host input vectors
    double *h_a;
    double *h_b;
    // Host output vector
    double *h_c;

    // Device input buffers
    cl_mem d_a;
    cl_mem d_b;
    // Device output buffer
    cl_mem d_c;

    cl_platform_id cpPlatform;        // OpenCL platform
    cl_device_id device_id;           // device ID
    cl_context context;               // context
    cl_command_queue queue;           // command queue
    cl_program program;               // program
    cl_kernel kernel;                 // kernel

    // Size, in bytes, of each vector
    size_t bytes = n * sizeof(double);

    // Allocate memory for each vector on host
    h_a = (double*) malloc(bytes);
    h_b = (double*) malloc(bytes);
    h_c = (double*) malloc(bytes);

    // Initialize vectors on host
    int i;
    for (i = 0; i < n; i++)
    {
        h_a[i] = sinf(i) * sinf(i);
        h_b[i] = cosf(i) * cosf(i);
    }

    size_t globalSize, localSize;
    cl_int err;

    // Number of work items in each local work group
    localSize = 64;

    // Total number of work items - must be a whole multiple of localSize
    globalSize = ceil(n / (float) localSize) * localSize;

    // Bind to platform
    err = clGetPlatformIDs(1, &cpPlatform, NULL);

    // Get ID for the device
    err = clGetDeviceIDs(cpPlatform, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL);

    // Create a context  
    context = clCreateContext(0, 1, &device_id, NULL, NULL, &err);

    // Create a command queue 
    queue = clCreateCommandQueue(context, device_id, 0, &err);

    // Create the compute program from the source buffer
    program = clCreateProgramWithSource(context, 1, (const char **) &kernelSource, NULL, &err);

    // Build the program executable 
    clBuildProgram(program, 0, NULL, NULL, NULL, NULL);

    // Create the compute kernel in the program we wish to run
    kernel = clCreateKernel(program, "vecAdd", &err);

    // Create the input and output arrays in device memory for our calculation
    d_a = clCreateBuffer(context, CL_MEM_READ_ONLY, bytes, NULL, NULL);
    d_b = clCreateBuffer(context, CL_MEM_READ_ONLY, bytes, NULL, NULL);
    d_c = clCreateBuffer(context, CL_MEM_WRITE_ONLY, bytes, NULL, NULL);

    // Write our data set into the input array in device memory
    err = clEnqueueWriteBuffer(queue, d_a, CL_TRUE, 0, bytes, h_a, 0, NULL, NULL);
    err |= clEnqueueWriteBuffer(queue, d_b, CL_TRUE, 0, bytes, h_b, 0, NULL, NULL);

    // Set the arguments to our compute kernel
    err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_a);
    err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &d_b);
    err |= clSetKernelArg(kernel, 2, sizeof(cl_mem), &d_c);
    err |= clSetKernelArg(kernel, 3, sizeof(unsigned int), &n);

    // Execute the kernel over the entire range of the data set  
    err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &globalSize, &localSize, 0, NULL, NULL);

    // Wait for the command queue to get serviced before reading back results
    clFinish(queue);

    // Read the results from the device
    clEnqueueReadBuffer(queue, d_c, CL_TRUE, 0, bytes, h_c, 0, NULL, NULL );

    //Sum up vector c and print result divided by n, this should equal 1 within error
    double sum = 0;
    for (i = 0; i < n; i++)
        sum += h_c[i];

    printf("final result: %lf\n", sum / (double) n);

    // release OpenCL resources
    clReleaseMemObject(d_a);
    clReleaseMemObject(d_b);
    clReleaseMemObject(d_c);
    clReleaseProgram(program);
    clReleaseKernel(kernel);
    clReleaseCommandQueue(queue);
    clReleaseContext(context);

    //release host memory
    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}

I then ran it on the HD 4000 in my MacBook 9,2 (i7-3520M). It runs and completes without complaint, but, very strangely, it produces an incorrect result on the GPU. The code should return a number very close to 1, yet the final result from the GPU is 40.726689. When I run the same code on the CPU via OpenCL (or on other OpenCL systems), it returns 1.000000.

Does anyone know what is going on here? Am I missing something, or is there a limitation in the OpenCL implementation or in the graphics chip? My first thought was memory, but this example uses well under a megabyte, so that shouldn't be it.

Edit:

I just answered my own question: I switched the example to single precision instead of double precision and it returns the correct result. Can someone confirm that the HD 4000 supports single but not double precision? And if double precision is not supported, why doesn't the compiler complain?
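For reference, this is roughly what the single-precision variant of the kernel looks like (a minimal sketch of the change I made; on the host side the h_a/h_b/h_c vectors become float*, bytes becomes n * sizeof(float), and the cl_khr_fp64 pragma is dropped):

    const char *kernelSource =                                       "\n" \
    "__kernel void vecAdd(  __global float *a,                        \n" \
    "                       __global float *b,                        \n" \
    "                       __global float *c,                        \n" \
    "                       const unsigned int n)                     \n" \
    "{                                                                \n" \
    "    // Same logic as before, just float instead of double        \n" \
    "    int id = get_global_id(0);                                   \n" \
    "    if (id < n)                                                  \n" \
    "        c[id] = a[id] + b[id];                                   \n" \
    "}                                                                \n" ;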

1 Answer:

Answer 0 (score: 3)

This appears to be a bug in Apple's OpenCL implementation. According to clGetDeviceInfo(..., CL_DEVICE_VERSION, ...), the Intel HD4000 supports OpenCL 1.2 under OS X 10.9, which would mean it has to support double precision, since that is a core feature of OpenCL 1.2. I have just tested a much simpler double-precision kernel on my own HD4000, and it is likewise completely broken. I will file a bug for this, but you can do so too via the Apple Bug Reporting System if you wish.
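For anyone who wants to check this themselves, here is a minimal sketch of that version query, assuming device_id already holds the GPU's ID as in the code in the question:

    char version[256];
    err = clGetDeviceInfo(device_id, CL_DEVICE_VERSION,
                          sizeof(version), version, NULL);
    if (err == CL_SUCCESS)
        printf("device version: %s\n", version);  // expected to report an "OpenCL 1.2 ..." string here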

You should not need the #pragma OPENCL EXTENSION cl_khr_fp64 : enable line in the kernel to enable the cl_khr_fp64 extension, yet removing it causes the program to fail to build (which is also broken).

I was wrong when I wrote the above - in OpenCL 1.2, double precision changed from an optional extension to an optional core feature; it is not mandatory. You can query whether a particular device supports double precision by calling clGetDeviceInfo(..., CL_DEVICE_DOUBLE_FP_CONFIG, ...) and checking for a non-zero value (which indicates that it is indeed supported). I just tried this on my HD4000 under OS X, and it returns 0 - so double precision is not supported.
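A minimal sketch of that capability check (again assuming device_id from the code in the question):

    cl_device_fp_config doubleConfig = 0;
    err = clGetDeviceInfo(device_id, CL_DEVICE_DOUBLE_FP_CONFIG,
                          sizeof(doubleConfig), &doubleConfig, NULL);
    // A non-zero value means the device supports double precision
    if (err == CL_SUCCESS && doubleConfig != 0)
        printf("double precision supported\n");
    else
        printf("double precision NOT supported\n");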

That said, if double precision is not supported, I would expect the compiler to throw an error when it tries to compile a kernel that uses it, so this is still a bug in my book.
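As a side note, the example in the question never checks the result of clBuildProgram, so a failed build would go unnoticed at runtime. A sketch of how one might surface the build log (the log buffer size here is an arbitrary illustrative choice):

    err = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        // Print the compiler output so kernel build errors become visible
        char log[16384];
        clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG,
                              sizeof(log), log, NULL);
        fprintf(stderr, "kernel build failed:\n%s\n", log);
    }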