OpenCL - GPU sum does not match CPU sum

Asked: 2018-01-04 12:43:10

Tags: c++ c parallel-processing opencl

I am new to OpenCL.

I wrote a program that is supposed to perform a parallel reduction over an array of 1 million elements. In the last part of the code I compare the CPU sum with the GPU sum, and they are not the same; that is the problem. My local work size is 64. Starting from index "90", the GPU sums start to come out larger.

EDIT: If I sum smaller numbers (right now I am summing 0 - 1M), say all 1's, the final sum is correct.

KERNEL:

__kernel void gpuSumfunc(__global float *vec, __global float *sum, int n)
{
    __local float tempSum[64];

    int i;
    const int globalID = get_global_id(0); //BLOCK_DIM*BLOCK_IND+THREAD_ID
    const int tid = get_local_id(0);       //THREAD_ID
    const int BlockDIM = get_local_size(0);//BLOCK_DIM=64

    if (globalID < n)
    {
        tempSum[tid] = vec[globalID];      //Copying global data into local memory
    }
    else
    {
        tempSum[tid] = 0;                  //Padding for threads past the end of the array
    }
    barrier(CLK_LOCAL_MEM_FENCE);          //Waiting for all the threads to copy their data

    for (i = BlockDIM / 2; i > 0; i /= 2)  //Tree reduction inside the work-group
    {
        if (tid < i)
        {
            tempSum[tid] += tempSum[tid + i];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    if (tid == 0)
    {
        sum[get_group_id(0)] = tempSum[0]; //One partial sum per work-group
    }
}

MAIN:

//HOST-cpu
    float *h_a;//input
    float *h_b;//output
    float *h_s;
    //DEVICE-gpu
    cl_mem d_a;//input buffer
    cl_mem d_b;//Output

               //Kernel File
    FILE* fileKernel;

    //Memory allocation - cpu input 
    vector = (float*)malloc(n * sizeof(float));
    h_a = (float*)malloc(n * sizeof(float));
    h_b = (float*)malloc(n * sizeof(float));
    h_s = (float*)malloc(n * sizeof(float));

    *vector = { 0 };   // note: these only zero the first element of each array,
    *h_a = { 0 };      // not the whole allocation
    *h_b = { 0 };
    *h_s = { 0 };


    //Initializing Data for gpu
    for (i = 0; i < n; i++) {
        h_a[i] = i;//(float)i;
    }


    //Initializing Data for cpu
    for (i = 0; i < n; i++) {
        vector[i] = i;//(float)i;
    }
    fileKernel = fopen("KernelCode.cl", "r");
    if (!fileKernel)
    {
        printf("Cannot open kernel file!\n");
        exit(1);
    }

    // Read kernel code
    kernelSource = (char*)malloc(MAX_SOURCE_SIZE);
    source_size = fread(kernelSource, 1, MAX_SOURCE_SIZE, fileKernel);
    fclose(fileKernel);


    error = clGetPlatformIDs(2, cp_Platform, NULL); //array with two devices

    error = clGetDeviceIDs(cp_Platform[1], CL_DEVICE_TYPE_GPU, 1, &Device_ID, NULL); // cp_platform[1] = Nvidia GPU

    context = clCreateContext(NULL, 1, &Device_ID, NULL, NULL, &error); // creating openCL context 

    queue = clCreateCommandQueue(context, Device_ID, 0, &error); // creating command queue, executing openCL context on device cp_Platform[1] 


    globalSize = ceil(n / (float)localSize)*localSize;

    d_a = clCreateBuffer(context, CL_MEM_READ_ONLY, n * sizeof(float), NULL, NULL);
    d_b = clCreateBuffer(context, CL_MEM_READ_WRITE, n * sizeof(float), NULL, NULL); //the kernel writes its partial sums here, so this buffer cannot be read-only

    error = clEnqueueWriteBuffer(queue, d_a, CL_TRUE, 0, n * sizeof(float), h_a, 0, NULL, NULL); //Enqueue commands to write to a buffer object from host memory.
    error |= clEnqueueWriteBuffer(queue, d_b, CL_TRUE, 0,n * sizeof(float), h_s, 0, NULL, NULL); //Enqueue commands to write to a buffer object from host memory.


    program = clCreateProgramWithSource(context, 1, (const char **)& kernelSource, (const size_t *)&source_size, &error); //this function creates a program object for this specific openCL context
    error = clBuildProgram(program, 0, NULL, NULL, NULL, NULL); //compiles and links a program executable from the program source


    kernel = clCreateKernel(program, "gpuSumfunc", &error); //creating kernel object 
    error = clGetKernelWorkGroupInfo(kernel, Device_ID, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), (void*)&workGroupSize, NULL);
    error = clGetKernelWorkGroupInfo(kernel, Device_ID, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(size_t), (void*)&pWorkGroupSize, NULL);
    error = clGetDeviceInfo(Device_ID, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(NumOfCU), &NumOfCU, NULL);

    error |= clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_a); //Used to set the argument value for a specific argument of a kernel.
    error |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &d_b);
    error |= clSetKernelArg(kernel, 2, sizeof(int), &n);
    error |= clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &globalSize, &localSize, 0, NULL, NULL); // Enqueues a command to execute a kernel on a device.

    clFinish(queue);

    clEnqueueReadBuffer(queue, d_b, CL_TRUE, 0, n*sizeof(float), h_b, 0, NULL, NULL); //Reading the partial sums back from the device (d_b) into host memory (h_b)
    clock_t end = clock();

    for (i = 0; i < (n+localSize-1)/localSize; i++)
    {
        gpuSum += h_b[i];
        cpuSum = cpuSumfunc(vector, 64*(i+1));
        if ((gpuSum - cpuSum) > Tolerance)
        {
            printf("\nfailed! for index:%d",i);
            printf("\nCPU sum = %f", cpuSum);
            printf("\nGPU sum = %f\n", gpuSum);
        }
        else
        {
            printf("\nPassed! for index:%d",i);
            printf("\nCPU sum: %.2f", cpuSum);
            printf("\nGPU sum: %.2f\n", gpuSum);
        }
    }


    // cpu


    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    //printf("\nTotal program's running time is: %.2f\n", time_spent);

    free(h_a);
    free(h_b);
    free(h_s);
    free(vector);
    //free(kernelSource);
    clReleaseProgram(program);
    clReleaseContext(context);
    clReleaseKernel(kernel);
    clReleaseCommandQueue(queue);
}

float cpuSumfunc(float * vec, int n)
{

    float sum = 0;
    int i;

    for (i = 0; i < n; i++)
    {
        sum += vec[i];

    }
    return sum;
}

1 Answer:

Answer (score: 2):

Float32 values are not precise enough for this summation, and rounding errors appear that differ between the CPU and the GPU device.

16956560 needs 25 bits to be represented exactly, but Float32 only has a 23-bit mantissa (24 bits of effective precision). That means that, performed in Float32, 16956560 + 1 = 16956560.
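
For illustration, here is a tiny standalone C snippet (not from the original post) that reproduces this rounding effect; above 2^24 consecutive float values are 2 apart, so an added 1 simply vanishes:

    #include <stdio.h>

    int main(void)
    {
        float big  = 16956560.0f;   // above 2^24, so neighbouring floats differ by 2
        float next = big + 1.0f;    // the +1 falls below the rounding step and is dropped
        printf("%.1f\n%.1f\n", big, next);   // prints the same value twice
        return 0;
    }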

The results on the two devices differ because of:

  • Ordering: the CPU and the GPU add the values in a different order, so they accumulate different rounding errors (see the small sketch right after this list).
  • Precision: most CPUs (x86 etc.) carry extra precision in their internal floating-point registers and only store the result back as 32 bits, while the GPU does all of the math in plain 32-bit.
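
Below is a small host-side sketch (not part of the original answer) showing the ordering effect on this exact data set: summing 0..999999 in float from small to large versus large to small generally lands on two different totals, and both drift from the exact value 499999500000:

    #include <stdio.h>

    int main(void)
    {
        float up = 0.0f, down = 0.0f;
        int i;

        for (i = 0; i < 1000000; i++)        // small-to-large
            up += (float)i;
        for (i = 1000000 - 1; i >= 0; i--)   // large-to-small
            down += (float)i;

        printf("ascending : %.0f\n", up);    // exact sum is 499999500000
        printf("descending: %.0f\n", down);  // typically a different (also inexact) value
        return 0;
    }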

You can fix it by using Float64 (double) or by accumulating in integers (int64_t = long).
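
Here is a minimal sketch of the first option (assuming your device supports the cl_khr_fp64 extension, which is an assumption about your hardware): keep the input in float but do all the accumulation in double. The host side would then have to allocate d_b and h_b as double buffers and make cpuSumfunc accumulate in double as well.

    #pragma OPENCL EXTENSION cl_khr_fp64 : enable

    __kernel void gpuSumfunc(__global const float *vec, __global double *sum, int n)
    {
        __local double tempSum[64];

        const int globalID = get_global_id(0);
        const int tid      = get_local_id(0);
        const int BlockDIM = get_local_size(0);
        int i;

        // accumulate in double so the partial sums do not drop the small addends
        tempSum[tid] = (globalID < n) ? (double)vec[globalID] : 0.0;
        barrier(CLK_LOCAL_MEM_FENCE);

        for (i = BlockDIM / 2; i > 0; i /= 2)
        {
            if (tid < i)
                tempSum[tid] += tempSum[tid + i];
            barrier(CLK_LOCAL_MEM_FENCE);
        }

        if (tid == 0)
            sum[get_group_id(0)] = tempSum[0];   // one double partial sum per work-group
    }

Since your input values are the integers 0..999999, the exact total (499999500000) fits comfortably in both a double and an int64_t, so either choice removes the mismatch.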

Note: your GPU sum is actually the more accurate one, because it first adds the small values together and only afterwards combines those larger partial sums into the final total.
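
As a side note on that tree-shaped behaviour, here is a hedged host-side sketch (the helper name pairwiseSum is hypothetical, not from the original code) of pairwise summation, which combines values in the same tree pattern as the kernel and typically stays closer to the exact result than the strict left-to-right loop in cpuSumfunc:

    #include <stddef.h>

    // Recursive pairwise summation: split the range, sum each half, add the two halves.
    static float pairwiseSum(const float *vec, size_t n)
    {
        if (n == 0)
            return 0.0f;
        if (n == 1)
            return vec[0];
        size_t half = n / 2;
        return pairwiseSum(vec, half) + pairwiseSum(vec + half, n - half);
    }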