OpenCL:一个内核可以调用另一个内核

时间:2014-09-18 10:46:33

标签: opencl

嗨,
我试图在OpenCL中运行一段现成的卷积代码。我使用的是一个异构系统——
1)CPU
2)GPU
请看下面我在我的系统中运行的代码:

convolution.cl

// TODO: Add OpenCL kernel code here.
// Naive 2D convolution: each work-item computes one output element by
// accumulating maskWidth x maskWidth products of mask and input values.
//
// NOTE(review): the kernel reads get_global_id(0) as x and get_global_id(1)
// as y, and uses get_global_size(0) as the output row stride — it therefore
// assumes a 2D NDRange whose dimension 0 equals the output width. There is
// no bounds checking: the host must size the range so that
// (y + maskWidth - 1) * inputWidth + x + maskWidth - 1 stays inside input.
__kernel 
void convolve(
    const __global uint * const input,   // inputWidth-wide 2D signal, row-major
    __constant uint     * const mask,    // maskWidth x maskWidth kernel, row-major
    __global uint       * const output,  // one element written per work-item
    const int                   inputWidth,
    const int                   maskWidth){

        const int x = get_global_id(0);
        const int y = get_global_id(1);

        uint sum = 0;

        // Walk the mask rows; idxIntmp is the start of the input row under
        // mask row r, offset to column x.
        for (int r = 0; r < maskWidth; r++)
        {
            const int idxIntmp = (y + r) * inputWidth + x;
            for (int c = 0; c < maskWidth; c++)
            {
                sum += mask[(r * maskWidth) + c] * input[idxIntmp + c];
            }
        }

        output[y * get_global_size(0) + x] = sum;
}

和convolution.cpp -

//卷积 - 将3×3掩模应用于8×8输入信号的过程,产生6×6输出信号

    #include "CL/cl.h"
    #include "vector"
    #include "iostream"
    #include "time.h"

    #include <fstream>
    #include <sstream>
    #include <string>

using namespace std;

// Constants

// Dimensions of the 8x8 input signal.
const unsigned int inputSignalWidth = 8;
const unsigned int inputSignalHeight = 8;

// Host-side input data; copied to the device via CL_MEM_COPY_HOST_PTR.
cl_uint inputSignal[inputSignalWidth][inputSignalHeight] =
{
    {3, 1, 1, 4, 8, 2, 1, 3},
    {4, 2, 1, 1, 2, 1, 2, 3},
    {4, 4, 4, 4, 3, 2, 2, 2},
    {9, 8, 3, 8, 9, 0, 0, 0},
    {9, 3, 3, 9, 0, 0, 0, 0},
    {0, 9, 0, 8, 0, 0, 0, 0},
    {3, 0, 8, 8, 9, 4, 4, 4},
    {5, 9, 8, 1, 8, 1, 1, 1}
};

// Dimensions of the 6x6 convolution result (8 - 3 + 1 = 6).
const unsigned int outputSignalWidth = 6;
const unsigned int outputSignalHeight = 6;

// Destination for the result read back from the device.
cl_uint outputSignal[outputSignalWidth][outputSignalHeight];

// Dimensions of the 3x3 convolution mask.
const unsigned int maskWidth = 3;
const unsigned int maskHeight = 3;

// 3x3 box mask with a zero center (each neighbor weighted 1, center ignored).
cl_uint mask[maskWidth][maskHeight] =
{
    {1, 1, 1}, 
    {1, 0, 1}, 
    {1, 1, 1},
};

// Fail-fast OpenCL status check: prints "ERROR: <name> (<code>)" to stderr
// and terminates the process when err is not CL_SUCCESS; no-op on success.
inline void checkErr(cl_int err, const char * name)
{
    if (err == CL_SUCCESS)
        return; // success: nothing to report

    std::cerr << "ERROR: " << name
        << " (" << err << ")" << std::endl;
    exit(EXIT_FAILURE);
}

// Context-error callback registered with clCreateContext. Reports the
// runtime's diagnostic string and aborts, matching checkErr's fail-fast
// behavior.
// FIX: the diagnostic now goes to std::cerr (it went to std::cout), so all
// error output in this program is consistently on stderr.
void CL_CALLBACK contextCallback(
    const char * errInfo,         // human-readable error description from the runtime
    const void * private_info,    // implementation-defined binary data (unused)
    size_t cb,                    // size of private_info in bytes (unused)
    void * user_data)             // the user_data passed to clCreateContext (unused)
{
    std::cerr << "Error occurred during context use: " << errInfo << std::endl;
    exit(EXIT_FAILURE);
}

int main(int argc,char argv[]){
    cl_int errNum;

    cl_uint numPlatforms;
    cl_uint numDevices;

    cl_platform_id * platformIDs;
    cl_device_id * deviceIDs;

    cl_context context = NULL;
    cl_command_queue queue;
    cl_program program;
    cl_kernel kernel;

    cl_mem inputSignalBuffer;
    cl_mem outputSignalBuffer;
    cl_mem maskBuffer;

    double start,end,Totaltime;//Timer variables

    errNum = clGetPlatformIDs(0, NULL, &numPlatforms);

    checkErr(
        (errNum != CL_SUCCESS) ? errNum :
        (numPlatforms <= 0 ? -1 : CL_SUCCESS),
        "clGetPlatformIDs");

    platformIDs = (cl_platform_id *)malloc(sizeof(cl_platform_id) * numPlatforms);

    errNum = clGetPlatformIDs(numPlatforms, platformIDs, NULL);

    checkErr(
        (errNum != CL_SUCCESS) ? errNum :
        (numPlatforms <= 0 ? -1 : CL_SUCCESS), "clGetPlatformIDs");

    deviceIDs = NULL;

    cl_uint i;

    for (i = 0; i < numPlatforms; i++)
    {
        errNum = clGetDeviceIDs(
            platformIDs[i],
            CL_DEVICE_TYPE_GPU,
            0,
            NULL,
            &numDevices);
        if (errNum != CL_SUCCESS && errNum != CL_DEVICE_NOT_FOUND)
        {
            checkErr(errNum, "clGetDeviceIDs");
        }
        else if (numDevices > 0)
        {
            deviceIDs = (cl_device_id *)malloc(
                sizeof(cl_device_id) * numDevices);

            errNum = clGetDeviceIDs(
                platformIDs[i], 
                CL_DEVICE_TYPE_GPU, 
                numDevices,
                &deviceIDs[0], 
                NULL);

            checkErr(errNum, "clGetDeviceIDs");

            break;
        }
    }
    if (deviceIDs == NULL) {
        std::cout << "No CPU device found" << std::endl;
        exit(-1);
    }
    cl_context_properties contextProperties[] =
    {
        CL_CONTEXT_PLATFORM,(cl_context_properties)platformIDs[i], 0
    };

    context = clCreateContext(
        contextProperties, numDevices, deviceIDs,
        &contextCallback, NULL, &errNum);

    checkErr(errNum, "clCreateContext");

    std::ifstream srcFile("convolution.cl");

    checkErr(srcFile.is_open() ? CL_SUCCESS : -1,
        "reading convolution.cl");

    std::string srcProg(
        std::istreambuf_iterator<char>(srcFile),
        (std::istreambuf_iterator<char>()));

    const char * src = srcProg.c_str();
    size_t length = srcProg.length();

    program = clCreateProgramWithSource(context, 1, &src, &length, &errNum);

    checkErr(errNum, "clCreateProgramWithSource");

    errNum = clBuildProgram(program, numDevices, deviceIDs, NULL, NULL, NULL);

    checkErr(errNum, "clBuildProgram");

    kernel = clCreateKernel(program, "convolve", &errNum);

    checkErr(errNum, "clCreateKernel");

    inputSignalBuffer = clCreateBuffer(
        context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
        sizeof(cl_uint) * inputSignalHeight * inputSignalWidth,
        static_cast<void *>(inputSignal), &errNum);

    checkErr(errNum, "clCreateBuffer(inputSignal)");    

    maskBuffer = clCreateBuffer(
        context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
        sizeof(cl_uint) * maskHeight * maskWidth,
        static_cast<void *>(mask), &errNum);

    checkErr(errNum, "clCreateBuffer(mask)");

    outputSignalBuffer = clCreateBuffer(
        context, CL_MEM_WRITE_ONLY,
        sizeof(cl_uint) * outputSignalHeight * outputSignalWidth,
        NULL, &errNum);

    checkErr(errNum, "clCreateBuffer(outputSignal)");

    queue = clCreateCommandQueue(
        context, deviceIDs[0], 0, &errNum);
    checkErr(errNum, "clCreateCommandQueue");

    errNum = clSetKernelArg(
        kernel, 0, sizeof(cl_mem), &inputSignalBuffer);
    errNum |= clSetKernelArg(
        kernel, 1, sizeof(cl_mem), &maskBuffer);
    errNum |= clSetKernelArg(
        kernel, 2, sizeof(cl_mem), &outputSignalBuffer);
    errNum |= clSetKernelArg(
        kernel, 3, sizeof(cl_uint), &inputSignalWidth);
    errNum |= clSetKernelArg(
        kernel, 4, sizeof(cl_uint), &maskWidth);

    checkErr(errNum, "clSetKernelArg");

    const size_t globalWorkSize[1] ={ outputSignalWidth * outputSignalHeight };
    const size_t localWorkSize[1] = { 1 };

    start = clock();

    errNum = clEnqueueNDRangeKernel(
                                    queue,
                                    kernel,
                                    1,
                                    NULL,
                                    globalWorkSize,
                                    localWorkSize,
                                    0,
                                    NULL,
                                    NULL
                                    );

    checkErr(errNum, "clEnqueueNDRangeKernel");

    errNum = clEnqueueReadBuffer(
        queue, outputSignalBuffer, CL_TRUE, 0,
        sizeof(cl_uint) * outputSignalHeight * outputSignalHeight,
        outputSignal, 0, NULL, NULL);

    checkErr(errNum, "clEnqueueReadBuffer");

    end= clock(); - start;
    cout<<"Time in ms = "<<((end/CLOCKS_PER_SEC) * 1000) << endl;

    for (int y = 0; y < outputSignalHeight; y++)
    {
        for (int x = 0; x < outputSignalWidth; x++)
        {
            std::cout << outputSignal[x][y] << " ";
        }
        std::cout << std::endl;
    }

    return 0;
}

问题:我有几点疑惑——
    1)当我选择设备类型为CL_DEVICE_TYPE_GPU时,执行时间为267毫秒;当我选择CL_DEVICE_TYPE_CPU时,执行时间变为467毫秒。我想知道:通过OpenCL(把设备类型选为CL_DEVICE_TYPE_CPU)在CPU上运行卷积代码,与不借助GPU、直接在CPU上运行这段代码,二者有什么区别?
    2)在convolution.cl文件中可以看到一个执行3次的for循环。我能否从这个内核中调用另一个内核来完成这部分工作?

我问这个问题,因为我是OpenCL编码的新手,想知道那件事。

2 个答案:

答案 0 :(得分:5)

  1. CPU和GPU都是OpenCL设备。因此,通过选择CL_DEVICE_TYPE_CPU,您是在告诉OpenCL运行时把内核代码编译为CPU指令并在CPU上运行;而当您选择CL_DEVICE_TYPE_GPU时,内核代码会被编译为GPU指令并在您的显卡上执行。无需重写源代码就能更换设备类型,这正是OpenCL的主要特性。无论您的CPU是否带有集成GPU、或是否安装了独立显卡,您只需选择一个可用的设备并在其上运行内核即可。

  2. 在OpenCL 1.2及更早版本中,您不能从一个内核调用另一个内核。动态并行(设备端入队,device-side enqueue)是在OpenCL 2.0中才实现的。

答案 1 :(得分:2)

对于第一个问题:您应该对内核进行矢量化,以便opencl可以轻松使用CPU的SIMD功能,从而为每个内核解锁4x(或8x)个计算单元。

// Vectorized variant of convolve: each work-item processes eight adjacent
// unsigned values at once via the uint8 vector type, targeting the CPU's
// SIMD units.
// NOTE(review): indexing a uint8* advances in 8-element (32-byte) steps, so
// mask[...] and input[...] here do not address the same elements as the
// scalar kernel — treat this as a sketch of the vectorization idea, not
// drop-in replacement code; verify the indexing before use.
__kernel 
void convolve(
    const __global uint8 * const input, // uint8 fits AVX(AVX2?) and uint4 fits SSE(SSE3?)
    __constant uint8     * const mask,
    __global uint8       * const output,
    const int                   inputWidth,
    const int                   maskWidth){

        const int x = get_global_id(0);  // this is 1/8 size now
        const int y = get_global_id(1);  // this is 1/8 size now

        uint8 sum = 0;                      // a vector of 8 unsigneds

        for (int r = 0; r < maskWidth; r++)
        {
            const int idxIntmp = (y + r) * inputWidth + x; 
            for (int c = 0; c < maskWidth; c++)
            {
                sum += mask[(r * maskWidth) + c] * input[idxIntmp + c]; //8 issued per clock
                 // scalars get promoted when used in direct multiplication or addition.
            }
        }

        output[y * get_global_size(0) + x] = sum;
}

不要忘记把工作项(线程)总数缩减为原来的1/8(例如:从8k个线程减少到1k个线程)。同时请增加每个线程的工作量,例如每个线程做50次卷积,以提高计算单元的占用率;然后再做一些本地内存(local memory)优化(针对GPU),以获得更好的结果,例如每次内核执行只需5毫秒……

在我支持AVX的CPU上,一个简单的矩阵乘法得到的速度比为2.4倍,就像这样的8元素矢量化。

如果你卸载了足够多的工作,运行内核3次不是问题。如果没有,你应该使用一些棘手的算法将多个内核连接成一个内核。

如果此刻无法使用探查器,您可以检查GPU / CPU温度,以了解您与硬件限制的接近程度。

使用每个工作组的本地线程数。这可以改变性能,因为它允许每个线程使用更多或更少的寄存器。