Why has this CUDA kernel become slow?

Date: 2016-06-12 15:15:01

Tags: c++ cuda bit-shift texture2d

I need help making my CUDA program run faster. The NVIDIA Visual Profiler shows poor performance, reporting "Low Compute Utilization 1.4%":

[screenshot: NVIDIA Visual Profiler output]

The code is below. First, the kernel preparation:

void laskeSyvyydet(int& tiilet0, int& tiilet1, int& tiilet2, int& tiilet3) {

    cudaArray *tekstuuriSisaan, *tekstuuriUlos;

    //take care of synchronization
    cudaEvent_t cEvent;
    cudaEventCreate(&cEvent);

    //let's take control of the OpenGL textures
    cudaGraphicsMapResources(1, &cuda.cMaxSyvyys);
    cudaEventRecord(cEvent, 0);
    cudaGraphicsMapResources(1, &cuda.cDepthTex);
    cudaEventRecord(cEvent, 0);

    //need to create CUDA pointers
    cudaGraphicsSubResourceGetMappedArray(&tekstuuriSisaan, cuda.cDepthTex, 0, 0);
    cudaGraphicsSubResourceGetMappedArray(&tekstuuriUlos, cuda.cMaxSyvyys, 0, 0);

    cudaProfilerStart();

    //launch the kernel
    cLaskeSyvyydet(tiilet0, tiilet1, tiilet2, tiilet3, tekstuuriSisaan, tekstuuriUlos);
    cudaEventRecord(cEvent, 0);

    cudaProfilerStop();

    //release the textures back to OpenGL
    cudaGraphicsUnmapResources(1, &cuda.cMaxSyvyys, 0);
    cudaEventRecord(cEvent, 0);
    cudaGraphicsUnmapResources(1, &cuda.cDepthTex, 0);
    cudaEventRecord(cEvent, 0);

    //final synchronization
    cudaEventSynchronize(cEvent);
    cudaEventDestroy(cEvent);
}

The kernel launch:

void cLaskeSyvyydet(int& tiilet0, int& tiilet1, int& tiilet2, int& tiilet3, cudaArray* tekstuuriSisaan, cudaArray* tekstuuriUlos) {

    cudaBindTextureToArray(surfRefSisaan, tekstuuriSisaan);
    cudaBindSurfaceToArray(surfRefUlos, tekstuuriUlos);

    int blocksW = (int)ceilf(tiilet0 / 32.0f);
    int blocksH = (int)ceilf(tiilet1 / 32.0f);
    dim3 gridDim(blocksW, blocksH, 1);
    dim3 blockDim(32, 32, 1);

    kLaskeSyvyydet<<<gridDim, blockDim>>>(tiilet0, tiilet1, tiilet2, tiilet3);
}

The kernel:

__global__ void kLaskeSyvyydet(const int tiilet0, const int tiilet1, const int tiilet2, const int tiilet3) {

    //first define the indexes
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= tiilet0 || j >= tiilet1) return;

    //if we are inside the boundaries, let's find the greatest depth value
    unsigned int takana = 0;
    unsigned int ddd;
    uchar4 syvyys;
    uchar4 dd;

    //there are possibly four different tile sizes to choose between
    if (j != tiilet1-1 && i != tiilet0-1) {

        //interior tile: a full BLOCK_SIZE x BLOCK_SIZE block
        for (int y = j*BLOCK_SIZE; y < (j+1)*BLOCK_SIZE; y++) {
            for (int x = i*BLOCK_SIZE; x < (i+1)*BLOCK_SIZE; x++) {
                dd = tex2D(surfRefSisaan, x, y);
                ddd = (dd.x << 24) | (dd.y << 16) | (dd.z << 8) | (dd.w);
                takana = max(takana, ddd);
            }
        }

    } else if (j == tiilet1-1 && i != tiilet0-1) {

        //bottom edge: the last tile row is only tiilet3 texels tall
        for (int y = j*BLOCK_SIZE; y < j*BLOCK_SIZE+tiilet3; y++) {
            for (int x = i*BLOCK_SIZE; x < (i+1)*BLOCK_SIZE; x++) {
                dd = tex2D(surfRefSisaan, x, y);
                ddd = (dd.x << 24) | (dd.y << 16) | (dd.z << 8) | (dd.w);
                takana = max(takana, ddd);
            }
        }

    } else if (j != tiilet1-1 && i == tiilet0-1) {

        //right edge: the last tile column is only tiilet2 texels wide
        for (int y = j*BLOCK_SIZE; y < (j+1)*BLOCK_SIZE; y++) {
            for (int x = i*BLOCK_SIZE; x < i*BLOCK_SIZE+tiilet2; x++) {
                dd = tex2D(surfRefSisaan, x, y);
                ddd = (dd.x << 24) | (dd.y << 16) | (dd.z << 8) | (dd.w);
                takana = max(takana, ddd);
            }
        }

    } else if (j == tiilet1-1 && i == tiilet0-1) {

        //bottom-right corner tile: tiilet2 x tiilet3 texels
        for (int y = j*BLOCK_SIZE; y < j*BLOCK_SIZE+tiilet3; y++) {
            for (int x = i*BLOCK_SIZE; x < i*BLOCK_SIZE+tiilet2; x++) {
                dd = tex2D(surfRefSisaan, x, y);
                ddd = (dd.x << 24) | (dd.y << 16) | (dd.z << 8) | (dd.w);
                takana = max(takana, ddd);
            }
        }
    }

    //if the texture is empty there, choose the maximum possible value
    if (takana == 0) {
        takana = 1000000000;
    }

    //slice the greatest 32-bit depth value into four 8-bit pieces and write the value into the other texture
    syvyys.x = (takana & 0xFF000000) >> 24;
    syvyys.y = (takana & 0x00FF0000) >> 16;
    syvyys.z = (takana & 0x0000FF00) >> 8;
    syvyys.w = (takana & 0x000000FF) >> 0;

    surf2Dwrite(syvyys, surfRefUlos, i*sizeof(syvyys), j, cudaBoundaryModeZero);
}

Please help me speed this up, I'm out of ideas...

1 answer:

Answer 0 (score: 1):

It looks like you have a 2D int input array of size

((tiilet0-1)*BLOCK_SIZE+tiilet2, (tiilet1-1)*BLOCK_SIZE+tiilet3)

Each thread sequentially reads all the elements of one input block of size

(BLOCK_SIZE, BLOCK_SIZE)

and writes the maximum of that input block into a 2D result array of size

(tiilet0, tiilet1)

Compared to coalesced memory access, this is just about the worst possible way to access global memory, even through 2D textures. You may want to read up on coalesced memory access:

https://devblogs.nvidia.com/parallelforall/how-access-global-memory-efficiently-cuda-c-kernels/
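
As a rough illustration of the difference (a hypothetical sketch, not part of the original answer; the kernel names and the stride parameter are made up for demonstration):

__global__ void coalescedRead(const int* in, int* out, int n) {
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < n)
        out[k] = in[k];              //neighbouring threads read neighbouring
                                     //addresses: the warp's loads combine into
                                     //a few wide memory transactions
}

__global__ void stridedRead(const int* in, int* out, int n, int stride) {
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k * stride < n)
        out[k] = in[k * stride];     //neighbouring threads read addresses
                                     //stride*4 bytes apart: each warp load
                                     //splinters into many transactions
}

Your kernel is in the second camp: at every step of the inner loop, adjacent threads read texels BLOCK_SIZE columns apart, so each warp touches many separate memory segments per read.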

Generally speaking, you are putting too much work into a single thread. Given how the CUDA thread blocks are mapped onto the input array, I suspect that unless you have a very large input, your gridDim is too small to keep the GPU busy: for example, if BLOCK_SIZE were 16 and the input 1920x1080, then tiilet0 = 120 and tiilet1 = 68, giving a grid of only ceil(120/32) x ceil(68/32) = 4 x 3 = 12 blocks.

For better performance, you may want to change from one CUDA thread per input block to one CUDA thread block per input block (int[BLOCK_SIZE][BLOCK_SIZE]), and use a parallel reduction to find the per-block maximum.
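
A minimal sketch of that idea (my own illustration, untested against the asker's setup: it assumes BLOCK_SIZE is a power of two, reuses the surfRefSisaan/surfRefUlos references from the question, and takes the input extent as hypothetical width/height parameters):

//One thread block per tile: blockIdx picks the tile, threadIdx the texel in it.
//Launch as: kLaskeSyvyydetReduced<<<dim3(tiilet0, tiilet1), dim3(BLOCK_SIZE, BLOCK_SIZE)>>>(width, height);
__global__ void kLaskeSyvyydetReduced(const int width, const int height) {

    __shared__ unsigned int smax[BLOCK_SIZE * BLOCK_SIZE];

    int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int y = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int tid = threadIdx.y * BLOCK_SIZE + threadIdx.x;

    //Each thread loads exactly one texel; texels outside the image contribute 0,
    //which can never win the max. Adjacent threads read adjacent columns, so the
    //tile is swept in a coalesced-friendly pattern instead of one thread looping
    //over all BLOCK_SIZE*BLOCK_SIZE texels by itself.
    unsigned int v = 0;
    if (x < width && y < height) {
        uchar4 dd = tex2D(surfRefSisaan, x, y);
        v = (dd.x << 24) | (dd.y << 16) | (dd.z << 8) | (dd.w);
    }
    smax[tid] = v;
    __syncthreads();

    //Classic shared-memory tree reduction: halve the active threads each round.
    for (int s = (BLOCK_SIZE * BLOCK_SIZE) / 2; s > 0; s >>= 1) {
        if (tid < s)
            smax[tid] = max(smax[tid], smax[tid + s]);
        __syncthreads();
    }

    //Thread 0 repacks the 32-bit maximum into four bytes, as in the original kernel.
    if (tid == 0) {
        unsigned int takana = (smax[0] == 0) ? 1000000000u : smax[0];
        uchar4 syvyys = make_uchar4((takana >> 24) & 0xFF,
                                    (takana >> 16) & 0xFF,
                                    (takana >>  8) & 0xFF,
                                     takana        & 0xFF);
        surf2Dwrite(syvyys, surfRefUlos, blockIdx.x * (int)sizeof(uchar4),
                    blockIdx.y, cudaBoundaryModeZero);
    }
}

Here width and height would be (tiilet0-1)*BLOCK_SIZE+tiilet2 and (tiilet1-1)*BLOCK_SIZE+tiilet3, so the single bounds check replaces the four-way edge-tile branching. With this launch configuration you also get tiilet0*tiilet1 blocks instead of roughly tiilet0*tiilet1/1024 — in the 1920x1080 example above, 8160 blocks instead of 12 — which should give the GPU far more parallelism to work with.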