Cuda Bayer / CFA demosaicing 例子

时间:2011-11-09 21:57:47

标签: performance image cuda

我编写了一个CUDA4 Bayer demosaicing例程,但它比单线程CPU代码慢,在16核GTS250上运行。
Blocksize 是 (16,16),图像尺寸(dims)是 16 的倍数 —— 但改变这些设置并没有带来改善。

我做了什么明显愚蠢的事吗?

--------------- calling routine ------------------
uchar4 *d_output;    // device pointer into the mapped PBO (BGRA output)
size_t num_bytes;    // size of the mapped resource, reported by the runtime

// Map the OpenGL PBO for CUDA access and fetch its device pointer.
// NOTE(review): return codes are not checked here — wrap in error checks.
cudaGraphicsMapResources(1, &cuda_pbo_resource, 0);    
cudaGraphicsResourceGetMappedPointer((void **)&d_output, &num_bytes, cuda_pbo_resource);

// Do the conversion, leave the result in the PBO for display
kernel_wrapper( imageWidth, imageHeight, blockSize, gridSize, d_output );

// Unmap so OpenGL can consume the PBO again (synchronizes with the kernel).
cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0);

--------------- cuda -------------------------------
// 2D texture over the raw 8-bit Bayer mosaic; element reads (no filtering),
// clamp addressing configured in initTexture() below.
texture<uchar, 2, cudaReadModeElementType> tex;
cudaArray *d_imageArray = 0;   // backing CUDA array for the texture

// Bilinear Bayer demosaic of a GR/BG mosaic (row 0: G R G R…, row 1: B G B G…)
// read through the global texture `tex`; one thread per output pixel.
// Expects a 2D launch covering width x height; output is BGRA with opaque alpha.
__global__ void convertGRBG(uchar4 *d_output, uint width, uint height)
{
    // Signed ints so that x-1 / y-1 at the top/left border become -1 and are
    // clamped to 0 by cudaAddressModeClamp, instead of the uint underflow
    // (0xFFFFFFFF) that would clamp to the far edge of the image.
    int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
    uint i = __umul24(y, width) + x;

    // input is GR/BG, output is BGRA
    if ((x < (int)width) && (y < (int)height)) {

        if ( y & 0x01 ) {                 // odd row: B G B G ...
            if ( x & 0x01 ) {             // odd col = G (between two B)
                d_output[i].x = (tex2D(tex,x+1,y)+tex2D(tex,x-1,y))/2;   // B: horizontal avg
                d_output[i].y = (tex2D(tex,x,y));                        // G: measured
                d_output[i].z = (tex2D(tex,x,y+1)+tex2D(tex,x,y-1))/2;   // R: vertical avg
            } else {                      // even col = B
                d_output[i].x = (tex2D(tex,x,y));                        // B: measured
                d_output[i].y = (tex2D(tex,x+1,y) + tex2D(tex,x-1,y)+tex2D(tex,x,y+1)+tex2D(tex,x,y-1))/4;           // G: cross avg
                d_output[i].z = (tex2D(tex,x+1,y+1) + tex2D(tex,x+1,y-1)+tex2D(tex,x-1,y+1)+tex2D(tex,x-1,y-1))/4;   // R: diagonal avg
            }
        } else {                          // even row: G R G R ...
            if ( x & 0x01 ) {             // odd col = R
                // BUG FIX: the original stored the blue estimate in .y and then
                // overwrote it with green, leaving .x (blue) uninitialized.
                d_output[i].x = (tex2D(tex,x+1,y+1) + tex2D(tex,x+1,y-1)+tex2D(tex,x-1,y+1)+tex2D(tex,x-1,y-1))/4;   // B: diagonal avg
                d_output[i].y = (tex2D(tex,x+1,y) + tex2D(tex,x-1,y)+tex2D(tex,x,y+1)+tex2D(tex,x,y-1))/4;           // G: cross avg
                d_output[i].z = (tex2D(tex,x,y));                        // R: measured
            } else {                      // even col = G (between two R)
                d_output[i].x = (tex2D(tex,x,y+1)+tex2D(tex,x,y-1))/2;   // B: vertical avg
                d_output[i].y = (tex2D(tex,x,y));                        // G: measured
                d_output[i].z = (tex2D(tex,x+1,y)+tex2D(tex,x-1,y))/2;   // R: horizontal avg
            }
        }
        // BUG FIX: alpha was never written — left uninitialized in the PBO.
        d_output[i].w = 0xff;
    }
}



// Upload an 8-bit Bayer image into a CUDA array and bind it to the global
// texture reference `tex` with point sampling and clamp addressing.
// Takes ownership of `imagedata` (freed here via cutFree after the upload).
void initTexture(int imageWidth, int imageHeight, uchar *imagedata)
{

    // One unsigned 8-bit channel per texel (the raw mosaic).
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
    cutilSafeCall( cudaMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight) ); 
    uint size = imageWidth * imageHeight * sizeof(uchar);
    // NOTE(review): cudaMemcpyToArray is deprecated in modern CUDA;
    // cudaMemcpy2DToArray is the current replacement.
    cutilSafeCall( cudaMemcpyToArray(d_imageArray, 0, 0, imagedata, size, cudaMemcpyHostToDevice) );
    cutFree(imagedata);   // host copy no longer needed after upload

    // bind array to texture reference with point sampling
    tex.addressMode[0] = cudaAddressModeClamp;   // clamp x out-of-range reads to the edge
    tex.addressMode[1] = cudaAddressModeClamp;   // clamp y likewise
    tex.filterMode = cudaFilterModePoint;        // no interpolation: raw texel values
    tex.normalized = false;                      // integer (unnormalized) coordinates

    cutilSafeCall( cudaBindTextureToArray(tex, d_imageArray) );
}

3 个答案:

答案 0 :(得分:8)

您的代码中没有任何明显的错误,但有几个明显的性能机会:

1)为了获得最佳性能,您应该使用纹理进入共享内存 - 请参阅“SobelFilter”SDK示例。

2)如上所述,代码将字节写入全局内存,这可确保产生巨大的性能损失。在将结果提交到全局内存之前,您可以使用共享内存来暂存结果。

3)以与硬件纹理缓存属性相匹配的方式调整块大小具有令人惊讶的巨大性能优势。在特斯拉级硬件上,使用与内核相同的寻址方案的内核的最佳块大小为16x4。 (每个块64个线程)

对于像这样的工作负载,可能很难与优化的CPU代码竞争。 SSE2可以在一条指令中执行16字节大小的操作,CPU的时钟速度大约是其5倍。

答案 1 :(得分:1)

基于Nvidia论坛上的答案,这里(对于搜索引擎)是一个稍微更加优化的版本,它在每个线程中写入2x2像素块。虽然我的设置无法衡量速度的差异。

请注意,应使用覆盖图像一半尺寸的网格(grid)来调用它(每个线程处理一个 2x2 像素块):

dim3 blockSize(16, 16); // for example
// Half-size grid: each thread of d_convertGRBG writes a 2x2 output quad.
dim3 gridSize((width/2) / blockSize.x, (height/2) / blockSize.y);


// Bilinear Bayer demosaic of a GR/BG mosaic where each thread writes a whole
// 2x2 pixel quad (G at (x,y), R at (x+1,y), B at (x,y+1), G at (x+1,y+1)).
// Launch with a grid covering (width/2) x (height/2) threads.
__global__ void d_convertGRBG(uchar4 *d_output, uint width, uint height)
{
    // Signed ints so x-1 / y-1 at the left/top border clamp to 0 via the
    // texture's clamp addressing instead of wrapping as unsigned.
    int x = 2 * (__umul24(blockIdx.x, blockDim.x) + threadIdx.x);
    int y = 2 * (__umul24(blockIdx.y, blockDim.y) + threadIdx.y);
    uint i = __umul24(y, width) + x;

    // input is GR/BG output is BGRA
    if ((x < (int)width-1) && (y < (int)height-1)) {
        // x+1, y+1: G pixel in the B row
        d_output[i+width+1] = make_uchar4( (tex2D(tex,x+2,y+1)+tex2D(tex,x,y+1))/2,    // B: horizontal avg
                                             (tex2D(tex,x+1,y+1)),                     // G: measured
                                             (tex2D(tex,x+1,y+2)+tex2D(tex,x+1,y))/2,  // R: vertical avg
                                             0xff);

        // x, y+1: B pixel
        d_output[i+width] =   make_uchar4( (tex2D(tex,x,y+1)),                         // B: measured
                                             (tex2D(tex,x+1,y+1) + tex2D(tex,x-1,y+1)+tex2D(tex,x,y+2)+tex2D(tex,x,y))/4,          // G: cross avg
                                             (tex2D(tex,x+1,y+2) + tex2D(tex,x+1,y)+tex2D(tex,x-1,y+2)+tex2D(tex,x-1,y))/4,        // R: diagonal avg
                                             0xff);

        // x+1, y: R pixel
        // BUG FIX: the blue estimate sampled (x+2,y-1) twice; the fourth
        // diagonal neighbour of an R pixel is (x+2,y+1).
        d_output[i+1] =       make_uchar4( (tex2D(tex,x,y-1) + tex2D(tex,x+2,y-1)+tex2D(tex,x,y+1)+tex2D(tex,x+2,y+1))/4,          // B: diagonal avg
                                            (tex2D(tex,x+2,y) + tex2D(tex,x,y)+tex2D(tex,x+1,y+1)+tex2D(tex,x+1,y-1))/4,           // G: cross avg
                                            (tex2D(tex,x+1,y)),                        // R: measured
                                            0xff);

        // x, y: G pixel in the R row
        d_output[i] =         make_uchar4( (tex2D(tex,x,y+1)+tex2D(tex,x,y-1))/2,      // B: vertical avg
                                             (tex2D(tex,x,y)),                         // G: measured
                                             (tex2D(tex,x+1,y)+tex2D(tex,x-1,y))/2,    // R: horizontal avg
                                             0xff);

    }
}

答案 2 :(得分:0)

代码中有许多if和else。如果您构造代码以消除所有条件语句,那么您将获得巨大的性能提升,因为分支是性能杀手。确实可以删除分支。正好有30个案例需要明确编码。我已在CPU上实现它,它不包含任何条件语句。我正在考虑制作一个博客来解释它。一旦完成就会发布。