如何在cv :: cuda :: PtrStepSzf数据上使用线性索引

时间:2016-12-12 23:29:32

标签: c++ opencv cuda

我正在使用 opencv 3.1 的 cv::cuda 模板匹配,但 cv::cuda::minMaxLoc() 函数对我的情况来说太慢了。我的匹配结果最小尺寸为 128x128,最大尺寸为 512x512。对于 128x128 的匹配结果,minMaxLoc() 平均需要 1.65 ms;对于 350x350 左右的尺寸则长达 25 ms。由于这个操作要执行数百次,整体耗时太大。

我担心我的匹配结果尺寸可能太小,不太适合在 GPU 上这样处理。但我想按照 Robert Crovella 在 thrust::max_element slow in comparison cublasIsamax - More efficient implementation? 中的做法进行测试,看看能否获得更好的性能。

我的问题是,所有使用线性索引读取数据的归约(reduction)实现都不支持这种数据布局(至少我没有找到)。我尝试对匹配结果做 reshape,但由于数据不连续而无法实现。我是否需要改用 cudaMallocPitch 和 cudaMemcpy2D?如果是这样,在下面这样的内核中,我该如何通过 cv::cuda::PtrStepSzf 读取 cv::cuda::GpuMat 对象?

    __global__ void minLoc(const cv::cuda::PtrStepSzf data,
                           float* minVal,
                           float * minValLoc)
    {
        int dsize = data.cols*data.rows
        __shared__ volatile T vals[nTPB];
        __shared__ volatile int idxs[nTPB];
        __shared__ volatile int last_block;
        int idx = threadIdx.x+blockDim.x*blockIdx.x;
        last_block = 0;
        T my_val = FLOAT_MIN;
        int my_idx = -1;
        // sweep from global memory
        while (idx < dsize)
        {
            //data(idx) is an illegal call; the legal one is data(x,y)
            // How do I do it?
            if (data(idx) > my_val)
            {
                my_val = data(idx);
                my_idx = idx;
            }
            idx += blockDim.x*gridDim.x;
        }
        // ... rest of the kernel
    }

    void callMinLocKernel(cv::InputArray _input,
                          cv::Point minValLoc,
                          float minVal,
                          cv::cuda::Stream _stream)
    {
        const cv::cuda::GpuMat input = _input.getGpuMat();
        dim3 cthreads(32, 32);
        dim3 cblocks(
            static_cast<int>(std::ceil(input1.size().width /
                static_cast<double>(cthreads.x))),
            static_cast<int>(std::ceil(input1.size().height /
                static_cast<double>(cthreads.y))));

        // code that creates and uploads d_min, d_minLoc
        float h_min    = 9999;
        int h_minLoc   = -1;
        float * d_min  = 0;
        int * d_minLoc = 0;
        //gpuErrchk is defined on other place
        gpuErrchk( cudaMalloc((void**)&d_min, sizeof(h_min)));
        gpuErrchk( cudaMalloc((void**)&d_minLoc, sizeof(h_minLoc));
        gpuErrchk( cudaMemcpy(d_min, &h_min, sizeof(h_min), cudaMemcpyHostToDevice) );
        gpuErrchk( cudaMemcpy(d_minLoc, &h_minLoc, sizeof(h_minLoc), cudaMemcpyHostToDevice) );

        cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream);
        minLoc<<<cblocks, cthreads, 0, stream>>>(input,d_min,d_minLoc);
        gpuErrchk(cudaGetLastError());
        //code to read the answer
        gpuErrchk( cudaMemcpy(&h_min, d_min, sizeof(h_min), cudaMemcpyDeviceToHost) );
        gpuErrchk( cudaMemcpy(&h_minLoc, d_minLoc, sizeof(h_minLoc), cudaMemcpyDeviceToHost) );

        minValLoc = cv::point(h_minLoc/data.cols,h_minLoc%data.cols)
        minVal = h_min;
    }

    int main()
    {
        //read Background and template
        cv::Mat input = imread("cat.jpg",0);
        cv::Mat templ = imread("catNose.jpg",0)
        //convert to floats
        cv::Mat float_input, float_templ;
        input.convertTo(float_input,CV_32FC1);
        input.convertTo(float_templ,CV_32FC1);
        //upload Bckg and template to gpu
        cv::cuda::GpuMat d_src,d_templ, d_match;
        Size size = float_input.size();
        d_src.upload(float_input);
        d_templ.upload(float_templ);
        double min_val, max_val;
        Point min_loc, max_loc;
        Ptr<cv::cuda::TemplateMatching> alg = cuda::createTemplateMatching(d_src.type(), CV_TM_SQDIFF);
        alg->match(d_src, d_templ, d_match);
        cv::cuda::Normalize(d_match,d_match);
        //Too slow
        //cv::cuda::minMaxLoc(d_match, &min_val, &max_val, &min_loc, &max_loc);
        callMinLocKernel(d_match,min_val,min_loc);
        return 0;
    }

{{1}}

1 个答案:

答案 0 :(得分:1)

我没有找到一种实际对 cv::cuda::PtrStepSzf 使用线性索引的方法,也不确定是否存在这样的方法。看起来使用这种类型时只能用两个下标访问。作为替代,我在内核包装器中改用了从 cv::cuda::GpuMat input 的 ptr&lt;float&gt;() 方法得到的裸指针,如下所示:

#define nTPB 1024
#define FLOAT_MAX 9999.0f
// Host wrapper: finds the minimum value and its location in a single-channel
// CV_32F GpuMat by launching the custom minLoc reduction kernel on the
// caller-supplied stream.
//
// _input    : device matrix (CV_32FC1) to search
// minValLoc : receives the (x, y) position of the minimum
//             (fixed: was passed by value, so callers never saw the result)
// minVal    : receives the minimum value (same fix)
// _stream   : OpenCV CUDA stream the kernel runs on
void callMinLocKernel(cv::InputArray _input,
        cv::Point& minValLoc,
        float& minVal,
        cv::cuda::Stream _stream)
{
    const cv::cuda::GpuMat input = _input.getGpuMat();
    const float* linSrc = input.ptr<float>();
    size_t step         = input.step;   // row pitch in BYTES (GpuMat rows are padded)

    // 1-D launch over all logical elements; the kernel's grid-stride loop
    // covers any remainder, so ceil-division here is sufficient.
    dim3 cthreads(nTPB);
    dim3 cblocks(
        static_cast<int>(std::ceil(input.size().width * input.size().height /
            static_cast<double>(nTPB))));

    // Device scratch holding the running minimum and its linear index.
    float h_min    = FLOAT_MAX;
    int h_minLoc   = -1;
    float * d_min  = 0;
    int * d_minLoc = 0;
    // gpuErrchk is defined elsewhere
    gpuErrchk( cudaMalloc((void**)&d_min, sizeof(h_min)) );
    gpuErrchk( cudaMalloc((void**)&d_minLoc, sizeof(h_minLoc)) );   // fixed: missing ')'
    gpuErrchk( cudaMemcpy(d_min, &h_min, sizeof(h_min), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_minLoc, &h_minLoc, sizeof(h_minLoc), cudaMemcpyHostToDevice) );

    cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream);
    // Fixed: the kernel takes (pointer, pitch, size, ...), not a GpuMat.
    minLoc<<<cblocks, cthreads, 0, stream>>>(linSrc, step, input.size(), d_min, d_minLoc);
    gpuErrchk(cudaGetLastError());

    // Fixed: the blocking memcpys below run on the default stream; without
    // this sync they could race with the kernel still executing on `stream`.
    gpuErrchk( cudaStreamSynchronize(stream) );
    // Read back the answer.
    gpuErrchk( cudaMemcpy(&h_min, d_min, sizeof(h_min), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(&h_minLoc, d_minLoc, sizeof(h_minLoc), cudaMemcpyDeviceToHost) );

    // Linear index -> cv::Point(x, y): x is the column (idx % cols),
    // y is the row (idx / cols). Fixed: original swapped them, spelled
    // cv::point, referenced an undefined `data`, and dropped the ';'.
    minValLoc = cv::Point(h_minLoc % input.cols, h_minLoc / input.cols);
    minVal = h_min;

    // Fixed: original leaked both device allocations.
    gpuErrchk( cudaFree(d_min) );
    gpuErrchk( cudaFree(d_minLoc) );
}

在内核中:

// Grid-stride minimum search over a pitched 2-D float image.
//
// data      : device base pointer of the image (CV_32F)
// step      : row pitch in BYTES (cv::cuda::GpuMat::step) — hence the
//             division by sizeof(float) when forming the element offset
// dataSz    : logical width/height of the image (padding excluded)
// minVal    : out — the minimum value
// minValLoc : out — LINEAR (row-major, un-pitched) index of the minimum
//             (fixed: was float*, but the host allocates and copies an int)
//
// Launch: 1-D grid/block of nTPB threads; the grid-stride loop makes any
// grid size correct. The shared arrays and last_block feed the final
// cross-block reduction, which is elided here ("rest of the kernel").
__global__ void minLoc(const float* data,
                       const size_t step,
                       cv::Size dataSz,
                       float* minVal,
                       int * minValLoc
                    )
{
    // Fixed: original declared these with an undefined type `T`; the data
    // is float throughout (FLOAT_MAX sentinel, float* minVal).
    __shared__ volatile float vals[nTPB];
    __shared__ volatile int   idxs[nTPB];
    __shared__ volatile int   last_block;

    int idx         = threadIdx.x+blockDim.x*blockIdx.x;
    const int dsize = dataSz.height*dataSz.width;
    last_block = 0;
    float  my_val = FLOAT_MAX;   // sentinel larger than any expected value
    int my_idx = -1;
    // Sweep global memory: each thread keeps its private running minimum.
    while (idx < dsize)
    {
        int row = idx / dataSz.width;
        // step is in bytes, so convert it to a float-element offset to skip
        // the padded tail of each row.
        int id = ( row*step / sizeof( float ) ) + idx % dataSz.width;
        if ( data[id] < my_val )
        {
           my_val = data[id];
           my_idx = idx;   // record the logical index, not the pitched one
        }
        idx += blockDim.x*gridDim.x;
    }

            // ... rest of the kernel
}

step 以字节为单位,因此在当作元素偏移量使用之前需要除以 sizeof(元素类型)(这里是 sizeof(float))。希望这有帮助!