Getting phase image from CUDA FFT

Asked: 2018-10-10 16:25:10

Tags: opencv cuda fft cufft

I am trying to apply a cuFFT transform (forward, then inverse) to a 2D image. I need the real and imaginary parts as separate outputs so that I can compute phase and magnitude images. I cannot recreate the input image, and the phase that comes back is also non-zero. In particular, I am not sure whether I am correctly building the full-size image from the reduced-size cuFFT complex output, which apparently stores only the left half of the spectrum. Here is my current code:

// Load image
cv::Mat_<float> img;
img = cv::imread(path,0);
if(!img.isContinuous()){
    std::cout<<"Input cv::Mat is not continuous!"<<std::endl;
    return -1;
}

float *h_Data, *d_Data;
h_Data = img.ptr<float>(0);

// Complex device pointers
cufftComplex
*d_DataSpectrum,
*d_Result,
*h_Result;

// Plans for cuFFT execution
cufftHandle
fftPlanFwd,
fftPlanInv;

// Image dimensions
const int dataH = img.rows;
const int dataW = img.cols;
const int complexW = dataW/2+1;

// Allocate memory
h_Result = (cufftComplex *)malloc(dataH    * complexW * sizeof(cufftComplex));
checkCudaErrors(cudaMalloc((void **)&d_DataSpectrum,   dataH * complexW * sizeof(cufftComplex)));
checkCudaErrors(cudaMalloc((void **)&d_Data,   dataH   * dataW   * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Result,   dataH * complexW * sizeof(cufftComplex)));

// Copy image to GPU
checkCudaErrors(cudaMemcpy(d_Data,   h_Data,   dataH   * dataW *   sizeof(float), cudaMemcpyHostToDevice));

// Forward FFT
checkCudaErrors(cufftPlan2d(&fftPlanFwd, dataH, dataW, CUFFT_R2C));
checkCudaErrors(cufftExecR2C(fftPlanFwd, (cufftReal *)d_Data, (cufftComplex *)d_DataSpectrum));

// Inverse FFT
checkCudaErrors(cufftPlan2d(&fftPlanInv, dataH, dataW, CUFFT_C2C));
checkCudaErrors(cufftExecC2C(fftPlanInv, (cufftComplex *)d_DataSpectrum, (cufftComplex *)d_Result, CUFFT_INVERSE));

// Copy result to host memory
checkCudaErrors(cudaMemcpy(h_Result, d_Result, dataH * complexW * sizeof(cufftComplex), cudaMemcpyDeviceToHost));

// Convert cufftComplex to OpenCV real and imag Mat
Mat_<float> resultReal = Mat_<float>(dataH, dataW);
Mat_<float> resultImag = Mat_<float>(dataH, dataW);
for(int i=0; i<dataH; i++){
    float* rowPtrReal = resultReal.ptr<float>(i);
    float* rowPtrImag = resultImag.ptr<float>(i);
    for(int j=0; j<dataW; j++){
        if(j<complexW){
            rowPtrReal[j] = h_Result[i*complexW+j].x/(dataH*dataW);
            rowPtrImag[j] = h_Result[i*complexW+j].y/(dataH*dataW);
        }else{
            // Right side?
            rowPtrReal[j] = h_Result[i*complexW+(dataW-j)].x/(dataH*dataW);
            rowPtrImag[j] = -h_Result[i*complexW+(dataW-j)].y/(dataH*dataW);
        }
    }
}

// Compute phase and normalize to 8 bit
Mat_<float> resultPhase;
phase(resultReal, resultImag, resultPhase);
cv::subtract(resultPhase, 2*M_PI, resultPhase, (resultPhase > M_PI));
resultPhase = ((resultPhase+M_PI)*255)/(2*M_PI);
Mat_<uchar> normalized = Mat_<uchar>(dataH, dataW);
resultPhase.convertTo(normalized, CV_8U);
// Save phase image
cv::imwrite("cuda_propagation_phase.png",normalized);

// Compute amplitude and normalize to 8 bit
Mat_<float> resultAmplitude;
magnitude(resultReal, resultImag, resultAmplitude);
Mat_<uchar> normalizedAmplitude = Mat_<uchar>(dataH, dataW);
resultAmplitude.convertTo(normalizedAmplitude, CV_8U);
// Save amplitude image
cv::imwrite("cuda_propagation_amplitude.png",normalizedAmplitude);

I am not sure where my mistake is. Is this the right way to get the whole image back from the reduced-size output (the for loop)?

2 Answers:

Answer 0 (score: 1)

I think I got it. The trick is to start from a complex matrix. Starting from a real one, you have to apply an R2C transform, which is reduced in size because of the symmetry of the spectrum, followed by a C2C transform, which keeps that reduced size. The solution is to build a complex input from the real input by inserting zeros as the imaginary part, and then apply two C2C transforms in a row, which both preserve the full image size and make it easy to obtain full-size real and imaginary matrices afterwards:

// Load image
cv::Mat_<float> img;
img = cv::imread(path,0);
if(!img.isContinuous()){
    std::cout<<"Input cv::Mat is not continuous!"<<std::endl;
    return -1;
}

float *h_DataReal = img.ptr<float>(0);
cufftComplex *h_DataComplex;

// Image dimensions
const int dataH = img.rows;
const int dataW = img.cols;

// Convert real input to complex
h_DataComplex = (cufftComplex *)malloc(dataH    * dataW * sizeof(cufftComplex));
for(int i=0; i<dataH*dataW; i++){
    h_DataComplex[i].x = h_DataReal[i];
    h_DataComplex[i].y = 0.0f;
}

// Complex device pointers
cufftComplex
*d_Data,
*d_DataSpectrum,
*d_Result,
*h_Result;

// Plans for cuFFT execution
cufftHandle
fftPlanFwd,
fftPlanInv;

// Allocate memory
h_Result = (cufftComplex *)malloc(dataH    * dataW * sizeof(cufftComplex));
checkCudaErrors(cudaMalloc((void **)&d_DataSpectrum,   dataH * dataW * sizeof(cufftComplex)));
checkCudaErrors(cudaMalloc((void **)&d_Data,   dataH   * dataW   * sizeof(cufftComplex)));
checkCudaErrors(cudaMalloc((void **)&d_Result,   dataH * dataW * sizeof(cufftComplex)));

// Copy image to GPU
checkCudaErrors(cudaMemcpy(d_Data,   h_DataComplex,   dataH   * dataW *   sizeof(cufftComplex), cudaMemcpyHostToDevice));

// Forward FFT
checkCudaErrors(cufftPlan2d(&fftPlanFwd, dataH, dataW, CUFFT_C2C));
checkCudaErrors(cufftExecC2C(fftPlanFwd, (cufftComplex *)d_Data, (cufftComplex *)d_DataSpectrum, CUFFT_FORWARD));

// Inverse FFT
checkCudaErrors(cufftPlan2d(&fftPlanInv, dataH, dataW, CUFFT_C2C));
checkCudaErrors(cufftExecC2C(fftPlanInv, (cufftComplex *)d_DataSpectrum, (cufftComplex *)d_Result, CUFFT_INVERSE));

// Copy result to host memory
checkCudaErrors(cudaMemcpy(h_Result, d_Result, dataH * dataW * sizeof(cufftComplex), cudaMemcpyDeviceToHost));

// Convert cufftComplex to OpenCV real and imag Mat
Mat_<float> resultReal = Mat_<float>(dataH, dataW);
Mat_<float> resultImag = Mat_<float>(dataH, dataW);
for(int i=0; i<dataH; i++){
    float* rowPtrReal = resultReal.ptr<float>(i);
    float* rowPtrImag = resultImag.ptr<float>(i);
    for(int j=0; j<dataW; j++){
            rowPtrReal[j] = h_Result[i*dataW+j].x/(dataH*dataW);
            rowPtrImag[j] = h_Result[i*dataW+j].y/(dataH*dataW);
    }
}
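
From here the phase and amplitude steps from the question can be reused unchanged on the full-size matrices. A minimal sketch (same OpenCV calls as in the question; note that cuFFT's inverse transform is unnormalized, which is why the loop above divides by dataH*dataW):

cv::Mat_<float> resultPhase, resultAmplitude;
cv::phase(resultReal, resultImag, resultPhase);          // per-pixel phase angle
cv::magnitude(resultReal, resultImag, resultAmplitude);  // per-pixel magnitude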

Answer 1 (score: 0)

This is an old question, but let me add some more information: an R2C transform preserves the same amount of information as a C2C transform, only with roughly half as many elements. The R2C (and C2R) transforms exploit Hermitian symmetry to reduce the number of elements that are computed and stored in memory (i.e. the FFT of a real input is symmetric, so you do not actually need to store half of the terms that a C2C transform keeps).
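
In symbols (my own notation, not part of the original answer): for a real-valued Nx x Ny input the full spectrum satisfies the Hermitian relation

X(kx, ky) = conj( X((Nx - kx) mod Nx, (Ny - ky) mod Ny) )

which is why cuFFT's 2D R2C output only stores the first Ny/2 + 1 elements along the last (fastest-varying) dimension.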

To generate 2D images of the real and imaginary parts, you could use an R2C transform and then write a kernel that converts the (Nx/2+1) x Ny output array into a pair of full-size Nx x Ny arrays, exploiting the symmetry yourself to write each term to the right place. However, using C2C transforms is less code and far more foolproof.
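
For reference, a minimal sketch of such an expansion kernel (my own illustration, not from the original answer; it assumes the same row-major H x (W/2+1) layout as in the question, and the kernel name, the output buffers d_Re/d_Im and the launch configuration are hypothetical):

// Expand cuFFT's reduced H x (W/2+1) R2C output into full-size H x W
// real and imaginary arrays using the Hermitian symmetry
// F(i, j) = conj( F((H - i) % H, (W - j) % W) ).
__global__ void expandR2C(const cufftComplex *reduced, float *re, float *im,
                          int H, int W)
{
    const int j = blockIdx.x * blockDim.x + threadIdx.x; // column
    const int i = blockIdx.y * blockDim.y + threadIdx.y; // row
    if (i >= H || j >= W) return;

    const int complexW = W / 2 + 1;
    cufftComplex v;
    if (j < complexW) {
        // Left half of the spectrum is stored directly.
        v = reduced[i * complexW + j];
    } else {
        // Right half is the complex conjugate of the mirrored element;
        // note that BOTH indices are mirrored, not just the column.
        const int mi = (H - i) % H;
        const int mj = W - j;            // always < complexW here
        v = reduced[mi * complexW + mj];
        v.y = -v.y;
    }
    re[i * W + j] = v.x;
    im[i * W + j] = v.y;
}

// Example launch, reusing the names from the question (d_Re/d_Im are
// hypothetical float* device buffers of size dataH * dataW):
// dim3 block(16, 16);
// dim3 grid((dataW + block.x - 1) / block.x, (dataH + block.y - 1) / block.y);
// expandR2C<<<grid, block>>>(d_DataSpectrum, d_Re, d_Im, dataH, dataW);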