今天我正致力于扩展我的简单OpenCV图像处理应用程序。我想计算我加载的cv::Mat的相位和幅度。我必须为此目的使用FFTW C++库(我知道OpenCV中的dft)。
我的工作基于教程:http://www.admindojo.com/discrete-fourier-transform-in-c-with-fftw/
所以根据教程我的输出幅度应该是这样的:
不幸的是我的输出完全不同:
另一方面,相位图像与教程图像几乎相同,所以这部分很好。
看一下最重要的代码:(我在那里做的是尝试移植教程,因为它与OpenCV一起工作)
编辑:(两个帖子合并) 好。所以我稍微更改了代码,但输出仍然与教程不同。 看看代码:
// Computes the per-channel 2-D FFT log-magnitude and phase of a square BGR image.
//
// pixels   : input CV_8UC3 image; assumed square (rows == cols) -- TODO confirm at call sites
// outMag   : output CV_32FC3, log-magnitude normalized per channel to [0,1]
// outPhase : output CV_32FC3, phase scaled from [-pi,pi] to [0,1]
//            (passed by value, but cv::Mat shares its pixel buffer, so writes
//            are still visible to the caller -- presumably intentional; verify)
// mode     : only FFT is implemented; any other mode is now a no-op.
void Processing::fft_moc(cv::Mat &pixels, cv::Mat &outMag, cv::Mat outPhase, int mode)
{
// BUGFIX: the original created plans only when mode == FFT but executed and
// destroyed them unconditionally -- undefined behavior for any other mode.
if (mode != FFT)
{
return;
}
int squareSize = pixels.cols;
double norm = (double)(squareSize * squareSize); // DFT normalization factor
fftw_plan planR, planG, planB;
fftw_complex *inR, *inG, *inB, *outR, *outG, *outB;
// allocate input arrays
inB = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * squareSize * squareSize);
inG = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * squareSize * squareSize);
inR = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * squareSize * squareSize);
// allocate output arrays
outB = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * squareSize * squareSize);
outG = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * squareSize * squareSize);
outR = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * squareSize * squareSize);
// create plans
// BUGFIX: each plan now reads its OWN input buffer. The original crossed them
// (planB read inR, planR read inB), swapping the red and blue spectra.
planB = fftw_plan_dft_2d(squareSize, squareSize, inB, outB, FFTW_FORWARD, FFTW_ESTIMATE);
planG = fftw_plan_dft_2d(squareSize, squareSize, inG, outG, FFTW_FORWARD, FFTW_ESTIMATE);
planR = fftw_plan_dft_2d(squareSize, squareSize, inR, outR, FFTW_FORWARD, FFTW_ESTIMATE);
// assign pixel values to the real parts (values between 0 and MaxRGB).
// BUGFIX: fftw_malloc does NOT zero the memory, so the imaginary parts must be
// cleared explicitly -- otherwise the transform input contains garbage, which
// corrupts the magnitude image.
for( int x = 0; x < pixels.rows; x++ )
{
for( int y = 0; y < pixels.cols; y++ )
{
const cv::Vec3b &px = pixels.at<cv::Vec3b>(x,y);
int i = squareSize*x+y;
inB[i][0] = px[0]; inB[i][1] = 0.0;
inG[i][0] = px[1]; inG[i][1] = 0.0;
inR[i][0] = px[2]; inR[i][1] = 0.0;
}
}
// perform FORWARD fft
fftw_execute(planB);
fftw_execute(planG);
fftw_execute(planR);
// temporary per-channel log-magnitude storage
// (freed at the end -- the original leaked this array)
double ***outMagF = new double**[pixels.rows];
for(int i = 0 ; i < pixels.rows ; i++)
{
outMagF[i] = new double *[pixels.cols];
for(int j = 0 ; j < pixels.cols ; j++)
{
outMagF[i][j] = new double[3];
}
}
// calculate log-magnitude and track per-channel min/max for normalization.
// BUGFIX: trackers start at +/-HUGE_VAL instead of 0 so the true minimum is
// found even when every magnitude is positive.
double n_minB = HUGE_VAL, n_maxB = -HUGE_VAL;
double n_minG = HUGE_VAL, n_maxG = -HUGE_VAL;
double n_minR = HUGE_VAL, n_maxR = -HUGE_VAL;
for( int x = 0; x < pixels.rows; x++ )
{
for( int y = 0; y < pixels.cols; y++ )
{
int i = squareSize*x+y;
// normalize the DFT output by the number of samples
double realB = outB[i][0] / norm;
double imagB = outB[i][1] / norm;
double realG = outG[i][0] / norm;
double imagG = outG[i][1] / norm;
double realR = outR[i][0] / norm;
double imagR = outR[i][1] / norm;
// log-scaled magnitude: log(1 + |F|) keeps the huge DC term from
// crushing everything else to black
double magB = log(1+sqrt((realB * realB) + (imagB * imagB)));
double magG = log(1+sqrt((realG * realG) + (imagG * imagG)));
double magR = log(1+sqrt((realR * realR) + (imagR * imagR)));
n_minB = n_minB > magB ? magB : n_minB;
n_maxB = n_maxB < magB ? magB : n_maxB;
n_minG = n_minG > magG ? magG : n_minG;
n_maxG = n_maxG < magG ? magG : n_maxG;
n_minR = n_minR > magR ? magR : n_minR;
n_maxR = n_maxR < magR ? magR : n_maxR;
outMagF[x][y][0] = magB;
outMagF[x][y][1] = magG;
outMagF[x][y][2] = magR;
}
}
// second pass: write min/max-normalized magnitude and scaled phase
for( int x = 0; x < pixels.rows; x++ )
{
for( int y = 0; y < pixels.cols; y++ )
{
int i = squareSize*x+y;
double realB = outB[i][0] / norm;
double imagB = outB[i][1] / norm;
double realG = outG[i][0] / norm;
double imagG = outG[i][1] / norm;
double realR = outR[i][0] / norm;
double imagR = outR[i][1] / norm;
// write normalized magnitude = (value-min)/(max-min)
outMag.at<cv::Vec3f>(x,y)[0] = (double)(outMagF[x][y][0]-n_minB)/(n_maxB-n_minB);
outMag.at<cv::Vec3f>(x,y)[1] = (double)(outMagF[x][y][1]-n_minG)/(n_maxG-n_minG);
outMag.at<cv::Vec3f>(x,y)[2] = (double)(outMagF[x][y][2]-n_minR)/(n_maxR-n_minR);
// std::complex for arg()
std::complex<double> cB(realB, imagB);
std::complex<double> cG(realG, imagG);
std::complex<double> cR(realR, imagR);
// phase, shifted from [-pi,pi] to [0,2*pi]
double phaseB = arg(cB) + M_PI;
double phaseG = arg(cG) + M_PI;
double phaseR = arg(cR) + M_PI;
// scale phase to [0,1] and write to output
outPhase.at<cv::Vec3f>(x,y)[0] = phaseB / (double)(2 * M_PI);
outPhase.at<cv::Vec3f>(x,y)[1] = phaseG / (double)(2 * M_PI);
outPhase.at<cv::Vec3f>(x,y)[2] = phaseR / (double)(2 * M_PI);
}
}
// move zero frequency to (squareSize/2, squareSize/2)
swapQuadrants(squareSize, outMag);
swapQuadrants(squareSize, outPhase);
// free memory -- BUGFIX: outMagF was leaked in the original
for(int i = 0 ; i < pixels.rows ; i++)
{
for(int j = 0 ; j < pixels.cols ; j++)
{
delete[] outMagF[i][j];
}
delete[] outMagF[i];
}
delete[] outMagF;
fftw_destroy_plan(planR);
fftw_destroy_plan(planG);
fftw_destroy_plan(planB);
fftw_free(inR); fftw_free(outR);
fftw_free(inG); fftw_free(outG);
fftw_free(inB); fftw_free(outB);
}
我将最终输出存储在cv::Mat中,类型为CV_32FC3。是的,我规范化幅度的方式非常难看,但我只是想确保一切都像我期望的那样工作。
再次看一下我的输出:
所以你可以看到我仍然需要帮助。
答案 0（得分：0）
您将计算值分配给uchar变量,并且丢失精度,所有负值和255以上的值也会丢失。 尝试在实值变量中进行计算,然后将最终结果标准化为0-255范围,然后将其分配给CV_8U类型的结果图像。
答案 1（得分：0）
FFT平面通常在非常大的第0个元素(DC)和通常接近零的其余元素之间包含非常大的差异。
当显示幅度时,通常的做法是实际显示幅度的对数,以便较大的值比小的值更强烈地减少。
本教程明确说明了这一点："幅度似乎是黑色但不是。为了使信息可见,我们以对数方式缩放图像。"
您需要显示值的日志才能看到类似的图像。