I am trying to implement a deconvolution algorithm in the Fourier transform domain. I already have a working implementation in Matlab, and I would like to translate it to C++ using the OpenCV library.
Basically, what I do is extract the gradients from the input image, do some work in the transform domain, and then go back to the spatial domain.
The part that is giving me trouble is performing this (element-wise) division:
DFT(im) = ( conj( DFT(f) ) * DFT(image) + L2 * ( conj( DFT( gradKernel-x ) ) * DFT(mux) + ... ) ) / ( norm( DFT(f) )^2 + L2 * ( norm( DFT( gradKernel-x ) )^2 + ... ) )
Here f is the Gaussian kernel defined in the code, and DFT( gradKernel-x ) is the FFT of the gradient kernel in the x direction, i.e. DFT([1,-1]) zero-padded to the size of the blurred image. mux is an auxiliary variable used to perform the deconvolution.
I decided to carry out the division in the transform domain separately in magnitude and phase, before taking the inverse DFT.
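For clarity, this is the division step in isolation (a condensed sketch of what the loop further below does; num and den stand for the CV_32FC2 numerator and denominator spectra):
Mat numParts[2], denParts[2];
split(num, numParts);                              // Re/Im of numerator
split(den, denParts);                              // Re/Im of denominator
Mat numMag, numPhase, denMag, denPhase;
magnitude(numParts[0], numParts[1], numMag);       // |num|
phase(numParts[0], numParts[1], numPhase);         // arg(num)
magnitude(denParts[0], denParts[1], denMag);       // |den|
phase(denParts[0], denParts[1], denPhase);         // arg(den)
Mat quot[2];
divide(numMag, denMag, quot[0]);                   // |num| / |den|
quot[1] = numPhase - denPhase;                     // arg(num) - arg(den)
polarToCart(quot[0], quot[1], quot[0], quot[1]);   // back to Re/Im
Mat ratio;
merge(quot, 2, ratio);                             // complex quotient, CV_32FC2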
I cannot tell where the bug in my code is; maybe it is in the division, maybe in the forward/inverse transforms of my variables, maybe in the Gaussian kernel...
I would be very grateful if someone could help me.
Here is the key part of the code (note that I simplified it before posting, so if you try it, do not expect a good deblurring result; basically what I expect is a visually pleasing output image):
imH00=imread("Cameraman256.png",0);
if(!imH00.data)
{
std::cout<< "Could not open or find the image" << std::endl ;
return -1;
}
imH00.convertTo(imH00,CV_32F,1.0/255.0,0.0);
// Gaussian Kernel
Mat ker1D=getGaussianKernel(ksize,sigma,CV_32F);
fkernel.create(imH00.size(),CV_32F);
// zero-padding
fkernel.setTo(Scalar::all(0));
temp=ker1D*ker1D.t();
temp.copyTo(fkernel(Rect(0,0,temp.rows,temp.cols)));
// Fourier transform
Mat planes[] = {Mat_<float>(fkernel), Mat::zeros(fkernel.size(), CV_32F)};
Mat ffkernel;
merge(planes, 2, ffkernel);
dft(ffkernel, ffkernel,DFT_COMPLEX_OUTPUT);
// Gradient filter in frequency domain, trying to do something similar to psf2otf([1;-1],size(imH00)); in Matlab.
dx=Mat::zeros(imH00.size(),CV_32F);
dx.at<float>(0,0)=1;
dx.at<float>(0,1)=-1;
Mat dxplanes[] = {Mat_<float>(dx), Mat::zeros(dx.size(), CV_32F)};
Mat fdx;
merge(dxplanes, 2, fdx);
dft(fdx, fdx,DFT_COMPLEX_OUTPUT);
dy=Mat::zeros(imH00.size(),CV_32F);
dy.at<float>(0,0)=1;
dy.at<float>(1,0)=-1;
Mat dyplanes[] = {Mat_<float>(dy), Mat::zeros(dy.size(), CV_32F)};
Mat fdy;
merge(dyplanes, 2, fdy);
dft(fdy, fdy,DFT_COMPLEX_OUTPUT);
// Denominators
Mat den1;
Mat den2;
Mat den21;
Mat den22;
// ||fdx||^2 and ||fdy||^2
mulSpectrums(fdx,fdx,den21,DFT_COMPLEX_OUTPUT,true);
mulSpectrums(fdy,fdy,den22,DFT_COMPLEX_OUTPUT,true);
add(den21,den22,den2);
mulSpectrums(ffkernel,ffkernel,den1,0,true);
imHk=imH00.clone();
mux=Mat::zeros(imH00.size(),CV_32F);
muy=Mat::zeros(imH00.size(),CV_32F);
// FFT imH00
Mat fHktplanes[] = {Mat_<float>(imH00), Mat::zeros(imH00.size(), CV_32F)};
Mat fHkt;
merge(fHktplanes, 2, fHkt);
dft(fHkt, fHkt,DFT_COMPLEX_OUTPUT);
std::cout<<"starting deconvolution"<<std::endl;
for (int j=0; j<4; j++)
{
    // Deconvolution
    // Gradients
    Mat ddx(1,2,CV_32F,Scalar(0));
    ddx.at<float>(0,0)=1;
    ddx.at<float>(0,1)=-1;
    filter2D(imHk,dHx,CV_32F,ddx);
    Mat ddy(2,1,CV_32F,Scalar(0));
    ddy.at<float>(0,0)=1;
    ddy.at<float>(1,0)=-1;
    filter2D(imHk,dHy,CV_32F,ddy);
    mux=Scalar((float)-0.5*L1/L2);
    add(mux,dHx,mux);
    muy=Scalar((float)-0.5*L1/L2);
    add(muy,dHy,muy);
    // FFT mux, muy
    Mat fmuxplanes[] = {Mat_<float>(mux), Mat::zeros(mux.size(), CV_32F)};
    Mat fmux;
    merge(fmuxplanes, 2, fmux);
    dft(fmux, fmux,DFT_COMPLEX_OUTPUT);
    Mat fmuyplanes[] = {Mat_<float>(muy), Mat::zeros(muy.size(), CV_32F)};
    Mat fmuy;
    merge(fmuyplanes, 2, fmuy);
    dft(fmuy, fmuy,DFT_COMPLEX_OUTPUT);
    Mat num1,num2,num3,num,den;
    // Spectrums multiplication, complex conjugate
    mulSpectrums(fHkt,ffkernel,num1,DFT_COMPLEX_OUTPUT,true);
    mulSpectrums(fmux,fdx,num2,DFT_COMPLEX_OUTPUT,true);
    mulSpectrums(fmuy,fdy,num3,DFT_COMPLEX_OUTPUT,true);
    add(num2,num3,num2);
    add(num1,L2*num2,num);
    add(den1,L2*den2,den);
    // Division in polar coordinates
    Mat auxnum[2];
    split(num,auxnum);
    Mat auxden[2];
    split(den,auxden);
    Mat numMag,numPhase;
    magnitude(auxnum[0],auxnum[1],numMag);
    phase(auxnum[0],auxnum[1],numPhase);
    Mat denMag,denPhase;
    magnitude(auxden[0],auxden[1],denMag);
    phase(auxden[0],auxden[1],denPhase);
    Mat division[2];
    divide(numMag,denMag,division[0]);
    division[1]=numPhase-denPhase;
    polarToCart(division[0],division[1],division[0],division[1]);
    Mat fHk;
    merge(division,2,fHk);
    Mat imHkaux;
    Mat planesfHk[2];
    dft(fHk, imHkaux, DFT_INVERSE+DFT_SCALE);
    split(imHkaux,planesfHk);
    imHk=planesfHk[0]; // imHk is the Real part
}
imHk.convertTo(imHk,CV_8U,255);
imshow( "Deblurred image", imHk);
Thanks.
Answer 0 (score: 1)
The problem was in the Fourier transform of the filter: the filter kernel has to be shifted before the transform, which is what the psf2otf function does in Matlab. In case anyone is interested, this simple code performs the DFT of a Gaussian kernel so that it is unaffected by centering (like psf2otf):
float sigma=1.0;
short int ksize=13; // always odd
Mat ker1D=getGaussianKernel(ksize,sigma,CV_32F); //1D gaussian kernel
fkernel.create(myImage.size(),CV_32F); // image-sized buffer for the zero-padded kernel
// zero-padding
fkernel.setTo(Scalar::all(0));
temp=ker1D*ker1D.t(); // 2D gaussian kernel, (Gaussian filter is separable)
int r=(ksize-1)/2; // kernel radius
// shifting: swap quadrants so the kernel centre ends up at (0,0), like psf2otf
temp(Rect(r,r,r+1,r+1)).copyTo(fkernel(Rect(0,0,r+1,r+1)));                   // q1: bottom-right -> top-left
temp(Rect(r,0,r+1,r)).copyTo(fkernel(Rect(0,fkernel.rows-r,r+1,r)));          // q2: top-right -> bottom-left
temp(Rect(0,r,r,r+1)).copyTo(fkernel(Rect(fkernel.cols-r,0,r,r+1)));          // q3: bottom-left -> top-right
temp(Rect(0,0,r,r)).copyTo(fkernel(Rect(fkernel.cols-r,fkernel.rows-r,r,r))); // q4: top-left -> bottom-right
// DFT
Mat planes[] = {Mat_<float>(fkernel), Mat::zeros(fkernel.size(), CV_32F)};
Mat ffkernel;
merge(planes, 2, ffkernel);
dft(ffkernel, ffkernel,DFT_COMPLEX_OUTPUT);
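As a quick sanity check (my own addition, not part of the original answer): after the shift, the spectrum of a symmetric kernel should be essentially real, and its DC term should equal the kernel's sum (1 for a normalized Gaussian):
Mat otf[2];
split(ffkernel, otf);                                                      // Re/Im of the OTF
std::cout << "max |imag| of OTF: " << norm(otf[1], NORM_INF) << std::endl; // should be ~0
std::cout << "DC term: " << otf[0].at<float>(0,0) << std::endl;            // should be ~1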