Undoing fisheye distortion (I am using OpenCV with VC++)

Posted: 2015-01-22 09:53:29

Tags: c opencv visual-c++

I have simulated a fisheye distortion. Now I want to develop the reverse program, which converts the distorted image back to a normal image. I tried the undistortPoints() function, but I cannot understand its input (the distortion-coefficient argument): cv.UndistortPoints(distorted, undistorted, intrinsics, dist_coeffs)
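For reference, a minimal sketch (using OpenCV's C++ API, with made-up placeholder calibration values rather than real ones) of how undistortPoints() is typically called and what the distortion-coefficient argument looks like:

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main()
{
    // 3x3 camera matrix K: fx, fy = focal lengths, cx, cy = principal point (placeholder values)
    cv::Mat K = (cv::Mat_<double>(3, 3) << 500,   0, 320,
                                             0, 500, 240,
                                             0,   0,   1);
    // distortion coefficients (k1, k2, p1, p2, k3), also placeholders
    cv::Mat distCoeffs = (cv::Mat_<double>(1, 5) << 0.1, -0.05, 0.0, 0.0, 0.0);

    std::vector<cv::Point2f> distorted(1, cv::Point2f(400.f, 300.f));   // a pixel in the distorted image
    std::vector<cv::Point2f> undistorted;

    // By default the output is in normalized coordinates; passing K as the
    // last (P) argument converts the result back to pixel coordinates.
    cv::undistortPoints(distorted, undistorted, K, distCoeffs, cv::noArray(), K);
    std::cout << undistorted[0] << std::endl;
    return 0;
}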

My fisheye distortion code:

#include "stdio.h"
#include <cv.h>
#include <highgui.h>
#include <math.h>
#include <iostream>

// Bilinear sampling of arr at (row = idx0, col = idx1); returns black for
// coordinates outside the image.
void sampleImage(const IplImage* arr, float idx0, float idx1, CvScalar& res)
{
    if(idx0<0 || idx1<0 || idx0>(cvGetSize(arr).height-1) || idx1>(cvGetSize(arr).width-1))
    {
        res.val[0]=0;
        res.val[1]=0;
        res.val[2]=0;
        res.val[3]=0;
        return;
    }

    float idx0_fl=floor(idx0);
    float idx0_cl=ceil(idx0);
    float idx1_fl=floor(idx1);
    float idx1_cl=ceil(idx1);

    CvScalar s1=cvGet2D(arr,(int)idx0_fl,(int)idx1_fl);
    CvScalar s2=cvGet2D(arr,(int)idx0_fl,(int)idx1_cl);
    CvScalar s3=cvGet2D(arr,(int)idx0_cl,(int)idx1_cl);
    CvScalar s4=cvGet2D(arr,(int)idx0_cl,(int)idx1_fl);

    float x = idx0 - idx0_fl;
    float y = idx1 - idx1_fl;

    res.val[0]= s1.val[0]*(1-x)*(1-y) + s2.val[0]*(1-x)*y + s3.val[0]*x*y + s4.val[0]*x*(1-y);
    res.val[1]= s1.val[1]*(1-x)*(1-y) + s2.val[1]*(1-x)*y + s3.val[1]*x*y + s4.val[1]*x*(1-y);
    res.val[2]= s1.val[2]*(1-x)*(1-y) + s2.val[2]*(1-x)*y + s3.val[2]*x*y + s4.val[2]*x*(1-y);
    res.val[3]= s1.val[3]*(1-x)*(1-y) + s2.val[3]*(1-x)*y + s3.val[3]*x*y + s4.val[3]*x*(1-y);
}

// Shift/scale normalization parameters, computed in main() so that the
// distorted image still fills the frame.
float xscale;
float yscale;
float xshift;
float yshift;
// Forward radial distortion: given destination pixel (x, y), returns the x
// coordinate to sample in the source image.
float getRadialX(float x,float y,float cx,float cy,float k)
{
    x = (x*xscale+xshift);
    y = (y*yscale+yshift);
    float res = x+((x-cx)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
    return res;
}

// Forward radial distortion: given destination pixel (x, y), returns the y
// coordinate to sample in the source image.
float getRadialY(float x,float y,float cx,float cy,float k)
{
    x = (x*xscale+xshift);
    y = (y*yscale+yshift);
    float res = y+((y-cy)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
    return res;
}

float thresh = 1;
// Bisection search on the 1-D radial model res = x + (x-cx)*k*(x-cx)^2:
// finds the coordinate whose distorted position is (approximately) zero,
// which gives the shift needed to keep the distorted image inside the frame.
float calc_shift(float x1,float x2,float cx,float k)
{
    float x3 = x1+(x2-x1)*0.5;
    float res1 = x1+((x1-cx)*k*((x1-cx)*(x1-cx)));
    float res3 = x3+((x3-cx)*k*((x3-cx)*(x3-cx)));

    //  std::cerr<<"x1: "<<x1<<" - "<<res1<<" x3: "<<x3<<" - "<<res3<<std::endl;

    if(res1>-thresh && res1 < thresh)
    return x1;
    if(res3<0)
    {
        return calc_shift(x3,x2,cx,k);
    }
    else
    {
        return calc_shift(x1,x3,cx,k);
    }
}

int main(int argc, char** argv)
{
    IplImage* src = cvLoadImage( "D:\\2012 Projects\\FishEye\\Debug\\images\\grid1.bmp", 1 );
    IplImage* dst = cvCreateImage(cvGetSize(src),src->depth,src->nChannels);
    IplImage* dst2 = cvCreateImage(cvGetSize(src),src->depth,src->nChannels);
    float K=0.002;   // distortion strength used by the radial model above
    float centerX=(float)(src->width/2);
    float centerY=(float)(src->height/2);
    int width = cvGetSize(src).width;
    int height = cvGetSize(src).height;

    xshift = calc_shift(0,centerX-1,centerX,K);
    float newcenterX = width-centerX;
    float xshift_2 = calc_shift(0,newcenterX-1,newcenterX,K);

    yshift = calc_shift(0,centerY-1,centerY,K);
    float newcenterY = height-centerY;
    float yshift_2 = calc_shift(0,newcenterY-1,newcenterY,K);
    //  scale = (centerX-xshift)/centerX;
    xscale = (width-xshift-xshift_2)/width;
    yscale = (height-yshift-yshift_2)/height;

    std::cerr<<xshift<<" "<<yshift<<" "<<xscale<<" "<<yscale<<std::endl;
    std::cerr<<cvGetSize(src).height<<std::endl;
    std::cerr<<cvGetSize(src).width<<std::endl;

    for(int j=0;j<cvGetSize(dst).height;j++)
    {
        for(int i=0;i<cvGetSize(dst).width;i++)
        {
            CvScalar s;
            float x = getRadialX((float)i,(float)j,centerX,centerY,K);
            float y = getRadialY((float)i,(float)j,centerX,centerY,K);
            sampleImage(src,y,x,s);
            cvSet2D(dst,j,i,s);
        }
    }


    #if 0
    cvNamedWindow( "Source1", 1 );
    cvShowImage( "Source1", dst);
    cvWaitKey(0);
    #endif

    cvSaveImage("D:\\2012 Projects\\FishEye\\Debug\\images\\grid3.bmp",dst,0);

    cvNamedWindow( "Source1", 1 );
    cvShowImage( "Source1", src);
    cvWaitKey(0);

    cvNamedWindow( "Distortion", 2 );
    cvShowImage( "Distortion", dst);
    cvWaitKey(0);

    #if 0
    for(int j=0;j<cvGetSize(src).height;j++)
    {
        for(int i=0;i<cvGetSize(src).width;i++)
        {
            CvScalar s;
            sampleImage(src,j+0.25,i+0.25,s);
            cvSet2D(dst,j,i,s);
        }
    }

    cvNamedWindow( "Source1", 1 );
    cvShowImage( "Source1", src);
    cvWaitKey(0);

    #endif  

}

1 Answer:

Answer 0 (score: 1)

Actually, my original answer was about the undistortion algorithm for individual points. If you want to undistort a complete image, there is a much simpler technique, as explained in this other post:

Understanding of openCV undistortion
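In practice, if the distortion follows OpenCV's standard model, the whole image can be corrected with a single call; a minimal sketch, assuming K and distCoeffs come from a calibration (the values below are placeholders):

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("distorted.bmp");   // hypothetical input path
    if (src.empty()) return 1;

    cv::Mat K = (cv::Mat_<double>(3, 3) << 500, 0, 320, 0, 500, 240, 0, 0, 1);   // assumed camera matrix
    cv::Mat distCoeffs = (cv::Mat_<double>(1, 5) << 0.1, -0.05, 0.0, 0.0, 0.0);  // assumed (k1, k2, p1, p2, k3)

    cv::Mat dst;
    cv::undistort(src, dst, K, distCoeffs);      // remaps every pixel using the algorithm outlined below
    cv::imwrite("undistorted.bmp", dst);
    return 0;
}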

The outline of the algorithm (the one used in OpenCV's undistort() function) is the following. For each pixel of the destination, lens-corrected image, do the following (a minimal code sketch of these steps is given after the list):

  • Convert the pixel coordinates (u_dst, v_dst) to normalized coordinates (x', y') using the inverse of the calibration matrix K

  • Apply the lens-distortion model to obtain the distorted normalized coordinates (x'', y'')

  • Convert (x'', y'') to distorted pixel coordinates (u_src, v_src) using the calibration matrix K

  • Use the interpolation method of your choice to find the intensity/depth associated with the pixel coordinates (u_src, v_src) in the source image, and assign this intensity/depth to the current destination pixel (u_dst, v_dst)
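A minimal sketch of these four steps, assuming OpenCV's C++ API and only a single radial coefficient k1 (all numeric values are placeholders, not calibration results):

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("distorted.bmp");               // hypothetical input
    if (src.empty()) return 1;
    cv::Mat dst(src.size(), src.type(), cv::Scalar::all(0));

    const double fx = 500.0, fy = 500.0;                      // assumed focal lengths (pixels)
    const double cx = src.cols / 2.0, cy = src.rows / 2.0;    // assumed principal point
    const double k1 = 0.1;                                    // assumed radial coefficient

    for (int v_dst = 0; v_dst < dst.rows; ++v_dst) {
        for (int u_dst = 0; u_dst < dst.cols; ++u_dst) {
            // 1. destination pixel -> normalized coordinates (inverse of K)
            double x = (u_dst - cx) / fx;
            double y = (v_dst - cy) / fy;
            // 2. apply the lens-distortion model (here: one radial term)
            double r2 = x * x + y * y;
            double xd = x * (1.0 + k1 * r2);
            double yd = y * (1.0 + k1 * r2);
            // 3. distorted normalized coordinates -> source pixel (apply K)
            int u_src = cvRound(xd * fx + cx);
            int v_src = cvRound(yd * fy + cy);
            // 4. nearest-neighbour lookup (replace with bilinear for better quality)
            if (u_src >= 0 && u_src < src.cols && v_src >= 0 && v_src < src.rows)
                dst.at<cv::Vec3b>(v_dst, u_dst) = src.at<cv::Vec3b>(v_src, u_src);
        }
    }
    cv::imwrite("undistorted.bmp", dst);
    return 0;
}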


Original answer:

Here is the undistortion algorithm extracted from the OpenCV function undistortPoints():

// Maps a point from the distorted image to normalized camera coordinates.
// Kfx, Kfy, Kcx, Kcy are the focal lengths and principal point from the camera
// matrix K; Dk1..Dk3 and Dp1, Dp2 are the radial and tangential distortion
// coefficients (members of the surrounding class).
void dist2norm(const cv::Point2d &pt_dist, cv::Point2d &pt_norm) const {

    pt_norm.x = (pt_dist.x-Kcx)/Kfx;
    pt_norm.y = (pt_dist.y-Kcy)/Kfy;

    int niters=(Dk1!=0.?5:0);   // a few fixed-point iterations if there is any radial distortion
    double x0=pt_norm.x, y0=pt_norm.y;
    for(int i=0; i<niters; ++i) {

        double x2=pt_norm.x*pt_norm.x,
            y2=pt_norm.y*pt_norm.y,
            xy=pt_norm.x*pt_norm.y,
            r2=x2+y2;

        double icdist = 1./(1 + ((Dk3*r2 + Dk2)*r2 + Dk1)*r2);
        double deltaX = 2*Dp1*xy + Dp2*(r2 + 2*x2);
        double deltaY = Dp1*(r2 + 2*y2) + 2*Dp2*xy;

        pt_norm.x = (x0-deltaX)*icdist;
        pt_norm.y = (y0-deltaY)*icdist;
    }
}

If you provide the coordinates of a point in the distorted image via the parameter pt_dist, it computes the normalized coordinates of the corresponding point and returns them in pt_norm. You can then obtain the coordinates of that point in the undistorted image as

pt_undist = K . [pt_norm.x; pt_norm.y; 1]

where K is the camera matrix.
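In code, that last step is just a matrix-vector product with K; a small usage sketch (assuming we are inside the same class as dist2norm(), so Kfx, Kfy, Kcx, Kcy are available; the input point is a placeholder):

cv::Point2d pt_dist(400.0, 300.0);            // a point in the distorted image (placeholder)
cv::Point2d pt_norm;
dist2norm(pt_dist, pt_norm);                  // normalized, undistorted coordinates
// pt_undist = K . [pt_norm.x; pt_norm.y; 1], written out with the entries of K:
cv::Point2d pt_undist(Kfx * pt_norm.x + Kcx,
                      Kfy * pt_norm.y + Kcy);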

The standard lens distortion model used by OpenCV is explained at the beginning of this page:

[Figure: distortion model equations]
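For reference, the equations shown in that figure (the radial/tangential model from the OpenCV documentation), with (x', y') the undistorted and (x'', y'') the distorted normalized coordinates, are:

x'' = x' * (1 + k1*r^2 + k2*r^4 + k3*r^6) / (1 + k4*r^2 + k5*r^4 + k6*r^6) + 2*p1*x'*y' + p2*(r^2 + 2*x'^2)
y'' = y' * (1 + k1*r^2 + k2*r^4 + k3*r^6) / (1 + k4*r^2 + k5*r^4 + k6*r^6) + p1*(r^2 + 2*y'^2) + 2*p2*x'*y'

with r^2 = x'^2 + y'^2,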

where the distortion coefficients are (k1, k2, p1, p2, k3, k4, k5, k6) (usually one takes k4 = k5 = k6 = 0).

I do not know what the model of your fisheye distortion is, but you can certainly adapt the algorithm above to your case. Otherwise, you can use a non-linear optimization algorithm (e.g. Levenberg-Marquardt, or any other) to recover the undistorted coordinates from the distorted ones.
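For example, the question's model x_d = x + (x - cx)*K*r^2 (and likewise for y, ignoring the xscale/xshift normalization applied beforehand) can be inverted with the same fixed-point iteration used in dist2norm() above; a rough sketch (undistortPoint is a hypothetical helper, not an OpenCV function):

// Inverts the question's radial model by fixed-point iteration.
// (cx, cy, k) correspond to centerX, centerY and K in the question's code.
void undistortPoint(float xd, float yd, float cx, float cy, float k,
                    float &xu, float &yu)
{
    xu = xd;                                  // initial guess: the distorted position
    yu = yd;
    for (int i = 0; i < 10; ++i) {            // a few iterations are usually enough for small k
        float dx = xu - cx, dy = yu - cy;
        float r2 = dx * dx + dy * dy;
        xu = xd - dx * k * r2;                // x = xd - (x - cx)*k*r^2, evaluated at the current guess
        yu = yd - dy * k * r2;
    }
}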