Image straightening in iOS with the OpenCV framework

Asked: 2014-06-25 05:05:47

Tags: ios objective-c opencv image-processing

Code:

cv::Point2f src_vertices[4];
src_vertices[0] = c1[0];
src_vertices[1] = c1[1];
src_vertices[2] = c1[2];
src_vertices[3] = c1[3];

cv::Point2f dst_vertices[4];
dst_vertices[0] = c2[0];
dst_vertices[1] = c2[1];
dst_vertices[2] = c2[2];
dst_vertices[3] = c2[3];

// perspective transform mapping the four source corners to the destination corners
cv::Mat warpMatrix = getPerspectiveTransform(src_vertices, dst_vertices);

// note: cv::Mat::zeros takes (rows, cols, type); warpPerspective reallocates the
// output to the requested size anyway
cv::Mat output = cv::Mat::zeros(original.rows, original.cols, CV_32FC3);

cv::warpPerspective(original, output, warpMatrix, cv::Size(606, 606));

UIImage *_adjustedImage = [MAOpenCV UIImageFromCVMat:output];

Here is the original image: [original image]

After straightening, the output is the image below:

http://imgur.com/6Ycodof

Problem

After straightening, the image we get back is slightly cropped at the corners; this output comes from the OpenCV framework itself.

How can this be fixed? If anyone has found a solution, please let me know. Thanks.

2 Answers:

Answer 0 (score: 1)

Since this question comes up often, I wrote a few lines of code that should save many others some time.

Try this:

#include <opencv2/opencv.hpp>
#include <stdexcept>
#include <vector>

cv::Rect computeWarpedContourRegion(const std::vector<cv::Point> & points, const cv::Mat & homography)
{
    std::vector<cv::Point2f> transformed_points(points.size());

    for(unsigned int i=0; i<points.size(); ++i)
    {
        // warp the points
        transformed_points[i].x = points[i].x * homography.at<double>(0,0) + points[i].y * homography.at<double>(0,1) + homography.at<double>(0,2) ;
        transformed_points[i].y = points[i].x * homography.at<double>(1,0) + points[i].y * homography.at<double>(1,1) + homography.at<double>(1,2) ;
    }

    // dehomogenization necessary?
    if(homography.rows == 3)
    {
        float homog_comp;
        for(unsigned int i=0; i<transformed_points.size(); ++i)
        {
            homog_comp = points[i].x * homography.at<double>(2,0) + points[i].y * homography.at<double>(2,1) + homography.at<double>(2,2) ;
            transformed_points[i].x /= homog_comp;
            transformed_points[i].y /= homog_comp;
        }
    }

    // now find the bounding box for these points:
    cv::Rect boundingBox = cv::boundingRect(transformed_points);
    return boundingBox;
}

cv::Rect computeWarpedImageRegion(const cv::Mat & image, const cv::Mat & homography)
{
    std::vector<cv::Point> imageBorder;
    imageBorder.push_back(cv::Point(0,0));
    imageBorder.push_back(cv::Point(image.cols,0));
    imageBorder.push_back(cv::Point(image.cols,image.rows));
    imageBorder.push_back(cv::Point(0,image.rows));

    return computeWarpedContourRegion(imageBorder, homography);
}

cv::Mat adjustHomography(const cv::Rect & transformedRegion, const cv::Mat & homography)
{
    if(homography.rows == 2) throw std::runtime_error("homography adjustment for affine matrix not implemented yet");

    // unit matrix
    cv::Mat correctionHomography = cv::Mat::eye(3,3,CV_64F);
    // correction translation
    correctionHomography.at<double>(0,2) = -transformedRegion.x;
    correctionHomography.at<double>(1,2) = -transformedRegion.y;


    return correctionHomography * homography;
}

int main()
{
    // straightening algorithm without cropping:
    cv::Mat original = cv::imread("straightening_src.png");
    cv::Mat output;


    cv::Point2f src_vertices[4];
    cv::Point2f dst_vertices[4];
    // I have to add them manually, you can just use your old code here.
    // my result will look different, since I don't use your original point correspondences, but the approach is the same...
    src_vertices[0] = cv::Point2f(108,190);
    src_vertices[1] = cv::Point2f(273,178);
    src_vertices[2] = cv::Point2f(389,322);
    src_vertices[3] = cv::Point2f(183,355);

    dst_vertices[0] = cv::Point2f(172,190);
    dst_vertices[1] = cv::Point2f(374,193);
    dst_vertices[2] = cv::Point2f(380,362);
    dst_vertices[3] = cv::Point2f(171,366);

    // compute homography
    cv::Mat warpMatrix = getPerspectiveTransform(src_vertices,dst_vertices);

    // now you have to find out whether the warped image will fit into the output image or whether it will be cropped.
    // if it will be cropped, you will most probably have to:
    // 1. find out how big your output image must be and the coordinates it will be warped to.
    // 2. modify your transformation (by a translation) so that the warped image is placed properly inside the output image.

    // part 1: find the region that will hold the new image.
    cv::Rect warpedImageRegion = computeWarpedImageRegion(original, warpMatrix);

    // part 2: modify the transformation.
    cv::Mat adjustedHomography = adjustHomography(warpedImageRegion, warpMatrix);

    cv::Size transformedImageSize = cv::Size(warpedImageRegion.width,warpedImageRegion.height);
    cv::warpPerspective(original, output, adjustedHomography, transformedImageSize);

    cv::imshow("output", output);
    cv::imwrite("straightening_result.png", output);
    cv::waitKey(-1);

}
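
As a side note (not part of the answer above): when the homography is a full 3x3 perspective matrix, the manual point warping and de-homogenization in computeWarpedContourRegion can also be done with cv::perspectiveTransform, which divides by the homogeneous component for you. A minimal sketch; the function name is illustrative:

cv::Rect computeWarpedContourRegionCompact(const std::vector<cv::Point> & points, const cv::Mat & homography)
{
    // convert the integer contour points to float, as required by cv::perspectiveTransform
    std::vector<cv::Point2f> src(points.begin(), points.end());
    std::vector<cv::Point2f> dst;

    // warp the points; this already includes the division by the homogeneous component
    cv::perspectiveTransform(src, dst, homography);

    // bounding box of the warped contour
    return cv::boundingRect(dst);
}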

For this input (1) and the given point correspondences, you will get this result (2):

(1) [input image]

(2) [result image]

Answer 1 (score: 0)

After the image has been deskewed, you should be able to crop away the black excess areas of the image.
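
A minimal sketch of how that cropping could look (not from the answer; it assumes a 3-channel BGR image whose excess area is pure black, and the helper name and threshold value are illustrative):

#include <opencv2/opencv.hpp>
#include <vector>

// crop away the black border that warpPerspective leaves around the warped content
cv::Mat cropBlackBorder(const cv::Mat & warped)
{
    // assumption: the excess area is (near) pure black
    cv::Mat gray;
    cv::cvtColor(warped, gray, cv::COLOR_BGR2GRAY);

    // mark every non-black pixel; the threshold of 1 is an illustrative choice
    cv::Mat mask;
    cv::threshold(gray, mask, 1, 255, cv::THRESH_BINARY);

    // bounding box of all non-black pixels
    std::vector<cv::Point> nonZero;
    cv::findNonZero(mask, nonZero);
    if (nonZero.empty()) return warped.clone();   // nothing left to crop

    cv::Rect roi = cv::boundingRect(nonZero);
    return warped(roi).clone();
}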