Perspective image stitching

Date: 2016-10-09 00:25:22

Tags: opencv perspectivecamera panoramas image-stitching

I have found very useful examples of image stitching, but my problem is with images of this kind. Here is one example: First Image

And here is the other image: Second Image

When I use the OpenCV Stitcher, the resulting images keep getting smaller, like this: Small Result

Is there any way to apply a transform to the input images so that they come out like this: Desired Result

Here is the code:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/stitching/stitcher.hpp>
#include <vector>
using namespace cv;
using namespace std;
cv::vector<cv::Mat> ImagesList;
string result_name ="/TopViewsHorizantale/1.bmp";
int main()
{
      // Load the images

 Mat image1= imread("current_00000.bmp" );
 Mat image2= imread("current_00001.bmp" );
 cv::resize(image1, image1, image2.size());
 Mat gray_image1;
 Mat gray_image2;
 Mat Matrix = Mat(3,3,CV_32FC1);

 // Convert to Grayscale
 cvtColor( image1, gray_image1, CV_BGR2GRAY );   // imread loads BGR, so convert BGR -> gray
 cvtColor( image2, gray_image2, CV_BGR2GRAY );
 namedWindow("first image",WINDOW_AUTOSIZE);
 namedWindow("second image",WINDOW_AUTOSIZE);
 imshow("first image",image2);
 imshow("second image",image1);

if( !gray_image1.data || !gray_image2.data )
 { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

//-- Step 1: Detect the keypoints using SURF Detector
 int minHessian = 400;

SurfFeatureDetector detector( minHessian );

std::vector< KeyPoint > keypoints_object, keypoints_scene;

detector.detect( gray_image1, keypoints_object );
detector.detect( gray_image2, keypoints_scene );

//-- Step 2: Calculate descriptors (feature vectors)
 SurfDescriptorExtractor extractor;

Mat descriptors_object, descriptors_scene;

extractor.compute( gray_image1, keypoints_object, descriptors_object );
extractor.compute( gray_image2, keypoints_scene, descriptors_scene );

//-- Step 3: Matching descriptor vectors using FLANN matcher
 FlannBasedMatcher matcher;
 std::vector< DMatch > matches;
 matcher.match( descriptors_object, descriptors_scene, matches );

double max_dist = 0; double min_dist = 100;

//-- Quick calculation of max and min distances between keypoints
 for( int i = 0; i < descriptors_object.rows; i++ )
 { double dist = matches[i].distance;
 if( dist < min_dist ) min_dist = dist;
 if( dist > max_dist ) max_dist = dist;
 }

printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );

//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
 std::vector< DMatch > good_matches;

for( int i = 0; i < descriptors_object.rows; i++ )
 { if( matches[i].distance < 3*min_dist )
 { good_matches.push_back( matches[i]); }
 }
 std::vector< Point2f > obj;
 std::vector< Point2f > scene;

for( int i = 0; i < good_matches.size(); i++ )
 {
 //-- Get the keypoints from the good matches
 obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
 scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
 }

// Find the Homography Matrix
 Mat H = findHomography( obj, scene, CV_RANSAC );
 // Use the Homography Matrix to warp the images
 cv::Mat result;
 int N = image1.rows + image2.rows;   // total rows (height) of the two inputs
 int M = image1.cols + image2.cols;   // total cols (width) of the two inputs
 warpPerspective(image1, result, H, cv::Size(M, N));   // cv::Size takes (width, height)
 cv::Mat half(result, cv::Rect(0, 0, image2.cols, image2.rows));
 image2.copyTo(half);   // paste image2 into the top-left corner of the canvas
 namedWindow("Result",WINDOW_AUTOSIZE);
 imshow( "Result", result);

 imwrite(result_name, result);

 waitKey(0);
 return 0;
}

Some more image links are here: https://www.dropbox.com/sh/ovzkqomxvzw8rww/AAB2DDCrCF6NlCFre7V1Gb6La?dl=0 Many thanks, Lafi

1 Answer:

Answer 0 (score: 1)

Problem: the output image is too large.

Original code:

 warpPerspective(image1, result, H, cv::Size(M, N));

The result image this produces has as many rows (and columns) as image1 and image2 combined. However, the output image should only be as large as image1 plus image2 minus their overlapping region.

Another issue: why warp image1 at all? Compute H' (the inverse of H) and warp image2 with it instead; you should be registering image2 onto image1.
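
For example, a minimal sketch against the question's C++ code (obj, scene, image1 and image2 are the question's variables; out_width and out_height are placeholders, see the corner-warping sketch further below):

 // H was estimated from obj (image1 keypoints) to scene (image2 keypoints),
 // so it maps image1 coordinates into image2 coordinates.
 Mat H = findHomography( obj, scene, CV_RANSAC );
 // H' maps image2 coordinates into image1 coordinates;
 // equivalently: Mat H_inv = findHomography( scene, obj, CV_RANSAC );
 Mat H_inv = H.inv();
 // Placeholder canvas size; how to choose it properly is explained below.
 int out_width = image1.cols, out_height = image1.rows;
 // Warp image2 into image1's frame; image1 itself stays where it is.
 cv::Mat warped2;
 warpPerspective( image2, warped2, H_inv, cv::Size(out_width, out_height) );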

Also, study how warpPerspective works. It determines the ROI in the result that image2 will be warped into. Then, for every pixel (x, y) in that ROI of the result, it finds the corresponding location in image2, say (x', y'). Note that (x', y') can have real-valued coordinates, such as (4.5, 5.4).

Some form of interpolation (probably bilinear) is then used to compute the pixel value at (x, y) in the result from the neighbourhood of (x', y') in image2.
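
To make that concrete, here is a rough illustrative C++ sketch of inverse mapping with bilinear interpolation (assuming a single-channel 8-bit source and an H of type CV_64F as returned by findHomography); it is not OpenCV's actual implementation, which also handles borders and is heavily optimised:

 #include <cmath>
 #include <opencv2/opencv.hpp>

 cv::Mat warpInverseBilinear(const cv::Mat& src, const cv::Mat& H, cv::Size dstSize)
 {
     cv::Mat Hinv = H.inv();   // back-project destination pixels into the source
     cv::Mat dst = cv::Mat::zeros(dstSize, src.type());
     for (int y = 0; y < dst.rows; ++y)
         for (int x = 0; x < dst.cols; ++x)
         {
             // (x, y) in the result -> (xs, ys) in the source, possibly non-integer
             double w  = Hinv.at<double>(2,0)*x + Hinv.at<double>(2,1)*y + Hinv.at<double>(2,2);
             double xs = (Hinv.at<double>(0,0)*x + Hinv.at<double>(0,1)*y + Hinv.at<double>(0,2)) / w;
             double ys = (Hinv.at<double>(1,0)*x + Hinv.at<double>(1,1)*y + Hinv.at<double>(1,2)) / w;
             int x0 = (int)std::floor(xs), y0 = (int)std::floor(ys);
             if (x0 < 0 || y0 < 0 || x0 + 1 >= src.cols || y0 + 1 >= src.rows) continue;
             double ax = xs - x0, ay = ys - y0;
             // Bilinear interpolation between the four neighbouring source pixels
             double v = (1-ax)*(1-ay)*src.at<uchar>(y0,   x0)
                      +    ax *(1-ay)*src.at<uchar>(y0,   x0+1)
                      + (1-ax)*   ay *src.at<uchar>(y0+1, x0)
                      +    ax *   ay *src.at<uchar>(y0+1, x0+1);
             dst.at<uchar>(y, x) = cv::saturate_cast<uchar>(v);
         }
     return dst;
 }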

Next, how do you find the size of the result matrix? Do not use N and M. Use the matrix H' to warp the image corners and find where they end up.
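
A minimal sketch of that, continuing from H_inv above (image1 stays at the origin; image2's four corners are pushed through H_inv and the canvas is sized from the joint bounding box; needs <algorithm> for std::min/std::max):

 std::vector<cv::Point2f> corners2;
 corners2.push_back(cv::Point2f(0.f, 0.f));
 corners2.push_back(cv::Point2f((float)image2.cols, 0.f));
 corners2.push_back(cv::Point2f((float)image2.cols, (float)image2.rows));
 corners2.push_back(cv::Point2f(0.f, (float)image2.rows));
 std::vector<cv::Point2f> corners2_warped;
 cv::perspectiveTransform(corners2, corners2_warped, H_inv);

 // Bounding box of image1 (at the origin) together with the warped corners of image2.
 float minX = 0.f, minY = 0.f;
 float maxX = (float)image1.cols, maxY = (float)image1.rows;
 for (size_t i = 0; i < corners2_warped.size(); ++i) {
     minX = std::min(minX, corners2_warped[i].x);  maxX = std::max(maxX, corners2_warped[i].x);
     minY = std::min(minY, corners2_warped[i].y);  maxY = std::max(maxY, corners2_warped[i].y);
 }
 cv::Size outSize(cvCeil(maxX - minX), cvCeil(maxY - minY));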

For transformation matrices, see this wiki: http://planning.cs.uiuc.edu/node99.html. Understand the differences between rotation, translation, affine, and perspective transformation matrices. Then read the OpenCV docs here.
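
Purely as an illustration of those differences (placeholder values, not taken from the question), the 3x3 homogeneous forms look like this:

 double tx = 10.0, ty = 5.0, a = CV_PI / 6;
 cv::Matx33d T(1, 0, tx,
               0, 1, ty,
               0, 0, 1);                         // translation only
 cv::Matx33d R(std::cos(a), -std::sin(a), 0,
               std::sin(a),  std::cos(a), 0,
               0,            0,           1);    // rotation about the origin
 cv::Matx33d A(1.1, 0.2, tx,
               0.1, 0.9, ty,
               0,   0,   1);                     // affine: bottom row fixed, parallel lines stay parallel
 cv::Matx33d P(1.1,  0.2,  tx,
               0.1,  0.9,  ty,
               1e-4, 2e-4, 1);                   // perspective/homography: bottom row free (8 DOF up to scale)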

You can also read my earlier answer. It shows the simple algebra for finding a cropping region. You will need to adapt that code to the four corners of both images. Note that pixels of the new image can also map to negative pixel locations.
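
One way to handle those negative locations, continuing the corner-warping sketch above: compose a translation by (-minX, -minY) with H' so everything lands at non-negative coordinates, then warp both images onto the same canvas (the final overlay is just a placeholder; a real stitcher would blend the seam):

 cv::Mat offsetM = (cv::Mat_<double>(3, 3) << 1, 0, -minX,
                                              0, 1, -minY,
                                              0, 0, 1);
 cv::Mat canvas1, canvas2;
 cv::warpPerspective(image1, canvas1, offsetM, outSize);           // image1 is only shifted
 cv::warpPerspective(image2, canvas2, offsetM * H_inv, outSize);   // image2 gets the shift composed with H'
 cv::Mat pano;
 cv::max(canvas1, canvas2, pano);   // crude overlay just for visualisation
 cv::imwrite("pano.jpg", pano);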

Sample code (in Java):

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.Features2d;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class Driver {

    public static void stitchImages() {
        // Read as grayscale
        Mat grayImage1 = Imgcodecs.imread("current_00000.bmp", 0);
        Mat grayImage2 = Imgcodecs.imread("current_00001.bmp", 0);
        if (grayImage1.dataAddr() == 0 || grayImage2.dataAddr() == 0) {
            System.out.println("Images read unsuccessful.");
            return;
        }

        // Create transformation matrix
        Mat transformMatrix = new Mat(3, 3, CvType.CV_32FC1);

        // -- Step 1: Detect the keypoints using AKAZE Detector
        int minHessian = 400;
        MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
        MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
        FeatureDetector surf = FeatureDetector.create(FeatureDetector.AKAZE);
        surf.detect(grayImage1, keypoints1);
        surf.detect(grayImage2, keypoints2);

        // -- Step 2: Calculate descriptors (feature vectors)
        DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.AKAZE);
        Mat descriptors1 = new Mat();
        Mat descriptors2 = new Mat();
        extractor.compute(grayImage1, keypoints1, descriptors1);
        extractor.compute(grayImage2, keypoints2, descriptors2);

        // -- Step 3: Match the keypoints
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
        MatOfDMatch matches = new MatOfDMatch();
        matcher.match(descriptors1, descriptors2, matches);
        List<DMatch> myList = new LinkedList<>(matches.toList());

        // Filter good matches
        double min_dist = Double.MAX_VALUE;
        Iterator<DMatch> itr = myList.iterator();
        while (itr.hasNext()) {
            DMatch element = itr.next();
            min_dist = Math.min(element.distance, min_dist);
        }

        LinkedList<Point> img1GoodPointsList = new LinkedList<Point>();
        LinkedList<Point> img2GoodPointsList = new LinkedList<Point>();
        List<KeyPoint> keypoints1List = keypoints1.toList();
        List<KeyPoint> keypoints2List = keypoints2.toList();
        itr = myList.iterator();
        while (itr.hasNext()) {
            DMatch dMatch = itr.next();
            if (dMatch.distance >= 5 * min_dist) {
                img1GoodPointsList.addLast(keypoints1List.get(dMatch.queryIdx).pt);
                img2GoodPointsList.addLast(keypoints2List.get(dMatch.trainIdx).pt);
            } else {
                itr.remove();
            }
        }
        matches.fromList(myList);

        Mat outputMid = new Mat();
        System.out.println("best matches size: " + matches.size());
        Features2d.drawMatches(grayImage1, keypoints1, grayImage2, keypoints2, matches, outputMid);
        Imgcodecs.imwrite("outputMid - A - A.jpg", outputMid);

        MatOfPoint2f img1Locations = new MatOfPoint2f();
        img1Locations.fromList(img1GoodPointsList);
        MatOfPoint2f img2Locations = new MatOfPoint2f();
        img2Locations.fromList(img2GoodPointsList);

        // Find the Homography Matrix - note img2Locations is given first to get
        // the inverse directly.
        Mat hg = Calib3d.findHomography(img2Locations, img1Locations, Calib3d.RANSAC, 3);
        System.out.println("hg is: " + hg.dump());

        // Find the locations of the corners to which Image2 will warp.
        Size img1Size = grayImage1.size();
        Size img2Size = grayImage2.size();
        System.out.println("Sizes are: " + img1Size + ", " + img2Size);

        // Store location x,y,z for 4 corners
        Mat img2Corners = new Mat(3, 4, CvType.CV_64FC1, new Scalar(0));
        Mat img2CornersWarped = new Mat(3, 4, CvType.CV_64FC1);
        img2Corners.put(0, 0, 0, img2Size.width, 0, img2Size.width);   // x
        img2Corners.put(1, 0, 0, 0, img2Size.height, img2Size.height); // y
        img2Corners.put(2, 0, 1, 1, 1, 1);                             // z - all 1
        System.out.println("Homography is \n" + hg.dump());
        System.out.println("Corners matrix is \n" + img2Corners.dump());
        Core.gemm(hg, img2Corners, 1, new Mat(), 0, img2CornersWarped);
        System.out.println("img2CornersWarped: " + img2CornersWarped.dump());

        // Find the new size to use
        int minX = 0, minY = 0;       // grayImage1 already has its minimum location at 0
        int maxX = 1500, maxY = 1500; // grayImage1 already has its maximum location at 1500 (possibly 1499, but 1 pixel won't matter)
        double[] xCoordinates = new double[4];
        img2CornersWarped.get(0, 0, xCoordinates);
        double[] yCoordinates = new double[4];
        img2CornersWarped.get(1, 0, yCoordinates);
        for (int c = 0; c < 4; c++) {
            minX = Math.min((int) xCoordinates[c], minX);
            maxX = Math.max((int) xCoordinates[c], maxX);
            minY = Math.min((int) yCoordinates[c], minY);
            maxY = Math.max((int) yCoordinates[c], maxY);
        }
        int rows = (maxY - minY + 1);
        int cols = (maxX - minX + 1);

        // Warp to produce the final output
        Mat output1 = new Mat(new Size(cols, rows), CvType.CV_8U, new Scalar(0));
        Mat output2 = new Mat(new Size(cols, rows), CvType.CV_8U, new Scalar(0));
        Imgproc.warpPerspective(grayImage1, output1, Mat.eye(new Size(3, 3), CvType.CV_32F), new Size(cols, rows));
        Imgproc.warpPerspective(grayImage2, output2, hg, new Size(cols, rows));
        Mat output = new Mat(new Size(cols, rows), CvType.CV_8U);
        Core.addWeighted(output1, 0.5, output2, 0.5, 0, output);
        Imgcodecs.imwrite("output.jpg", output);
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        stitchImages();
    }
}

Change of descriptor

Moving from SURF to AKAZE, I immediately saw perfect image registration.
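
For reference, the same switch in C++ might look like this (a sketch assuming OpenCV 3.x, where AKAZE ships with features2d; variable names mirror the question's code):

 cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
 std::vector<cv::KeyPoint> keypoints_object, keypoints_scene;
 cv::Mat descriptors_object, descriptors_scene;
 akaze->detectAndCompute(gray_image1, cv::noArray(), keypoints_object, descriptors_object);
 akaze->detectAndCompute(gray_image2, cv::noArray(), keypoints_scene, descriptors_scene);
 // AKAZE's default descriptors are binary, so match with Hamming distance
 // instead of the FLANN/L2 matcher that was used with SURF.
 cv::BFMatcher matcher(cv::NORM_HAMMING);
 std::vector<cv::DMatch> matches;
 matcher.match(descriptors_object, descriptors_scene, matches);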

Output image

This output uses less space, and the change of descriptor gives perfect registration.

OutputImage

P.S.: IMHO, the coding is great, but the real treasure is the fundamentals/concepts.