I have two cameras looking down at a square object, and I want to take the two images and combine them to get a single image that (approximately) covers the whole area.
The views from my two cameras look like this:
The left edge of the left image should be stitched to the right edge of the right image; the black dashed line marks where they overlap.
My first attempt was to stitch the images together using the technique from this tutorial:
http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
void readme();
/** @function main */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { readme(); return -1; }

    // Load the images (note the swapped order: image1 is warped onto image2)
    Mat image1 = imread( argv[2] );
    Mat image2 = imread( argv[1] );

    // Check the loads *before* converting, or cvtColor will fail on an empty Mat
    if( !image1.data || !image2.data )
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }

    // Convert to grayscale; imread loads images in BGR order, so use CV_BGR2GRAY
    Mat gray_image1;
    Mat gray_image2;
    cvtColor( image1, gray_image1, CV_BGR2GRAY );
    cvtColor( image2, gray_image2, CV_BGR2GRAY );

    imshow( "first image", image2 );
    imshow( "second image", image1 );
    //-- Step 1: Detect the keypoints using the SURF detector
    int minHessian = 400;
    SurfFeatureDetector detector( minHessian );
    std::vector< KeyPoint > keypoints_object, keypoints_scene;
    detector.detect( gray_image1, keypoints_object );
    detector.detect( gray_image2, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute( gray_image1, keypoints_object, descriptors_object );
    extractor.compute( gray_image2, keypoints_scene, descriptors_scene );

    //-- Step 3: Match descriptor vectors using the FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    //-- Quick calculation of the max and min distances between keypoints
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
    // Find the homography that maps image1 into image2's frame
    Mat H = findHomography( obj, scene, CV_RANSAC );

    // Warp image1 into a canvas wide enough to hold both images,
    // then copy image2 into the left half of that canvas
    cv::Mat result;
    warpPerspective( image1, result, H, cv::Size( image1.cols + image2.cols, image1.rows ) );
    cv::Mat half( result, cv::Rect( 0, 0, image2.cols, image2.rows ) );
    image2.copyTo( half );

    imshow( "Result", result );
    waitKey(0);
    return 0;
}
/** @function readme */
void readme()
{ std::cout << " Usage: Panorama < img1 > < img2 >" << std::endl; }
Unfortunately, this fails (consistently) with an error:
Debug Assertion Failed! Program: ....\VC\include\xmemory0 Line 106
Expression "(_Ptr_user & (_BIG_ALLOCATION_ALIGNMENT -1)) == 0" && 0
The call stack indicates that this happens during a call to std::_Deallocate<cv::KeyPoint>, presumably when a keypoint vector is being freed.
Needless to say, the stitching fails.
I have also tried using the cv::Stitcher class, but I get the same error.
How can I stitch these images together, or at least get some information about how and why the stitching fails?
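For reference, the kind of instrumentation I have in mind looks something like this. It is only a sketch, reusing the variable names from the code above; the relevant facts are that findHomography needs at least four point correspondences, and that it returns an empty matrix when RANSAC cannot fit a model:

    // After detection: confirm SURF actually found features in both images
    std::cout << "keypoints: " << keypoints_object.size()
              << " / " << keypoints_scene.size() << std::endl;

    // Before estimation: findHomography needs at least 4 correspondences
    if( good_matches.size() < 4 )
    { std::cout << " --(!) Too few good matches" << std::endl; return -1; }

    Mat H = findHomography( obj, scene, CV_RANSAC );
    if( H.empty() )
    { std::cout << " --(!) Could not fit a homography" << std::endl; return -1; }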
Answer 0 (score: 0):
Try using cv::Stitcher like this; it is the simplest way to warp the images onto one another:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/stitching/stitcher.hpp>
int main(int argc, char** argv)
{
    cv::Mat img1 = cv::imread("image1.jpg");
    cv::Mat img2 = cv::imread("image2.jpg");

    if (img1.empty() || img2.empty())
    {
        return -1;
    }

    std::vector<cv::Mat> imgs;
    imgs.push_back(img1);
    imgs.push_back(img2);
    // push more images here ...

    cv::Mat panoramic;
    cv::Stitcher stitcher = cv::Stitcher::createDefault(true);

    // stitch() reports failures through its return value, so check it
    cv::Stitcher::Status status = stitcher.stitch(imgs, panoramic);
    if (status != cv::Stitcher::OK)
    {
        std::cout << "Stitching failed, status = " << status << std::endl;
        return -1;
    }

    cv::imshow("Result", panoramic);
    cv::waitKey(0);
    return 0;
}
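A couple of notes on this: createDefault(true) asks the stitcher to try the GPU code path where available (pass false to stay on the CPU), and the cv::Stitcher class lives in the opencv_stitching module, which has to be linked alongside core and highgui. As for the assertion you are seeing: in my experience, a Debug Assertion Failed in xmemory0 while a std::vector is being destroyed usually points to mixing release-mode OpenCV binaries with a debug build of your own program (or vice versa), so the linked library configuration is worth checking before suspecting the stitching code itself.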