So I have two thermal images (potato quality, I know, but they're what I have to work with), the first two images in this album. I'm using code from a fairly common tutorial, but I've edited a lot of it.
What I'm doing in the code is:
1. Detecting KeyPoints
2. Describe the KeyPoints
3. Match the KeyPoints
4. Keep only good points
5. Gather both Query and Train points
6. Find Homography
7. Warp one of the images
8. Repeat the above steps for the warped image and the other original image
Now my question is: for each pair of matched points, should the change in (x, y) position between the two images be the same?
The whole scene moves in the same direction, so the shift should be identical no matter which matched point we look at, shouldn't it?
What I'm finding instead is that the shifts vary wildly: some points are off by 5 pixels, others by 700. The only explanation I can come up with is that the matches aren't actually good, and are pairing up points that aren't anywhere near the same spot in the two frames.
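To make the check concrete, something like this dumps the per-match displacement (a minimal sketch, assuming the goodMatches, keypointOne and keypointTwo variables from my code below; under a pure translation every vector should come out nearly identical):
//Sketch: print the (dx, dy) displacement of every good match.
//A pure translation should give nearly identical vectors; a wide
//spread points to bad matches rather than real motion.
for (size_t i = 0; i < goodMatches.size(); i++)
{
    Point2f p1 = keypointOne[goodMatches[i].queryIdx].pt;
    Point2f p2 = keypointTwo[goodMatches[i].trainIdx].pt;
    Point2f d = p2 - p1;
    cout << "Match #" << i << " displacement: " << d << " (length " << sqrt(d.x * d.x + d.y * d.y) << ")" << endl;
}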
I need to know what the offset is so that I can overlay one frame on top of the other, average the overlapping pixel values, and build a new image from the composite/average of the two originals.
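If the offset really is a single translation, the compositing itself could look something like this (a sketch only, not working code from my project: composite is a hypothetical helper, it assumes single-channel grayscale frames and an already-estimated integer offset (dx, dy) of frame b relative to frame a, with dx, dy >= 0):
//Sketch: overlay frame b onto frame a at offset (dx, dy), averaging
//the pixel values wherever the two frames overlap.
Mat composite(const Mat& a, const Mat& b, int dx, int dy)
{
    int w = max(a.cols, b.cols + dx);
    int h = max(a.rows, b.rows + dy);
    Mat sum = Mat::zeros(h, w, CV_32F);
    Mat count = Mat::zeros(h, w, CV_32F);
    Mat af, bf;
    a.convertTo(af, CV_32F);
    b.convertTo(bf, CV_32F);
    Mat sumA = sum(Rect(0, 0, a.cols, a.rows)), cntA = count(Rect(0, 0, a.cols, a.rows));
    sumA += af;
    cntA += 1;
    Mat sumB = sum(Rect(dx, dy, b.cols, b.rows)), cntB = count(Rect(dx, dy, b.cols, b.rows));
    sumB += bf;
    cntB += 1;
    max(count, 1.0, count);  //avoid divide-by-zero where neither frame covers
    Mat avg = sum / count;   //overlap pixels become the mean of both frames
    Mat out;
    avg.convertTo(out, CV_8U);
    return out;
}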
The code I'm using is below:
#include <stdio.h>
#include <iostream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "stitch.cpp"
#include "opencv2/stitching/stitcher.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace cv;
using namespace std;
void readme();
Mat describe(Mat img, vector<KeyPoint> key);
vector<KeyPoint> detect(Mat img);
vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap("vid.mp4");
vector<Mat> Vimg;
cout << "Grabbing Images" << endl;
for (int i = 0; i < 2; i++)
{
cout << "Grabbing Frame" << i << endl;
Mat temp;
cap.read(temp);
Vimg.push_back(temp);
imwrite("image" + to_string(i) + ".jpg", temp);
for (int j = 0; j < 80; j++)
cap.grab();
}
//Mat cimg1 = Vimg[0];
//Mat cimg2 = Vimg[1];
Mat cimg1 = imread("cap1.png");
Mat cimg2 = imread("cap2.png");
cout << "Starting Stitching" << endl;
//Converting the original images to grayscale
Mat img1, img2;
cvtColor(cimg1, img1, CV_BGR2GRAY);
cvtColor(cimg2, img2, CV_BGR2GRAY);
//Detecting Keypoints for original two images
vector<KeyPoint> keypointOne = detect(img1), keypointTwo = detect(img2);
Mat mkeypointOne, mkeypointTwo;
drawKeypoints(cimg1, keypointOne, mkeypointOne, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
drawKeypoints(cimg2, keypointTwo, mkeypointTwo, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("keypointOne.jpg", mkeypointOne);
imwrite("keypointTwo.jpg", mkeypointTwo);
//Computing descriptors
Mat descriptionOne = describe(img1, keypointOne), descriptionTwo = describe(img2, keypointTwo);
//Matching descriptors
vector<DMatch> matches = match(descriptionOne, descriptionTwo);
double max = 0;
double min = 100;
//Calculation of max and min distances
for (int i = 0; i < matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min) min = dist;
if (dist > max) max = dist;
}
vector<DMatch> goodMatches;
//Keep only good matches (note: if min happens to be 0 this threshold rejects everything; the tutorial version clamps with max(2*min, 0.02))
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 2*min)
goodMatches.push_back(matches[i]);
}
//Localize
vector<Point2f> obj;
vector<Point2f> scene;
for (int i = 0; i < goodMatches.size(); i++)
{
obj.push_back(keypointOne[goodMatches[i].queryIdx].pt);
scene.push_back(keypointTwo[goodMatches[i].trainIdx].pt);
}
/*
for (int k = 0; k < obj.size(); k++)
{
cout << "Point data for Match #" << k << endl;
cout << "\tImage 1 Point: " << obj[k] << endl;
cout << "\tImage 2 Point: " << scene[k] << endl;
}*/
Mat H = findHomography(obj, scene, CV_RANSAC);
//Warping image 2 into image 1's frame
//H maps image-1 points to image-2 points, so WARP_INVERSE_MAP is needed
//to pull image 2 back into image 1's coordinates
Mat cwarpImage, warpImage;
//TODO: figure out the right size for this image that is created
warpPerspective(cimg2, cwarpImage, H, Size(img2.cols + img1.cols, img2.rows + img1.rows), INTER_LINEAR | WARP_INVERSE_MAP);
/*
Mat result;
Mat half(warpImage, Rect(0, 0, img2.cols, img2.rows));
cimg2.copyTo(half);
*/
imwrite("warp.jpg", warpImage);
//Repeat detection, description and matching between image 1 and the warped image
cvtColor(cwarpImage, warpImage, CV_BGR2GRAY);
vector<KeyPoint> keypointWarp = detect(warpImage);
Mat descriptionWarp = describe(warpImage, keypointWarp);
vector<DMatch> warpMatches = match(descriptionOne, descriptionWarp);
Mat mkeypointWarp;
drawKeypoints(cwarpImage, keypointWarp, mkeypointWarp, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("keypointWarp.jpg", mkeypointWarp);
Mat matchImg; //renamed so it doesn't shadow the match() helper
drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, matchImg, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
//imshow("match", matchImg);
imwrite("matches.jpg", matchImg);
//Localize
vector<Point2f> obj2;
vector<Point2f> scene2;
for (int i = 0; i < warpMatches.size(); i++)
{
obj2.push_back(keypointOne[warpMatches[i].queryIdx].pt);
scene2.push_back(keypointWarp[warpMatches[i].trainIdx].pt);
}
for (int k = 0; k < obj2.size(); k++) //obj2, not obj: the two match sets can differ in size
{
cout << "Point data for Match #" << k << endl;
cout << "\tImage 1 Point: " << obj2[k] << endl;
cout << "\tImage 2 Point: " << scene2[k] << endl;
}
vector<unsigned char> inliersMask;
//use the second-round correspondences here so the mask lines up with warpMatches
Mat H2 = findHomography(obj2, scene2, CV_RANSAC, 3, inliersMask);
vector<DMatch> inliers;
for (size_t i = 0; i < inliersMask.size(); i++)
{
if (inliersMask[i])
inliers.push_back(warpMatches[i]);
}
warpMatches.swap(inliers);
Mat match2;
drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, match2, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("homorgraphyOutlierMatch.jpg", match2);
cout << "Writing Warp Image" << endl;
imwrite("warpimage.jpg", warpImage);
cout << H << endl;
waitKey(0);
}
Mat describe(Mat img, vector<KeyPoint> key)
{
Mat temp;
SurfDescriptorExtractor extractor;
extractor.compute(img, key, temp);
return temp;
}
vector<KeyPoint> detect(Mat img)
{
vector<KeyPoint> temp;
SurfFeatureDetector detector(400);
detector.detect(img, temp);
return temp;
}
vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo)
{
vector<DMatch> temp;
BFMatcher matcher(NORM_L2, true);
matcher.match(descriptionOne, descriptionTwo, temp);
return temp;
}
EDIT:
I set cross-check to true in BFMatcher and implemented the homography outlier detection from Mastering_OpenCV. Below are the two new results. I wasn't sure whether I should implement both cross-checking and KnnMatch, so I only did the cross-check.
As you can see they are much better, but there are still a few matches that shouldn't be there. I ran it with both full-color and thermal images. The new code is above as well.
Answer 0 (score: 0):
While in the general case the change in position between point correspondences is not the same for all points (only a pure translation moves every pixel by the same amount; a general homography with rotation, scale or perspective moves different pixels by different amounts), you would not expect deltas of 700 pixels when the image is only around 1300 pixels wide.
By inspecting the images you posted, it is clear that your point correspondences are incorrect (simply put, there are a lot of crossing lines in the matches between your images).
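One quick way to verify this numerically is to push the image-1 points through the recovered homography and look at the residuals (just a sketch, reusing the obj, scene and H variables from the question's code):
//Sketch: project image-1 points through H and compare with the matched
//image-2 points; genuine correspondences should have small residuals.
vector<Point2f> projected;
perspectiveTransform(obj, projected, H);
for (size_t i = 0; i < obj.size(); i++)
{
    Point2f d = projected[i] - scene[i];
    double err = sqrt(d.x * d.x + d.y * d.y);
    if (err > 3.0) //the same 3-pixel tolerance findHomography uses by default
        cout << "Match #" << i << " looks like an outlier (residual " << err << ")" << endl;
}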
This suggests that your step 4 isn't doing a good job. You may want to try setting the second parameter of the Brute Force matcher to true to enable the cross-check test:
BFMatcher matcher(NORM_L2, true);
You may also want to consider the outlier-removal ratio test described here: How to apply Ratio Test in order to remove outliers in a multiple object detection matcher?
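Note that cross-checking and the ratio test are alternatives rather than companions: BFMatcher with crossCheck=true only supports k=1 in knnMatch, so the ratio test needs its own matcher. A minimal sketch of the ratio-test variant (ratioMatch is a hypothetical replacement for the question's match() helper; the 0.8 threshold is a typical choice, not a fixed rule):
//Sketch: Lowe's ratio test - keep a match only when the best neighbour
//is clearly better than the second best.
vector<DMatch> ratioMatch(const Mat& descriptionOne, const Mat& descriptionTwo)
{
    BFMatcher matcher(NORM_L2); //no cross-check here
    vector<vector<DMatch> > knn;
    matcher.knnMatch(descriptionOne, descriptionTwo, knn, 2);
    vector<DMatch> good;
    for (size_t i = 0; i < knn.size(); i++)
    {
        if (knn[i].size() == 2 && knn[i][0].distance < 0.8f * knn[i][1].distance)
            good.push_back(knn[i][0]);
    }
    return good;
}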