I am using Emgu CV's SURF features to recognize similar objects in images.
The image is drawn correctly: it shows all the keypoints found in both images, the similar points (which is what I want), and a rectangle (usually a rectangle, sometimes just a line) covering the similar points.
The problem is that the similar points are visible in the image, but they are not saved in the format I want. In fact, they are stored in a VectorOfKeyPoint object, which only stores a pointer plus other memory data, while the points themselves are kept in memory (that is my understanding). Which means I cannot get the similar points in pairs:
((img1X, img1Y), (img2X, img2Y))
This is what I am looking for, so that I can use the points later. Right now I can only see the points in the result image, but I cannot get them in pairs.
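To make the format concrete, here is a minimal sketch of the container I would like to end up filling (the names are mine, just for illustration):
// Hypothetical target structure: one entry per match, pairing a pixel
// coordinate in image 1 with the matching pixel coordinate in image 2.
List<Tuple<PointF, PointF>> similarPoints = new List<Tuple<PointF, PointF>>();
// e.g. similarPoints.Add(Tuple.Create(new PointF(img1X, img1Y), new PointF(img2X, img2Y)));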
The code I am using is from the Emgu CV example:
//----------------------------------------------------------------------------
//  Copyright (C) 2004-2016 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;
#if !__IOS__
using Emgu.CV.Cuda;
#endif
using Emgu.CV.XFeatures2D;

namespace FirstEmgu
{
    public static class DrawMatches
    {
        // --------------------------------
        // ORIGINAL FUNCTION FROM EXAMPLE
        // --------------------------------
        private static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime,
            out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints,
            VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh = 300;

            Stopwatch watch;
            homography = null;

            modelKeyPoints = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

#if !__IOS__
            if (CudaInvoke.HasCuda)
            {
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                using (GpuMat gpuModelImage = new GpuMat(modelImage))
                //extract features from the object image
                using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                {
                    surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    //using (GpuMat tmp = new GpuMat())
                    //using (Stream stream = new Stream())
                    {
                        matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                        surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                    observedKeyPoints, matches, mask, 2);
                        }
                    }
                    watch.Stop();
                }
            }
            else
#endif
            {
                using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh);
                    //extract features from the object image
                    UMat modelDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);
                    matcher.KnnMatch(observedDescriptors, matches, k, null);

                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                            matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                observedKeyPoints, matches, mask, 2);
                    }

                    watch.Stop();
                }
            }
            matchTime = watch.ElapsedMilliseconds;
        }

        // --------------------------------
        // ORIGINAL FUNCTION FROM EXAMPLE
        // --------------------------------
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                    out mask, out homography);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                    matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                #region draw the projected region on the image
                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[] pts = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                    }
                }
                #endregion

                return result;
            }
        }

        // ----------------------------------
        // WRITTEN BY MYSELF
        // ----------------------------------
        // Returns the 4 points (usually a rectangle) around the similar points,
        // but can't be used as-is, since sometimes the projection degenerates
        // into a line (negative points)
        public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                    out mask, out homography);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                    matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                Point[] points = null;
                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[] pts = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);
                    points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                }
                return points;
            }
        }
    }
}
EDIT
I managed to get some points out of the matches object like this:
Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
    matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

for (int i = 0; i < matches.Size; i++)
{
    var a = matches[i].ToArray();
    foreach (var e in a)
    {
        Point p = new Point(e.TrainIdx, e.QueryIdx);
        Console.WriteLine(string.Format("Point: {0}", p));
    }
    Console.WriteLine("-----------------------");
}
I thought this would get me the points I want. I managed to get it working in Python, and the code is not very different. The problem is that too many points are returned; in fact, it gives me back all the points on Y.
Example:
(45,1),(67,1)
(656,2),(77,2)
...
Even though I might be close, this is not getting me the points I am looking for. Any suggestion is appreciated.
EDIT 2: The question Find interest point in surf Detector Algorithm is very similar to what I need. There is only one answer, but it does not say how to get the coordinates of the matched points. That is what I need: if there is an object in both images, get the coordinates of the object's points from both images.
Answer 0 (score: 4)
The coordinates are not made up of TrainIdx and QueryIdx; those are indices into the KeyPoints. This gives the pixel coordinates of the matches between the model and the observed image:
for (int i = 0; i < matches.Size; i++)
{
    var arrayOfMatches = matches[i].ToArray();
    if (mask.GetData(i)[0] == 0) continue;
    foreach (var match in arrayOfMatches)
    {
        var matchingModelKeyPoint = modelKeyPoints[match.TrainIdx];
        var matchingObservedKeyPoint = observedKeyPoints[match.QueryIdx];
        Console.WriteLine("Model coordinate '" + matchingModelKeyPoint.Point + "' matches observed coordinate '" + matchingObservedKeyPoint.Point + "'.");
    }
}
The number of items in arrayOfMatches equals the value of k. My understanding is that the match with the lowest distance is the best one.
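If you only need one coordinate pair per feature, here is a minimal sketch building on the loop above. It assumes each row of matches comes back from KnnMatch ordered by ascending distance, so the first entry is the best candidate:
for (int i = 0; i < matches.Size; i++)
{
    var arrayOfMatches = matches[i].ToArray();
    if (mask.GetData(i)[0] == 0 || arrayOfMatches.Length == 0)
        continue;
    // Keep only the nearest neighbour of each row; its distance is the smallest.
    var best = arrayOfMatches[0];
    PointF modelPoint = modelKeyPoints[best.TrainIdx].Point;
    PointF observedPoint = observedKeyPoints[best.QueryIdx].Point;
    Console.WriteLine("((" + modelPoint.X + ", " + modelPoint.Y + "), ("
        + observedPoint.X + ", " + observedPoint.Y + "))");
}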
Answer 1 (score: 2)
In the FindMatch function, every pair of points is validated by the VoteForUniqueness function. The result of this validation is stored in mask. So all you have to do is check whether the match is valid:
for (int i = 0; i < matches.Size; i++)
{
    var a = matches[i].ToArray();
    if (mask.GetData(i)[0] == 0)
        continue;
    foreach (var e in a)
    {
        Point p = new Point(e.TrainIdx, e.QueryIdx);
        Console.WriteLine(string.Format("Point: {0}", p));
    }
    Console.WriteLine("-----------------------");
}
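Putting the two answers together, here is a rough sketch of a helper that returns exactly the ((img1X, img1Y), (img2X, img2Y)) pairs the question asks for. The name GetMatchedPointPairs is made up for this example, and it assumes the matches, mask, modelKeyPoints and observedKeyPoints produced by FindMatch above:
// Hypothetical helper: collects ((img1X, img1Y), (img2X, img2Y)) pairs
// for every match that survived the uniqueness vote stored in the mask.
private static List<Tuple<PointF, PointF>> GetMatchedPointPairs(
    VectorOfVectorOfDMatch matches, Mat mask,
    VectorOfKeyPoint modelKeyPoints, VectorOfKeyPoint observedKeyPoints)
{
    var pairs = new List<Tuple<PointF, PointF>>();
    for (int i = 0; i < matches.Size; i++)
    {
        // Skip pairs that VoteForUniqueness marked as invalid.
        if (mask.GetData(i)[0] == 0)
            continue;
        var arrayOfMatches = matches[i].ToArray();
        if (arrayOfMatches.Length == 0)
            continue;
        var best = arrayOfMatches[0]; // lowest-distance match of this row
        pairs.Add(Tuple.Create(
            modelKeyPoints[best.TrainIdx].Point,
            observedKeyPoints[best.QueryIdx].Point));
    }
    return pairs;
}
Each tuple then holds a model-image coordinate together with the observed-image coordinate it matched, so the pairs can be stored and reused later.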