I already have a working solution, but there is one problem:
// The screenshot will be stored in this bitmap.
// 'rec' is the System.Drawing.Rectangle describing the screen region to capture.
Bitmap capture = new Bitmap(rec.Width, rec.Height, PixelFormat.Format24bppRgb);
using (Graphics g = Graphics.FromImage(capture))
{
g.CopyFromScreen(rec.Location, new System.Drawing.Point(0, 0), rec.Size);
}
// SURF detector: Hessian threshold 500, non-extended (64-dimensional) descriptors
MCvSURFParams surfParam = new MCvSURFParams(500, false);
SURFDetector surfDetector = new SURFDetector(surfParam);
// Template image
Image<Gray, Byte> modelImage = new Image<Gray, byte>("template.jpg");
// Extract features from the object image
ImageFeature[] modelFeatures = surfDetector.DetectFeatures(modelImage, null);
// Prepare current frame
Image<Gray, Byte> observedImage = new Image<Gray, byte>(capture);
ImageFeature[] imageFeatures = surfDetector.DetectFeatures(observedImage, null);
// Create a SURF Tracker using k-d Tree
Features2DTracker tracker = new Features2DTracker(modelFeatures);
Features2DTracker.MatchedImageFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, 2);
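// Ratio test: keep a match only if its best model candidate is clearly better
// than the second-best one (uniqueness threshold 0.8).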
matchedFeatures = Features2DTracker.VoteForUniqueness(matchedFeatures, 0.8);
matchedFeatures = Features2DTracker.VoteForSizeAndOrientation(matchedFeatures, 1.5, 20);
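// Note: a single homography is fitted to all surviving matches at once, so this
// call can only localize one instance of the template in the observed image.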
HomographyMatrix homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);
// Merge the object image and the observed image into one image for display
Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);
#region draw lines between the matched features
foreach (Features2DTracker.MatchedImageFeature matchedFeature in matchedFeatures)
{
PointF p = matchedFeature.ObservedFeature.KeyPoint.Point;
p.Y += modelImage.Height;
res.Draw(new LineSegment2DF(matchedFeature.SimilarFeatures[0].Feature.KeyPoint.Point, p), new Gray(0), 1);
}
#endregion
#region draw the project region on the image
if (homography != null)
{
// draw a rectangle along the projected model
Rectangle rect = modelImage.ROI;
PointF[] pts = new PointF[] {
new PointF(rect.Left, rect.Bottom),
new PointF(rect.Right, rect.Bottom),
new PointF(rect.Right, rect.Top),
new PointF(rect.Left, rect.Top)
};
homography.ProjectPoints(pts);
for (int i = 0; i < pts.Length; i++)
pts[i].Y += modelImage.Height;
res.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Gray(255.0), 2);
}
#endregion
pictureBoxScreen.Image = res.ToBitmap();
The result is:
My problem is that homography.ProjectPoints(pts); only locates the first occurrence of the pattern (the white rectangle in the image above).
How can I project every occurrence of the template, i.e. get a rectangle drawn around each place where the template appears in the image?
Answer 0 (score: 1)
I ran into a problem similar to yours in my master's thesis. Basically, you have two options:
Once you have partitioned the matches into "clusters", you can estimate a homography from the matches that belong to each cluster, as sketched below.
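A minimal sketch of that per-cluster estimation, written against the same Emgu CV 2.x API as the code above. It reuses the matchedFeatures, modelImage and res variables from the question; the greedy distance-based grouping of observed keypoints and its 60-pixel radius are only illustrative assumptions standing in for whatever clustering you actually choose.

// Requires: using System.Collections.Generic;
// 1. Greedily group the surviving matches by the position of their observed keypoint.
List<List<Features2DTracker.MatchedImageFeature>> clusters =
    new List<List<Features2DTracker.MatchedImageFeature>>();
const float clusterRadius = 60f; // assumption: roughly the size of the template on screen
foreach (Features2DTracker.MatchedImageFeature m in matchedFeatures)
{
    PointF p = m.ObservedFeature.KeyPoint.Point;
    List<Features2DTracker.MatchedImageFeature> home = null;
    foreach (List<Features2DTracker.MatchedImageFeature> c in clusters)
    {
        PointF seed = c[0].ObservedFeature.KeyPoint.Point;
        float dx = p.X - seed.X, dy = p.Y - seed.Y;
        if (dx * dx + dy * dy < clusterRadius * clusterRadius) { home = c; break; }
    }
    if (home == null)
    {
        home = new List<Features2DTracker.MatchedImageFeature>();
        clusters.Add(home);
    }
    home.Add(m);
}
// 2. Estimate one homography per cluster and draw its projected rectangle.
foreach (List<Features2DTracker.MatchedImageFeature> cluster in clusters)
{
    // A homography needs at least 4 point correspondences.
    if (cluster.Count < 4) continue;
    HomographyMatrix h = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(cluster.ToArray());
    if (h == null) continue;
    Rectangle rect = modelImage.ROI;
    PointF[] pts = new PointF[] {
        new PointF(rect.Left, rect.Bottom),
        new PointF(rect.Right, rect.Bottom),
        new PointF(rect.Right, rect.Top),
        new PointF(rect.Left, rect.Top)
    };
    h.ProjectPoints(pts);
    for (int i = 0; i < pts.Length; i++)
        pts[i].Y += modelImage.Height;
    res.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Gray(255.0), 2);
}

Replacing the greedy grouping with mean-shift over the observed keypoint positions (or a coarse grid) works the same way; the only requirement is that each cluster ends up with at least four reasonably clean correspondences.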