使用emgu获取两个图像中匹配特征的位置(x,y)

时间:2016-02-02 20:37:44

标签: algorithm emgucv surf

我有两台摄像头拍摄的两张照片,我想在 Emgu 中使用 SURF(或其他)算法获取两个图像中匹配特征的位置,以便估计该特征(物体)到摄像机的真实距离。我发现 Emgu 的示例中使用了 SURF 算法,但它只是在匹配的特征之间画线;我希望获得每条连线起点和终点的 x 和 y 坐标。features matched by surf algorithm sample image

我尝试在 SURF 算法示例中添加一些代码,但在 Draw 方法中没有按预期工作

long num_matches = matches.Size;
float lower = matches[0][0].Distance;
List<PointF> matched_points1 = new List<PointF>();
List<PointF> matched_points2 = new List<PointF>();

for (int i = 0; i < num_matches; i++)
{
    // Keep only very close matches (small descriptor distance).
    if (matches[i][0].Distance < 0.095)
    {
        // TrainIdx indexes modelKeyPoints, QueryIdx indexes observedKeyPoints
        // (consistent with the Circle calls below).
        int idx1 = matches[i][0].TrainIdx;
        int idx2 = matches[i][0].QueryIdx;
        // BUG FIX: the original added observedKeyPoints[idx1] here, but idx1 is an
        // index into modelKeyPoints — using it against observedKeyPoints collects
        // wrong points (or throws if the vectors differ in length).
        matched_points1.Add(modelKeyPoints[idx1].Point);
        matched_points2.Add(observedKeyPoints[idx2].Point);
        // Mark the matched point in the observed image (left half of `result`).
        CvInvoke.Circle(result, new Point((int)observedKeyPoints[idx2].Point.X , (int)observedKeyPoints[idx2].Point.Y), 1, new MCvScalar(255, 0, 0));
        // Mark the corresponding model point; offset by modelImage.Width because
        // `result` shows the two images side by side.
        CvInvoke.Circle(result, new Point((int)modelKeyPoints[idx1].Point.X + modelImage.Width, (int)modelKeyPoints[idx1].Point.Y), 1, new MCvScalar(255, 0, 0));
    }

    // Track the smallest distance seen (useful for tuning the threshold above).
    if (lower > matches[i][0].Distance)
        lower = matches[i][0].Distance;
}

1 个答案:

答案 0 :(得分:0)

/// <summary>
/// Matches the model image against the observed image and, when a homography is
/// found, returns the four corners of the model rectangle projected into the
/// observed image; returns null when no homography was computed.
/// </summary>
/// <param name="modelImage">Image of the object to look for.</param>
/// <param name="observedImage">Scene image that may contain the object.</param>
/// <param name="matchTime">Receives the time spent matching features.</param>
/// <returns>Projected corner points, or null if the object was not located.</returns>
public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime)
{
    Mat homography;
    VectorOfKeyPoint modelKeyPoints;
    VectorOfKeyPoint observedKeyPoints;
    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
    {
        Mat mask;
        FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints,
            out observedKeyPoints, matches, out mask, out homography);

        // Draw the matched keypoints (side-by-side visualization; only useful
        // for debugging/display — the Mat itself is not returned).
        Mat result = new Mat();
        Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
           matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

        Point[] points = null;
        if (homography != null)
        {
            // Project the model image's bounding rectangle into the observed image.
            Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
            PointF[] pts = new PointF[]
            {
                new PointF(rect.Left, rect.Bottom),
                new PointF(rect.Right, rect.Bottom),
                new PointF(rect.Right, rect.Top),
                new PointF(rect.Left, rect.Top)
            };
            pts = CvInvoke.PerspectiveTransform(pts, homography);

            points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
        }

        // FIX: dispose the unmanaged Mats created here — the original leaked
        // `result`, `mask`, and `homography` on every call.
        result.Dispose();
        if (mask != null)
            mask.Dispose();
        if (homography != null)
            homography.Dispose();

        return points;
    }
}

// Code added inside the Draw function: print every match that survives the mask.
// NOTE: TrainIdx/QueryIdx are *indices* into the keypoint vectors, not pixel
// coordinates. To get the actual (x, y) positions, use
// modelKeyPoints[e.TrainIdx].Point and observedKeyPoints[e.QueryIdx].Point.
// (The original snippet was mangled by markdown — stray backticks removed.)
for (int i = 0; i < matches.Size; i++)
{
    var a = matches[i].ToArray();
    // Skip matches rejected by the mask (outliers filtered by the homography).
    if (mask.GetData(i)[0] == 0)
        continue;
    foreach (var e in a)
    {
        Point p = new Point(e.TrainIdx, e.QueryIdx);
        Console.WriteLine(string.Format("Point: {0}", p));
    }
    Console.WriteLine("-----------------------");
}