EmguCV SURF with a webcam?

Time: 2012-03-31 16:32:23

Tags: c# opencv emgucv surf

I am new to Emgu CV. I want to use SURF with a camera to detect multiple patterns, like in this video. For now, though, I am just trying to get a basic starting point working.

I looked at the Emgu CV SURF example. When I try to adapt that sample code to camera capture, an error is thrown at runtime. I searched further but could not find any code samples.

Could you suggest a code snippet or a tutorial that explains this well?

Thanks in advance.

Here is my code:

...........................................
FrameRaw = capture.QueryFrame();
CamImageBox.Image = FrameRaw;
Run(FrameRaw);
...........................................

     private void Run(Image<Bgr, byte> TempImage)
            {

                // model (pattern) image to look for; note that it is reloaded from disk on every frame
                Image<Gray, Byte> modelImage = new Image<Gray, byte>("sample.jpg");
                // observed image: the current camera frame converted to grayscale
                Image<Gray, Byte> observedImage = TempImage.Convert<Gray, Byte>();
                // Image<Gray, Byte> observedImage = new Image<Gray,byte>("box_in_scene.png");

                Stopwatch watch;
                HomographyMatrix homography = null;

                // SURF detector with a Hessian threshold of 500; 'false' = basic (64-element) descriptors
                SURFDetector surfCPU = new SURFDetector(500, false);

                VectorOfKeyPoint modelKeyPoints;
                VectorOfKeyPoint observedKeyPoints;
                Matrix<int> indices;
                Matrix<float> dist;
                Matrix<byte> mask;

                if (GpuInvoke.HasCuda)
                {
                    GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                    using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                    using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                    using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
                    {
                        modelKeyPoints = new VectorOfKeyPoint();
                        surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                        watch = Stopwatch.StartNew();

                        // extract features from the observed image
                        using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                        using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                        using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                        using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, 2, 1))
                        using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuMatchIndices.Size, 1))
                        {
                            observedKeyPoints = new VectorOfKeyPoint();
                            surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                            // for every observed descriptor, find its 2 nearest neighbours among the model descriptors
                            matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                            indices = new Matrix<int>(gpuMatchIndices.Size);
                            dist = new Matrix<float>(indices.Size);
                            gpuMatchIndices.Download(indices);
                            gpuMatchDist.Download(dist);

                            // start with every match enabled, then reject ambiguous matches
                            // using the nearest-neighbour distance ratio test (threshold 0.8)
                            mask = new Matrix<byte>(dist.Rows, 1);
                            mask.SetValue(255);
                            Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                            // at least 4 point correspondences are needed to estimate a homography
                            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                    homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                            }

                            watch.Stop();
                        }
                    }
                }
                else
                {
                    //extract features from the object image
                    modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                    //MKeyPoint[] kpts = modelKeyPoints.ToArray();
                    Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                    Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

                    // brute-force matching: for every observed descriptor, find its k = 2
                    // nearest neighbours among the model descriptors
                    BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
                    matcher.Add(modelDescriptors);
                    int k = 2;
                    indices = new Matrix<int>(observedDescriptors.Rows, k);
                    dist = new Matrix<float>(observedDescriptors.Rows, k);
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

                    // start with every match enabled, then reject ambiguous matches
                    // using the nearest-neighbour distance ratio test (threshold 0.8)
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                    // at least 4 point correspondences are needed to estimate a homography
                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                    }

                    watch.Stop();
                }

                //Draw the matched keypoints
                Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                   indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

                #region draw the projected region on the image
                if (homography != null)
                {  //draw a rectangle along the projected model
                    Rectangle rect = modelImage.ROI;
                    PointF[] pts = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)};
                    homography.ProjectPoints(pts);

                    result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
                }
                #endregion

               // ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
            }
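
For context, here is a minimal sketch of how the frame-grabbing fragment above is typically wired up in an Emgu CV 2.x WinForms project. The `Capture` object, the `ProcessFrame` idle handler, and the null check on `QueryFrame()` are my assumptions and are not part of the original post:

    // Sketch only: assumed Emgu CV 2.x WinForms wiring for the capture loop above.
    // CamImageBox is assumed to be an Emgu.CV.UI.ImageBox placed on the form.
    using System;
    using System.Windows.Forms;
    using Emgu.CV;
    using Emgu.CV.Structure;

    public partial class MainForm : Form
    {
        private Capture capture;              // default camera (device 0)
        private Image<Bgr, Byte> FrameRaw;

        private void StartButton_Click(object sender, EventArgs e)
        {
            capture = new Capture();          // throws if no camera is available
            Application.Idle += ProcessFrame; // grab and process a frame whenever the UI is idle
        }

        private void ProcessFrame(object sender, EventArgs e)
        {
            FrameRaw = capture.QueryFrame();
            if (FrameRaw == null)             // QueryFrame can return null; skip this tick
                return;

            CamImageBox.Image = FrameRaw;
            Run(FrameRaw);                    // the SURF matching method shown above
        }
    }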

1 Answer:

Answer 0 (score: 0):

I found the SURF tutorial you used, but I don't see why it would cause an error. Are you able to run the tutorial code on its own, without the added complexity of GPU acceleration? Also, what error are you getting?
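
Not part of the original answer, but one common source of runtime errors in this capture pattern is that `capture.QueryFrame()` can return `null` (for example, before the camera has delivered its first frame), in which case `TempImage.Convert<Gray, Byte>()` inside `Run` throws a `NullReferenceException`. A sketch of how one might guard against that and surface the actual exception (`ProcessFrame` refers to the idle handler assumed in the sketch above):

    // Sketch only: skip null frames and report the concrete exception while debugging.
    FrameRaw = capture.QueryFrame();
    if (FrameRaw != null)
    {
        CamImageBox.Image = FrameRaw;
        try
        {
            Run(FrameRaw);
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.ToString(), "SURF matching failed");
            Application.Idle -= ProcessFrame;   // stop the loop while investigating
        }
    }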