卡尔曼滤波器的多目标跟踪

时间:2014-08-25 21:24:05

标签: opencv filter kalman-filter

我是OpenCV的新手，一直希望学习新的图像处理技术和编程。我看过一些关于物体检测、跟踪、计数等的教程，希望学会同样的东西，并尝试做一个自己的类似项目。

我在互联网和论文中搜索了很多资料，最后了解到可以用卡尔曼滤波器做目标跟踪。我的代码按以下步骤处理：

  1. 背景减除
  2. 平滑,模糊等滤镜。
  3. 查找轮廓
  4. 绘制矩形并找到Centroid。
  5. 应用卡尔曼滤波器
  6. 现在,我可以用我的代码跟踪一个对象。我想跟踪多个物体(即在道路上行驶的人,车辆行驶等)

    如果有人可以指导我或给我一些示例代码,我会很高兴和欣赏。

    期待您的肯定回复。

    using namespace std;
    using namespace cv;
    
    // Draw an X-shaped cross of half-size d centred at `center` on `img`
    // (two anti-aliased diagonal lines of thickness 2).
    #define drawCross( img, center, color, d )\
    line(img, Point(center.x - d, center.y - d), Point(center.x + d, center.y + d), color, 2, CV_AA, 0);\
    line(img, Point(center.x + d, center.y - d), Point(center.x - d, center.y + d), color, 2, CV_AA, 0 )\
    
    // mousev accumulates the predicted positions (appended in main's loop);
    // kalmanv is cleared in main but never appended to in the visible code.
    vector<Point> mousev,kalmanv;
    
    
    // One shared filter: 4D state (x, y, Vx, Vy), 2D position measurement.
    cv::KalmanFilter KF;
    cv::Mat_<float> measurement(2,1); 
    Mat_<float> state(4, 1); // (x, y, Vx, Vy)
    int incr=0; // line-crossing counter, incremented in main()
    
    
    /// Initialise the global Kalman filter at position (x, y).
    ///
    /// State is 4D (x, y, Vx, Vy), measurement is the 2D location, so
    /// KF.init(4, 2, 0): 4 dynamic parameters, 2 measurement parameters,
    /// no control input.
    void initKalman(float x, float y)
    {
        KF.init(4, 2, 0);
    
        measurement = Mat_<float>::zeros(2,1);
        measurement.at<float>(0, 0) = x;
        // BUG FIX: the original wrote y into element (0,0) as well,
        // clobbering x and leaving measurement(1) at zero.
        measurement.at<float>(1, 0) = y;
    
        // Seed both the a-priori and a-posteriori state with the initial
        // position; velocity components start at zero.
        KF.statePre.setTo(0);
        KF.statePre.at<float>(0, 0) = x;
        KF.statePre.at<float>(1, 0) = y;
    
        KF.statePost.setTo(0);
        KF.statePost.at<float>(0, 0) = x;
        KF.statePost.at<float>(1, 0) = y; 
    
        // NOTE(review): an identity transition matrix means x' = x, i.e.
        // velocity never feeds into position; for a true constant-velocity
        // model the (0,2) and (1,3) entries should hold the time step.
        // Confirm the intended motion model.
        setIdentity(KF.transitionMatrix);
        setIdentity(KF.measurementMatrix);
        setIdentity(KF.processNoiseCov, Scalar::all(.005)); //adjust this for faster convergence - but higher noise
        setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
        setIdentity(KF.errorCovPost, Scalar::all(.1));
    }
    
    // Advance the filter one step and return the predicted 2D position.
    Point kalmanPredict() 
    {
        Mat forecast = KF.predict();
    
        // Copy the a-priori state/covariance over the a-posteriori ones so
        // that repeated predict() calls without a correct() in between do
        // not compound the internal state.
        KF.statePre.copyTo(KF.statePost);
        KF.errorCovPre.copyTo(KF.errorCovPost);
    
        Point predicted(forecast.at<float>(0), forecast.at<float>(1));
        return predicted;
    }
    
    // Fold the measured position (x, y) into the filter and return the
    // corrected (a-posteriori) position estimate.
    Point kalmanCorrect(float x, float y)
    {
        measurement(0) = x;
        measurement(1) = y;
    
        Mat corrected = KF.correct(measurement);
        Point estimate(corrected.at<float>(0), corrected.at<float>(1));
        return estimate;
    }
    
    /// Tracks a single moving object in "E:/demo1.avi": MOG2 background
    /// subtraction -> morphological cleanup -> contour filtering ->
    /// centroid/bounding box -> Kalman predict/correct, counting objects
    /// that cross the y = 130 line. Press 'q' to quit.
    int main()
    {
        Mat frame, thresh_frame;
        vector<Mat> channels;
        VideoCapture capture;
        vector<Vec4i> hierarchy;
        vector<vector<Point> > contours;
    
        cv::Mat back;   // background model image (unused after retrieval)
        cv::Mat fore;   // foreground mask
        cv::BackgroundSubtractorMOG2 bg;
    
        // NOTE(review): direct member access compiles only on OpenCV 2.x
        // builds where these fields are public; later 2.4.x releases use
        // bg.set("nmixtures", 3) instead — confirm against your version.
        bg.nmixtures = 3;
        bg.bShadowDetection = false;
        // (Removed original locals: `incr` shadowed the file-scope counter
        // used below, and `track` was never read.)
    
        capture.open("E:/demo1.avi");
    
        if(!capture.isOpened())
        {
            cerr << "Problem opening video source" << endl;
            // BUG FIX: the original only printed and fell through, then
            // called grab()/retrieve() on an unopened capture.
            return -1;
        }
    
        mousev.clear();
        kalmanv.clear();
    
        initKalman(0, 0);
    
        while((char)waitKey(1) != 'q' && capture.grab())
        {
            Point s, p;
    
            capture.retrieve(frame);
    
            // Foreground segmentation plus morphological cleanup: two
            // erosions remove speckle, seven dilations merge fragments of
            // the same object into one blob.
            bg.operator ()(frame, fore);
            bg.getBackgroundImage(back);
            erode(fore, fore, Mat());
            erode(fore, fore, Mat());
            for(int d = 0; d < 7; d++)
                dilate(fore, fore, Mat());
    
            cv::normalize(fore, fore, 0, 1., cv::NORM_MINMAX);
            cv::threshold(fore, fore, .5, 1., CV_THRESH_BINARY);
    
            // Red-channel emphasis, shown in the "Red"/"Binary" windows.
            split(frame, channels);
            add(channels[0], channels[1], channels[1]);
            subtract(channels[2], channels[1], channels[2]);
            threshold(channels[2], thresh_frame, 50, 255, CV_THRESH_BINARY);
            medianBlur(thresh_frame, thresh_frame, 5);
    
            // First contour pass: keep blobs with area > 500.
            findContours(fore, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
            vector<vector<Point> > contours_poly(contours.size());
            vector<Rect> boundRect(contours.size());
    
            Mat drawing = Mat::zeros(thresh_frame.size(), CV_8UC1);
            for(size_t i = 0; i < contours.size(); i++)
            {
                if(contourArea(contours[i]) > 500)
                    drawContours(drawing, contours, i, Scalar::all(255), CV_FILLED, 8, vector<Vec4i>(), 0, Point());
            }
            thresh_frame = drawing;
    
            // Second pass over the same mask with a stricter area cut.
            findContours(fore, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
            drawing = Mat::zeros(thresh_frame.size(), CV_8UC1);
            for(size_t i = 0; i < contours.size(); i++)
            {
                if(contourArea(contours[i]) > 3000)
                    drawContours(drawing, contours, i, Scalar::all(255), CV_FILLED, 8, vector<Vec4i>(), 0, Point());
            }
            thresh_frame = drawing;
    
            // Image moments -> mass centre of every remaining contour.
            vector<Moments> mu(contours.size());
            for(size_t i = 0; i < contours.size(); i++)
                mu[i] = moments(contours[i], false);
    
            vector<Point2f> mc(contours.size());
            for(size_t i = 0; i < contours.size(); i++)
                mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
    
            // Bounding boxes from polygon-approximated contours.
            for(size_t i = 0; i < contours.size(); i++)
            {
                approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
                boundRect[i] = boundingRect(Mat(contours_poly[i]));
            }
    
            p = kalmanPredict();
            mousev.push_back(p);
    
            for(size_t i = 0; i < contours.size(); i++)
            {
                if(contourArea(contours[i]) > 1000)
                {
                    rectangle(frame, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 2, 8, 0);
                    Point center = Point(boundRect[i].x + (boundRect[i].width / 2), boundRect[i].y + (boundRect[i].height / 2));
                    cv::circle(frame, center, 8, Scalar(0, 0, 255), -1, 1, 0);
    
                    s = kalmanCorrect(center.x, center.y);
                    drawCross(frame, s, Scalar(255, 255, 255), 5);
    
                    // Count the object when the corrected estimate crosses
                    // the y = 130 line moving upward in image coordinates.
                    if(s.y <= 130 && p.y > 130 && s.x > 15)
                    {
                        incr++;
                        cout << "Counter " << incr << endl;
                    }
    
                    // Draw a trail of up to the last 20 predicted points.
                    // BUG FIX: the original computed mousev.size()-20 in
                    // unsigned arithmetic and compared it signed/unsigned,
                    // silently drawing nothing until 20 points existed;
                    // clamp the start index instead and draw partial trails.
                    int trailEnd = (int)mousev.size() - 1;
                    int trailStart = trailEnd - 19;
                    if(trailStart < 0)
                        trailStart = 0;
                    for(int t = trailStart; t < trailEnd; t++)
                        line(frame, mousev[t], mousev[t + 1], Scalar(0, 255, 0), 1);
                }
            }
    
            imshow("Video", frame);
            imshow("Red", channels[2]);
            imshow("Binary", thresh_frame);
        }
        return 0;
    }
    

1 个答案:

答案 0 :(得分:1)

如果您要跟踪多个无关的对象,您可以为您正在跟踪的每个目标复制恒速卡尔曼滤波器。这将是最有效的方法。

如果你认为你的目标在某种程度上是相关的（例如，对它们的测量误差有一个共同的组成部分，或者一个目标的行为会影响你对另一个目标行为的预测），那么你可以将它们组合在一起。你可以采用现有的4x4矩阵（x, y, vx, vy），以块对角方式组合它们来构成一个更大的矩阵。这样，较大的协方差矩阵中将出现表示一个测量的不确定性与另一个测量如何相关的区域。假设你认为你的相机可能在抖动，那么你会预期所有对象可能带有相同的角度噪声。你可以把这个想法体现在R矩阵中，通过非对角线元素把所有测量关联在一起。这种形式的计算成本会更高（因为矩阵运算的代价是超线性的），并且在添加或删除目标时也会增加复杂性。