Changing the displacement over a sequence of frames

Time: 2017-09-19 20:55:23

Tags: c++ opencv computer-vision opticalflow

I have written some code to track the motion of points of interest in an .avi video file. Here is the code:

#include "opencv2/video/tracking.hpp"
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<cmath>    // std::abs for float displacements
#include<iostream>

using namespace cv;
using namespace std;

int main() {
VideoCapture capture("video.avi");


if (!capture.isOpened()) {
    cout << "ERROR OPENING VIDEO\n\n";
    return(0);
}

double rate = capture.get(CV_CAP_PROP_FPS); 
unsigned int numberFrames = (unsigned int) capture.get(CV_CAP_PROP_FRAME_COUNT);
int width = (int) capture.get(CV_CAP_PROP_FRAME_WIDTH);
int height = (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT);
unsigned int codec = (unsigned int) capture.get(CV_CAP_PROP_FOURCC);

Mat currentGray;
Mat previousGray;
vector< Point2f > points[2];
vector< Point2f > initial;
vector< Point2f > features;

vector< uchar > status;
vector< float > error;

int maxCorners = 500;  // maximum number of features to detect
double qualityLevel = 0.01;  // quality level for feature detection
double minDistance = 10; // min distance between two points

Mat frame, output;

VideoWriter createdVideo("output.avi", codec, rate, Size(width,height), 1);

for (unsigned frameCounter = 0; frameCounter < numberFrames; frameCounter++) {

    capture >> frame;

    if (frame.empty())
        break;

    imshow("Video", frame);
    cvtColor(frame, currentGray, CV_BGR2GRAY);
    frame.copyTo(output);


    if (points[0].size() <= 10){
        goodFeaturesToTrack(currentGray, // the image
            features, // the output detected features
            maxCorners, // the maximum number of features
            qualityLevel, // quality level
            minDistance); // min distance between two features

        // add the detected features to
        // the currently tracked features
        points[0].insert(points[0].end(),
            features.begin(), features.end());
        initial.insert(initial.end(),
            features.begin(), features.end());
    }

    if (previousGray.empty())
        currentGray.copyTo(previousGray);

    calcOpticalFlowPyrLK(previousGray, currentGray, // 2 consecutive images
        points[0], // input point positions in first image
        points[1], // output point positions in the 2nd image
        status, // tracking success
        error); // tracking error

    int k = 0;
    for (int i = 0; i < points[1].size(); i++) {
        // do we keep this point?

        if (status[i] && // the point was successfully tracked
            (std::abs(points[0][i].x - points[1][i].x) +
             std::abs(points[0][i].y - points[1][i].y) > 2)) { // and it has moved

            initial[k] = initial[i];
            points[1][k++] = points[1][i];
        }
    }

    points[1].resize(k);
    initial.resize(k);


    for (int i = 0; i < points[1].size(); i++) {
        // draw line and circle
        line(output,
            initial[i], // initial position
            points[1][i],// new position
            Scalar(0, 255, 0), 2);
        circle(output,
            points[1][i],
            2,
            Scalar(0, 0, 255), -1);

    }

    std::swap(points[1], points[0]);
    cv::swap(previousGray, currentGray);

    createdVideo.write(output);

}

waitKey(0);
return(0);
}

My code tracks the displacement of the points frame by frame, but keeps their first positions until the end of the video. Instead of keeping the positions from the first frame, I would like them to change over time, i.e. replace each point's first position with its position in the next frame, so that the long lines no longer appear and only the displacement between the two positions in two consecutive frames is drawn.

Is it possible to do this?

1 answer:

Answer 0 (score: 0)

Since you only want the positions of the points in two frames, just use two vectors: one holding the keypoints of the current frame and one holding the keypoints of the previous frame. At the end of each iteration, simply set the previous points to the current points. Something like this pseudocode:

// first frame
// detect keypoints
prev_frame_points = keypoints

// rest of the frames
for frame in frames: 
    // detect keypoints
    curr_frame_points = keypoints
    line(..., prev_frame_points, curr_frame_points, ...)
    prev_frame_points = curr_frame_points
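
Mapped onto the variable names already used in the question (points[0] for the previous frame, points[1] for the current one), a minimal sketch of the relevant part of the per-frame loop might look like the following. It assumes the rest of the program stays as posted, compacts points[0] alongside points[1] so the indices still refer to the same tracked point, drops the initial vector from the drawing step, and keeps the 2-pixel movement threshold from the original code:

    // keep-or-discard step: also compact points[0] so that entry i in
    // points[0] and points[1] still refers to the same tracked point
    int k = 0;
    for (size_t i = 0; i < points[1].size(); i++) {
        if (status[i] &&
            (std::abs(points[0][i].x - points[1][i].x) +
             std::abs(points[0][i].y - points[1][i].y) > 2)) {
            points[0][k] = points[0][i];
            points[1][k++] = points[1][i];
        }
    }
    points[0].resize(k);
    points[1].resize(k);

    // draw only the displacement between the previous and the current frame
    for (size_t i = 0; i < points[1].size(); i++) {
        line(output, points[0][i], points[1][i], Scalar(0, 255, 0), 2);
        circle(output, points[1][i], 2, Scalar(0, 0, 255), -1);
    }

    // the existing swap already turns the current points into the
    // "previous" points for the next iteration
    std::swap(points[1], points[0]);
    cv::swap(previousGray, currentGray);

With this change the initial vector (and the two insert calls that fill it) can be removed entirely, since nothing is drawn from the first frame any more.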