I am using cv::calcOpticalFlowPyrLK to compute the optical flow from one frame to the next in a video sequence. I have noticed that tracking is less accurate at a high frame rate than at a low one.
The source is 30 fps, and I found that if I downsample it by a factor of 8, the tracking is more accurate than when using every frame.
The frame size is 360×480 and the search window size is 21×21.
Any help is appreciated!
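For reference, a minimal sketch of the kind of call described above (the frame pair, the corner-detection step, and all variable names here are illustrative assumptions, not the actual code):

#include <opencv2/opencv.hpp>
using namespace cv;

// prev_gray / curr_gray: two 360x480 grayscale frames from the sequence.
// Tracks corners from prev_gray into curr_gray with a 21x21 search window.
std::vector<Point2f> trackOnce(const Mat& prev_gray, const Mat& curr_gray) {
    std::vector<Point2f> prev_pts, next_pts;
    goodFeaturesToTrack(prev_gray, prev_pts, 100, 0.01, 5.0); // detect up to 100 corners
    if (prev_pts.empty()) return next_pts;
    std::vector<uchar> status;
    std::vector<float> err;
    calcOpticalFlowPyrLK(prev_gray, curr_gray, prev_pts, next_pts,
                         status, err, Size(21, 21), 3); // 21x21 window, 3 pyramid levels
    return next_pts;
}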
Answer 0 (score: 0)
Main cpp
#include <iostream>
#include <queue>
#include <opencv2/opencv.hpp>
#include "corner_tracker.h"
using namespace std;
using namespace cv;
int main(int argc, char** argv) {
    if (argc != 2) {
        cout << "usage: " << argv[0] << " <video path>" << endl;
        exit(1);
    }
    int frame_lag = 4;
    string video_filepath(argv[1]);
    VideoCapture vidcap(video_filepath);
    Mat ref_frame, curr_frame, prev_frame;
    queue<Mat> frame_buffer;
    vector<Point2f> tracked_corners;
    vector<Point2f> optical_flow;
    CornerTrackerParameterBlock param;
    CornerTracker corner_tracker(param);
    Mat mask;
    while (true) {
        vidcap >> ref_frame;
        if (ref_frame.empty()) break;
        cvtColor(ref_frame, curr_frame, COLOR_BGR2GRAY);
        // push a deep copy so the buffered frame does not alias curr_frame
        Mat tmp_frame;
        curr_frame.copyTo(tmp_frame);
        frame_buffer.push(tmp_frame);
        // wait until the buffer holds frame_lag+1 frames so prev_frame lags curr_frame by frame_lag
        if ((int)frame_buffer.size() < frame_lag + 1) {
            continue;
        }
        prev_frame = frame_buffer.front();
        frame_buffer.pop();
        corner_tracker.TrackCorners(prev_frame, curr_frame, mask, 100, tracked_corners, optical_flow);
        for (int i = 0; i < (int)tracked_corners.size(); i++) {
            // because the optical flow is calculated between the current frame and the frame
            // frame_lag frames before it, the flow vector has to be normalized
            Point2f normalized_optical_flow = optical_flow[i] * (1.0 / (double)frame_lag);
            line(ref_frame, tracked_corners[i], tracked_corners[i] + normalized_optical_flow, Scalar(0, 255, 0));
            circle(ref_frame, tracked_corners[i], 2, Scalar(0, 0, 255));
        }
        imshow("window", ref_frame);
        if ((char)waitKey(30) == 27) { // ESC quits
            break;
        }
    }
    return 0;
}
Corner tracker header file (corner_tracker.h)
#ifndef CORNER_TRACKER_H_
#define CORNER_TRACKER_H_
#include <opencv2/core/core.hpp>
struct CornerTrackerParameterBlock {
    // Lucas-Kanade tracker parameters
    double lkt_max_bidirectioal_error;
    int lkt_maxlevel;
    int lkt_winsize;
    // goodFeaturesToTrack parameters
    int feature_blocksize;
    double feature_k;
    double feature_mindist;
    double feature_quality_level;
    // default constructor
    CornerTrackerParameterBlock(void) :
        lkt_max_bidirectioal_error(2.0),
        lkt_maxlevel(3),
        lkt_winsize(16),
        feature_blocksize(3),
        feature_k(0.04),
        feature_mindist(5.0),
        feature_quality_level(0.01)
    {}
};

class CornerTracker {
public:
    CornerTracker(const CornerTrackerParameterBlock& param);
    void TrackCorners(const cv::Mat& prev_frame, const cv::Mat& curr_frame, const cv::Mat& mask, int max_corners, std::vector<cv::Point2f>& tracked_corners, std::vector<cv::Point2f>& optical_flow_vectors) const;
private:
    void AddAdditionalCorners(const cv::Mat& curr_frame, const cv::Mat& mask, int max_corners, std::vector<cv::Point2f>& tracked_corners) const;
    CornerTrackerParameterBlock m_param;
};
#endif //CORNER_TRACKER_H_
Corner tracker cpp file
#include <iostream>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include "corner_tracker.h"
using namespace std;
using namespace cv;
CornerTracker::CornerTracker(const CornerTrackerParameterBlock& param) :
    m_param(param)
{}

void CornerTracker::AddAdditionalCorners(const cv::Mat& curr_frame, const cv::Mat& mask, int max_corners, std::vector<cv::Point2f>& tracked_corners) const {
    // detect additional features (cast size() to int so the difference cannot wrap around)
    int additional_corners = max_corners - (int)tracked_corners.size();
    if (additional_corners <= 0) return;
    // generate mask
    Mat tmp_mask;
    if (mask.rows != curr_frame.rows || mask.cols != curr_frame.cols || mask.type() != CV_8U) {
        tmp_mask.create(curr_frame.rows, curr_frame.cols, CV_8U);
        tmp_mask = Scalar::all(255);
    }
    else {
        mask.copyTo(tmp_mask);
    }
    // mask out current points so new detections keep their distance from existing corners
    for (const Point2f& p : tracked_corners) {
        circle(tmp_mask, p, cvRound(m_param.feature_mindist), Scalar::all(0), -1); // filled black circle
    }
    vector<Point2f> corners;
    goodFeaturesToTrack(curr_frame, corners, additional_corners, m_param.feature_quality_level, m_param.feature_mindist, tmp_mask, m_param.feature_blocksize, true, m_param.feature_k);
    for (const Point2f& p : corners) {
        tracked_corners.push_back(p);
    }
}

void CornerTracker::TrackCorners(const cv::Mat& prev_frame, const cv::Mat& curr_frame, const cv::Mat& mask, int max_corners, std::vector<cv::Point2f>& tracked_corners, std::vector<cv::Point2f>& optical_flow_vectors) const {
    AddAdditionalCorners(curr_frame, mask, max_corners, tracked_corners);
    vector<Point2f> prev_corners(tracked_corners);
    vector<Point2f> next_corners(tracked_corners);
    // optical flow corner tracking: backward pass (curr -> prev), then forward pass (prev -> curr)
    vector<uchar> status1, status2;
    vector<float> error1, error2;
    calcOpticalFlowPyrLK(curr_frame, prev_frame, tracked_corners, prev_corners, status1, error1, Size(m_param.lkt_winsize, m_param.lkt_winsize), m_param.lkt_maxlevel, TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, 0.01), OPTFLOW_USE_INITIAL_FLOW);
    calcOpticalFlowPyrLK(prev_frame, curr_frame, prev_corners, next_corners, status2, error2, Size(m_param.lkt_winsize, m_param.lkt_winsize), m_param.lkt_maxlevel, TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, 0.01), OPTFLOW_USE_INITIAL_FLOW);
    // check tracked corner quality
    vector<Point2f> temp_corners;
    optical_flow_vectors.clear();
    for (unsigned int i = 0; i < tracked_corners.size(); i++) {
        if (status1[i] == 0 || status2[i] == 0) {
            continue;
        }
        // bidirectional error check: a corner tracked to the previous frame and back
        // should land close to where it started
        float bidirectional_error = norm(next_corners[i] - tracked_corners[i]);
        if (bidirectional_error > m_param.lkt_max_bidirectioal_error) {
            continue;
        }
        optical_flow_vectors.push_back(tracked_corners[i] - prev_corners[i]);
        temp_corners.push_back(tracked_corners[i]);
    }
    tracked_corners.swap(temp_corners);
}
Answer 1 (score: 0)
Actually, I found that my problem was a float-to-int conversion error in my own code.
In my code I loop over all the frames and convert the optical-flow-tracked points to iOS points (CGPoint) and back again. In that process I accidentally converted from float to int (I had used cv::Point instead of cv::Point2f).
The results get worse at a high fps because the error accumulates many more times, since the tracking is called far more often.
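To illustrate the kind of error described (this snippet is a hypothetical illustration, not the actual project code; the CGPoint round trip is replaced by a plain cv::Point round trip):

#include <iostream>
#include <opencv2/core/core.hpp>

int main() {
    // A tracked corner has sub-pixel coordinates.
    cv::Point2f tracked(12.37f, 45.81f);

    // Converting through the integer type cv::Point (instead of cv::Point2f)
    // rounds the coordinates; since this happens once per tracking call, the
    // error accumulates faster the more frames are processed.
    cv::Point   rounded  = tracked;   // becomes (12, 46)
    cv::Point2f restored = rounded;   // the sub-pixel part is gone

    std::cout << tracked.x << "," << tracked.y << " -> "
              << restored.x << "," << restored.y << std::endl;
    return 0;
}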
Answer 2 (score: 0)
This can also happen if the video quality changes. At a lower fps but the same kbps, some video encoders (e.g. H.264) have more bits available to encode each frame, which results in higher quality.
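As an illustrative calculation (the bitrate is made up; the 30 fps and the 8× downsampling are from the question): at a constant 2000 kbps, a 30 fps stream gets roughly 2000 / 30 ≈ 67 kbit per frame, while the 8×-downsampled stream at about 3.75 fps gets roughly 2000 / 3.75 ≈ 533 kbit per frame, so each remaining frame can be encoded at much higher quality.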