I am writing an algorithm to stabilize video. My current pipeline is: detect SURF keypoints in the previous and current frames, match their descriptors, estimate a homography between the two frames, and warp the current frame with its inverse.
The code currently looks like this:
void Stab::process()
{
    // On the very first frame there is nothing to align against yet,
    // so just remember it and pass it through unchanged.
    if (first == false)
    {
        prev_frame = frame.clone();
        first = true;
        im_out = frame.clone();
        return;
    }

    // Work on grayscale copies of both frames.
    Mat gray, prev_gray;
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    cvtColor(prev_frame, prev_gray, COLOR_BGR2GRAY);

    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    detector->detectAndCompute( prev_gray, noArray(), keypoints1, descriptors1 );
    detector->detectAndCompute( gray, noArray(), keypoints2, descriptors2 );
    if (descriptors1.empty() || descriptors2.empty())
    {
        // No features found in one of the frames; output the frame as-is.
        im_out = frame.clone();
        prev_frame = frame.clone();
        return;
    }

    //-- Step 2: Matching descriptor vectors with a FLANN based matcher
    // Since SURF is a floating-point descriptor NORM_L2 is used
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
    std::vector< std::vector<DMatch> > knn_matches;
    matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );

    //-- Filter matches using Lowe's ratio test
    const float ratio_thresh = 0.7f;
    std::vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++)
    {
        if (knn_matches[i].size() >= 2 &&
            knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
        {
            good_matches.push_back(knn_matches[i][0]);
        }
    }

    //-- Collect the corresponding points in the previous and current frames
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
    }

    // findHomography needs at least 4 point pairs and may still fail,
    // so fall back to the unwarped frame if no homography is found.
    im_out = frame.clone();
    if (obj.size() >= 4)
    {
        Mat H = findHomography( obj, scene, RANSAC );
        if (!H.empty())
        {
            Mat HInv = H.inv();
            warpPerspective(frame, im_out, HInv, prev_frame.size());
        }
    }
    prev_frame = frame.clone();
    //waitKey(30);
}
However, the results I am getting are very poor. Is there anything here that can be optimized, or should I switch to a different approach?
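For example, I have been wondering whether a simpler motion model would already be more robust than a full homography. Below is a minimal sketch of the kind of thing I mean, not tested code: it assumes the same Stab members (frame, prev_frame, im_out, first) as above and uses OpenCV's goodFeaturesToTrack, calcOpticalFlowPyrLK and estimateAffinePartial2D; the method name processRigid and the parameter values are placeholders I made up.

void Stab::processRigid()
{
    Mat gray, prev_gray;
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    if (first == false)
    {
        prev_frame = frame.clone();
        first = true;
        im_out = frame.clone();
        return;
    }
    cvtColor(prev_frame, prev_gray, COLOR_BGR2GRAY);

    // Track strong corners from the previous frame into the current one.
    std::vector<Point2f> prev_pts, curr_pts;
    goodFeaturesToTrack(prev_gray, prev_pts, 200, 0.01, 30);
    std::vector<uchar> status;
    std::vector<float> err;
    if (!prev_pts.empty())
        calcOpticalFlowPyrLK(prev_gray, gray, prev_pts, curr_pts, status, err);

    // Keep only the pairs that were tracked successfully.
    std::vector<Point2f> p0, p1;
    for (size_t i = 0; i < status.size(); i++)
    {
        if (status[i])
        {
            p0.push_back(prev_pts[i]);
            p1.push_back(curr_pts[i]);
        }
    }

    // 4-DoF model (translation + rotation + uniform scale): far fewer
    // degrees of freedom for RANSAC to get wrong than an 8-DoF homography.
    im_out = frame.clone();
    if (p0.size() >= 4)
    {
        Mat T = estimateAffinePartial2D(p0, p1, noArray(), RANSAC);
        if (!T.empty())
        {
            Mat TInv;
            invertAffineTransform(T, TInv);
            warpAffine(frame, im_out, TInv, prev_frame.size());
        }
    }
    prev_frame = frame.clone();
}

Even with that, I understand that warping each frame back to just the previous frame only cancels inter-frame jitter; for real stabilization the per-frame transforms presumably need to be accumulated into a camera trajectory, the trajectory smoothed, and only the difference between the raw and smoothed trajectory applied. Is that the direction I should be going in?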