Image Stabilization

Date: 2010-11-22 16:30:49

Tags: c++ image opencv computer-vision image-stabilization

Hey, I'm working on a project that stabilizes a video sequence using optical flow. So far my optical flow works well, but there are two possible approaches in front of me. 1 - After computing the optical flow, I take the mean displacement of the image and subtract that mean from the features of the second frame. My question is: what should I do next?

2 - Alternatively, I could use OpenCV functions to stabilize the image: I computed the transformation matrix and then called cvPerspectiveTransform and cvWarpPerspective, but I get a "bad flag" error.

You can see the code below. What I want to know is how to stabilize the image. I'd appreciate any solution you can offer.

#include <stdio.h>
#include <stdlib.h>    
//#include "/usr/include/opencv/cv.h"    
#include <cv.h>    
#include <cvaux.h>    
#include <highgui.h>    
#include <math.h>    
#include <iostream>

#define PI 3.1415926535898

double rads(double degs)
{
    return (PI/180 * degs);
}

CvCapture *cap;

IplImage *img;    
IplImage *frame;     
IplImage *frame1;    
IplImage *frame3;    
IplImage *frame2;    
IplImage *temp_image1;    
IplImage *temp_image2;    
IplImage *frame1_1C;     
IplImage *frame2_1C;     
IplImage *eig_image;     
IplImage *temp_image;     
IplImage *pyramid1 = NULL;    
IplImage *pyramid2 = NULL;

char * mapx;
char * mapy;

int h;
int corner_count;
CvMat* M = cvCreateMat(3,3,CV_32FC1);
CvPoint p,q,l,s;
double hypotenuse;
double angle;

int line_thickness = 1, line_valid = 1, pos = 0;
CvScalar line_color;
CvScalar target_color[4] = { // in BGR order
        {{   0,   0, 255,   0 }},  // red    
        {{   0, 255,   0,   0 }},  // green    
        {{ 255,   0,   0,   0 }}, // blue    
        {{   0, 255, 255,   0 }}   // yellow    
};

inline static double square(int a)    
{
return a * a;  
}

char* IntToChar(int num){return NULL;}

/*{
    char* retstr = static_cast<char*>(calloc(12, sizeof(char)));

    if (sprintf(retstr, "%i", num) > 0)
    {
        return retstr;
    }
    else
    {
        return NULL;
    }
}*/

inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
    if ( *img != NULL ) 
         return;

    *img = cvCreateImage( size, depth, channels );

    if ( *img == NULL )
    {
        fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
        exit(-1);
    }
}

void clearImage (IplImage *img)
{ 
    for (int i=0; i<img->imageSize; i++)    
        img->imageData[i] = (char) 0;    
}

int main()
{
    cap = cvCaptureFromCAM(0);    
    //cap = cvCaptureFromAVI("/home/saif/Desktop/NAO.. the project/jj/Test3.avi");

    CvSize frame_size;

    // Reading the video's frame size
    frame_size.height = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width  = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_WIDTH );    
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);

    while(true)    
    {
        frame = cvQueryFrame( cap );

        if (frame == NULL)
        {    
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;    
        }

        // Allocating another image if it is not allocated already.     
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );    
        cvConvertImage(frame, frame1_1C, 0);    
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );    
        cvConvertImage(frame, frame1, 0);

        //Get the second frame of video.    
        frame = cvQueryFrame( cap );

        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }

        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );   
        cvConvertImage(frame, frame2_1C, 0);    
        allocateOnDemand( &frame2, frame_size, IPL_DEPTH_8U, 3 );    
        cvConvertImage(frame, frame2, 0);

        CvSize optical_flow_window = cvSize(5,5);

        // Scratch buffers for cvGoodFeaturesToTrack, allocated once
        // instead of on every loop iteration.
        allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

        CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

        // Feature tracking 
        CvPoint2D32f frame1_features[4];
        CvPoint2D32f frame2_features[4];

        //cvCornerEigenValsAndVecs(eig_image, temp_image, 1 );
        corner_count = 4;

        // Pick up to 4 strong corners, at least 5 px apart, then refine
        // them to sub-pixel accuracy (no dead zone in the search window).
        cvGoodFeaturesToTrack( frame1_1C, eig_image, temp_image, frame1_features, &corner_count, 0.1, 5, NULL, 5, 1 );
        cvFindCornerSubPix( frame1_1C, frame1_features, corner_count, optical_flow_window, cvSize(-1,-1), optical_flow_termination_criteria );

        if ( corner_count <= 0 )    
            printf( "\nNo features detected.\n" );    
        else    
            printf( "\nNumber of features found = %d\n", corner_count );

        // Lucas-Kanade pyramidal optical flow.
        char optical_flow_found_feature[20];    
        float optical_flow_feature_error[20];

        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );    
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

        cvCalcOpticalFlowPyrLK( frame1_1C, frame2_1C, pyramid1, pyramid2,
                                frame1_features, frame2_features, corner_count,
                                optical_flow_window, 5,
                                optical_flow_found_feature, optical_flow_feature_error,
                                optical_flow_termination_criteria, 0 );

    /*
    double sumOfDistancesX = 0;    
    double sumOfDistancesY = 0;

    int debug = 0;

     CvFont font1, font2;    
     CvScalar red, green, blue;    
     IplImage* seg_in = NULL;    
     IplImage *seg_out = NULL;

     allocateOnDemand( &seg_in,  frame_size, IPL_DEPTH_8U, 3 );    
     allocateOnDemand( &seg_out, frame_size, IPL_DEPTH_8U, 3 );

     clearImage(seg_in);    
     clearImage(seg_in);    

     for( int i=0; i <corner_count; i++ )
     {

         if ( optical_flow_found_feature[i] == 0 )  
             continue;    
         p.x = (int) frame1_features[i].x;    
         p.y = (int) frame1_features[i].y;    
         q.x = (int) frame2_features[i].x;    
         q.y = (int) frame2_features[i].y;
         angle = atan2( (double) p.y - q.y, (double) p.x - q.x );

          sumOfDistancesX += q.x - p.x;     
          sumOfDistancesY += q.y - p.y;

          //cvRemap(frame2,frame1,averageDistanceX , averageDistanceY,CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));    
      }
      */

      /*    
      int averageDistanceX = sumOfDistancesX / corner_count;    
      int averageDistanceY = sumOfDistancesY / corner_count;    
      l.x = averageDistanceX - q.x;    
      s.y = averageDistanceY - q.y;
      */

        // Compute the perspective transform that maps the frame-2 features
        // onto the frame-1 features.
        cvGetPerspectiveTransform( frame2_features, frame1_features, M );

        // cvWarpPerspective operates on whole images, not on the feature
        // arrays -- passing the point arrays here is what triggers the
        // "bad flag" error. Warp the second frame with M instead, so that
        // it lines up with the first frame.
        allocateOnDemand( &frame3, frame_size, IPL_DEPTH_8U, 3 );
        cvWarpPerspective( frame2, frame3, M, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );

        cvShowImage("Optical Flow", frame1);    
        cvWaitKey(50);
    }

    cvReleaseCapture(&cap);    
    cvReleaseMat(&M);    

    return 0;    
}

1 Answer:

Answer 0 (score: 3)

You don't want to subtract the average displacement from the second image; you want to transform (shift) the second image by the average displacement so that it "matches" the first image. Which "shift" you use depends on your situation.
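As a minimal sketch of that idea, reusing the variables from the code in the question (frame1_features, frame2_features, corner_count, the optical_flow_found_feature status array from cvCalcOpticalFlowPyrLK, and your allocateOnDemand helper), the averaging-and-shifting step could look roughly like this:

    // Average the displacement of the successfully tracked features.
    double dx = 0, dy = 0;
    int tracked = 0;
    for (int i = 0; i < corner_count; i++)
    {
        if (!optical_flow_found_feature[i])   // skip features LK lost
            continue;
        dx += frame2_features[i].x - frame1_features[i].x;
        dy += frame2_features[i].y - frame1_features[i].y;
        tracked++;
    }
    if (tracked > 0)
    {
        dx /= tracked;
        dy /= tracked;
    }

    // Shift frame2 back by the mean displacement so it matches frame1.
    // cvWarpAffine takes a 2x3 matrix; here it encodes a pure translation.
    static IplImage *stabilized = NULL;
    allocateOnDemand( &stabilized, frame_size, IPL_DEPTH_8U, 3 );
    float t[] = { 1, 0, (float)-dx,
                  0, 1, (float)-dy };
    CvMat T = cvMat(2, 3, CV_32FC1, t);
    cvWarpAffine(frame2, stabilized, &T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));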

  • If your camera is shaking but otherwise stationary, you want to use the average displacement between two consecutive frames as the translation vector for the second frame. With each new frame, you compute the displacement between the transformed previous frame and the new frame, and translate the new frame by it (see the sketch after this list).
  • If your camera moves as well as shakes (e.g. a helmet camera mounted on a mountain bike), you first need to find the average inter-frame displacement over several frames, and then shift each individual frame in the sequence by the difference between that average displacement and its own displacement from the previous frame.
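Since the code in the question tracks features between the two raw frames, an equivalent way to follow the first bullet is to accumulate the per-frame mean displacement, so every frame is shifted back to the position of the very first one. A sketch only; accumulatedDx/accumulatedDy are hypothetical names, and dx/dy and stabilized come from the previous sketch:

    // Case 1: camera shakes around a fixed position. Keep a running total
    // of the measured shifts so each frame is corrected back to frame 1.
    double accumulatedDx = 0, accumulatedDy = 0;   // before the capture loop

    // ... inside the loop, after computing the mean displacement (dx, dy)
    // between the previous raw frame and the current one:
    accumulatedDx += dx;
    accumulatedDy += dy;

    float t[] = { 1, 0, (float)-accumulatedDx,
                  0, 1, (float)-accumulatedDy };
    CvMat T = cvMat(2, 3, CV_32FC1, t);
    cvWarpAffine(frame2, stabilized, &T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));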

Edit: What you basically need to do for option 2 is compute the average of the inter-frame motion over the last few frames. This can be done in several ways, but I'd suggest using something like a Kalman filter. Then, for a new frame, you compute the motion between that frame and the (corrected) previous frame, subtract the average motion estimated so far from it, and shift the new frame by the difference.
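A sketch of that correction, with a simple exponential moving average standing in for the Kalman filter (the filter only needs to estimate the intentional motion; alpha is a hypothetical smoothing factor, and dx/dy and stabilized again come from the first sketch):

    // Case 2: camera both moves and shakes. Estimate the smooth
    // (intentional) motion and remove only the deviation from it.
    double smoothDx = 0, smoothDy = 0;   // before the capture loop
    const double alpha = 0.1;            // smoothing factor, tune to taste

    // ... inside the loop, with (dx, dy) the measured motion against the
    // previous frame:
    smoothDx = (1 - alpha) * smoothDx + alpha * dx;
    smoothDy = (1 - alpha) * smoothDy + alpha * dy;

    // The jitter is the part of the motion that deviates from the smooth
    // estimate; shift the new frame back by exactly that deviation.
    float t[] = { 1, 0, (float)-(dx - smoothDx),
                  0, 1, (float)-(dy - smoothDy) };
    CvMat T = cvMat(2, 3, CV_32FC1, t);
    cvWarpAffine(frame2, stabilized, &T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));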