How to extract the detected region in OpenCV?

Date: 2013-05-24 10:25:53

Tags: visual-c++ opencv image-processing

Hi all. The code below tracks a moving object and also shows the background-subtraction result; I am using the frame-differencing method. My problem is that I now have to extract that moving object from the colour video file. I have done the segmentation, but after detection I want to extract the region where I draw the bounding box. Can somebody please help me? Thanks in advance.

    #include <opencv2/opencv.hpp>   // OpenCV 1.x C API and 2.x C++ API
    #include <stdio.h>
    #include <stdlib.h>             // _itoa (MSVC)

    int main(int argc, char* argv[])
    {
        CvSize imgSize;
        //CvCapture *capture = cvCaptureFromFile("S:\\offline object detection database\\video1.avi");
        CvCapture *capture = cvCaptureFromFile("S:\\offline object detection database\\SINGLE PERSON Database\\Walk1.avi");

        if (!capture) {
            printf("Capture failure\n");
            return -1;
        }

        // Grab one frame to learn the frame size.
        IplImage* frame = cvQueryFrame(capture);
        if (!frame)
            return -1;

        imgSize = cvGetSize(frame);

        IplImage* greyImage     = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
        IplImage* colourImage;
        IplImage* movingAverage = cvCreateImage(imgSize, IPL_DEPTH_32F, 3);
        IplImage* difference;
        IplImage* temp;
        IplImage* motionHistory = cvCreateImage(imgSize, IPL_DEPTH_8U, 3);  // allocated but unused below

        CvRect bndRect = cvRect(0, 0, 0, 0);
        CvPoint pt1, pt2;
        CvFont font;
        int prevX = 0;
        int numPeople = 0;
        char wow[65];
        int avgX = 0;
        bool first = true;
        int closestToLeft = 0;
        int closestToRight = 320;

        for (;;)
        {
            colourImage = cvQueryFrame(capture);
            if (!colourImage)
                break;

            if (first)
            {
                // Initialise the background model from the first frame.
                difference = cvCloneImage(colourImage);
                temp = cvCloneImage(colourImage);
                cvConvertScale(colourImage, movingAverage, 1.0, 0.0);
                first = false;
            }
            else
            {
                // Update the running-average background model.
                cvRunningAvg(colourImage, movingAverage, 0.020, NULL);
            }

            // Foreground = |current frame - background|, thresholded and cleaned up.
            cvConvertScale(movingAverage, temp, 1.0, 0.0);
            cvAbsDiff(colourImage, temp, difference);
            cvCvtColor(difference, greyImage, CV_RGB2GRAY);
            cvThreshold(greyImage, greyImage, 80, 250, CV_THRESH_BINARY);
            cvSmooth(greyImage, greyImage, CV_GAUSSIAN);  // smooth type 2 == CV_GAUSSIAN
            cvDilate(greyImage, greyImage, 0, 1);
            cvErode(greyImage, greyImage, 0, 1);
            cvShowImage("back", greyImage);

            // Find the contours of the moving blobs in the foreground mask.
            CvMemStorage* storage = cvCreateMemStorage(0);
            CvSeq* contour = 0;
            cvFindContours(greyImage, storage, &contour, sizeof(CvContour),
                           CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

            for ( ; contour != 0; contour = contour->h_next)
            {
                // Bounding box of the current blob and its horizontal centre.
                bndRect = cvBoundingRect(contour, 0);
                pt1.x = bndRect.x;
                pt1.y = bndRect.y;
                pt2.x = bndRect.x + bndRect.width;
                pt2.y = bndRect.y + bndRect.height;
                avgX = (pt1.x + pt2.x) / 2;

                // Count a person when a blob crosses the middle band of the frame.
                if (avgX > 90 && avgX < 250)
                {
                    if (closestToLeft >= 88 && closestToLeft <= 90)
                    {
                        if (avgX > prevX)   // moving left-to-right
                        {
                            numPeople++;
                            closestToLeft = 0;
                        }
                    }
                    else if (closestToRight >= 250 && closestToRight <= 252)
                    {
                        if (avgX < prevX)   // moving right-to-left
                        {
                            numPeople++;
                            closestToRight = 220;
                        }
                    }
                    cvRectangle(colourImage, pt1, pt2, CV_RGB(255, 0, 0), 1);
                }
                if (avgX > closestToLeft && avgX <= 90)
                    closestToLeft = avgX;
                if (avgX < closestToRight && avgX >= 250)
                    closestToRight = avgX;
                prevX = avgX;
            }
            cvReleaseMemStorage(&storage);  // was leaked once per frame in the original

            // Draw the people counter and show the results.
            cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);
            cvPutText(colourImage, _itoa(numPeople, wow, 10), cvPoint(60, 200), &font, cvScalar(0, 0, 255));
            cvShowImage("My Window", colourImage);
            cvShowImage("fore", greyImage);
            cvWaitKey(10);
        }

        cvReleaseImage(&temp);
        cvReleaseImage(&difference);
        cvReleaseImage(&greyImage);
        cvReleaseImage(&movingAverage);
        cvReleaseImage(&motionHistory);
        cvDestroyWindow("My Window");
        cvReleaseCapture(&capture);
        return 0;
    }

2 Answers:

Answer 0 (score: 1)

In OpenCV's legacy C API you can use the cvSetImageROI command to extract a region of interest from an image. In your code you only need to add this line, and the image will then, for most purposes, be treated as if it contained only the extracted region:

cvSetImageROI(colourImage, bndRect);
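
Note that cvSetImageROI does not copy any pixels; it only restricts subsequent operations to that rectangle. If you want the detected region as a standalone image, a minimal sketch (reusing the bndRect from your contour loop; the name extracted is only illustrative) would be:

cvSetImageROI(colourImage, bndRect);
IplImage* extracted = cvCreateImage(cvSize(bndRect.width, bndRect.height),
                                    colourImage->depth, colourImage->nChannels);
cvCopy(colourImage, extracted, NULL);  // copies only the pixels inside the ROI
cvResetImageROI(colourImage);          // restore the full frame for later drawing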

In the OpenCV 2.0 API, the original image and the "extracted region" image are stored in separate Mat objects, but they point to the same underlying data:

Mat colourImage, extractedregion;
colourImage = imread("test.bmp");
extractedregion = colourImage(bndRect);  // Creates only a header, no new image data
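
Because extractedregion shares its pixel data with colourImage, drawing on the frame afterwards also changes the extracted region. If you need an independent copy, a one-line sketch (extractedcopy is an illustrative name):

Mat extractedcopy = colourImage(bndRect).clone();  // deep copy with its own data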

Many useful OpenCV tutorials use the legacy API, but you should prefer the new one.

Answer 1 (score: 0)

I know the new OpenCV interface, not the "legacy" one you are using. It would be something like this:

cv::Mat frame_m(frame);
...

cv::Mat region_m = frame_m(cv::Rect(bndRect));
IplImage region = region_m; // use &iplimg when an IplImage* is needed.
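
As a quick sanity check, you could write the extracted region to disk (the filename here is only illustrative):

cv::imwrite("region.png", region_m);  // save the detected region as an image file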

If you prefer not to mix the two interfaces, now is a good time to learn the new one.