How to detect squares in a video using OpenCV?

Time: 2015-06-09 10:56:45

Tags: c++ c opencv video

I merged the squares.cpp and cvBoundingRect.cpp code to detect squares in a video. To do that, I had to convert from IplImage to the Mat type (using the cvarrToMat function) so that the findSquares and drawSquares methods could run. Unfortunately, although the program compiles successfully, I get this error at runtime:

  

  OpenCV Error: Assertion failed (j < nsrcs && src[j].depth() == depth) in mixChannels, file /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp, line 1205
  libc++abi.dylib: terminating with uncaught exception of type cv::Exception: /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp:1205: error: (-215) j < nsrcs && src[j].depth() == depth in function mixChannels
  Abort trap: 6

Here is the code:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <math.h>
#include <string.h>

using namespace cv;
using namespace std;

int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";

// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();

Mat pyr, timg, gray0(image.size(), CV_8U), gray;

// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;

// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
    int ch[] = {c, 0};
    mixChannels(&timg, 1, &gray0, 1, ch, 1);

    // try several threshold levels
    for( int l = 0; l < N; l++ )
    {
        // hack: use Canny instead of zero threshold level.
        // Canny helps to catch squares with gradient shading
        if( l == 0 )
        {
            // apply Canny. Take the upper threshold from slider
            // and set the lower to 0 (which forces edges merging)
            Canny(gray0, gray, 0, thresh, 5);
            // dilate canny output to remove potential
            // holes between edge segments
            dilate(gray, gray, Mat(), Point(-1,-1));
        }
        else
        {
            // apply threshold if l!=0:
            //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
            gray = gray0 >= (l+1)*255/N;
        }

        // find contours and store them all as a list
        findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

        vector<Point> approx;

        // test each contour
        for( size_t i = 0; i < contours.size(); i++ )
        {
            // approximate contour with accuracy proportional
            // to the contour perimeter
            approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

            // square contours should have 4 vertices after approximation
            // relatively large area (to filter out noisy contours)
            // and be convex.
            // Note: absolute value of an area is used because
            // area may be positive or negative - in accordance with the
            // contour orientation
            if( approx.size() == 4 &&
                fabs(contourArea(Mat(approx))) > 1000 &&
                isContourConvex(Mat(approx)) )
            {
                double maxCosine = 0;

                for( int j = 2; j < 5; j++ )
                {
                    // find the maximum cosine of the angle between joint edges
                    double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                    maxCosine = MAX(maxCosine, cosine);
                }

                // if cosines of all angles are small
                // (all angles are ~90 degree) then write quandrange
                // vertices to resultant sequence
                if( maxCosine < 0.3 )
                    squares.push_back(approx);
            }
        }
    }
}
}


// the function draws all the squares in the image
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
    const Point* p = &squares[i][0];
    int n = (int)squares[i].size();
    polylines(image, &p, &n, 1, true, Scalar(255,0,0), 3, LINE_AA);
}

imshow(wndname, image);
}

CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;

bool first = true;


int main(int argc, char* argv[])
{
//Create a new movie capture object.
   cam = cvCaptureFromCAM(0);

   //create storage for contours
   storage = cvCreateMemStorage(0);

   //capture current frame from webcam
   currentFrame = cvQueryFrame(cam);

   //Size of the image.
   CvSize imgSize;
   imgSize.width = currentFrame->width;
   imgSize.height = currentFrame->height;

   //Images to use in the program.
   currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);                           

namedWindow( wndname, 1 );
    vector<vector<Point> > squares;

while(1)
   {
          currentFrame = cvQueryFrame( cam );
          if( !currentFrame ) break;

          //Convert the image to grayscale.
          cvCvtColor(currentFrame,currentFrame_grey,CV_RGB2GRAY);

          if(first) //Capturing Background for the first time
          {
                 differenceImg = cvCloneImage(currentFrame_grey);
                 oldFrame_grey = cvCloneImage(currentFrame_grey);
                 cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
                 first = false;
                 continue;
          }

          //Minus the current frame from the moving average.
          cvAbsDiff(oldFrame_grey,currentFrame_grey,differenceImg);

          //bluring the differnece image
          cvSmooth(differenceImg, differenceImg, CV_BLUR);             

          //apply threshold to discard small unwanted movements
          cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);

          //find contours


cv::Mat diffImg = cv::cvarrToMat(differenceImg);
cv::Mat currFrame = cv::cvarrToMat(currentFrame);

          findSquares(diffImg, squares);

          //draw bounding box around each contour
          drawSquares(currFrame, squares);

          //display colour image with bounding box
          cvShowImage("Output Image", currentFrame);

          //display threshold image
          cvShowImage("Difference image", differenceImg);

          //New Background
          cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);

          //clear memory and contours
          cvClearMemStorage( storage );
          contours = 0;

          //press Esc to exit
          char c = cvWaitKey(33);
          if( c == 27 ) break;

   }

// Destroy the image & movies objects
   cvReleaseImage(&oldFrame_grey);
   cvReleaseImage(&differenceImg);
   cvReleaseImage(&currentFrame);
   cvReleaseImage(&currentFrame_grey);


return 0;
}

1 Answer:

Answer 0: (score: 1)

As the error message says, your problem is in cv::mixChannels(); see the documentation. Your findSquares() loops over three colour planes (c = 0, 1, 2), but the difference image you pass in has only one channel, so mixChannels() runs out of source channels and the assertion fails.

Alternatively, you can simply do something like
cv::Mat channels[3];
cv::split(multiChannelImage, channels);

and then access each channel with
cv::Mat currChannel = channels[channelNumber];
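
Applied to the code in the question, the same idea could replace the mixChannels() call inside findSquares(). This is only a sketch: the variable names come from the posted code, and looping over planes.size() so that a single-channel difference image also works is my own addition, not something guaranteed by the original squares.cpp sample.

// inside findSquares(), after pyrDown()/pyrUp() have produced timg:
std::vector<cv::Mat> planes;
cv::split(timg, planes);                  // one Mat per colour plane (1 for grayscale, 3 for BGR)

// iterate over the planes that actually exist instead of a hard-coded c < 3
for (size_t c = 0; c < planes.size(); c++)
{
    cv::Mat gray0 = planes[c];            // replaces the mixChannels() call
    // ... keep the existing Canny/threshold/findContours loop unchanged ...
}

With a grayscale input such as the absolute-difference image, planes.size() is 1, so the code never asks for a channel that does not exist and the assertion in mixChannels() is never reached.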