I am writing code to recognize cars in a video. Here is the code:
#include "mainwindow.h"
#include "ui_mainwindow.h"
#define SHOW_FLAG false
#define DEBUG_BY_STEP false
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);
}

MainWindow::~MainWindow()
{
    delete ui;
}
QImage MainWindow::Mat2QImage(Mat cvImg)
{
    QImage qImg;
    if (cvImg.channels() == 3)        //3 channels color image
    {
        cv::cvtColor(cvImg, cvImg, CV_BGR2RGB);
        qImg = QImage((const unsigned char*)(cvImg.data),
                      cvImg.cols, cvImg.rows,
                      cvImg.cols * cvImg.channels(),
                      QImage::Format_RGB888);
    }
    else if (cvImg.channels() == 1)   //grayscale image
    {
        qImg = QImage((const unsigned char*)(cvImg.data),
                      cvImg.cols, cvImg.rows,
                      cvImg.cols * cvImg.channels(),
                      QImage::Format_Indexed8);
    }
    else
    {
        qImg = QImage((const unsigned char*)(cvImg.data),
                      cvImg.cols, cvImg.rows,
                      cvImg.cols * cvImg.channels(),
                      QImage::Format_RGB888);
    }
    return qImg;
}
void MainWindow::updateFrame()
{
    frame1 = frame2.clone();
    capture.read(frame2);
    if (!frame2.empty())
    {
        proc();
    }
    frameCnt++;
}
void MainWindow::proc()
{
    /*===================== step 2.1  variable definition and initialization =====================*/
    vector<Blob> currentBlobs;          // blobs in the current frame
    vector<Blob> tempBlobs;             // temporary blobs
    Mat frame1Copy, frame2Copy;         // working copies of the two frames
    Mat imgDifference;                  // frame difference
    Mat imgThresh;                      // binarized difference
    /*===================== step 2.2  denoise and find contours =====================*/
    cv::resize(frame1, frame1Copy, Size((int)(frame1.cols / resizeWidthCoefficient), (int)(frame1.rows / resizeHeightCoefficient))); // resize
    cv::resize(frame2, frame2Copy, Size((int)(frame2.cols / resizeWidthCoefficient), (int)(frame2.rows / resizeHeightCoefficient)));
    cvtColor(frame1Copy, frame1Copy, CV_BGR2GRAY);       // to gray
    cvtColor(frame2Copy, frame2Copy, CV_BGR2GRAY);
    GaussianBlur(frame1Copy, frame1Copy, Size(5, 5), 0); // Gaussian blur for smoothing
    GaussianBlur(frame2Copy, frame2Copy, Size(5, 5), 0);
    absdiff(frame1Copy, frame2Copy, imgDifference);      // frame difference
    //GaussianBlur(imgDifference, imgDifference, Size(5, 5), 0);
    //threshold(imgDifference, imgThresh, 30, 255.0, CV_THRESH_BINARY); // binarization
    adaptiveThreshold(imgDifference, imgThresh, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, 25, 10);
    if (SHOW_FLAG) imshow("imgThresh", imgThresh);
    Mat structuringElement3x3 = getStructuringElement(MORPH_RECT, Size(3, 3)); // different kernels; a bigger kernel for a noisier image
    Mat structuringElement5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
    //Mat structuringElement7x7 = getStructuringElement(MORPH_RECT, Size(7, 7));
    //Mat structuringElement15x15 = getStructuringElement(MORPH_RECT, Size(15, 15));
    for (int i = 0; i < 2; i++)
    {
        dilate(imgThresh, imgThresh, structuringElement3x3, Point(-1, -1), 2);
        erode(imgThresh, imgThresh, structuringElement5x5);
    }
    Mat imgThreshCopy = imgThresh.clone();
    vector<vector<Point> > contours;                     // contours
    findContours(imgThreshCopy, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    if (SHOW_FLAG) showContours(imgThresh.size(), contours, "imgContours");
    vector<vector<Point> > convexHulls(contours.size());
    for (int i = 0; i < contours.size(); i++) convexHull(contours[i], convexHulls[i]);
    if (SHOW_FLAG) showContours(imgThresh.size(), convexHulls, "imgConvexHulls");
    for (auto &convexHull : convexHulls)                 // filter with heuristic knowledge
    {
        Blob possibleBlob(convexHull, (int)lineStart.size());
        Rect possibleBoundingBox = possibleBlob.getBoundingBox();
        if (
            possibleBoundingBox.area() > minBlobArea &&
            possibleBoundingBox.area() < maxBlobArea &&
            possibleBlob.getRatio() > minBlobRatio &&
            possibleBlob.getRatio() < maxBlobRatio &&
            possibleBoundingBox.width > minBlobWidth &&
            possibleBoundingBox.width < maxBlobWidth &&
            possibleBoundingBox.height > minBlobheight &&
            possibleBoundingBox.height < maxBlobheight &&
            possibleBlob.getDiagonalLength() > minBlobDiagonal &&
            possibleBlob.getDiagonalLength() < maxBlobDiagonal &&
            (contourArea(possibleBlob.getContour()) / (double)possibleBoundingBox.area()) > 0.50 // contour area / bounding-box area
            )
            tempBlobs.push_back(possibleBlob);
    }
    for (int i = 0, j; i < tempBlobs.size(); i++)        // filter out blobs covered by another blob
    {
        for (j = 0; j < tempBlobs.size(); j++)
            if (j != i && isOverlapped(tempBlobs[i].getBoundingBox(), tempBlobs[j].getBoundingBox())) break; // is covered
        if (j == tempBlobs.size())
            currentBlobs.push_back(tempBlobs[i]);
    }
    if (SHOW_FLAG) showContours(imgThresh.size(), currentBlobs, "imgcurrentBlobs");
    matchBlobs(blobs, currentBlobs, frame2Copy);
    if (SHOW_FLAG) showContours(imgThresh.size(), blobs, "imgBlobs");
    /*===================== step 2.3  counting and drawing blobs =====================*/
    frame2Copy = frame2.clone();
    drawBlob(blobs, frame2Copy);
    for (int i = 0; i < lineStart.size(); i++)
    {
        if (isCrossLine(blobs, lineStart[i], lineEnd[i], cnt[i], i))   // some blob has crossed this line
            line(frame2Copy, lineStart[i], lineEnd[i], GREEN, lineThickness);
        else
            line(frame2Copy, lineStart[i], lineEnd[i], RED, lineThickness);
    }
    drawCnt(cnt, frame2Copy);
    Mat temp = frame2Copy.clone();
    //QImage image2 = Mat2QImage(temp);
    //ui->label->setPixmap(QPixmap::fromImage(image2).scaled(ui->label->width(), ui->label->height(), Qt::KeepAspectRatio));
    imshow("frame2Copy", frame2Copy);
}
void MainWindow::on_proc_clicked()
{
    crossingLineEnd.clear();
    crossingLineStart.clear();
    blobs.clear();
    lineStart.clear();
    lineEnd.clear();
    cnt.clear();
    /*****************************************************/
    pair<double, double> straightStart(0.1, 0.8);
    pair<double, double> straightEnd(0.9, 0.8);
    // pair<double, double> leftStart(0.1, 0);
    // pair<double, double> leftEnd(0.1, 0.8);
    // pair<double, double> rightStart(0.9, 0);
    // pair<double, double> rightEnd(0.9, 0.8);
    crossingLineStart.push_back(straightStart);
    // crossingLineStart.push_back(leftStart);
    // crossingLineStart.push_back(rightStart);
    crossingLineEnd.push_back(straightEnd);
    // crossingLineEnd.push_back(leftEnd);
    // crossingLineEnd.push_back(rightEnd);
    for (int i = 0; i < crossingLineStart.size(); i++)
        cnt.push_back(0);
    frameCnt = 2;
    if (capture.isOpened())
        capture.release();
    QString filename = QFileDialog::getOpenFileName(this, tr("Open Video File"), ".", tr("Video Files(*.avi *.mp4 *.flv *.mkv)"));
    capture.open(filename.toLocal8Bit().data());
    if (!capture.isOpened())            //cannot open the video
    {
        cout << "can't open the video!" << endl;
        system("pause");
        return;
    }
    capture.read(frame1);
    capture.read(frame2);
    for (int i = 0; i < crossingLineStart.size(); i++)  //scale to pixel coordinates
    {
        int x = (int)((frame1.cols - 1) * crossingLineStart[i].first);
        int y = (int)((frame1.rows - 1) * crossingLineStart[i].second);
        Point start(x, y);
        lineStart.push_back(start);
        x = (int)((frame1.cols - 1) * crossingLineEnd[i].first);
        y = (int)((frame1.rows - 1) * crossingLineEnd[i].second);
        Point end(x, y);
        lineEnd.push_back(end);
    }
    if (capture.isOpened())
    {
        double rate = capture.get(CV_CAP_PROP_FPS);
        proc();
        timer = new QTimer(this);
        timer->setInterval(1000 / rate);
        connect(timer, SIGNAL(timeout()), this, SLOT(updateFrame()));
        timer->start();
    }
}
The code is supposed to detect moving vehicles in a video and mark each car with a rectangular blob. I have tried it on two videos. With one video nothing goes wrong and everything works fine. With the other video it is also fine at first, but after a few frames have been processed I get this error:
Debug Assertion Failed!
Program: ...CV-Desktop_Qt_5_8_0MSVC2015_64bit-Debug\debug\WIC_OpenCV.exe
File: E:\VisualStudio\VC\INCLUDE\xmemory0
Line: 106
Expression: "(_Ptr_user & (_BIG_ALLOCATION_ALIGNMENT - 1)) == 0" && 0
I don't understand why. I tried to debug it, and the error seems to happen at some point when the last lines of code in proc() are executed:
Mat temp = frame2Copy.clone();
//QImage image2 = Mat2QImage(temp);
//ui->label->setPixmap(QPixmap::fromImage(image2).scaled(ui->label->width(),ui->label->height(),Qt::KeepAspectRatio));
imshow("frame2Copy", frame2Copy);
I am quite frustrated. Can anyone help me? Thanks a lot.
Answer (score: 0)
After searching for a long time, I found that this is a bug in the OpenCV library. I was using OpenCV 3.0, and after I switched to OpenCV 3.2 the problem was solved. I don't understand why.
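If you want to confirm which OpenCV build the executable is actually compiled and linked against before and after such an upgrade, a minimal sketch like the one below may help. It assumes OpenCV 3.x, where the CV_VERSION macro and cv::getBuildInformation() are available; printOpenCVVersion is just a hypothetical helper name, not part of the project above.

#include <opencv2/core.hpp>   // provides the version macros and cv::getBuildInformation()
#include <iostream>

// Hypothetical helper: prints the OpenCV version the code was compiled against
// and the build information reported by the library actually linked at runtime,
// which makes it easy to spot a stale 3.0 DLL still being picked up after upgrading to 3.2.
static void printOpenCVVersion()
{
    std::cout << "Compiled against OpenCV " << CV_VERSION << std::endl;  // header version, e.g. "3.2.0"
    std::cout << cv::getBuildInformation() << std::endl;                 // info from the linked library
}

Calling it once at startup (for example at the top of on_proc_clicked()) shows immediately whether old 3.0 binaries are still on the library path, which is worth ruling out whenever a crash disappears after an upgrade.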