OpenCV face recognition prediction error

Date: 2014-04-29 01:11:16

Tags: c++ opencv face-recognition

I've stumbled upon an exception in my application that I can't get rid of...

I'm trying to write a simple face recognition program for still images that uses all three of OpenCV's face recognition algorithms (Eigenfaces, Fisherfaces, and LBPH).

The unhandled exception is thrown by this line:

Fisher_prediction = Fisher_model->predict(crop);

and the error message reads: Unhandled exception at 0x000007FEFDB3A49D in FaceRecognition.exe: Microsoft C++ exception: cv::Exception at memory location 0x00000000002782B0.

It is thrown from: msvcr110d.dll!_CxxThrowException(void * pExceptionObject, const _s__ThrowInfo * pThrowInfo) Line 152 C++

Any suggestions as to where I'm going wrong?

Here is the rest of the code:

Mat frame = imread("1.jpg");

    // Apply the classifier to the frame
    if (!frame.empty()) {

        cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
        equalizeHist(frame_gray, frame_gray);

        // Detect faces
        face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

        // Set Region of Interest
        cv::Rect roi_b;
        cv::Rect roi_c;

        size_t ic = 0; // ic is index of current element
        int ac = 0; // ac is area of current element

        size_t ib = 0; // ib is index of biggest element
        int ab = 0; // ab is area of biggest element

        // Iterate through all current elements (detected faces)
        for (ic = 0; ic < faces.size(); ic++) {

            roi_c.x = faces[ic].x;
            roi_c.y = faces[ic].y;
            roi_c.width = (faces[ic].width);
            roi_c.height = (faces[ic].height);

            ac = roi_c.width * roi_c.height; // Get the area of current element (detected face)

            roi_b.x = faces[ib].x;
            roi_b.y = faces[ib].y;
            roi_b.width = (faces[ib].width);
            roi_b.height = (faces[ib].height);

            ab = roi_b.width * roi_b.height; // Get the area of biggest element, at beginning it is same as "current" element

            if (ac > ab) {

                ib = ic;
                roi_b.x = faces[ib].x;
                roi_b.y = faces[ib].y;
                roi_b.width = (faces[ib].width);
                roi_b.height = (faces[ib].height);
            }

            crop = frame(roi_b);
            cv::resize(crop, res, Size(img_width, img_height), 0, 0, INTER_LINEAR); // This will be needed later while saving images
            cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale

            Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on main window - live stream from camera
            Point pt2((faces[ic].x + faces[ic].width), (faces[ic].y + faces[ic].height));
            //rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);

            /* Calculate the position for annotated text */
            int pos_x = std::max(roi_b.tl().x - 10, 0);
            int pos_y = std::max(roi_b.tl().y - 10, 0);

            if(createdFisher) {
                Fisher_prediction = Fisher_model->predict(crop);
                QString Fisher_qs = QString::number(Fisher_prediction);
                /* Create the text we will annotate the box with */
                string Fisher_text = format("Prediction Fisherfaces = %d", Fisher_prediction);
                putText(frame, Fisher_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
                /* Draw a green rectangle around the detected face */
                rectangle(frame, pt1, pt2, MATCH_COLOR, 1);
                ui.txtConsole->appendPlainText(QString("Fisherfaces - " + Fisher_qs));
            }
            if(createdEigen) {
                Eigen_prediction = Eigen_model->predict(crop);
                QString Eigen_qs = QString::number(Eigen_prediction);
                /* Create the text we will annotate the box with */
                string Eigen_text = format("Prediction Eigenfaces = %d", Eigen_prediction);
                putText(frame, Eigen_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
                /* Draw a green rectangle around the detected face */
                rectangle(frame, pt1, pt2, MATCH_COLOR, 1);
                ui.txtConsole->appendPlainText(QString("Eigenfaces - " + Eigen_qs));
            }
            if(createdLBPH) {
                LBPH_prediction = LBPH_model->predict(crop);
                QString LBPH_qs = QString::number(LBPH_prediction);
                /* Create the text we will annotate the box with */
                string LBPH_text = format("Prediction LBPH = %d", LBPH_prediction);
                putText(frame, LBPH_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
                /* Draw a green rectangle around the detected face */
                rectangle(frame, pt1, pt2, MATCH_COLOR, 1);
                ui.txtConsole->appendPlainText(QString("Local Binary Patterns Histogram - " + LBPH_qs));
            }
        }

        putText(frame, text, cvPoint(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
        imshow("original", frame);

        if (!crop.empty()) {
            imshow("detected", crop);
        }
        else
            destroyWindow("detected");
    }

    int c = waitKey(0);

All the necessary #includes, the variables, and the classifiers are initialized at the start of the program.
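
For context, the cascade and the three recognition models are created at start-up roughly like this (a minimal sketch assuming OpenCV 2.4's contrib FaceRecognizer API; the cascade file name, the initRecognizers helper, and the training call are only illustrative):

    #include <vector>
    #include <opencv2/objdetect/objdetect.hpp>  // CascadeClassifier
    #include <opencv2/contrib/contrib.hpp>      // FaceRecognizer factories (OpenCV 2.4.x)

    using namespace cv;

    // Globals used by the detection/recognition code above
    CascadeClassifier face_cascade;
    Ptr<FaceRecognizer> Fisher_model, Eigen_model, LBPH_model;
    bool createdFisher = false, createdEigen = false, createdLBPH = false;

    // images/labels hold the 200x200 grayscale database images and their person IDs
    void initRecognizers(const std::vector<Mat>& images, const std::vector<int>& labels)
    {
        face_cascade.load("haarcascade_frontalface_alt.xml"); // illustrative file name

        Fisher_model = createFisherFaceRecognizer();
        Eigen_model  = createEigenFaceRecognizer();
        LBPH_model   = createLBPHFaceRecognizer();

        // Train each model on the database images
        Fisher_model->train(images, labels);
        Eigen_model->train(images, labels);
        LBPH_model->train(images, labels);

        createdFisher = createdEigen = createdLBPH = true;
    }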

1 Answer:

Answer 0 (score: 0)

What I was doing wrong was passing the detected face to the face recognizers without resizing it (the images in my database are 200x200 px), so the algorithms could not perform recognition on an image with a higher resolution than the ones in the database.
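
A minimal sketch of the corrected prediction step (200x200 is the size of my database images; the variable names follow the code in the question):

    // Crop the detected face, convert it to grayscale and resize it to the
    // same dimensions as the training images (200x200 here) before predicting
    Mat face = frame(roi_b);
    Mat face_gray, face_resized;
    cvtColor(face, face_gray, CV_BGR2GRAY);
    cv::resize(face_gray, face_resized, Size(200, 200), 0, 0, INTER_LINEAR);

    if (createdFisher)
        Fisher_prediction = Fisher_model->predict(face_resized);
    if (createdEigen)
        Eigen_prediction = Eigen_model->predict(face_resized);
    if (createdLBPH)
        LBPH_prediction = LBPH_model->predict(face_resized);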