SolvePnP gives OpenCV Error: Assertion failed

Asked: 2016-08-12 15:54:38

Tags: opencv

I am getting this error:

  

    error: (-215) npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) in function solvePnP

It happens while the code is running in a loop.

Below is the relevant part of my code. When I run it, it works perfectly for a while and then throws the above error. I tried to manage it with a flag, but it still raises the error after some time rather than immediately. Please help me find a way to solve this.
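From the assertion text I understand that solvePnP requires imagePoints and objectPoints to hold the same number of points. Here is a minimal standalone sketch (separate from my project code, with made-up values) that reproduces the same assertion:

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/core/core.hpp"
#include <vector>

int main()
{
    // 4 object points but only 3 image points: the counts differ,
    // so cv::solvePnP fails the npoints assertion quoted above.
    std::vector<cv::Point3f> objectPoints;
    objectPoints.push_back(cv::Point3f(0.f, 0.f, 0.f));
    objectPoints.push_back(cv::Point3f(1.f, 0.f, 0.f));
    objectPoints.push_back(cv::Point3f(1.f, 1.f, 0.f));
    objectPoints.push_back(cv::Point3f(0.f, 1.f, 0.f));

    std::vector<cv::Point2f> imagePoints;
    imagePoints.push_back(cv::Point2f(10.f, 10.f));
    imagePoints.push_back(cv::Point2f(100.f, 10.f));
    imagePoints.push_back(cv::Point2f(100.f, 100.f));   // one point short

    cv::Mat K = cv::Mat::eye(3, 3, CV_32FC1);
    cv::Mat dist = cv::Mat::zeros(4, 1, CV_64FC1);
    cv::Mat rvec, tvec;
    cv::solvePnP(objectPoints, imagePoints, K, dist, rvec, tvec);  // throws here
    return 0;
}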

Pose.h

#ifndef POSE_H
#define POSE_H

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/core/core.hpp"
#include <cv.h>
#include <iostream>
#include <math.h>
#include <stdlib.h>

using namespace std;
using namespace cv;

class Pose
{
public:
    Pose();

    double* contour_detection(Mat threshold, Mat src);
    double* pose_estimation(const std::vector<cv::Point3f> &objectPoints,
                            const std::vector<cv::Point2f> &imagePoints);

private:
    vector<Point3f> objectPoints;
    Mat cameraIntrinsicParams;
    Mat distCoeffs;
};

#endif // POSE_H

Pose.cpp

#include "Pose.h"
Pose::Pose()
{

objectPoints.push_back(cv::Point3f(240.f,680.f,715.f));
objectPoints.push_back(cv::Point3f(240.f,680.f,215.f));
objectPoints.push_back(cv::Point3f(615.f,680.f,215.f));
objectPoints.push_back(cv::Point3f(615.f,680.f,715.f));

cameraIntrinsicParams=Mat(Size(3,3),CV_32FC1);       
cameraIntrinsicParams.at<float>(0,0)=  727.957294f;
cameraIntrinsicParams.at<float>(0,1)= 0 ;
cameraIntrinsicParams.at<float>(0,2)= 320.f;//304.729528f;
cameraIntrinsicParams.at<float>(1,0)= 0 ;
cameraIntrinsicParams.at<float>(1,1)=  726.232798f;
cameraIntrinsicParams.at<float>(1,2)= 240.f;//235.217420f;
cameraIntrinsicParams.at<float>(2,0)= 0 ;
cameraIntrinsicParams.at<float>(2,1)= 0 ;
cameraIntrinsicParams.at<float>(2,2)= 1 ;


distCoeffs=Mat::zeros(Size(4,1),CV_64FC1); 

}

double* Pose::contour_detection(Mat threshold, Mat src)
{
    int flag = 0;
    int largest_area = 0;
    int largest_contour_index = 0;
    int lowThreshold = 50;              // Canny low threshold (value assumed; must be initialized before use)
    int ratio = 3;
    int kernel_size = 3;
    int const max_lowThreshold = 100;   // (unused)

    vector<Point> Points;               // (unused)
    vector< vector<Point> > contours;   // vector for storing contours
    vector<Vec4i> hierarchy;
    Mat detected_edges;
    blur(threshold, detected_edges, Size(3,3));

    // Canny detector
    Canny(detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size);

    // Find the contours in the edge image
    vector< vector<Point> > contours0;
    findContours(detected_edges, contours0, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    contours.resize(contours0.size());
    std::vector<Point2f> imagePoints;
    std::vector<Point2f> preciseCorners(4);

    for (size_t k = 0; k < contours0.size(); k++)
    {
        double a = contourArea(contours0[k], false);  // area of the contour
        if (a > largest_area)
        {
            largest_area = a;
            largest_contour_index = k;
            approxPolyDP(Mat(contours0[largest_contour_index]), contours[largest_contour_index], 9, true);
            if (contours[k].size() == 4)
            {
                for (int c = 0; c < 4; c++)
                {
                    preciseCorners[c] = contours[largest_contour_index][c];
                }

                // Refine the four corner locations to sub-pixel accuracy
                cv::cornerSubPix(threshold, preciseCorners, Size(5,5), Size(-1,-1),
                                 TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001));

                for (int c = 0; c < 4; c++)
                {
                    contours[largest_contour_index][c] = preciseCorners[c];
                }

                imagePoints.push_back(Point2f(contours[largest_contour_index][0].x, contours[largest_contour_index][0].y));
                imagePoints.push_back(Point2f(contours[largest_contour_index][1].x, contours[largest_contour_index][1].y));
                imagePoints.push_back(Point2f(contours[largest_contour_index][2].x, contours[largest_contour_index][2].y));
                imagePoints.push_back(Point2f(contours[largest_contour_index][3].x, contours[largest_contour_index][3].y));

                // Draw the detected quadrilateral
                Point P1 = contours[largest_contour_index][0];
                Point P2 = contours[largest_contour_index][1];
                Point P3 = contours[largest_contour_index][2];
                Point P4 = contours[largest_contour_index][3];

                line(src, P1, P2, Scalar(0,255,0), 1, CV_AA, 0);
                line(src, P2, P3, Scalar(0,255,0), 1, CV_AA, 0);
                line(src, P3, P4, Scalar(0,255,0), 1, CV_AA, 0);
                line(src, P4, P1, Scalar(0,255,0), 1, CV_AA, 0);

                flag = 1;
            }
        }
    }

    // Pose estimation
    if (flag == 1)
    {
        double *location = pose_estimation(objectPoints, imagePoints);
        flag = 0;
        return location;
    }

    return NULL;  // no four-corner contour found in this frame
}

double* Pose::pose_estimation(const std::vector<cv::Point3f> &objectPoints,
                              const std::vector<cv::Point2f> &imagePoints)
{
    static double position[4];

    Mat rvec, tvec;
    bool useExtrinsicGuess = false;
    int method = CV_ITERATIVE;

    // The assertion quoted above is raised here when imagePoints does not
    // contain exactly as many points as objectPoints.
    cv::solvePnP(objectPoints, imagePoints, cameraIntrinsicParams, distCoeffs,
                 rvec, tvec, useExtrinsicGuess, method);

    // Convert the rotation vector into a 3x3 rotation matrix
    Mat distant = Mat(Size(3,3), CV_64FC1);
    Mat jacobian = Mat(Size(3,1), CV_32FC1);
    Rodrigues(rvec, distant, jacobian);

    // Reprojection error: sum of distances between detected and reprojected corners
    Mat J;
    vector<Point2f> p(4);
    projectPoints(objectPoints, rvec, tvec, cameraIntrinsicParams, distCoeffs, p, J);

    float sum = 0.f;
    for (size_t i = 0; i < p.size(); i++)
    {
        sum += sqrt((p[i].x - imagePoints[i].x) * (p[i].x - imagePoints[i].x)
                  + (p[i].y - imagePoints[i].y) * (p[i].y - imagePoints[i].y));
    }
    cout << "sum=" << sum << endl;

    // Camera position in world coordinates: T = -R^T * tvec
    Mat R = distant.t();
    Mat T = -R * tvec;
    position[0] = T.at<double>(0,0);
    position[1] = T.at<double>(1,0);
    position[2] = T.at<double>(2,0);

    return position;
}
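While debugging I also noticed (my own guess, not a confirmed cause) that if more than one contour passes the four-corner test in a single frame, imagePoints accumulates 8 or 12 points while objectPoints always holds 4, which would match the assertion. A small helper like the sketch below (the name canSolvePnP is mine) could guard the call in contour_detection so a mismatch is skipped instead of crashing:

#include "opencv2/core/core.hpp"
#include <vector>

// Hypothetical guard: true only when the 2D-3D correspondences are usable by
// cv::solvePnP, i.e. non-empty and with matching point counts.
static bool canSolvePnP(const std::vector<cv::Point3f> &objectPoints,
                        const std::vector<cv::Point2f> &imagePoints)
{
    return !objectPoints.empty() && objectPoints.size() == imagePoints.size();
}

In contour_detection I would then write if (flag == 1 && canSolvePnP(objectPoints, imagePoints)) before calling pose_estimation.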

0 Answers:

No answers yet.