When I try to run this, I keep getting an "Assertion failed (0 <= i && i < (int)vv.size()) in cv::_InputArray::getMat, file matrix.cpp" error from projectPoints. I have checked everything I can think of, but I cannot figure out what is wrong. My guess is that something went wrong when calibrating the camera, but I do not know how to track down the exact error. Here is my code, thanks.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdio>
#include <windows.h>   // Sleep()

using namespace cv;
using namespace std;
std::vector<cv::Point3f> Generate3DPoints()
{
    std::vector<cv::Point3f> points;
    float x, y, z;

    x = .5; y = .5; z = .5;
    points.push_back(cv::Point3f(x, y, z));
    x = 0;  y = 0;  z = 0;
    points.push_back(cv::Point3f(x, y, z));
    x = -0; y = 0;  z = .5;
    points.push_back(cv::Point3f(x, y, z));
    x = 0;  y = .5; z = .5;
    points.push_back(cv::Point3f(x, y, z));
    x = 0;  y = -.5; z = 0;
    points.push_back(cv::Point3f(x, y, z));
    x = .5; y = 0;  z = .5;
    points.push_back(cv::Point3f(x, y, z));
    x = .5; y = 0;  z = 0;
    points.push_back(cv::Point3f(x, y, z));

    /*
    for(unsigned int i = 0; i < points.size(); ++i)
    {
        std::cout << points[i] << std::endl;
    }
    */

    return points;
}
int main()
{
    int numBoards = 4;
    int numCornersHor = 6;
    int numCornersVer = 9;
    int numSquares = numCornersHor * numCornersVer;
    Size board_sz = Size(numCornersHor, numCornersVer);
    VideoCapture capture = VideoCapture(0);

    vector<vector<Point3f>> object_points;
    vector<vector<Point2f>> imagePoints;
    vector<vector<Point2f>> image_points;
    vector<Point3f> objectPoints1 = Generate3DPoints();
    vector<Point2f> corners;
    int successes = 0;

    Mat image;
    Mat gray_image;
    //capture >> image;
    Sleep(1000);
    capture.read(image);
    imshow("Welcome", image);

    // Chessboard reference points for a single view
    vector<Point3f> obj;
    for(int j = 0; j < numSquares; j++)
        obj.push_back(Point3f(j / numCornersHor, j % numCornersHor, 0.0f));

    // Grab frames until enough boards have been collected
    while(successes < numBoards)
    {
        cvtColor(image, gray_image, CV_BGR2GRAY);
        bool found = findChessboardCorners(image, board_sz, corners,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(found)
        {
            cornerSubPix(gray_image, corners, Size(11, 11), Size(-1, -1),
                         TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray_image, board_sz, corners, found);
            cout << "SUCCESS\n";
        }
        else
            cout << "FAIL\n";

        imshow("win1", image);
        imshow("win2", gray_image);
        capture.read(image);

        image_points.push_back(corners);
        object_points.push_back(obj);
        printf("Snap stored!\n");

        char continues;
        cout << "Press c to continue\n";
        cin >> continues;
        successes++;
        //if(successes>=numBoards)
        //    break;
    }

    Mat intrinsic = Mat(3, 3, CV_32FC1);
    Mat distCoeffs;
    vector<Mat> rvecs;
    vector<Mat> tvecs;
    intrinsic.ptr<float>(0)[0] = 1;
    intrinsic.ptr<float>(1)[1] = 1;

    calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs);
    cout << "Done\n";

    projectPoints(objectPoints1, rvecs, tvecs, intrinsic, distCoeffs, imagePoints);
    cout << "done2";

    return 0;
}
Answer 0 (score: 2)
You are passing rvecs and tvecs to projectPoints, but that function expects a Mat, not a vector<Mat>.
Basically, calibrateCamera uses several images of the chessboard to estimate the camera's intrinsic matrix and distortion coefficients. It also returns one set of extrinsic parameters (a rotation plus a translation) per image you used; these extrinsics are stored in rvecs and tvecs.
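For reference, the relevant parts of the two signatures in the OpenCV C++ API look roughly like this (remaining default arguments omitted); note that calibrateCamera fills arrays of arrays, while projectPoints takes a single rvec/tvec pair:

double calibrateCamera(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
                       Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
                       OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs);

void projectPoints(InputArray objectPoints, InputArray rvec, InputArray tvec,
                   InputArray cameraMatrix, InputArray distCoeffs, OutputArray imagePoints);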
However, you only want to project the 3D points into one of those images, so you should pick the set of extrinsic parameters you need (i.e. rvecs[i] and tvecs[i], where i is the index of the image into which you want to project the object) and call projectPoints with just that pair, not with the whole vector<Mat>.
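A minimal sketch of the corrected call, assuming you want the pose of the first calibration view; the output is a single list of 2D points, so a plain vector<Point2f> (here a new variable, projectedPoints) is the natural container:

vector<Point2f> projectedPoints;                  // one 2D point per 3D input point
projectPoints(objectPoints1, rvecs[0], tvecs[0],  // extrinsics of view 0 only
              intrinsic, distCoeffs, projectedPoints);
for (size_t k = 0; k < projectedPoints.size(); ++k)
    cout << projectedPoints[k] << endl;

The same call works with rvecs[i] and tvecs[i] for any other view i that was part of the calibration.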