I am trying to calibrate my two Point Grey (Blackfly) cameras for stereo vision. I am using the stereo_calib.cpp tutorial that ships with OpenCV (code below). For some reason I am getting very bad results (RMS error = 4.49756, average reprojection error = 8.06533) and all of my rectified images come out grey. I think my problem is that I am not choosing the right flags for the stereoCalibrate() function, but I have tried many different combinations, and at best the rectified images are still badly distorted.
Here is a link to the images I am using and a sample rectified pair: https://www.dropbox.com/sh/5wp31o8xcn6vmjl/AAADAfGiaT_NyXEB3zMpcEvVa#/
Any help would be greatly appreciated!
static void
StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }
    bool displayCorners = true;//false;//true;
    const int maxScale = 1;//2;
    const float squareSize = 1.8;
    //const float squareSize = 1.f; // Set this to your actual square size
    // ARRAY AND VECTOR STORAGE:
    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;
    //int i, j, k, nimages = (int)imagelist.size()/2;
    int i, j, k, nimages = (int)imagelist.size();
    cout << "nimages: " << nimages << "\n";
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;
    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty()) {
                break;
            }
            if( imageSize == Size() ) {
                imageSize = img.size();
            } else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                found = findChessboardCorners(timg, boardSize, corners,
                    CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 1280./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found ) {
                cout << "!found\n";
                break;
            }
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
                    30, 0.01));
        }
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);
    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )
            for( k = 0; k < boardSize.width; k++ )
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }
    cout << "Running stereo calibration ...\n";
    Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    Mat R, T, E, F;
    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    //TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5));
                    TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                    CV_CALIB_FIX_ASPECT_RATIO +
                    //CV_CALIB_ZERO_TANGENT_DIST +
                    CV_CALIB_SAME_FOCAL_LENGTH +
                    CV_CALIB_RATIONAL_MODEL +
                    //CV_CALIB_FIX_K3);
                    //CV_CALIB_FIX_K2);
                    CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
                    //CV_CALIB_FIX_K1 + CV_CALIB_FIX_K2 + CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
    cout << "done with RMS error=" << rms << endl;
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " << err/npoints << endl;
    // save intrinsic parameters
    FileStorage fs("intrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";
    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];
    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  //CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
                  CALIB_ZERO_DISPARITY, 0, imageSize, &validRoi[0], &validRoi[1]);
    fs.open("extrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";
    // OpenCV can handle left-right
    // or up-down camera arrangements
    //bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
    bool isVerticalStereo = false;
    // COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;
    Mat rmap[2][2];
    // IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // we already computed everything
    }
    // OR ELSE HARTLEY'S METHOD
    else
        // use intrinsic parameters of each camera, but
        // compute the rectification transformation directly
        // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);
        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
    }
    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }
    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
            if( useCalibrated )
            {
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
            }
        }
        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}
Answer 0 (score: 3)
First of all, about the calibration images: I can see a few points there that could lead to a better calibration. There are more detailed recommendations on how to achieve a good calibration in this SO answer.
Now, about the stereo calibration itself. The best way I have found to achieve a good calibration is to calibrate each camera's intrinsics separately (using the calibrateCamera function) and then use those intrinsics when calibrating the stereo pair (using stereoCalibrate). Take a look at the stereoCalibrate flags to see how to do this.
Apart from that, about the flags in the stereoCalibrate function: note that if you calibrate each camera independently and then pass in those intrinsics, there are different levels at which stereoCalibrate can use that data. CV_CALIB_FIX_INTRINSIC keeps the camera matrices and distortion coefficients fixed, while CV_CALIB_USE_INTRINSIC_GUESS treats them only as an initial guess and refines them further during the stereo optimization.
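To make that two-stage approach concrete, here is a minimal sketch written against the same OpenCV 2.4-style API and the objectPoints / imagePoints / cameraMatrix / distCoeffs variables already set up in the question's StereoCalib(); treat it as an outline under those assumptions rather than a drop-in replacement:

    // Stage 1: calibrate each camera on its own.
    vector<Mat> rvecs, tvecs;   // per-view poses from the mono calibrations (simply overwritten by the second call)
    double rms0 = calibrateCamera(objectPoints, imagePoints[0], imageSize,
                                  cameraMatrix[0], distCoeffs[0], rvecs, tvecs);
    double rms1 = calibrateCamera(objectPoints, imagePoints[1], imageSize,
                                  cameraMatrix[1], distCoeffs[1], rvecs, tvecs);
    cout << "per-camera RMS: " << rms0 << " / " << rms1 << endl;

    // Stage 2: estimate only the relative pose, keeping the mono intrinsics fixed.
    Mat R, T, E, F;
    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                                 cameraMatrix[0], distCoeffs[0],
                                 cameraMatrix[1], distCoeffs[1],
                                 imageSize, R, T, E, F,
                                 TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                                 CV_CALIB_FIX_INTRINSIC); // or CV_CALIB_USE_INTRINSIC_GUESS to let it refine them
    cout << "stereo RMS: " << rms << endl;

Checking the per-camera RMS values first also tells you whether one of the mono calibrations is already bad before the stereo step can hide it.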
Answer 1 (score: 1)
I ran into the same problem a few weeks ago: I tried nearly 50 stereo image samples, but the rectified images came out blank. So I decided to write my own implementation of the stereo calibration code that works in real time.
The code detects the chessboard corners in real time, saves only those frames where the corners were found in both the left and right images, and then runs the stereo calibration code on them.
I hope this helps you and someone else in the future.
Source code: https://github.com/upperwal/opencv/blob/master/samples/cpp/stereo_calib.cpp
Demo video: https://www.youtube.com/watch?v=kizO_s-YUDU
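Below is a minimal sketch of that kind of capture loop, just to illustrate the idea (it is not the linked implementation; the camera indices, the 9x6 inner-corner count and the output file names are assumptions): it grabs a frame from each camera, saves the pair only when the chessboard is found in both views, and stops once enough pairs have been collected.

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <vector>

int main()
{
    cv::VideoCapture capL(0), capR(1);        // assumed camera indices
    cv::Size boardSize(9, 6);                 // assumed inner-corner count
    int pairs = 0;
    cv::Mat left, right;
    while (capL.read(left) && capR.read(right))
    {
        std::vector<cv::Point2f> cornersL, cornersR;
        bool okL = cv::findChessboardCorners(left,  boardSize, cornersL,
                       cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK);
        bool okR = cv::findChessboardCorners(right, boardSize, cornersR,
                       cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK);
        if (okL && okR)                       // keep the pair only if both views see the board
        {
            char name[64];
            sprintf(name, "left%02d.jpg", pairs);  cv::imwrite(name, left);
            sprintf(name, "right%02d.jpg", pairs); cv::imwrite(name, right);
            pairs++;
        }
        cv::drawChessboardCorners(left, boardSize, cornersL, okL);
        cv::imshow("left", left);
        if ((char)cv::waitKey(30) == 27 || pairs >= 30)   // stop on ESC or after 30 pairs
            break;
    }
    return 0;
}

The saved left*/right* pairs can then be fed into a stereo calibration routine such as the one in the question.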
All the best.
Answer 2 (score: 0)
I had a similar problem, so I re-edited the code. Please make sure you have a sufficient number of images taken at different depths from the camera and in different orientations; this will give you a lower reprojection error.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include<iostream>
int main()
{
    const int CHESSBOARD_WIDTH = 9;  //input width of chessboard
    const int CHESSBOARD_HEIGHT = 6; //input height of chessboard
    const float squareSize = 3.96;   //input size of a side of a single square in chessboard
    cv::Size corner=cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT);
    int counter =30;
    int nimages=24;
    cv::Size imageSize;
    enum{capturing=0,calibrated=1};
    int mode=capturing;
    char leftfilename[100];
    char rightfilename[100];
    std::vector<cv::Mat> imagePoints1;
    std::vector<cv::Mat> imagePoints2;
    std::vector<std::vector<cv::Point3f>> objectPoints;
    bool found1=false;
    bool found2=false;
    int counter2=0;
    cv::Mat pointBuf1=cv::Mat::zeros(54,2,CV_32FC1);
    cv::Mat pointBuf2=cv::Mat::zeros(54,2,CV_32FC1);
    for(int i=1;i<=counter;i++)
    {
        sprintf(leftfilename,"newleftcheck%d.jpg",i);
        sprintf(rightfilename,"newrightcheck%d.jpg",i);
        // const int CHESSBOARD_INTERSECTION_COUNT = CHESSBOARD_WIDTH * CHESSBOARD_HEIGHT;
        cv::Mat imgleft_frame=cv::imread(leftfilename);
        cv::Mat imgright_frame=cv::imread(rightfilename);
        //cv::Mat imgleft_frame =cv::Mat(480,640,CV_8UC4,s.leftBuffer,4*imgWidth*sizeof(unsigned char));
        //cv::Mat imgright_frame =cv::Mat(480,640,CV_8UC4,s.rightBuffer,4*imgWidth*sizeof(unsigned char));
        imageSize=imgleft_frame.size();
        found1 = findChessboardCorners(imgleft_frame, corner,pointBuf1,cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);
        found2 = findChessboardCorners(imgright_frame, cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT),pointBuf2,cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);
        if(found1)
        {
            cv::Mat gray_image1;
            cvtColor(imgleft_frame,gray_image1,cv::COLOR_BGR2GRAY); // imread loads 3-channel BGR, so BGR2GRAY (not BGRA2GRAY)
            cornerSubPix( gray_image1, pointBuf1, cv::Size(11,11),cv::Size(-1,-1), cv::TermCriteria(cv::TermCriteria::EPS+cv::TermCriteria::MAX_ITER, 30, 0.1 ));
            drawChessboardCorners( imgleft_frame,cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT), pointBuf1, found1 );
        }
        if(found2)
        {
            cv::Mat gray_image2;
            cvtColor(imgright_frame,gray_image2,cv::COLOR_BGR2GRAY); // imread loads 3-channel BGR, so BGR2GRAY (not BGRA2GRAY)
            cornerSubPix( gray_image2, pointBuf2, cv::Size(11,11),cv::Size(-1,-1), cv::TermCriteria(cv::TermCriteria::EPS+cv::TermCriteria::MAX_ITER, 30, 0.1 ));
            drawChessboardCorners( imgright_frame,cv::Size(CHESSBOARD_WIDTH,CHESSBOARD_HEIGHT), pointBuf2, found2 );
        }
        if(found1&&found2)
        {
            imagePoints1.push_back(pointBuf1.clone()); // clone so each view keeps its own copy of the corner buffer
            imagePoints2.push_back(pointBuf2.clone());
            //sprintf(leftfilename,"newleftcheck%d.jpg",s.counter);
            //sprintf(rightfilename,"newrightcheck%d.jpg",s.counter);
            //cv::imwrite(leftfilename,imgleft_frame);
            //cv::imwrite(rightfilename,imgright_frame);
            counter2=counter2+1;
            std::cout<<counter2<<std::endl;
        }
        nimages=counter2;
        objectPoints.resize(nimages);
        std::cout<<"countervalue"<<i<<std::endl;
    }
    for(int i = 0; i <nimages; i++ )
    {
        for( int j = 0; j < CHESSBOARD_HEIGHT; j++ )
            for( int k = 0; k < CHESSBOARD_WIDTH; k++ )
                objectPoints[i].push_back(cv::Point3f(j*squareSize, k*squareSize, 0));
    }
    std::cout<<"check1"<<std::endl;
    cv::Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = cv::Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = cv::Mat::eye(3, 3, CV_64F);
    cv::Mat R, T, E, F;
    std::cout<<objectPoints.size()<<std::endl;
    std::cout<<imagePoints1.size()<<std::endl;
    if(imagePoints1.size()==imagePoints2.size())
        std::cout<<"samesize"<<std::endl;
    if(imagePoints1.size()>=nimages )
    {
        std::cout<<"check2"<<std::endl;
        double rms = stereoCalibrate( objectPoints, imagePoints1, imagePoints2,cameraMatrix[0], distCoeffs[0],
                        cameraMatrix[1], distCoeffs[1],imageSize, R, T, E, F,
                        cv::CALIB_FIX_ASPECT_RATIO +cv::CALIB_ZERO_TANGENT_DIST +
                        cv::CALIB_SAME_FOCAL_LENGTH +cv::CALIB_RATIONAL_MODEL +
                        cv::CALIB_FIX_K3 +cv::CALIB_FIX_K4 + cv::CALIB_FIX_K5,
                        cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, 1e-5) );
        std::cout<<"check3"<<std::endl;
        std::cout << "done with RMS error=" << rms << std::endl;
        mode=calibrated;
        std::cout<<"calibrated"<<std::endl;
    }
    if(mode==calibrated)
    {
        double err = 0;
        int npoints = 0;
        std::vector<cv::Vec3f> lines[2];
        for(int i = 0; i < nimages; i++ )
        {
            int npt = (int)imagePoints1[i].rows;
            std::cout<<npt<<std::endl;
            cv::Mat imgpt1;
            cv::Mat imgpt2;
            // for(int k = 0; k < 2; k++ )
            imgpt1 = cv::Mat(imagePoints1[i]);
            undistortPoints(imgpt1, imgpt1, cameraMatrix[0], distCoeffs[0], cv::Mat(), cameraMatrix[0]);
            computeCorrespondEpilines(imgpt1, 1, F, lines[0]);
            imgpt2 = cv::Mat(imagePoints2[i]);
            undistortPoints(imgpt2, imgpt2, cameraMatrix[1], distCoeffs[1], cv::Mat(), cameraMatrix[1]);
            computeCorrespondEpilines(imgpt2, 2, F, lines[1]);
            std::cout<<"checksdcdufb"<<std::endl;
            //std::cout<<"imagepoint"<<imagePoints1[1].at<unsigned int>(1,1)<<std::endl;
            /* for(int j = 0; j < npt; j++ )
            {
                double errij = fabs(imagePoints1[i].at<double>(j,0) *lines[1][j][0] +imagePoints1[i].at<double>(j,1)*lines[1][j][1] + lines[1][j][2]) +fabs(imagePoints2[i].at<double>(j,0)*lines[0][j][0] +
                    imagePoints2[i].at<double>(j,1)*lines[0][j][1] + lines[0][j][2]);
                err += errij;
            }
            npoints += npt;
        }*/
            std::cout<<"check8"<<std::endl;
            cv::FileStorage fs("intrinsics.xml", cv::FileStorage::WRITE);
            if( fs.isOpened() )
            {
                fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
                    "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
                fs.release();
            }
            else
                std::cout << "Error: can not save the intrinsic parameters\n";
            cv::Mat R1, R2, P1, P2, Q;
            cv::Rect validRoi[2];
            stereoRectify(cameraMatrix[0], distCoeffs[0],
                cameraMatrix[1], distCoeffs[1],
                imageSize, R, T, R1, R2, P1, P2, Q,
                cv::CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
            fs.open("extrinsics.xml", cv::FileStorage::WRITE);
            if( fs.isOpened() )
            {
                fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
                fs.release();
            }
            else
                std::cout << "Error: can not save the intrinsic parameters\n";
        }
        //std::cout << "average reprojection err = " << err/npoints <<std::endl;
    }
    return 0;
}