以下是我正在使用的代码:
int main(int argc, char** argv)
{
std::vector<KeyPoint> kp;
Mat frame;
Mat frame_un;
Mat camera_matrix =(Mat_<double>(3,3) << 7.4191833420713715e+02, 0.0, 320.0, 0.0, 7.4191833420713715e+02, 240.0, 0.0, 0.0,
1.0);
Mat distortion_coefficients =(Mat_<double>(5,1) << -1.5271566741564191e-01, 1.5488166759164064e+00, 0.0, 0.0,
-7.6517765981508861e+00);
VideoCapture cap(1); // open the default camera
if (!cap.isOpened()) // check if we succeeded
return -1;
for (;;)
{
cap.read(frame); // get a new frame from camera
if (frame.empty())
continue;
undistort(frame, frame_un, camera_matrix, distortion_coefficients);
// Default parameters of ORB
int nfeatures=30;
float scaleFactor=1.2f;
int nlevels=8;
int edgeThreshold=31; // Changed default (31);
int firstLevel=0;
int WTA_K=2;
int scoreType=ORB::HARRIS_SCORE;
int patchSize=31;
int fastThreshold=20;
Ptr<ORB> detector = ORB::create(
nfeatures,
scaleFactor,
nlevels,
edgeThreshold,
firstLevel,
WTA_K,
scoreType,
patchSize,
fastThreshold );
detector->detect(frame_un, kp);
std::cout << "Found " << kp.size() << " Keypoints " << std::endl;
for(int i=0; i<=kp.size();i++)
{
int x = kp[i].pt.x;
int y = kp[i].pt.y;
cout << "Point "<<i<<" Xpos = " << x << " Point "<<i<< " Ypos = " << y << "\n";
}
Mat out;
//drawKeypoints(img, kp, out, Scalar::all(255));
drawKeypoints(frame_un, kp, out, Scalar::all (255));
namedWindow("Kpts", WINDOW_FREERATIO);
imshow("Kpts", out);
waitKey(0);
destroyWindow("Kpts");
}
// waitKey(0);
return 0;
}
答案 0（得分：0）
只需将相机坐标系映射到机器人的基座坐标系，并以基座坐标系作为原点。在此之前，请确保您已经准确地标定了您的相机（视觉系统）。