Estimating Euler angles (camera pose) from camera images with the OpenCV library

Asked: 2015-03-17 21:14:14

Tags: java android opencv angle homography

I am developing an Android application in which I need to estimate the camera rotation in 3D online, using images from the camera and the OpenCV library. I would like to calculate the Euler angles.

I have read this and this page, and I can estimate the homography matrix as shown here.

My first question is: do I really need to know the camera intrinsic matrix from a camera calibration, or is the homography matrix (camera extrinsics) enough to estimate the Euler angles (pitch, roll, yaw)?

If the homography matrix is enough, how can I do it?

Sorry, I am a beginner with OpenCV and cannot decompose the homography 'Mat' into a rotation matrix and a translation matrix as described here. How can I implement the Euler angles on Android?

You can see my code below; it uses solvePnPRansac() and decomposeProjectionMatrix to calculate the Euler angles.

But it only returns a null vector, i.e. double[] eulerArray = {0,0,0}! Can somebody help me? What is wrong here? Thanks in advance for any reply!

public double[] findEulerAngles(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){

    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();


    List<DMatch> matchesList = matches.toList();
    List<KeyPoint> referenceKeypointsList = keypoints2.toList();
    List<KeyPoint> sceneKeypointsList = keypoints1.toList();
    // Calculate the max and min distances between keypoints.
    double maxDist = 0.0;
    double minDist = Double.MAX_VALUE;
    for(DMatch match : matchesList) {
        double dist = match.distance;
        if (dist < minDist) {
            minDist = dist;
        }
        if (dist > maxDist) {
            maxDist = dist;
        }
    }

    // Identify "good" keypoints based on match distance.
    List<Point3> goodReferencePointsList = new ArrayList<Point3>();
    ArrayList<Point> goodScenePointsList = new ArrayList<Point>();
    double maxGoodMatchDist = 1.75 * minDist;
    for(DMatch match : matchesList) {
        if (match.distance < maxGoodMatchDist) {
            // NOTE: queryIdx/trainIdx must correspond to the order in which
            // the descriptor sets were passed to the matcher.
            Point kk2 = k2[match.queryIdx].pt;
            Point kk1 = k1[match.trainIdx].pt;

            // The reference points are assumed to lie in the plane z = 0.
            Point3 point3 = new Point3(kk1.x, kk1.y, 0.0);
            goodReferencePointsList.add(point3);
            goodScenePointsList.add(kk2);
        }
    }


    if (goodReferencePointsList.size() < 4 || goodScenePointsList.size() < 4) {
        // There are too few good points to find the pose.
        return null;
    }

    MatOfPoint3f goodReferencePoints = new MatOfPoint3f();
    goodReferencePoints.fromList(goodReferencePointsList);
    MatOfPoint2f goodScenePoints = new MatOfPoint2f();
    goodScenePoints.fromList(goodScenePointsList);

    MatOfDouble mRMat = new MatOfDouble(3, 3, CvType.CV_32F);
    MatOfDouble mTVec = new MatOfDouble(3, 1, CvType.CV_32F);

    //TODO: replace this with the real intrinsic matrix from a camera calibration
    Mat intrinsics = Mat.eye(3, 3, CvType.CV_32F); // dummy camera matrix
    intrinsics.put(0, 0, 400);     // fx
    intrinsics.put(1, 1, 400);     // fy
    intrinsics.put(0, 2, 640 / 2); // cx
    intrinsics.put(1, 2, 480 / 2); // cy
    Calib3d.solvePnPRansac(goodReferencePoints, goodScenePoints, intrinsics, new MatOfDouble(), mRMat, mTVec);
    MatOfDouble rotCameraMatrix1 = new MatOfDouble(3, 1, CvType.CV_32F);
    double[] rVecArray = mRMat.toArray();
    // Calib3d.Rodrigues(mRMat, rotCameraMatrix1);
    double[] tVecArray = mTVec.toArray();

    MatOfDouble projMatrix = new MatOfDouble(3, 4, CvType.CV_32F); //projMatrix 3x4 input projection matrix P.
    projMatrix.put(0, 0, rVecArray[0]);
    projMatrix.put(0, 1, rVecArray[1]);
    projMatrix.put(0, 2, rVecArray[2]);
    projMatrix.put(0, 3, 0);
    projMatrix.put(1, 0, rVecArray[3]);
    projMatrix.put(1, 1, rVecArray[4]);
    projMatrix.put(1, 2, rVecArray[5]);
    projMatrix.put(1, 3, 0);
    projMatrix.put(2, 0, rVecArray[6]);
    projMatrix.put(2, 1, rVecArray[7]);
    projMatrix.put(2, 2, rVecArray[8]);
    projMatrix.put(2, 3, 0);

    MatOfDouble cameraMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //cameraMatrix Output 3x3 camera matrix K.
    MatOfDouble rotMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrix Output 3x3 external rotation matrix R.
    MatOfDouble transVect = new MatOfDouble(4, 1, CvType.CV_32F); //transVect Output 4x1 translation vector T.
    MatOfDouble rotMatrixX = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixX Optional 3x3 rotation matrix around the x-axis.
    MatOfDouble rotMatrixY = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixY Optional 3x3 rotation matrix around the y-axis.
    MatOfDouble rotMatrixZ = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixZ Optional 3x3 rotation matrix around the z-axis.
    MatOfDouble eulerAngles = new MatOfDouble(3, 1, CvType.CV_32F); //eulerAngles Optional three-element vector containing three Euler angles of rotation in degrees.

    Calib3d.decomposeProjectionMatrix( projMatrix,
            cameraMatrix,
            rotMatrix,
            transVect,
            rotMatrixX,
            rotMatrixY,
            rotMatrixZ,
            eulerAngles);

    double[] eulerArray = eulerAngles.toArray();

    return eulerArray;
}
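
Two likely problems in the snippet above: first, solvePnPRansac() fills mRMat with a 3x1 Rodrigues rotation vector, not a 3x3 rotation matrix, so rVecArray holds only three elements and the projection matrix is built from the wrong values. Second, new MatOfDouble(3, 4, CvType.CV_32F) does not call Mat(rows, cols, type) - MatOfDouble has no such constructor, so Java resolves to the varargs double... constructor and produces a 3x1 vector {3, 4, 5} instead of a 3x4 matrix. A minimal sketch of the corrected step, reusing goodReferencePoints, goodScenePoints and intrinsics from the code above (the names rVec, tVec and rotMat are mine):

    // solvePnPRansac outputs a 3x1 Rodrigues rotation vector and a 3x1 translation.
    Mat rVec = new Mat();
    Mat tVec = new Mat();
    Calib3d.solvePnPRansac(goodReferencePoints, goodScenePoints, intrinsics,
            new MatOfDouble(), rVec, tVec);

    // Convert the Rodrigues vector into a full 3x3 rotation matrix.
    Mat rotMat = new Mat();
    Calib3d.Rodrigues(rVec, rotMat);

    // Build the 3x4 projection matrix P = [R|t] that decomposeProjectionMatrix expects.
    Mat projMatrix = new Mat(3, 4, CvType.CV_64FC1);
    for (int row = 0; row < 3; row++) {
        for (int col = 0; col < 3; col++) {
            projMatrix.put(row, col, rotMat.get(row, col)[0]);
        }
        projMatrix.put(row, 3, tVec.get(row, 0)[0]);
    }

    // Only the Euler angles are of interest here; the other outputs are discarded.
    Mat eulerAngles = new Mat();
    Calib3d.decomposeProjectionMatrix(projMatrix, new Mat(), new Mat(), new Mat(),
            new Mat(), new Mat(), new Mat(), eulerAngles);

    double[] eulerArray = new double[3];
    eulerAngles.get(0, 0, eulerArray); // rotations about x, y and z, in degrees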

2 answers:

Answer 0 (score: 0)

A homography relates images of the same planar surface, so it only works if there is a dominant plane in the image and you can find enough feature points lying on that plane in both images and match them successfully. The minimum number of matches is four, and the math assumes the matches are 100% correct. With the help of robust estimation like RANSAC you can get a result even if some elements of your feature-match set are obvious mismatches or are not placed on the plane.

For the more general case of a set of matched features without the planarity assumption, you need to find an essential matrix. The exact definition of the matrix can be found here. In short, it works more or less like a homography - it relates corresponding points in two images. The minimum number of matches needed to compute an essential matrix is five. To get a result from such a minimal sample you need to be sure the established matches are 100% correct. Again, robust estimation can help if there are outliers in your set of correspondences - and with automatic feature detection and matching there usually are.

OpenCV 3.0 has a function for essential matrix computation, conveniently integrated with RANSAC robust estimation. The essential matrix can be decomposed into a rotation matrix and a translation vector as shown in the Wikipedia article I linked before. OpenCV 3.0 also has a readily available function for this.
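
A minimal sketch of that pipeline with the OpenCV 3.0 Java bindings (the helper name is mine, and the focal length and principal point are placeholder values, not real calibration data):

    private static void poseFromEssentialMat(MatOfPoint2f points1, MatOfPoint2f points2) {
        double focal = 400;             // placeholder focal length in pixels
        Point pp = new Point(320, 240); // placeholder principal point
        Mat mask = new Mat();           // inlier mask, filled by RANSAC

        // Five-point algorithm wrapped in RANSAC robust estimation.
        Mat E = Calib3d.findEssentialMat(points1, points2, focal, pp,
                Calib3d.RANSAC, 0.999, 1.0, mask);

        // Decompose E into a rotation matrix R and a translation vector t;
        // the cheirality check keeps only the physically possible solution.
        Mat R = new Mat();
        Mat t = new Mat();
        int inliers = Calib3d.recoverPose(E, points1, points2, R, t, focal, pp, mask);
    }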

Answer 1 (score: 0)

The following code works for me now - I have decomposed the Euler angles from the homography matrix! I get some values for pitch, roll and yaw, but I don't know whether they are correct. Does anyone have an idea how I can test them?!

private static MatOfDMatch filterMatchesByHomography(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){

    List<Point> lp1 = new ArrayList<Point>(500);
    List<Point> lp2 = new ArrayList<Point>(500);

    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();

    List<DMatch> matchesList = matches.toList();

    if (matchesList.size() < 4){
        MatOfDMatch mat = new MatOfDMatch();
        return mat;
    }

    // Add matches keypoints to new list to apply homography
    for(DMatch match : matchesList){
        Point kk1 = k1[match.queryIdx].pt;
        Point kk2 = k2[match.trainIdx].pt;
        lp1.add(kk1);
        lp2.add(kk2);
    }

    MatOfPoint2f srcPoints = new MatOfPoint2f(lp1.toArray(new Point[0]));
    MatOfPoint2f dstPoints  = new MatOfPoint2f(lp2.toArray(new Point[0]));

    //---------------------------------------

    Mat mask = new Mat();
    Mat homography = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, 0.2, mask); // Finds a perspective transformation between two planes (Calib3d.LMEDS would also work)

    Mat pose = cameraPoseFromHomography(homography);

    // Decompose the rotation part of the pose into Euler angles (in radians).
    // NOTE: in the usual aerospace convention atan2(r32, r33) is the roll (x-axis)
    // and atan2(-r31, ...) is the pitch (y-axis); the labels below follow the original post.
    pitch = Math.atan2(pose.get(2, 1)[0], pose.get(2, 2)[0]); // arctan2(r32, r33)
    roll = Math.atan2(-1*pose.get(2, 0)[0], Math.sqrt( Math.pow(pose.get(2, 1)[0], 2) + Math.pow(pose.get(2, 2)[0], 2)) ); // arctan2(-r31, sqrt(r32^2 + r33^2))
    yaw = Math.atan2(pose.get(1, 0)[0], pose.get(0, 0)[0]); // arctan2(r21, r11)

    List<DMatch> matches_homo = new ArrayList<DMatch>();
    int size = (int) mask.size().height;
    for(int i = 0; i < size; i++){          
        if ( mask.get(i, 0)[0] == 1){
            DMatch d = matchesList.get(i);
            matches_homo.add(d);
        }
    }

    MatOfDMatch mat = new MatOfDMatch();
    mat.fromList(matches_homo);
    return mat;
}

And here is my camera pose from the homography matrix (see also this page):

private static Mat cameraPoseFromHomography(Mat h) {
    //Log.d("DEBUG", "cameraPoseFromHomography: homography " + matToString(h));

    Mat pose = Mat.eye(3, 4, CvType.CV_32FC1);  // 3x4 matrix, the camera pose
    float norm1 = (float) Core.norm(h.col(0));
    float norm2 = (float) Core.norm(h.col(1));
    float tnorm = (norm1 + norm2) / 2.0f;       // Normalization value

    Mat normalizedTemp = new Mat();
    Core.normalize(h.col(0), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);
    normalizedTemp.copyTo(pose.col(0)); // Normalize the rotation, and copies the column to pose

    Core.normalize(h.col(1), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);    
    normalizedTemp.copyTo(pose.col(1));// Normalize the rotation and copies the column to pose

    Mat p3 = pose.col(0).cross(pose.col(1)); // Computes the cross-product of p1 and p2
    p3.copyTo(pose.col(2));// Third column is the crossproduct of columns one and two

    // The translation t is the last column of H, scaled by the normalization value.
    double[] buffer = new double[3];
    h.col(2).get(0, 0, buffer);
    pose.put(0, 3, buffer[0] / tnorm);  //vector t of [R|t] is the last column of pose
    pose.put(1, 3, buffer[1] / tnorm);
    pose.put(2, 3, buffer[2] / tnorm);

    return pose;
}
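
As for how to test the angles: one cheap sanity check is that the 3x3 rotation part of the pose must be approximately orthonormal, i.e. R * R^T ≈ I and det(R) ≈ +1. A minimal sketch (the helper name and tolerances are mine):

    private static boolean isValidRotation(Mat pose) {
        // Take the 3x3 rotation part R of the 3x4 pose [R|t].
        Mat r = new Mat();
        pose.submat(0, 3, 0, 3).convertTo(r, CvType.CV_64FC1);

        // A proper rotation matrix satisfies R * R^T = I and det(R) = +1.
        Mat rrt = new Mat();
        Core.gemm(r, r.t(), 1.0, new Mat(), 0.0, rrt);
        double orthoError = Core.norm(rrt, Mat.eye(3, 3, CvType.CV_64FC1)); // ||R*R^T - I||
        double det = Core.determinant(r);
        return orthoError < 1e-2 && Math.abs(det - 1.0) < 1e-2;
    }

Beyond that, the most convincing test is a controlled motion: rotate the device by a known angle about a single axis and check that the corresponding Euler angle changes by roughly that amount while the other two stay near zero. Note also that the atan2-based decomposition above yields radians, while decomposeProjectionMatrix reports degrees.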