使用g2o进行捆绑调整,编译并运行,但数据未更改

时间:2018-11-04 14:28:33

标签: computer-vision g2o

我正在使用 g2o 库对某些已知数据进行捆绑调整(bundle adjustment)。

我的函数基于 ORB-SLAM2 中的一个,接受两个相机姿态,以及 std::vector 形式的 3D 点和相应的 2D 点,即每个相机姿态中的“观测值”。

它运行良好,并且当我使用已知的(正确的)相机姿态时,重新投影的点看起来正确。但是,当我向第二个相机姿态添加噪声时,它仍然可以正常运行并完成优化,但是所得到的姿态看起来与带噪声的输入数据完全相同。完整的函数是:

void BundleAdjustment::Solve(const std::vector<KeyFrame *> &vpKFs, const std::vector<MapPoint *> &allMapPoints,cv::Mat intrinsic, cv::Mat dist,
    std::vector<cv::Point2f> &projPnts)
{
    // Run bundle adjustment over the given keyframe poses and map points,
    // then reproject the OPTIMIZED 3D points through the optimized pose of
    // the keyframe with id == 1 into projPnts.
    //
    // vpKFs          - keyframes; the pose with id == 0 is held fixed (gauge).
    // allMapPoints   - 3D landmarks with their per-keyframe 2D observations.
    // intrinsic/dist - OpenCV camera intrinsics / distortion for reprojection.
    // projPnts       - output: reprojected 2D points for keyframe id == 1.

    dogleg = false;
    const int64 t0 = cv::getTickCount(); // NOTE(review): never read back — consider logging elapsed time.

    g2o::SparseOptimizer optimizer;
    optimizer.setVerbose(false);

    // Linear solver for the 6-DoF-pose / 3-DoF-point block structure.
    std::unique_ptr<g2o::BlockSolver_6_3::LinearSolverType> linearSolver;
    if (dense)
    {
        linearSolver = g2o::make_unique<g2o::LinearSolverDense<g2o::BlockSolver_6_3::PoseMatrixType>>();
    }
    else
    {
        linearSolver = g2o::make_unique<g2o::LinearSolverEigen<g2o::BlockSolver_6_3::PoseMatrixType>>();
    }

    // BUG FIX: the two branches were swapped — `dogleg == true` previously
    // built OptimizationAlgorithmLevenberg and the else-branch built
    // OptimizationAlgorithmDogleg. The flag now selects what it names.
    if (dogleg)
    {
        optimizer.setAlgorithm(new g2o::OptimizationAlgorithmDogleg(
            g2o::make_unique<g2o::BlockSolver_6_3>(std::move(linearSolver))));
    }
    else
    {
        optimizer.setAlgorithm(new g2o::OptimizationAlgorithmLevenberg(
            g2o::make_unique<g2o::BlockSolver_6_3>(std::move(linearSolver))));
    }

    const int num_cameras_ = static_cast<int>(vpKFs.size());
    // 95% chi-square threshold for 2 degrees of freedom (pixel residual).
    const float thHuber2D = sqrt(5.99);

    // NOTE(review): this under-reserves — the true edge count is the total
    // number of observations, not #keyframes + #points. reserve() is only a
    // hint, so this is a performance nit, not a correctness issue.
    const int nExpectedSize = (vpKFs.size() + allMapPoints.size());

    vector<g2o::EdgeSE3ProjectXYZ*> vpEdgesMono;
    vpEdgesMono.reserve(nExpectedSize);

    // ---- Camera pose vertices -------------------------------------------
    for (size_t i = 0; i < vpKFs.size(); i++)
    {
        KeyFrame *pKF = vpKFs[i];
        g2o::VertexSE3Expmap *vSE3 = new g2o::VertexSE3Expmap();

        g2o::SE3Quat squat;
        Eigen::Quaterniond qq(pKF->quat.w(), pKF->quat.x(), pKF->quat.y(), pKF->quat.z());
        squat.setRotation(qq);

        Eigen::Vector3d pp(pKF->pos.x(), pKF->pos.y(), pKF->pos.z());
        if (pKF->id == 1)
        {
            pp.x() += 1.2; // Deliberately perturb the second pose (test noise).
        }
        squat.setTranslation(pp);

        vSE3->setEstimate(squat);
        vSE3->setId(pKF->id); // Vertex ids 0..num_cameras_-1 are poses.
        if (pKF->id == 0)
        {
            vSE3->setFixed(true); // Anchor the first pose to fix the gauge.
        }
        optimizer.addVertex(vSE3);
    }

    // ---- Point vertices + projection edges ------------------------------
    for (size_t i = 0; i < allMapPoints.size(); i++)
    {
        MapPoint* pMP = allMapPoints[i];

        g2o::VertexSBAPointXYZ* vPoint = new g2o::VertexSBAPointXYZ();
        g2o::Vector3 pp(pMP->pos.x, pMP->pos.y, pMP->pos.z);
        vPoint->setEstimate(pp);
        // Point ids live after the camera ids to keep the id space disjoint.
        const int point_id = static_cast<int>(i) + num_cameras_;
        vPoint->setId(point_id);
        vPoint->setMarginalized(true); // Schur-complement the landmarks.
        optimizer.addVertex(vPoint);

        // One monocular projection edge per observation of this point.
        const map<KeyFrame*, size_t> observations = pMP->mObs;
        for (std::map<KeyFrame *, size_t>::const_iterator mit = observations.begin(); mit != observations.end(); mit++)
        {
            KeyFrame *pKF = mit->first;
            const cv::Point2f &kpUn = pKF->vObs_pos[mit->second];

            Eigen::Matrix<double, 2, 1> obs;
            obs << kpUn.x, kpUn.y;

            g2o::EdgeSE3ProjectXYZ *e = new g2o::EdgeSE3ProjectXYZ();
            // Cauchy robust kernel; Huber/DCS were tried as alternatives.
            g2o::RobustKernelCauchy* rk = new g2o::RobustKernelCauchy;
            rk->setDelta(thHuber2D);
            e->setRobustKernel(rk);

            // EdgeSE3ProjectXYZ convention: vertex 0 = point, vertex 1 = pose.
            e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex *>(optimizer.vertex(point_id)));
            e->setVertex(1, dynamic_cast<g2o::OptimizableGraph::Vertex *>(optimizer.vertex(pKF->id)));
            e->setMeasurement(obs);
            e->setInformation(Eigen::Matrix2d::Identity());

            e->fx = pKF->fx;
            e->fy = pKF->fy;
            e->cx = pKF->cx;
            e->cy = pKF->cy;

            vpEdgesMono.push_back(e);
            optimizer.addEdge(e);
        }
    }

    // ---- Optimize --------------------------------------------------------
    std::cout << "Optimization starts..." << std::endl;
    optimizer.initializeOptimization();
    optimizer.optimize(20);
    std::cout << "Optimization completed!" << std::endl;

    // ---- Read back and reproject ----------------------------------------
    std::cout << "Writing camera result" << std::endl;
    for (int i = 0; i < num_cameras_; i++)
    {
        g2o::VertexSE3Expmap *pCamera = dynamic_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(i));
        if (pCamera == nullptr)
        {
            continue; // Defensive: skip ids that are not pose vertices.
        }
        const g2o::SE3Quat SE3quat = pCamera->estimate();

        const g2o::Quaternion gQuat = SE3quat.rotation();
        const g2o::Vector3 gTrans = SE3quat.translation();

        if (i == 1)
        {
            const Eigen::Matrix<double, 3, 3> er = gQuat.toRotationMatrix();

            cv::Mat R;
            cv::Mat t;
            cv::Mat rvec;
            eigen2cv(er, R);
            eigen2cv(gTrans, t);
            cv::Rodrigues(R, rvec); // projectPoints wants a rotation vector.

            // BUG FIX: the original reprojected the ORIGINAL (pre-BA) point
            // positions from allMapPoints, so the output could never reflect
            // the optimization — read the optimized estimates back from the
            // optimizer instead. Also, the original loop started at x = 1 and
            // silently dropped point 0; start at 0 so all points are projected.
            std::vector<cv::Point3f> Pnts;
            Pnts.reserve(allMapPoints.size());
            for (size_t x = 0; x < allMapPoints.size(); x++)
            {
                g2o::VertexSBAPointXYZ* vPoint = dynamic_cast<g2o::VertexSBAPointXYZ*>(
                    optimizer.vertex(static_cast<int>(x) + num_cameras_));
                if (vPoint == nullptr)
                {
                    continue;
                }
                const g2o::Vector3 p = vPoint->estimate();
                Pnts.push_back(cv::Point3f(static_cast<float>(p.x()),
                                           static_cast<float>(p.y()),
                                           static_cast<float>(p.z())));
            }

            cv::projectPoints(Pnts, rvec, t, intrinsic, dist, projPnts);

            std::cout << i << " " << gTrans.x() << " " << gTrans.y() << " " << gTrans.z() << " " << gQuat.x() << " " << gQuat.y() << " " << gQuat.z() << " " << gQuat.w() << std::endl;
        }
    }
}

我在哪里出错呢?我的数据没有得到优化吗?还是我读回的数据不正确?

0 个答案:

没有答案