Why does my inverse compositional homography image alignment not converge?

Time: 2018-01-17 13:57:17

Tags: c++ computer-vision robotics

I implemented an algorithm based on homographyTrackingDemoLK-20-years. It is an adaptation of the inverse compositional LK algorithm (originally formulated for estimating an affine transformation) to a homography warp. I went through the following steps:

  1. Compute the derivatives of the template image;

  2. Compute the derivatives (Jacobian) of the homography warp at the template control points, evaluated at the identity transformation;

  3. Compute the steepest-descent images and the Hessian matrix.
  4. The steps above are precomputed before entering the iterations; then, in each iteration, we:

    1. Warp the target image with the current homography;
    2. Compute the error between the warped target image and the template image;
    3. Solve the linear system and then update the homography parameters.
I hope someone can give me a hint on whether my understanding of this algorithm is wrong. The C++ code is below, followed by two short sketches of how I read the warp Jacobian and the compositional parameter update:

      AlignmentResults ICA::GaussNewtonMinimization(const StructOfArray2di & pixelsOnTemplate, const vector<Mat> & images, const vector<Mat> & templates, const OptimizationParameters optParam, vector<float> & parameters)
      {
          AlignmentResults alignmentResults;
          alignmentResults.nIter = 0;
          alignmentResults.exitFlag = 1e6;
      
          //parameters must contain the initial guess. It is updated during optimization
          uint nChannels(images.size());
          vector<vector<float> > templatePixelIntensities(nChannels,vector<float>(pixelsOnTemplate.size()));//, templatePixelDx(nChannels,vector<float>(pixelsOnTemplate.size())), templatePixelDy(nChannels,vector<float>(pixelsOnTemplate.size()));
          vector<Mat> imageDx(nChannels), imageDy(nChannels);
          uint nParam = parameters.size();
      
          //vector<Eigen::Matrix<float, 2, N_PARAM>, Eigen::aligned_allocator<Eigen::Matrix<float, 2, N_PARAM> > > warpJacobians(pixelsOnTemplate.size());
      
          Eigen::MatrixXf sdImages(pixelsOnTemplate.size(), nParam);
          vector<float>  errorImage(pixelsOnTemplate.size(), 0.0);
      
          StructOfArray2di warpedPixels;
          Eigen::Matrix<float, N_PARAM, N_PARAM> hessian;
          Eigen::Matrix<float, N_PARAM, N_PARAM> hessianInv;
          hessian.setZero();
      
      
          std::vector<Eigen::Matrix<float, N_PARAM, N_PARAM>, Eigen::aligned_allocator<Eigen::Matrix<float, N_PARAM, N_PARAM>>> hessians(nChannels);
          std::vector<Eigen::MatrixXf> JacosT(nChannels);
          Eigen::Matrix<float, N_PARAM, 1> rhs;
          Eigen::Matrix<float, N_PARAM, 1> deltaParam;
      
          std::vector<float> zeroPara(8, 0.);
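          // Precomputation (done once before the iterations): template image
          // gradients, per-point warp Jacobians at the identity parameters,
          // steepest-descent images (stored in JacosT) and the Hessian.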
      #pragma omp parallel for
          for(int iChannel = 0; iChannel<nChannels; ++iChannel)
          {
              ComputeImageDerivatives(templates[iChannel], imageDx[iChannel], imageDy[iChannel]);
              hessians[iChannel].setZero();
              JacosT[iChannel] = Eigen::MatrixXf(N_PARAM, pixelsOnTemplate.size());
      
              for(int iPoint = 0; iPoint < pixelsOnTemplate.size(); ++iPoint)
              {
                  int pos = templates[iChannel].cols * pixelsOnTemplate.y[iPoint] + pixelsOnTemplate.x[iPoint];
                  templatePixelIntensities[iChannel][iPoint] = ((float*)templates[iChannel].data)[pos];
      
                  float Dx = ((float*)imageDx[iChannel].data)[pos];
                  float Dy = ((float*)imageDy[iChannel].data)[pos];
                  //printf("%f %f\n", Dx, Dy);
                  Eigen::Matrix<float, 2, N_PARAM> warpJ;
                  Homography::ComputeWarpJacobian(pixelsOnTemplate.x[iPoint], pixelsOnTemplate.y[iPoint], zeroPara, warpJ);
                  //std::cout << warpJ << std::endl;
                  Eigen::Matrix<float, 1, N_PARAM> J2H;
                  Eigen::Matrix<float, 1, 2> ID;
                  ID[0] = Dx;
                  ID[1] = Dy;
                  J2H = ID*warpJ;
      
                  JacosT[iChannel].col(iPoint) = J2H.transpose();
                  hessians[iChannel] += JacosT[iChannel].col(iPoint) * J2H;
              }
              hessian += hessians[iChannel];
          }
      
          while (alignmentResults.exitFlag == 1e6)
          {
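              // One Gauss-Newton iteration: warp the template control points,
              // build the right-hand side from the steepest-descent rows and the
              // residuals, solve for the increment, apply the inverse compositional update.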
              rhs.setZero();
      
              ComputeWarpedPixels(pixelsOnTemplate, parameters, warpedPixels);
      
      #pragma omp parallel for
              for(int iChannel = 0; iChannel<images.size(); ++iChannel)
              {
                  ComputeResiduals(images[iChannel], templatePixelIntensities[iChannel], warpedPixels, errorImage);
                  for (int i = 0; i<nParam; ++i)
                  {
                      for(uint iPoint(0); iPoint<pixelsOnTemplate.size(); ++iPoint)
                      {
                          float val = (errorImage[iPoint] == std::numeric_limits<float>::infinity() ? 0: errorImage[iPoint]); 
                          rhs(i,0) -= (JacosT[iChannel])(i, iPoint) * val;
                      }
                  }
              }
      
      
              deltaParam = hessian.fullPivLu().solve(rhs);
      
              std::vector<float> vDelta(8, 0.), invDelta(8, 0.);
              for(size_t i = 0; i<N_PARAM; ++i)
              {
                  vDelta[i] = deltaParam(i,0);
              }
      
              Homography::InverseWarpParameters(vDelta, invDelta);
              parameters = Homography::ParametersUpdateCompositional(parameters, invDelta);
      
      
              alignmentResults.poseIntermediateGuess.push_back(parameters);
              alignmentResults.residualNorm.push_back(ComputeResidualNorm(errorImage));
              if(alignmentResults.nIter > 0)
                  alignmentResults.exitFlag = CheckConvergenceOptimization(deltaParam.norm(), alignmentResults.nIter, abs(alignmentResults.residualNorm[alignmentResults.nIter] - alignmentResults.residualNorm[alignmentResults.nIter-1]), optParam);
              alignmentResults.nIter++;
          } // end of the Gauss-Newton iteration loop

          return alignmentResults;
      }
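
For context on how I read precomputation step 2, here is a minimal sketch of the 2x8 warp Jacobian evaluated at the identity. It assumes the common 8-parameter form H(p) = I + [p0 p1 p2; p3 p4 p5; p6 p7 0], which may not be what Homography::ComputeWarpJacobian uses, and the function name WarpJacobianAtIdentity is made up for illustration:

    // Sketch only: 2x8 Jacobian of the homographic warp with respect to the
    // parameters, evaluated at p = 0, for the assumed parameterization above.
    #include <Eigen/Dense>

    Eigen::Matrix<float, 2, 8> WarpJacobianAtIdentity(float x, float y)
    {
        Eigen::Matrix<float, 2, 8> J;
        J << x, y, 1, 0, 0, 0, -x * x, -x * y,   // derivatives of the warped x coordinate
             0, 0, 0, x, y, 1, -x * y, -y * y;   // derivatives of the warped y coordinate
        return J;
    }

Each steepest-descent row is then the template gradient [Dx Dy] times this Jacobian (the J2H = ID * warpJ line), and the Hessian is the sum of J2H.transpose() * J2H over the control points.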
      

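Here is a similar minimal sketch of the inverse compositional update in iteration step 3, under the same assumed parameterization: the current warp is composed with the inverse of the incremental warp, H(p_new) = H(p) * H(dp)^(-1), which is what I expect InverseWarpParameters followed by ParametersUpdateCompositional to amount to. The sketch is only an illustration, not their actual code:

    // Sketch only: inverse compositional parameter update for the assumed
    // 8-parameter form H(p) = I + [p0 p1 p2; p3 p4 p5; p6 p7 0].
    #include <Eigen/Dense>
    #include <vector>

    static Eigen::Matrix3f ToMatrix(const std::vector<float>& p)
    {
        Eigen::Matrix3f H;
        H << 1.f + p[0], p[1],       p[2],
             p[3],       1.f + p[4], p[5],
             p[6],       p[7],       1.f;
        return H;
    }

    // Compose the current warp with the inverse of the incremental warp.
    std::vector<float> InverseCompositionalUpdate(const std::vector<float>& p,
                                                  const std::vector<float>& dp)
    {
        Eigen::Matrix3f H = ToMatrix(p) * ToMatrix(dp).inverse();
        H /= H(2, 2);  // keep the H(2,2) = 1 normalization
        std::vector<float> pNew(8);
        pNew[0] = H(0, 0) - 1.f; pNew[1] = H(0, 1);       pNew[2] = H(0, 2);
        pNew[3] = H(1, 0);       pNew[4] = H(1, 1) - 1.f; pNew[5] = H(1, 2);
        pNew[6] = H(2, 0);       pNew[7] = H(2, 1);
        return pNew;
    }

If the sign of the residual in ComputeResiduals or the order of the two factors in this composition does not match the convention above, the increment moves the parameters away from the solution, which would look exactly like non-convergence.
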
0 Answers:

There are no answers