我一直在努力实现这篇论文Dense and Swift Mapping with Monocular Vision - Pedro Pinies, Lina Maria Paz, and Paul Newman,在这篇论文中,他们通过在反深度范围内创建成本量来开始初步估算深度。我使用绝对差值和作为我的光度误差测量来实现这部分。我将warp计算为
warp=K*LeftCameraPose*inv(RightCameraPose)*(inv(K)*inv_depth)
其中 K 是相机内参矩阵。我不确定这样 warp 图像是否正确地对应文章中的公式 (1)。我原本预期初始估计应当是真实值(ground truth)的带噪声版本,但没有得到预期的结果。如有任何帮助,将不胜感激。
这是我的代码:
# Parameters from KITTI Calib file
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Intrinsic matrices of the left/right cameras (fx, fy, cx, cy from the
# KITTI calibration; both cameras share the same intrinsics here).
K1 =np.array([[718.8560, 0.0, 607.1928],[0.0, 718.8560, 185.2157],[0.0, 0.0, 1.0]])
K2 = np.array([[718.8560, 0.0, 607.1928],[0.0,718.8560,185.2157],[0.0,0.0,1.0]])
# Rotation between the two cameras is identity; only the translations differ.
R=np.array([[1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,1.0]])
# Camera translations. NOTE(review): these look like the 4th-column values of
# the KITTI P_rect projection matrices (fx*baseline terms), not metric
# translations in the camera frame — verify units and reference frame.
T1 = np.array([45.48225,-11.30887,3.779761])
T2 = np.array([-337.2877,2.369057,4.915215])
# 4x4 homogeneous camera poses [R | t; 0 0 0 1].
Camera_Left_Pose= np.vstack([np.c_[R,T1],np.array([0,0,0,1])])
Camera_Right_Pose= np.vstack([np.c_[R,T2],np.array([0,0,0,1])])
# Inverse of the right-camera pose: maps right-camera coordinates to world.
Right_to_World=np.linalg.inv(Camera_Right_Pose)
K_inv=np.linalg.inv(K1)
# K padded with a zero column to 3x4 so it projects homogeneous 4-vectors.
K_norm=np.hstack((K1,np.array([[0],[0],[0]])))
#Inverse Depth values for search range
# 100 inverse-depth hypotheses in [0, 0.001) with step 1e-5.
inv_d_rng=np.arange(0,0.001,0.00001)
def InitialMap(left, right, inv_depth_range):
    """Plane-sweep initial depth estimate (eq. (1) of Pinies et al.).

    Builds a cost volume by warping `right` onto `left` via the homography
    induced by a fronto-parallel plane at each candidate inverse depth, and
    picks per pixel the hypothesis with minimum SAD photometric cost.

    Parameters
    ----------
    left, right : 2-D grayscale images of the same shape.
    inv_depth_range : 1-D array of candidate inverse depths.

    Returns
    -------
    2-D array (same shape as `left`) of the winning *inverse* depth per
    pixel (despite the local name `depthMap`).
    """
    img_size = left.shape
    win_size = 3  # half-width of the SAD aggregation window
    # One column of per-pixel costs per inverse-depth hypothesis.
    Cost = np.zeros((np.size(left), np.size(inv_depth_range)))
    for n, inv_depth in enumerate(inv_depth_range):
        # Plane-induced homography: back-project left pixels (K^-1 plus the
        # inverse-depth row), map left->world->right camera, project with K.
        Pixel_to_World = np.vstack((K_inv, np.array([0, 0, inv_depth])))
        perspectiveTransform = K_norm.dot(Camera_Left_Pose).dot(Right_to_World).dot(Pixel_to_World)
        # BUG FIX: the original passed cv2.BORDER_CONSTANT and 10000
        # positionally, which lands them in the `dst` and `flags` slots of
        # warpPerspective; border handling must go through keywords.
        Warp_I = cv2.warpPerspective(right, perspectiveTransform,
                                     (img_size[1], img_size[0]),
                                     flags=cv2.INTER_LINEAR,
                                     borderMode=cv2.BORDER_CONSTANT,
                                     borderValue=10000)
        Cost[:, n] = np.ravel(CostFunc(left, Warp_I, win_size))
    # Per pixel, keep the inverse depth whose warp matched the left image best.
    depthMap = inv_depth_range[np.nanargmin(Cost, axis=1)]
    depthMap = depthMap.reshape(img_size)
    return depthMap
def CostFunc(img1, img2, win_size):
    """Per-pixel sum of absolute differences (SAD) between two images,
    aggregated over a (2*win_size+1) x (2*win_size+1) window.

    Parameters
    ----------
    img1, img2 : 2-D arrays of identical shape.
    win_size : half-width of the square aggregation window.

    Returns
    -------
    2-D float array of aggregated SAD costs, same shape as `img1`.
    """
    rows, cols = img1.shape
    # BUG FIX: compute in float — with uint8 inputs, `img1 - pad_img2`
    # wraps around (|0 - 5| becomes 251) and corrupts the cost volume.
    ref = np.asarray(img1, dtype=np.float64)
    tgt = np.asarray(img2, dtype=np.float64)
    # Zero-pad so border pixels still see a full window (equivalent to
    # cv2.copyMakeBorder with BORDER_CONSTANT and value 0).
    pad_img2 = np.pad(tgt, win_size, mode='constant', constant_values=0)
    cost_map = np.zeros((rows, cols))
    # BUG FIX: a centred window of half-width w spans 2*w+1 offsets per
    # axis; the original iterated range(2+win_size), truncating the window.
    span = 2 * win_size + 1
    for i in range(span):
        for j in range(span):
            cost_map += np.abs(ref - pad_img2[i:i + rows, j:j + cols])
    return cost_map
# Driver: load the KITTI stereo pair, convert both frames to grayscale,
# estimate the initial (inverse-)depth map, and save it as an image.
left_bgr = cv2.imread('left.png')
right_bgr = cv2.imread('right.png')
gray_left = cv2.cvtColor(left_bgr, cv2.COLOR_BGR2GRAY)
gray_right = cv2.cvtColor(right_bgr, cv2.COLOR_BGR2GRAY)
initial_map = InitialMap(gray_left, gray_right, inv_d_rng)
plt.imsave('Result.png', initial_map, cmap='gray')
左右图像对(取自 KITTI)、我的结果以及所用的真实值(ground truth)见此处(here)。