Let's take the following pair of consecutive aerial images and perform image rectification (code below):
import cv2
import matplotlib.pyplot as plt
import numpy as np


def main():
    # Read both aerial images as 8-bit grayscale.
    query_image = cv2.imread(filename='query.jpg', flags=cv2.IMREAD_GRAYSCALE)
    train_image = cv2.imread(filename='train.jpg', flags=cv2.IMREAD_GRAYSCALE)

    # Detect SURF keypoints and compute descriptors for both images.
    detector = cv2.xfeatures2d.SURF_create()
    query_keypoints, query_descriptors = detector.detectAndCompute(query_image,
                                                                   mask=None)
    train_keypoints, train_descriptors = detector.detectAndCompute(train_image,
                                                                   mask=None)

    # Brute-force matching with cross-checking; best matches first.
    matcher = cv2.BFMatcher_create(normType=cv2.NORM_L1, crossCheck=True)
    matches = matcher.match(queryDescriptors=query_descriptors,
                            trainDescriptors=train_descriptors)
    matches = sorted(matches, key=lambda x: x.distance)
    query_points = np.array([query_keypoints[match.queryIdx].pt
                             for match in matches])
    train_points = np.array([train_keypoints[match.trainIdx].pt
                             for match in matches])

    # Estimate the fundamental matrix with RANSAC (param1 is the RANSAC
    # reprojection threshold; OpenCV 4.x names it ransacReprojThreshold).
    fundamental_matrix, _ = cv2.findFundamentalMat(points1=query_points,
                                                   points2=train_points,
                                                   method=cv2.FM_RANSAC,
                                                   param1=0.1)

    # Rectification homography for the query image (the third return value
    # would be the homography for the train image).
    _, query_homography, _ = cv2.stereoRectifyUncalibrated(
        points1=query_points,
        points2=train_points,
        F=fundamental_matrix,
        imgSize=query_image.shape[::-1])

    # With identity camera matrices and zero distortion, only the homography
    # passed as R contributes to the remap tables.
    map_1, map_2 = cv2.initUndistortRectifyMap(R=query_homography,
                                               cameraMatrix=np.eye(3),
                                               distCoeffs=np.zeros(5),
                                               newCameraMatrix=np.eye(3),
                                               size=query_image.shape[::-1],
                                               m1type=cv2.CV_16SC2)
    plt.imshow(cv2.remap(src=query_image,
                         map1=map_1,
                         map2=map_2,
                         interpolation=cv2.INTER_LINEAR),
               cmap='gray')
    plt.show()


if __name__ == '__main__':
    main()
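Before going further, it can help to sanity-check the matching stage, since everything downstream depends on it; a minimal sketch using cv2.drawMatches on the variables from main() above:

# Visualize the 50 strongest matches between the two aerial images.
preview = cv2.drawMatches(query_image, query_keypoints,
                          train_image, train_keypoints,
                          matches[:50], None)
plt.imshow(preview)
plt.show()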
Question:
How do I avoid cropping the result and make the whole image fit the figure?
What I have tried:
Changing the size parameter of cv2.initUndistortRectifyMap to tuple(2*x for x in query_image.shape[::-1]) produces the following picture:

[picture]

Operating on map_1:

map_1[:, :, 0] -= 1000
map_1[:, :, 1] += 1000

This hack kind of works, but I don't want to leave that much black space around the image. Ideally, I want the borders of the image to touch the borders of the figure. I feel like I am missing some functionality in OpenCV that would avoid cropping the new image and would resize the new figure taking the rotation of the image into account. Or maybe I need to extract and use some information about the transformation from map_1 and map_2...
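A common way to get exactly that behaviour for a single image is to warp the image corners with the rectification homography, take their bounding box as the output size, and fold a compensating translation into the homography. A minimal sketch with cv2.warpPerspective, reusing query_image and query_homography from the code above:

h, w = query_image.shape[:2]
corners = np.array([[[0, 0]], [[w, 0]], [[w, h]], [[0, h]]], dtype=np.float64)
warped_corners = cv2.perspectiveTransform(corners, query_homography).reshape(-1, 2)
x_min, y_min = warped_corners.min(axis=0)
x_max, y_max = warped_corners.max(axis=0)
# Translate so the warped bounding box starts at the origin: nothing is cropped.
translation = np.array([[1, 0, -x_min],
                        [0, 1, -y_min],
                        [0, 0, 1]])
output_size = (int(np.ceil(x_max - x_min)), int(np.ceil(y_max - y_min)))
rectified = cv2.warpPerspective(query_image,
                                translation @ query_homography,
                                output_size)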
Edit
In reply to @Micka's comment.
In my real code I have the following camera matrix and distortion coefficients:
CAMERA_MATRIX = np.array([
    [2425.51170203134142866475, 0, 2035.60834479390314299962],
    [0, 2425.51170203134142866475, 1512.81389897734607075108],
    [0, 0, 1]])
DISTORTION_COEFFICIENTS = np.array([-0.00470114123536235617,
                                    -0.00149541744410850905,
                                    -0.00024420109077626909,
                                    -0.00003711484246148531,
                                    0.00020459075700470246])

From there I compute the new camera matrix like this:

new_camera_matrix, _ = cv2.getOptimalNewCameraMatrix(
    cameraMatrix=CAMERA_MATRIX,
    distCoeffs=DISTORTION_COEFFICIENTS,
    imageSize=query_image.shape[::-1],
    alpha=0,
    centerPrincipalPoint=0)

Then I use it like this to compute map_1 and map_2:

# Express the rectification homography as a rotation in camera coordinates.
rotation_matrix = np.linalg.inv(CAMERA_MATRIX) @ query_homography @ CAMERA_MATRIX
map_1, map_2 = cv2.initUndistortRectifyMap(R=rotation_matrix,
                                           cameraMatrix=CAMERA_MATRIX,
                                           distCoeffs=DISTORTION_COEFFICIENTS,
                                           newCameraMatrix=new_camera_matrix,
                                           size=query_image.shape[::-1],
                                           m1type=cv2.CV_16SC2)

The result is equivalent to the first rectified image I provided above.
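Note that with alpha=0, cv2.getOptimalNewCameraMatrix deliberately crops to the all-valid-pixels region; alpha=1 keeps every source pixel instead and returns the valid-pixel ROI alongside the matrix. A sketch of that variant (it only addresses distortion-related cropping, not the rotation introduced by the homography):

new_camera_matrix, valid_roi = cv2.getOptimalNewCameraMatrix(
    cameraMatrix=CAMERA_MATRIX,
    distCoeffs=DISTORTION_COEFFICIENTS,
    imageSize=query_image.shape[::-1],
    alpha=1)  # alpha=1: retain all source pixels instead of cropping
x, y, w, h = valid_roi  # bounding box of the pixels that are guaranteed valid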
Edit Nº2:
It is probably worth noting that in the end the two rectified images are required not only to fit their figures but also to be aligned with each other, like here (image from "A short tutorial on image rectification" by Du Huynh):

[picture]

Currently I get the following:

[picture]
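One way to get both the fitting and the alignment in a single step is to warp the corners of both images with their respective homographies and apply one shared translation and output size to both; since both images receive the same translation, corresponding rows stay aligned. A sketch under those assumptions (train_homography being the third return value of cv2.stereoRectifyUncalibrated):

def warped_corners(image, homography):
    h, w = image.shape[:2]
    corners = np.array([[[0, 0]], [[w, 0]], [[w, h]], [[0, h]]],
                       dtype=np.float64)
    return cv2.perspectiveTransform(corners, homography).reshape(-1, 2)

all_corners = np.vstack([warped_corners(query_image, query_homography),
                         warped_corners(train_image, train_homography)])
x_min, y_min = all_corners.min(axis=0)
x_max, y_max = all_corners.max(axis=0)
# The SAME translation for both images preserves their row alignment.
translation = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]])
output_size = (int(np.ceil(x_max - x_min)), int(np.ceil(y_max - y_min)))
rectified_query = cv2.warpPerspective(query_image,
                                      translation @ query_homography,
                                      output_size)
rectified_train = cv2.warpPerspective(train_image,
                                      translation @ train_homography,
                                      output_size)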
Edit Nº3:
I tried to extract from the inverse maps how much I should resize the rectified images and how much I should shift them:
query_rotation_matrix = (np.linalg.inv(CAMERA_MATRIX)
                         @ query_homography
                         @ CAMERA_MATRIX)
inverse_query_rotation_matrix = np.linalg.inv(query_rotation_matrix)
# train_homography is the third return value of cv2.stereoRectifyUncalibrated.
train_rotation_matrix = (np.linalg.inv(CAMERA_MATRIX)
                         @ train_homography
                         @ CAMERA_MATRIX)
inverse_train_rotation_matrix = np.linalg.inv(train_rotation_matrix)
# Maps built from the inverse rotations tell where the source pixels end up.
inv_query_map_1, inv_query_map_2 = cv2.initUndistortRectifyMap(
    R=inverse_query_rotation_matrix,
    cameraMatrix=CAMERA_MATRIX,
    distCoeffs=DISTORTION_COEFFICIENTS,
    newCameraMatrix=new_camera_matrix,
    size=query_image.shape[::-1],
    m1type=cv2.CV_16SC2)
inv_train_map_1, inv_train_map_2 = cv2.initUndistortRectifyMap(
    R=inverse_train_rotation_matrix,
    cameraMatrix=CAMERA_MATRIX,
    distCoeffs=DISTORTION_COEFFICIENTS,
    newCameraMatrix=new_camera_matrix,
    size=query_image.shape[::-1],
    m1type=cv2.CV_16SC2)

From here we can get the size of the new rectified image:

extended_x_size = (inv_query_map_1[:, :, 1].max()
                   - inv_query_map_1[:, :, 1].min())
extended_y_size = (inv_query_map_1[:, :, 0].max()
                   - inv_query_map_1[:, :, 0].min())

And how much we should shift the image:

x_shift = np.abs(inv_query_map_1[:, :, 1].min())
y_shift = np.abs(inv_query_map_1[:, :, 0].min())

Also, considering that the rectified train image will be shifted relative to the rectified query image, we need to add some more space:

delta_y = max(0, inv_train_map_1[:, :, 0].max() + y_shift - extended_y_size)

Creating the maps with the new size:
query_map_1, query_map_2 = cv2.initUndistortRectifyMap(
R=query_rotation_matrix,
cameraMatrix=CAMERA_MATRIX,
distCoeffs=DISTORTION_COEFFICIENTS,
newCameraMatrix=new_camera_matrix,
size=(extended_y_size + delta_y, extended_x_size),
m1type=cv2.CV_16SC2)
train_map_1, train_map_2 = cv2.initUndistortRectifyMap(
R=train_rotation_matrix,
cameraMatrix=CAMERA_MATRIX,
distCoeffs=DISTORTION_COEFFICIENTS,
newCameraMatrix=new_camera_matrix,
size=(extended_y_size + delta_y, extended_x_size),
m1type=cv2.CV_16SC2)
Now for the least obvious part. When a shift is applied to these maps, the image moves not along the axes of the figure but along the axes of the image itself. Applying the shifts:
# Inclination of the rectified image, estimated from the endpoints of the
# first row of the inverse map.
x_1 = inv_query_map_1[0, 0, 1]
x_2 = inv_query_map_1[0, -1, 1]
y_1 = inv_query_map_1[0, 0, 0]
y_2 = inv_query_map_1[0, -1, 0]
d_1 = x_2 - x_1
d_2 = y_2 - y_1
phi = np.arctan(d_1 / d_2)
# Rotate the figure-space shift vector into image-space coordinates.
psi = phi + np.arctan(y_shift / x_shift)
ro = np.sqrt(x_shift**2 + y_shift**2)
x_shift_image = ro * np.cos(psi)
y_shift_image = ro * np.sin(psi)
# Apply the image-space shift directly to the remap tables.
query_map_1[:, :, 1] -= x_shift_image.astype(int)
query_map_1[:, :, 0] -= y_shift_image.astype(int)
train_map_1[:, :, 1] -= x_shift_image.astype(int)
train_map_1[:, :, 0] -= y_shift_image.astype(int)
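An alternative that avoids the figure-axes/image-axes conversion altogether, sketched here, is to fold the shift into the new camera matrix: translating the principal point (entries [0, 2] and [1, 2]) moves the rectified image directly in output-pixel coordinates. Depending on the axis convention of the maps, the two shift values may need swapping:

shifted_camera_matrix = new_camera_matrix.copy()
shifted_camera_matrix[0, 2] += x_shift  # move content right in the output image
shifted_camera_matrix[1, 2] += y_shift  # move content down in the output image
query_map_1, query_map_2 = cv2.initUndistortRectifyMap(
    R=query_rotation_matrix,
    cameraMatrix=CAMERA_MATRIX,
    distCoeffs=DISTORTION_COEFFICIENTS,
    newCameraMatrix=shifted_camera_matrix,  # the shift lives in the output frame
    size=(extended_y_size + delta_y, extended_x_size),
    m1type=cv2.CV_16SC2)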
Finally, remapping and plotting the results:

rectified_query_image = cv2.remap(src=query_image,
                                  map1=query_map_1,
                                  map2=query_map_2,
                                  interpolation=cv2.INTER_LINEAR)
rectified_train_image = cv2.remap(src=train_image,
                                  map1=train_map_1,
                                  map2=train_map_2,
                                  interpolation=cv2.INTER_LINEAR)
plt.imshow(rectified_query_image)
plt.show()
plt.imshow(rectified_train_image)
plt.show()

[rectified_query.jpg]
[rectified_train.jpg]
[epipolar_lines.jpg]

At first glance this looks pretty good, but unfortunately my disparity map is now broken...

[disparity_before.jpg]
[disparity_after.jpg]
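A broken disparity map is often a symptom of residual vertical misalignment, which can be measured directly: rematch features on the two rectified images and look at the per-match difference in y, which should be near zero after a good rectification. A minimal diagnostic sketch:

# Rematch on the rectified pair and measure the vertical residual per match.
detector = cv2.xfeatures2d.SURF_create()
matcher = cv2.BFMatcher_create(normType=cv2.NORM_L1, crossCheck=True)
kp_q, des_q = detector.detectAndCompute(rectified_query_image, mask=None)
kp_t, des_t = detector.detectAndCompute(rectified_train_image, mask=None)
residual_matches = matcher.match(des_q, des_t)
dy = np.array([kp_q[m.queryIdx].pt[1] - kp_t[m.trainIdx].pt[1]
               for m in residual_matches])
print('median |dy| after rectification:', np.median(np.abs(dy)))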
Edit Nº4:
I was thinking that the inconsistency might be caused by the different inclinations of the two rectified images. If that is the case, the shift should be computed separately for each image.
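A sketch of what that per-image computation could look like, assuming each image's shift is read off its own inverse map (variable names hypothetical); note that giving the two images different horizontal shifts changes the disparity offset, so the stereo matcher's minDisparity would need adjusting accordingly:

query_x_shift = np.abs(inv_query_map_1[:, :, 1].min())
query_y_shift = np.abs(inv_query_map_1[:, :, 0].min())
train_x_shift = np.abs(inv_train_map_1[:, :, 1].min())
train_y_shift = np.abs(inv_train_map_1[:, :, 0].min())
# Each map then gets its own shift instead of the shared (x_shift, y_shift).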
I hoped this would fix the disparity image, but for some reason it does not.