I am using OpenCV to compute the disparity map of a scene. I calibrated the stereo pair by first finding the intrinsic parameters of each camera separately with cv2.calibrateCamera, and then using cv2.stereoCalibrate to find the rotation matrix and translation vector between the cameras.
I am pasting my calibration code below, although I don't think the problem is there:
import numpy as np
import cv2
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.000001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpointsL = []
imgpointsL = []
objpointsR = []
imgpointsR = []
imgR = cv2.imread('right2.jpg',0)
# Find the chess board corners
ret, cornersR = cv2.findChessboardCorners(imgR, (7,6),None)
# If found, add object points, image points (after refining them)
if ret == True:
    objpointsR.append(objp)
    cv2.cornerSubPix(imgR, cornersR, (11,11), (-1,-1), criteria)
    imgpointsR.append(cornersR)
imgL = cv2.imread('left3.jpg',0)
#grayL = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, cornersL = cv2.findChessboardCorners(imgL, (7,6),None)
# If found, add object points, image points (after refining them)
if ret == True:
    objpointsL.append(objp)
    cv2.cornerSubPix(imgL, cornersL, (11,11), (-1,-1), criteria)
    imgpointsL.append(cornersL)
#Intrinsic parameters
distCoeffsR = np.array([1.191755160158372399e-02, -8.585146447098485067e-03, 8.429413399383550720e-04, -6.222973871460263460e-05, -7.966310474599684957e-03])
distCoeffsL = np.array([-1.627558337813042599e-02, 2.409982163230293128e-01, 4.443126374210568282e-03, 1.288079049351137243e-03, -3.177831292965794807e-01])
cameraMatrixR = np.matrix('3.252248978261580987e+02 0 3.269955537627058106e+02;0 3.228400384496266042e+02 2.341068611530280350e+02;0 0 1')
cameraMatrixL = np.matrix('4.570360097428241488e+02 0 3.465188967298854550e+02;0 4.573286269805292363e+02 2.691439570063795372e+02;0 0 1')
retval,cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, R, T, E, F = cv2.stereoCalibrate(objpointsL, imgpointsL, imgpointsR, cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, (640,480))
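(For reference, the hard-coded intrinsics above would normally come from a per-camera cv2.calibrateCamera run on many chessboard views; below is only a minimal sketch that reuses the point lists and the 640x480 image size from the code above, with cv2.CALIB_FIX_INTRINSIC as an assumed flag choice:)
# Sketch only: in practice objpointsL/imgpointsL should come from many chessboard views.
retL, cameraMatrixL, distCoeffsL, rvecsL, tvecsL = cv2.calibrateCamera(
    objpointsL, imgpointsL, (640, 480), None, None)
retR, cameraMatrixR, distCoeffsR, rvecsR, tvecsR = cv2.calibrateCamera(
    objpointsR, imgpointsR, (640, 480), None, None)
# Keeping the per-camera intrinsics fixed during the stereo step is a common choice.
retval, cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, R, T, E, F = cv2.stereoCalibrate(
    objpointsL, imgpointsL, imgpointsR,
    cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR,
    (640, 480), flags=cv2.CALIB_FIX_INTRINSIC, criteria=criteria)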
Now I call cv2.stereoRectify:
lFrame = cv2.imread('izquierda.jpg')
rFrame = cv2.imread('derecha.jpg')
w, h = lFrame.shape[:2] # both frames should be of same shape
#Perform stereorectification
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, (w,h), R, T, cv2.CALIB_ZERO_DISPARITY,0, (0,0))
#computes undistort and rectify maps
mapxL, mapyL = cv2.initUndistortRectifyMap(cameraMatrixL, distCoeffsL, R1, P1, (w,h), cv2.CV_32FC1)
mapxR, mapyR = cv2.initUndistortRectifyMap(cameraMatrixR, distCoeffsR, R2, P2, (w,h), cv2.CV_32FC1)
dstL = cv2.remap(lFrame, mapxL, mapyL,cv2.INTER_LINEAR)
dstR = cv2.remap(rFrame, mapxR, mapyR,cv2.INTER_LINEAR)
while (True):
    cv2.imshow('Left normal', lFrame)
    cv2.imshow('Right normal', rFrame)
    cv2.imshow('Left rectify', dstL)
    cv2.imshow('Right rectify', dstR)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
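(Once the rectified pair looks correct, the disparity map itself, which is the end goal mentioned at the top, would be computed from dstL/dstR; a minimal sketch with StereoSGBM, where numDisparities and blockSize are placeholder values to tune:)
# Sketch: assumed StereoSGBM parameters; tune numDisparities/blockSize for the scene.
grayL = cv2.cvtColor(dstL, cv2.COLOR_BGR2GRAY)
grayR = cv2.cvtColor(dstR, cv2.COLOR_BGR2GRAY)
stereo = cv2.StereoSGBM_create(minDisparity=0, numDisparities=64, blockSize=5)
disparity = stereo.compute(grayL, grayR).astype(np.float32) / 16.0  # SGBM output is fixed-point, scaled by 16
cv2.imshow('Disparity', disparity / 64.0)  # rough normalization for display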
These are the rectified images:
Left rectified
Right rectified
Can someone help me? I'm confused by this...
Answer 0 (score: 1)
Try switching the height and width, that is, change this line of your code:
w, h = lFrame.shape[:2] # both frames should be of same shape
to this:
h, w = lFrame.shape[:2] # both frames should be of same shape
I ran into the same problem and this fixed it for me. I believe it is because numpy's shape gives (height, width), so the first element is the height and the second is the width, while OpenCV's size parameters expect (width, height).
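(A quick sketch to keep the convention straight:)
h, w = lFrame.shape[:2]   # numpy shape is (rows, cols) = (height, width)
size = (w, h)             # OpenCV size arguments are (width, height)
# pass `size` to stereoRectify / initUndistortRectifyMap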
Hope this helps.
Answer 1 (score: 1)
I also faced a similar problem. I compared all my matrices (obtained from Python) with the matrices obtained from Matlab's stereo calibration app, and found that the problem was caused by an incorrect calibration: I had obtained wrong camera distortion coefficients, which led to this error. You can read my full answer here: Python 2.7/OpenCV 3.3: Error in cv2.initUndistortRectifyMap. Not showing undistort rectified images
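(One quick sanity check, reusing the point lists and image size from the question, is to look at the RMS reprojection error that calibrateCamera and stereoCalibrate return as their first value; an error well above roughly 1 pixel usually points to a bad calibration. A minimal sketch:)
rmsL, _, _, _, _ = cv2.calibrateCamera(objpointsL, imgpointsL, (640, 480), None, None)
rmsR, _, _, _, _ = cv2.calibrateCamera(objpointsR, imgpointsR, (640, 480), None, None)
# retval is the stereo RMS error returned by cv2.stereoCalibrate in the question's code
print('Left RMS: %.3f, Right RMS: %.3f, Stereo RMS: %.3f' % (rmsL, rmsR, retval))
# A large error suggests wrong distortion coefficients, too few views, or bad corner detections.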