If I take a camera matrix A and a set of poses [rvec, ...], [tvec, ...], use them as the parameters to cv2.projectPoints to generate the images the camera would produce when viewing a grid of circles, and then run cv2.findCirclesGrid and cv2.calibrateCamera on those images to recover the camera parameters, shouldn't I recover the original intrinsic and extrinsic parameters?
The full code at the bottom of this question performs this process, but does not recover the original camera parameters:
Kept 4 full captures out of 4 images
calibration error 133.796093439
Simulation matrix:
[[ 5.00000000e+03 0.00000000e+00 3.20000000e+02]
[ 0.00000000e+00 5.00000000e+03 2.40000000e+02]
[ 0.00000000e+00 0.00000000e+00 1.00000000e+00]]
Estimated matrix:
[[ 1.0331118 0. 317.58445168]
[ 0. 387.49075886 317.98450481]
[ 0. 0. 1. ]]
I.e., the mean error is huge, and the estimated camera matrix looks nothing like the simulated camera matrix that was used to generate the test images.
I would expect this closed-loop simulation to produce a very good estimate of the intrinsic camera matrix. This approach to validating cameraCalibration does not seem to work, so what am I doing wrong?
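In condensed form (using Setup, extract_dots, and the calib_flags value from basic_test in the full code below), the loop I expect to close is:

S=Setup()                                    # simulated camera with a known matrix S.A
imgs=S.make_images()                         # render the circle grid via cv2.projectPoints
dots=extract_dots( imgs, S.grid_size[::-1] ) # cv2.findCirclesGrid on each image
results=cv2.calibrateCamera( [S.grid,]*len(dots), dots, S.img_size, cameraMatrix=S.A, flags=calib_flags )
# expectation: results[1], the estimated camera matrix, should be close to S.A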
Edit, in response to AldurDisciple's comment:
1) Added a new function, direct_generation_of_points, in the code below. It skips the image-generation step and uses cv2.projectPoints directly to compute the circle locations that are passed to cv2.calibrateCamera.
This works correctly.
But here is the confusing part: the estimated circle locations (extracted from my simulated images) are typically within about a tenth of a pixel of the exact locations; the main difference is that the points are in a different order:
# compare the y-component's
In [245]: S.dots[0][:,0,1]
Out[245]:
array([ 146.33618164, 146.30953979, 146.36413574, 146.26707458,
146.17976379, 146.30110168, 146.17236328, 146.35955811,
146.33454895, 146.36776733, 146.2612915 , 146.21359253,
146.23895264, 146.27839661, 146.27764893, 177.51347351,
177.57495117, 177.53858948, 177.48587036, 177.63012695,
177.48597717, 177.51727295, 177.5202179 , 177.52545166,
177.57287598, 177.51008606, 177.51296997, 177.53715515,
177.53053284, 177.58164978, 208.69573975, 208.7252655 ,
208.69616699, 208.73510742, 208.63375854, 208.66760254,
208.71517944, 208.74360657, 208.62438965, 208.59814453,
208.67456055, 208.72662354, 208.70921326, 208.63339233,
208.70820618, 239.8401947 , 240.06373596, 239.87176514,
240.04118347, 239.97781372, 239.97572327, 240.04475403,
239.95411682, 239.80995178, 239.94726562, 240.01327515,
239.82675171, 239.99989319, 239.90107727, 240.07745361,
271.31692505, 271.28417969, 271.28216553, 271.33111572,
271.33279419, 271.33584595, 271.30758667, 271.21173096,
271.28588867, 271.3387146 , 271.33770752, 271.2104187 ,
271.38504028, 271.25054932, 271.29376221, 302.52420044,
302.47903442, 302.41482544, 302.39868164, 302.47793579,
302.49789429, 302.45016479, 302.48071289, 302.50463867,
302.51422119, 302.46307373, 302.42077637, 302.60791016,
302.48162842, 302.46142578, 333.70709229, 333.75698853,
333.64157104, 333.64926147, 333.6647644 , 333.69546509,
333.73342896, 333.76846313, 333.57540894, 333.76605225,
333.74307251, 333.60968018, 333.7739563 , 333.70132446,
333.62057495], dtype=float32)
In [246]: S.exact_dots[0][:,0,1]
Out[246]:
array([ 146.25, 177.5 , 208.75, 240. , 271.25, 302.5 , 333.75,
146.25, 177.5 , 208.75, 240. , 271.25, 302.5 , 333.75,
<< snipped 10 identical rows >>
146.25, 177.5 , 208.75, 240. , 271.25, 302.5 , 333.75,
146.25, 177.5 , 208.75, 240. , 271.25, 302.5 , 333.75,
146.25, 177.5 , 208.75, 240. , 271.25, 302.5 , 333.75], dtype=float32)
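As a sanity check that the two sets agree up to ordering, a throwaway nearest-neighbour helper (not part of the code below) confirms that every exact dot has a detected dot within about a tenth of a pixel:

def match_residuals( detected, exact ):
    ''' distance from each exact dot to the nearest detected dot;
        a cheap comparison that ignores the difference in ordering '''
    det=detected.reshape(-1,2)
    exa=exact.reshape(-1,2)
    return scipy.array( [ scipy.sqrt( ((det-p)**2).sum(1) ).min() for p in exa ] )

# match_residuals( S.dots[0], S.exact_dots[0] ).max() -> roughly 0.1 pixels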
Here is the full working version of what I'm trying to do:
import scipy
import cv2
import itertools
def direct_generation_of_points():
    ''' Skip the part where we actually generate the image,
        just use cv2.projectPoints to generate the exact locations
        of the grid centers.
        ** This seems to work correctly **
    '''
    S=Setup()
    t=tvec(0.0,0.0,1.6) # keep the camera 1.6 meters away from target, looking at the origin
    rvecs=[ rvec(0.0,0.0,0.0), rvec(0.0, scipy.pi/6,0.0), rvec(scipy.pi/8,0.0,0.0), rvec(0.0,0.0,0.5) ]
    S.poses=[ (r,t) for r in rvecs ]
    S.images='No images: just directly generate the extracted circle locations'
    S.dots=S.make_locations_direct()
    calib_flags=cv2.CALIB_ZERO_TANGENT_DIST|cv2.CALIB_SAME_FOCAL_LENGTH
    calib_flags=calib_flags|cv2.CALIB_FIX_K3|cv2.CALIB_FIX_K4
    calib_flags=calib_flags|cv2.CALIB_FIX_K5|cv2.CALIB_FIX_K6
    S.calib_results=cv2.calibrateCamera( [S.grid,]*len(S.dots), S.dots, S.img_size, cameraMatrix=S.A, flags=calib_flags)
    print "calibration error ", S.calib_results[0]
    print "Simulation matrix: \n", S.A
    print "Estimated matrix: \n", S.calib_results[1]
    return S
def basic_test():
    ''' Uses a camera setup to
        (1) generate an image of a grid of circles
        (2) detect those circles
        (3) generate an estimated camera model from the circle detections
        ** This does not work correctly **
    '''
    S=Setup()
    t=tvec(0.0,0.0,1.6) # keep the camera 1.6 meters away from target, looking at the origin
    rvecs=[ rvec(0.0,0.0,0.0), rvec(0.0, scipy.pi/6,0.0), rvec(scipy.pi/8,0.0,0.0), rvec(0.0,0.0,0.5) ]
    S.poses=[ (r,t) for r in rvecs ]
    S.images=S.make_images()
    S.dots=extract_dots( S.images, S.grid_size[::-1] )
    S.exact_dots=S.make_locations_direct()
    calib_flags=cv2.CALIB_ZERO_TANGENT_DIST|cv2.CALIB_SAME_FOCAL_LENGTH
    calib_flags=calib_flags|cv2.CALIB_FIX_K3|cv2.CALIB_FIX_K4|cv2.CALIB_FIX_K5
    calib_flags=calib_flags|cv2.CALIB_FIX_K6
    S.calib_results=cv2.calibrateCamera( [S.grid,]*len(S.dots), S.dots, S.img_size, cameraMatrix=S.A, flags=calib_flags)
    print "calibration error ", S.calib_results[0]
    print "Simulation matrix: \n", S.A
    print "Estimated matrix: \n", S.calib_results[1]
    return S
class Setup(object):
    ''' Class to simulate a camera, produces images '''
    def __init__(self):
        self.img_size=(480,640)
        self.A=scipy.array( [ [5.0e3,  0.0,   self.img_size[1]/2],
                              [ 0.0,   5.0e3, self.img_size[0]/2],
                              [ 0.0,   0.0,   1.0 ] ],
                            dtype=scipy.float32 )
        # Nx, Ny, spacing, dot-size
        self.grid_spec=( 15, 7, 0.01, 0.001 )
        self.grid=square_grid_xy( self.grid_spec[0], self.grid_spec[1], self.grid_spec[2])
        # a pose is a pair: rvec, tvec
        self.poses=[ ( rvec(0.0, scipy.pi/6, 0.0), tvec( 0.0,0.0,1.6) ),
                   ]
    @property
    def grid_size(self):
        return self.grid_spec[:2]
    def make_images(self):
        return [make_dots_image(self.img_size, self.A, rvec, tvec, self.grid, self.grid_spec[-1] ) for (rvec,tvec) in self.poses]
    def make_locations_direct(self):
        return [cv2.projectPoints( self.grid, pose[0], pose[1], self.A, None)[0] for pose in self.poses]
def square_grid_xy( nx, ny, dx ):
    ''' Returns a square grid in the xy plane, useful
        for defining test grids for camera calibration
    '''
    xvals=scipy.arange(nx)*dx
    yvals=scipy.arange(ny)*dx
    xvals=xvals-scipy.mean(xvals)
    yvals=yvals-scipy.mean(yvals)
    res=scipy.zeros( [3, nx*ny], dtype=scipy.float32 )
    for (i,(x,y)) in enumerate( itertools.product(xvals, yvals)):
        res[:,i]=scipy.array( [x,y,0.0] )
    return res.transpose()
# single pixel dots were not detected?
#def make_single_pixel_dots( img_size, A, rvec, tvec, grid, dist_k=None):
#    rgb=scipy.ones( img_size+(3,), dtype=scipy.uint8 )*0xff
#    (dot_locs, jac)=cv2.projectPoints( grid, rvec, tvec, A, dist_k)
#    for p in dot_locs:
#        (c,r)=(int(p[0][0]+0.5), int(p[0][1]+0.5))
#        if 0<=c<img_size[1] and 0<=r<img_size[0]:
#            rgb[r,c,:]=0
#    return rgb
def make_dots_image( img_size, A, rvec, tvec, grid, dotsize, dist_k=None):
    ''' Make the image of the dots, uses cv2.projectPoints to construct the image'''
    # make a white image
    max_intensity=0xffffffff
    intensity=scipy.ones( img_size, dtype=scipy.uint32)*max_intensity
    # Monte-Carlo approach to draw the dots: sample points uniformly around each
    # dot center, keep those inside the dot radius, project them into the image,
    # and darken the pixel each projected sample lands on
    for dot in grid:
        deltas=2*dotsize*( scipy.rand(1024, 3 )-0.5) # no. of samples must be small relative to bit-depth of intensity array
        deltas[:,2]=0
        indicator=scipy.where( scipy.sum( deltas*deltas, 1)<dotsize*dotsize, 1, 0.0)
        print "inside fraction: ", sum(indicator)/len(indicator)
        (pts,jac)=cv2.projectPoints( dot+deltas, rvec, tvec, A, dist_k )
        pts=( p for (ind,p) in zip(indicator, pts) if ind )
        for p in pts:
            (c,r)=( int(p[0][0]+0.5), int( p[0][1]+0.5 ) )
            if r>=0 and c>=0 and c<img_size[1] and r<img_size[0]:
                intensity[r,c]=intensity[r,c]-6
            else:
                print "col, row ", (c,r), " point rejected"
    # rescale so that the image goes from 0 to max intensity
    min_intensity=min(intensity.flat)
    intensity=0xff*( (intensity-min_intensity)/float(max_intensity-min_intensity) )
    pixel_img=scipy.ones( intensity.shape+(3,), dtype=scipy.uint8 )
    return (pixel_img*intensity[:,:,scipy.newaxis]).astype(scipy.uint8 )
def extract_dots( img_list, grid_size ):
    '''
    @arg img_list: usually a list of images, can be a single image
    '''
    # convert a single array into a 1-element list
    if type(img_list) is scipy.ndarray:
        img_list=[img_list,]
    def get_dots( img ):
        res=cv2.findCirclesGridDefault( img, grid_size)
        if not res[0]: # sometimes, reversing the grid size will make the detection successful
            return cv2.findCirclesGridDefault( img, grid_size[::-1] )
        return res
    all_dots=[ get_dots( img) for img in img_list]
    #all_dots=[cv2.findCirclesGrid( img, grid_size[::-1] ) for img in img_list ]
    full_captures=[x[1] for x in all_dots if x[0] ]
    print "Kept {0} full captures out of {1} images".format( len(full_captures), len(img_list) )
    if len(full_captures)<len(img_list):
        print "\t", [x[0] for x in all_dots]
    return [scipy.squeeze(x) for x in full_captures]
# convenience functions
def vec3_32(x,y,z):
    return scipy.array( [x,y,z], dtype=scipy.float32 )

rvec=vec3_32
tvec=vec3_32

if __name__=="__main__":
    basic_test()
Answer 0 (score: 1)
The key problem is the organization of the grid points passed as the first argument to cv2.calibrateCamera. In the question the points are organized in column-major order, so to speak, when they need to be in row-major order:
def square_grid_xy_fixed( nx, ny, dx ):
    ''' Returns a square grid in the xy plane, useful
        for defining test grids for camera calibration
    '''
    xvals=scipy.arange(nx)*dx
    yvals=scipy.arange(ny)*dx
    xvals=xvals-scipy.mean(xvals)
    yvals=yvals-scipy.mean(yvals)
    res=scipy.zeros( [3, nx*ny], dtype=scipy.float32 )
    # need to have "x" be the most rapidly varying index, i.e.
    # it must be the final argument to itertools.product
    for (i,(y,x)) in enumerate( itertools.product(yvals, xvals)):
        res[:,i]=scipy.array( [x,y,0.0] )
    return res.transpose()
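With square_grid_xy_fixed swapped in for square_grid_xy in Setup.__init__, the round trip should recover the simulated intrinsics. The ordering difference is easy to see by printing the first few target points of each version:

# with the fixed version "x" varies fastest, matching the row-major
# order in which findCirclesGrid reports the detected centers
print square_grid_xy( 15, 7, 0.01 )[:3]        # x constant, y stepping: column-major
print square_grid_xy_fixed( 15, 7, 0.01 )[:3]  # x stepping, y constant: row-major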