import numpy as np
import cv2
# Camera intrinsics: focal length (mm) and sensor size in mm and pixels.
f = 33.0              # focal length, mm
pix_width = 4928.0    # sensor width, px
pix_height = 3624.0   # sensor height, px
sensor_width = 23.7   # sensor width, mm
sensor_height = 15.7  # sensor height, mm
# Principal point: assume the optical axis hits the image centre.
u0 = int(pix_width / 2.0)
v0 = int(pix_height / 2.0)
# Pixel density on each axis, then focal length expressed in pixels.
mu = pix_width / sensor_width       # px/mm
alpha_u = f * mu                    # px
mv = pix_height / sensor_height     # px/mm
alpha_v = f * mv                    # px
# Lens distortion coefficients (assume an undistorted image).
D = np.zeros((1, 4))
# Pinhole camera (intrinsic) matrix.
K = np.array([
    [alpha_u, 0.0, u0],
    [0.0, alpha_v, v0],
    [0.0, 0.0, 1.0],
])
import numpy as np
import cv2

# --- Camera intrinsics --------------------------------------------------
f = 33.0              # focal length, mm
pix_width = 4928.0    # sensor width, px
pix_height = 3624.0   # sensor height, px
sensor_width = 23.7   # sensor width, mm
sensor_height = 15.7  # sensor height, mm
# Principal point: assume the optical axis hits the image centre.
u0 = int(pix_width / 2.0)
v0 = int(pix_height / 2.0)
# Pixel density on each axis, then focal length expressed in pixels.
mu = pix_width / sensor_width       # px/mm
alpha_u = f * mu                    # px
mv = pix_height / sensor_height     # px/mm
alpha_v = f * mv                    # px
# Lens distortion coefficients (assume an undistorted image).
D = np.array([[0.0, 0.0, 0.0, 0.0]])
# Pinhole camera (intrinsic) matrix.
K = np.array([[alpha_u, 0.0, u0],
              [0.0, alpha_v, v0],
              [0.0, 0.0, 1.0]])

# --- Ground-control points ----------------------------------------------
# UV: pixel coordinates of the four reference points in the image.
UV_cp = np.array([[1300, 2544],   # left down
                  [1607, 1000],   # left up
                  [3681, 2516],   # right down
                  [3320, 983]], np.float32)  # right up
# XYZ: the same points in world coordinates; all lie on the Z = 0 plane.
XYZ_gcp = np.array([[0, 400, 0],
                    [0, 0, 0],
                    [300, 400, 0],
                    [300, 0, 0]], np.float32)

# cv2.solvePnP returns (retval, rvec, tvec); unpacking only two values
# raises "ValueError: too many values to unpack".
ret, rvec, tvec = cv2.solvePnP(XYZ_gcp, UV_cp, K, D)
# Convert the rotation vector into a 3x3 rotation matrix.
rotM_cam = cv2.Rodrigues(rvec)[0]
# --- Camera intrinsics (sensor is 4928 x 3624 px) -----------------------
sensor_width = 23.7   # sensor width, mm
sensor_height = 15.7  # sensor height, mm
# Principal point: assume the optical axis hits the image centre.
u0 = int(pix_width / 2.0)
v0 = int(pix_height / 2.0)
# Pixel density on each axis, then focal length expressed in pixels.
mu = pix_width / sensor_width       # px/mm
alpha_u = f * mu                    # px
mv = pix_height / sensor_height     # px/mm
alpha_v = f * mv                    # px
# Lens distortion coefficients (assume an undistorted image).
D = np.array([[0.0, 0.0, 0.0, 0.0]])
# Pinhole camera (intrinsic) matrix.
K = np.array([[alpha_u, 0.0, u0],
              [0.0, alpha_v, v0],
              [0.0, 0.0, 1.0]])
# UV: pixel coordinates of the four reference points in the image.
UV_cp = np.array([[1300, 2544],   # left down
                  [1607, 1000],   # left up
                  [3681, 2516],   # right down
                  [3320, 983]], np.float32)  # right up
# XYZ: the same points in world coordinates; all lie on the Z = 0 plane.
XYZ_gcp = np.array([[0, 400, 0],
                    [0, 0, 0],
                    [300, 400, 0],
                    [300, 0, 0]], np.float32)
# cv2.solvePnP returns (retval, rvec, tvec); unpacking only two values
# raises "ValueError: too many values to unpack".
ret, rvec, tvec = cv2.solvePnP(XYZ_gcp, UV_cp, K, D)
# Convert the rotation vector into a 3x3 rotation matrix.
rotM_cam = cv2.Rodrigues(rvec)[0]
# Image (UV) and world (XYZ) ground-control points.
UV_cp = np.array([[1300, 2544],   # left down
                  [1607, 1000],   # left up
                  [3681, 2516],   # right down
                  [3320, 983]], np.float32)  # right up
# All world points lie on the ground, so Z = 0.0 everywhere.
XYZ_gcp = np.array([[0, 400, 0],
                    [0, 0, 0],
                    [300, 400, 0],
                    [300, 0, 0]], np.float32)
# cv2.solvePnP returns (retval, rvec, tvec); unpacking only two values
# raises "ValueError: too many values to unpack".
ret, rvec, tvec = cv2.solvePnP(XYZ_gcp, UV_cp, K, D)
# Convert the rotation vector into a 3x3 rotation matrix.
rotM_cam = cv2.Rodrigues(rvec)[0]
我从 “OpenCV: use solvePnP to determine homography” 一文中获得了此代码，但运行时得到如下错误：
File "solv.py", line 50, in <module>
rvec, tvec = cv2.solvePnP(XYZ_gcp, UV_cp, K, D)
ValueError: too many values to unpack
我们应如何求出世界坐标 XYZ？请大家帮忙！
答案 0（得分：3）
如果你得到ValueError: too many values to unpack
,则意味着左侧的变量数与右侧返回的值的数量不匹配。
在你的情况下:
rvec, tvec = cv2.solvePnP(XYZ_gcp, UV_cp, K, D)
方法cv2.solvePnP()
很可能在返回元组中返回两个以上的值。要快速检查,请执行以下操作:
print len(cv2.solvePnP(XYZ_gcp, UV_cp, K, D))
或更好,请参阅实际的返回元组:
print cv2.solvePnP(XYZ_gcp, UV_cp, K, D)
并确保左侧的变量数量与之匹配。例如（未经测试）：
rtval, rvec, tvec = cv2.solvePnP(XYZ_gcp, UV_cp, K, D)
查看此帖子,了解更多信息ValueError: too many values to unpack (Python 2.7)