I am using the openni and OpenCV libraries with Python 3.7 to measure the distance from a Kinect to an object. I found some examples and recorded a few test videos. The key question is: how can I segment an object seen by the Kinect? Is there a simple way to do this in Python 3.7? I should add that I am on 32-bit Python, so I cannot install PyPCL or similar libraries. For reference, see this video: https://www.youtube.com/watch?v=H7zaEFXKomY
Here is the code I have so far:
import numpy
import cv2
from openni import openni2


def show_depth_value(event, x, y, flags, param):
    # Mouse callback: print the raw depth value (in mm) under the cursor
    global depth
    print(depth[y, x], 'mm')


if __name__ == '__main__':
    # initialize() can also accept the path of the OpenNI redistribution
    openni2.initialize()
    dev = openni2.Device.open_any()

    depth_stream = dev.create_depth_stream()
    print(depth_stream.get_mirroring_enabled())  # call the method; without () it only prints the bound method
    depth_stream.start()

    color_stream = dev.create_color_stream()
    color_stream.start()

    # Scale the raw 16-bit depth values into 0-255 for display
    depth_scale_factor = 255.0 / depth_stream.get_max_pixel_value()

    cv2.namedWindow('depth')
    cv2.setMouseCallback('depth', show_depth_value)

    while True:
        # Get depth
        depth_frame = depth_stream.read_frame()
        h, w = depth_frame.height, depth_frame.width
        depth = numpy.ctypeslib.as_array(
            depth_frame.get_buffer_as_uint16()).reshape(h, w)
        depth_uint8 = cv2.convertScaleAbs(depth, alpha=depth_scale_factor)
        depth_colored = cv2.applyColorMap(depth_uint8, cv2.COLORMAP_HSV)

        # Get color
        color_frame = color_stream.read_frame()
        color = numpy.ctypeslib.as_array(
            color_frame.get_buffer_as_uint8()).reshape(h, w, 3)
        color = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)

        # Display
        cv2.imshow('depth', depth_uint8)
        cv2.imshow('depth colored', depth_colored)
        cv2.imshow('color', color)

        k = cv2.waitKey(10) & 0xff
        if k == 27:  # ESC to quit
            break

    depth_stream.stop()
    color_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
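
As for the segmentation itself, this is the kind of thing I have in mind, but I am not sure it is the right approach. It is only a rough sketch, not tested with the setup above: the segment_nearest_object helper and the 500-1500 mm depth band are placeholders I made up, the only real assumption being that the depth array from OpenNI is already in millimetres.

import numpy
import cv2

def segment_nearest_object(depth_mm, near_mm=500, far_mm=1500):
    # Keep only pixels whose depth (in mm) falls inside the assumed band
    mask = ((depth_mm >= near_mm) & (depth_mm <= far_mm)).astype(numpy.uint8) * 255
    # Remove the speckle noise typical of Kinect depth maps
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, numpy.ones((5, 5), numpy.uint8))
    # [-2] picks the contour list in both the OpenCV 3 and OpenCV 4 return formats
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    if not contours:
        return mask, None
    # Assume the object of interest is the largest blob inside the band
    return mask, max(contours, key=cv2.contourArea)

Inside the while loop it would be called on the depth array, something like:

mask, contour = segment_nearest_object(depth)
if contour is not None:
    distance_mm = numpy.median(depth[mask > 0])  # rough distance to the segmented object
    cv2.drawContours(color, [contour], -1, (0, 255, 0), 2)

Would a fixed depth band like this be good enough, or is there a better way that still works on 32-bit Python without PyPCL?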