Getting RGB and depth image streams with just pykinect and Python 3

Asked: 2019-03-31 00:23:53

Tags: python-3.x opencv kinect pykinect

I want to get the depth and RGB video streams from a Kinect (version 1). I am using the Python 3 version of pykinect, not CPython.

I have found a few examples, but the pykinect documentation is practically nonexistent and I don't want to use pygame.

On Linux with freenect, I did:

rgb_stream = freenect.sync_get_video()[0]              # RGB frame as a numpy array
rgb_stream = rgb_stream[:, :, ::-1]                    # reverse the channel order to BGR
rgb_image = cv.cvtColor(rgb_stream, cv.COLOR_BGR2RGB)  # back to RGB for display

depth_stream = freenect.sync_get_depth()[0]            # raw depth frame
depth_stream = np.uint8(depth_stream)                  # cast to 8 bits
depth_image = cv.cvtColor(depth_stream, cv.COLOR_GRAY2RGB)
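
Note that sync_get_depth() returns 11-bit values (0-2047) by default, so the bare np.uint8 cast above wraps around; when I just need something viewable I scale the range down first, roughly like this (a sketch of the idea, not the exact code I ran):

depth_stream = freenect.sync_get_depth()[0]            # 11-bit depth values, 0-2047
depth_8bit = np.uint8(depth_stream >> 3)               # scale 2047 -> 255 before casting
depth_image = cv.cvtColor(depth_8bit, cv.COLOR_GRAY2RGB)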

But I am using pykinect on Windows, and I would like to grab the depth and RGB streams in a similar way, then process them with OpenCV and display them with Qt.

Here is the example code I found:

from pykinect import nui
import numpy
import cv2

def video_handler_function(frame):
    # called by pykinect each time a new color frame is ready
    video = numpy.empty((480, 640, 4), numpy.uint8)
    frame.image.copy_bits(video.ctypes.data)   # copy the frame into the numpy buffer
    cv2.imshow('KINECT Video Stream', video)

kinect = nui.Runtime()
kinect.video_frame_ready += video_handler_function   # register the handler for the color stream
kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)

cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

while True:
    key = cv2.waitKey(1)    # pump the GUI event loop; frames arrive via the handler
    if key == 27:           # Esc quits
        break

kinect.close()
cv2.destroyAllWindows()

What is video_handler_function, and what is the purpose of kinect.video_frame_ready += video_handler_function?

I tried kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth) to get the depth image, with some changes to the handler function, but I could not get it to work.

1 Answer:

Answer 0 (score: 1)

from pykinect import nui
import numpy
import cv2


kinect = nui.Runtime()
kinect.skeleton_engine.enabled = True

def getColorImage(frame):
    height, width = frame.image.height, frame.image.width   # size of the color frame (640x480)
    rgb = numpy.empty((height, width, 4), numpy.uint8)
    frame.image.copy_bits(rgb.ctypes.data)                   # copy the frame bits into the array

    cv2.imshow('KINECT Video Stream', rgb)                   # display the image

def getDepthImage(frame):
    height, width = frame.image.height, frame.image.width    # size of the depth frame (320x240)
    depth = numpy.empty((height, width, 1), numpy.uint16)    # depth pixels are 16-bit
    frame.image.copy_bits(depth.ctypes.data)                  # copy the frame bits first
    arr2d = (depth >> 3) & 4095                               # drop the 3-bit player index, keep the depth in mm
    arr2d >>= 4                                               # squeeze the 12-bit range into 8 bits

    cv2.imshow('KINECT depth Stream', arr2d.astype(numpy.uint8))

def frame_ready(frame):
    for skeleton in frame.SkeletonData:
        if skeleton.eTrackingState == nui.SkeletonTrackingState.TRACKED:
            print(skeleton.Position.x, skeleton.Position.y, skeleton.Position.z, skeleton.Position.w)

def main():
    # register the handlers and open the streams once, before entering the display loop
    kinect.video_frame_ready += getColorImage
    kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)
    cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

    kinect.depth_frame_ready += getDepthImage
    kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth)
    cv2.namedWindow('KINECT depth Stream', cv2.WINDOW_AUTOSIZE)

    kinect.skeleton_frame_ready += frame_ready

    while True:
        if cv2.waitKey(1) == 27:    # Esc quits
            cv2.destroyAllWindows()
            kinect.close()
            break

if __name__ == '__main__':
    main()
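
To answer the question in the post: kinect.video_frame_ready is an event, and += subscribes a callback that pykinect invokes each time a new color frame arrives (depth_frame_ready and skeleton_frame_ready work the same way), so the main loop only has to keep pumping cv2.waitKey. If you would rather hand the frames to your own OpenCV or Qt code than call imshow inside the handler, here is a minimal sketch; color_handler and latest_bgr are just illustrative names, and it assumes the color frame arrives as a 4-channel BGRA/BGRX buffer:

from pykinect import nui
import numpy
import cv2

latest_bgr = None   # shared buffer that the Qt / processing side reads

def color_handler(frame):
    global latest_bgr
    bgra = numpy.empty((frame.image.height, frame.image.width, 4), numpy.uint8)
    frame.image.copy_bits(bgra.ctypes.data)               # raw 4-channel frame from the sensor
    latest_bgr = cv2.cvtColor(bgra, cv2.COLOR_BGRA2BGR)   # drop the unused channel for ordinary OpenCV calls
    # for display in Qt, convert once more to RGB:
    # rgb = cv2.cvtColor(latest_bgr, cv2.COLOR_BGR2RGB)

kinect = nui.Runtime()
kinect.video_frame_ready += color_handler
kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)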
