我正在尝试从我的MS Kinect V2生成真实世界坐标。
我设法将pyqt + opengl散点图拼凑在一起,并使用pylibfreenect2显示Kinect的深度数据。
我立即注意到深度数据与点云数据并不相同。请注意我房间的天花板严重扭曲（本应平整的天花板，看起来却像一条曲棍球棒状的曲线）。
经过一些阅读和挖掘源文件后,我设法找到了一个看起来很有希望的功能。
getPointXYZ - 在点云中构建一个三维点。
因为它一次只能在一个像素上工作,所以我写了一个简单的嵌套for循环。在下面的代码中,您应该看到以下行:
out = np.zeros((d.shape[0]*d.shape[1], 3)) #shape = (217088, 3)
for row in range(d.shape[0]):
for col in range(d.shape[1]):
world = registration.getPointXYZ(undistorted, row, col) #convert depth pixel to real-world coordinate
out[row + col] = world
不确定那里发生了什么。它看起来更像是一条直线，有时又像一个非常扁平的矩形（而且它在三个维度上都处于任意角度）。当我在传感器前移动手时，可以看到一些点在移动，但看不出任何可辨认的形状。似乎所有的点都挤在了一起。
以下是一个Python脚本,它将显示包含openGL散点图的pyQt应用程序窗口。通过pylibfreenect2从Kinect传感器接收帧,并通过迭代深度数据的每一行和每列来生成散点图的点,并通过getPointXYZ发送它(这非常慢并且不起作用......)。
# coding: utf-8
# An example using startStreams
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np
import cv2
import sys
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame, libfreenect2
# Locate and open the first available Kinect v2 device.
fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)
serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial)
# Build the frame-type bitmask: we want color, IR and depth streams,
# all delivered through one synchronized listener.
types = 0
types |= FrameType.Color
types |= (FrameType.Ir | FrameType.Depth)
listener = SyncMultiFrameListener(types)
# Register listeners
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)
device.start()
# NOTE: must be called after device.start()
# Registration maps depth pixels to color pixels / real-world coordinates
# using the device's factory camera intrinsics.
registration = Registration(device.getIrCameraParams(),
                            device.getColorCameraParams())
# Work buffers for registration.apply(): 512x424 pixels, 4 bytes each.
undistorted = Frame(512, 424, 4)
registered = Frame(512, 424, 4)
#QT app
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
#initialize some points data
# Placeholder single point; update() replaces it every frame.
pos = np.zeros((1,3))
sp2 = gl.GLScatterPlotItem(pos=pos)
w.addItem(sp2)
def update():
    """Grab one synchronized frame set from the Kinect and redraw the
    scatter plot with real-world XYZ coordinates.

    Reads the module-level ``listener``, ``registration``,
    ``undistorted``, ``registered`` and ``sp2`` objects; called
    periodically from a QTimer.
    """
    frames = listener.waitForNewFrame()
    color = frames["color"]
    depth = frames["depth"]
    d = depth.asarray()
    # Undistort the depth frame and map the color image onto it.
    registration.apply(color, depth, undistorted, registered)

    # Convert every depth pixel to a real-world coordinate, one pixel
    # at a time (slow, but matches the getPointXYZ API).
    n_rows, n_cols = d.shape  # 424 x 512 on the Kinect v2
    out = np.zeros((n_rows * n_cols, 3))
    for row in range(n_rows):
        for col in range(n_cols):
            # BUG FIX: the original wrote out[row + col], which maps every
            # anti-diagonal of the image onto the same vertex slot and
            # leaves most of `out` at the origin — hence the "flat line /
            # squashed points" symptom.  A row-major flat index is
            # row * n_cols + col.
            out[row * n_cols + col] = registration.getPointXYZ(undistorted, row, col)
    sp2.setData(pos=out, size=2)
    listener.release(frames)
# Poll the sensor and refresh the scatter plot every 50 ms.
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(50)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
    # Shut the device down cleanly once the Qt event loop exits.
    device.stop()
    device.close()
    sys.exit(0)
我不确定接下来应该做什么才能获得实际的点云坐标数据。
有没有人对我做错了什么有任何建议?
我的操作系统是 Ubuntu 16.04，Python 版本为 3.5。
感谢。
答案 0（得分：1）
答案实际上是为了解决我在那些嵌套循环中犯的错误。我注意到它没有正确索引数组:
#From:
out[row + col]
#To:
out[row * n_columns + col]
顶点现在准确定位在3d空间中,看起来都很好!
以下是经过修订且功能齐全的代码:
# coding: utf-8
# An example using startStreams
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np
import cv2
import sys
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame, libfreenect2
# Locate and open the first available Kinect v2 device.
fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)
serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial)
# Request color, IR and depth streams via one synchronized listener.
types = 0
types |= FrameType.Color
types |= (FrameType.Ir | FrameType.Depth)
listener = SyncMultiFrameListener(types)
# Register listeners
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)
device.start()
# NOTE: must be called after device.start()
# Registration uses the device's camera intrinsics to map depth pixels
# to color pixels and to real-world coordinates.
registration = Registration(device.getIrCameraParams(),
                            device.getColorCameraParams())
# Work buffers for registration.apply(): 512x424 pixels, 4 bytes each.
undistorted = Frame(512, 424, 4)
registered = Frame(512, 424, 4)
#QT app
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
#initialize some points data
# Placeholder single point; update() replaces it every frame.
pos = np.zeros((1,3))
sp2 = gl.GLScatterPlotItem(pos=pos)
w.addItem(sp2)
def update():
    """Fetch one frame set, build the colored real-world point cloud,
    and push it to the GLScatterPlotItem.

    Reads the module-level ``listener``, ``registration``,
    ``undistorted``, ``registered`` and ``sp2`` objects; called
    periodically from a QTimer.
    """
    frames = listener.waitForNewFrame()
    color = frames["color"]
    depth = frames["depth"]
    d = depth.asarray()
    registration.apply(color, depth, undistorted, registered)
    # The raw frames are no longer needed: everything below reads from
    # the `undistorted` / `registered` work buffers.
    listener.release(frames)

    n_rows, n_cols = d.shape  # 424 x 512 on the Kinect v2
    out = np.zeros((n_rows * n_cols, 3), dtype=np.float64)
    colors = np.zeros((n_rows * n_cols, 3), dtype=np.float64)
    for row in range(n_rows):
        for col in range(n_cols):
            # getPointXYZRGB yields the XYZ coordinate plus the mapped
            # color channels, unpacked here in B, G, R order (0-255).
            X, Y, Z, B, G, R = registration.getPointXYZRGB(
                undistorted, registered, row, col)
            i = row * n_cols + col  # row-major flat index (the key fix)
            out[i] = (X, Y, Z)
            colors[i] = (R / 255.0, G / 255.0, B / 255.0)
    # Cleanup vs. the original: dropped the dead
    # `colors = ((1.0, 1.0, 1.0, 1.0))` assignment that was immediately
    # overwritten, and the redundant np.array(out, dtype=np.float64)
    # copy in setData (out already has that dtype).
    sp2.setData(pos=out, color=colors, size=2)
# Poll the sensor and refresh the scatter plot every 50 ms.
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(50)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
    # Shut the device down cleanly once the Qt event loop exits.
    device.stop()
    device.close()
    sys.exit(0)
[编辑]
有关其他信息,请参阅This Post