How to project 2D laser scanner data into the world coordinate frame

Date: 2019-12-21 13:27:39

Tags: python coordinates robotics coordinate-transformation lidar

I have a small tank-drive robot with an RPLidar A1 mounted on top, driven by an onboard Raspberry Pi. I am building a small arena with cardboard obstacles. I can find the robot's pose (x, y, heading) using an ArUco marker and a downward-looking camera mounted overhead. My x axis is aligned with magnetic north, the y axis points west, and the z axis points up (not really needed here).

Right now, if I drive the robot around and look at the laser scan, the robot always sits at the centre of the coordinate frame: it looks as though the robot is stationary and the world is moving around it.

Instead, I want to use the laser scan data together with the robot pose to display a map in which the world is stationary and the robot moves through it, like what you see in Google Maps (similar in spirit, not a full-blown application).

I only want to use the current laser scan and the robot's current pose, not any previous poses or scans. I know this may make the map look a bit glitchy, and I am fine with that. Given a robot pose (rx, ry, heading) and a point (x, y) in the scanner's coordinate frame, how do I return the point's coordinates in the world coordinate frame?
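My current guess is that this is just a standard 2D rotation plus translation, something like the sketch below (my own assumption: heading is in radians, measured counter-clockwise from the world x axis, and the lidar sits at the robot's centre), but I am not sure I have the conventions right -

import math

def scan_point_to_world(rx, ry, heading, x, y):
    # Rotate the scanner-frame point (x, y) by the robot heading,
    # then translate by the robot's world position (rx, ry).
    wx = rx + x * math.cos(heading) - y * math.sin(heading)
    wy = ry + x * math.sin(heading) + y * math.cos(heading)
    return wx, wy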

Here is the code for the ArUco marker -

import numpy as np
import cv2
import cv2.aruco as aruco
import sys, time, math
cap = cv2.VideoCapture(1)

#--- Define Tag
id_to_find  = 1
marker_size  = 6.4#- [cm]



# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
    Rt = np.transpose(R)
    shouldBeIdentity = np.dot(Rt, R)
    I = np.identity(3, dtype=R.dtype)
    n = np.linalg.norm(I - shouldBeIdentity)
    return n < 1e-6


# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R):
    assert (isRotationMatrix(R))

    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])

    singular = sy < 1e-6

    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0

    return np.array([x, y, z])




#--- Get the camera calibration path
calib_path  = ""
camera_matrix   = np.loadtxt(calib_path+'cameraMatrix.txt', delimiter=',')
camera_distortion   = np.loadtxt(calib_path+'cameraDistortion.txt', delimiter=',')

#--- 180 deg rotation matrix around the x axis
R_flip  = np.zeros((3,3), dtype=np.float32)
R_flip[0,0] = 1.0
R_flip[1,1] =-1.0
R_flip[2,2] =-1.0

#--- Define the aruco dictionary
aruco_dict  = aruco.getPredefinedDictionary(aruco.DICT_ARUCO_ORIGINAL)
parameters  = aruco.DetectorParameters_create()

#--- Capture the videocamera (this may also be a video or a picture)
#-- Set the camera size as the one it was calibrated with
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

#-- Font for the text in the image
font = cv2.FONT_HERSHEY_PLAIN

while True:

    #-- Read the camera frame
    ret, frame = cap.read()

    #-- Convert to gray scale (remember, OpenCV stores color images in Blue, Green, Red)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #-- Find all the aruco markers in the image
    corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=aruco_dict,
                                                 parameters=parameters,
                                                 cameraMatrix=camera_matrix,
                                                 distCoeff=camera_distortion)

    if ids is not None:  # and ids[0] == id_to_find:

        #-- ret = [rvec, tvec, ?]
        #-- array of rotation and position of each marker in camera frame
        #-- rvec = [[rvec_1], [rvec_2], ...]    attitude of the marker respect to camera frame
        #-- tvec = [[tvec_1], [tvec_2], ...]    position of the marker in camera frame
        for cr in corners:
            print('IDS FOUND =' + str(len(corners)))
            #print(corners)
            ret = aruco.estimatePoseSingleMarkers(cr, marker_size, camera_matrix, camera_distortion)

            #-- Unpack the output, get only the first
            rvec, tvec = ret[0][0,0,:], ret[1][0,0,:]

            #-- Draw the detected marker and put a reference frame over it
            aruco.drawDetectedMarkers(frame, corners)
            aruco.drawAxis(frame, camera_matrix, camera_distortion, rvec, tvec, 10)

            #-- Print the tag position in camera frame
            str_position = "MARKER Position x=%4.0f  y=%4.0f  z=%4.0f"%(tvec[0], -tvec[1], tvec[2])
            cv2.putText(frame, str_position, (0, 100), font, 1, (0, 0, 0), 2, cv2.LINE_AA)

    cv2.imshow('frame', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
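In case it matters, this (untested) sketch is how I plan to turn the marker pose into the robot heading, reusing R_flip and rotationMatrixToEulerAngles from above; the sign and zero offset probably still need adjusting for my north/west world axes -

#-- My assumption: the robot heading is the yaw of the marker as seen by the overhead camera
R_ct = cv2.Rodrigues(rvec)[0]   # rotation matrix taking marker-frame points into the camera frame
R_tc = R_ct.T                   # inverse: camera frame to marker frame
roll, pitch, yaw = rotationMatrixToEulerAngles(R_flip @ R_tc)
heading = yaw                   # likely needs a sign flip / offset for the north/west axes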

Here is the code for the laser scanner data -

#!/usr/bin/env python3
'''Animates distances and measurement quality'''
from rplidar import RPLidar
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation

PORT_NAME = 'COM18'
DMAX = 4000
IMIN = 0
IMAX = 50

def update_line(num, iterator, line):
    scan = next(iterator)
    offsets = np.array([(np.radians(meas[1]), meas[2]) for meas in scan])
    line.set_offsets(offsets)
    intens = np.array([meas[0] for meas in scan])
    line.set_array(intens)
    return line,

def run():
    lidar = RPLidar(PORT_NAME)
    fig = plt.figure()
    ax = plt.subplot(111, projection='polar')
    line = ax.scatter([0, 0], [0, 0], s=5, c=[IMIN, IMAX],
                           cmap=plt.cm.Greys_r, lw=0)
    ax.set_rmax(DMAX)
    ax.grid(True)

    iterator = lidar.iter_scans()
    ani = animation.FuncAnimation(fig, update_line,
        fargs=(iterator, line), interval=50)
    plt.show()
    lidar.stop()
    lidar.disconnect()

if __name__ == '__main__':
    run()
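Putting the two together, this is the kind of per-scan projection I have in mind (again a sketch under my own assumptions: iter_scans yields (quality, angle in degrees, distance) tuples, angle 0 points along the robot heading, and the distances are in the same units as rx, ry) -

import numpy as np

def project_scan(scan, rx, ry, heading):
    # Convert each (quality, angle_deg, distance) measurement to Cartesian
    # coordinates in the scanner frame, then rotate and translate it into
    # the world frame using the robot pose from the ArUco tracker.
    world_points = []
    for _, angle_deg, dist in scan:
        a = np.radians(angle_deg)
        x, y = dist * np.cos(a), dist * np.sin(a)             # scanner frame
        wx = rx + x * np.cos(heading) - y * np.sin(heading)   # world frame
        wy = ry + x * np.sin(heading) + y * np.cos(heading)
        world_points.append((wx, wy))
    return np.array(world_points)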

0 Answers:

There are no answers yet.