Solving coordinate state estimation with a particle filter in Python

Date: 2019-05-25 04:06:06

Tags: python state prediction pose-estimation particle-filter

I have a pickle file that contains 300 coordinates of my test subject's position over time. There are some missing values in between, and I am using a particle filter to estimate those missing values. In the end I get some predictions (not entirely accurate), but they drift away from the true track.

The position of my object is really the position of my object's nose. I take a total of 300 frames, and each frame contains a coordinate for the nose. Some frames have the value (0, 0), which means those values are missing. So, to recover them, I implemented a particle filter. I am new to particle filters, so there is a chance I have messed up the code. The result I get is a prediction for all 300 frames, but with drifting values; you can get a clear idea from the image. My measurements are the distances to four landmarks, and I supply the heading angle and the distance to the next point as an additional measurement (the control input u).
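For concreteness, here is a minimal sketch of the data layout and the measurement model described above, assuming the track is stored as a (300, 2) NumPy array named coords (a hypothetical name; my real data comes from the pickle file):

import numpy as np

# Hypothetical (300, 2) array of nose coordinates; (0, 0) marks a missing frame.
coords = np.random.rand(300, 2) * 256
coords[50:60] = 0.0  # simulate a gap of missing detections

missing = np.all(coords == 0.0, axis=1)  # boolean mask of missing frames
landmarks = np.array([[0, 0], [0, 256], [256, 0], [256, 256]])

# Distance from the current position to each landmark, in the same
# form as the measurement z used in the update() step below:
frame = 10
z = np.linalg.norm(landmarks - coords[frame], axis=1)

My full code follows: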


from filterpy.monte_carlo import systematic_resample
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm
from numpy.random import randn, uniform, random
import scipy.stats
import pickle
from math import atan2, sqrt

#####################################################
def create_uniform_particles(x_range, y_range, hdg_range, N):
    particles = np.empty((N, 3))
    particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
    particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
    particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
    particles[:, 2] %= 2 * np.pi
    return particles

def create_gaussian_particles(mean, std, N):
    particles = np.empty((N, 3))
    particles[:, 0] = mean[0] + (randn(N) * std[0])
    particles[:, 1] = mean[1] + (randn(N) * std[1])
    particles[:, 2] = mean[2] + (randn(N) * std[2])
    particles[:, 2] %= 2 * np.pi
    return particles


#####################################################
def predict(particles, u, std):
    # Move according to control input u (heading, distance)
    # with noise Q (std heading, std distance).
    N = len(particles)
    # The heading comes straight from the control input; the per-particle
    # noise terms below are currently commented out (std is passed as (0, 0)).
    #particles[:, 2] += u[0] + (randn(N) * std[0])
    #particles[:, 2] %= 2 * np.pi
    #u[0] += (randn(N) * std[0])
    u[0] %= 2 * np.pi
    # move in the commanded direction
    dist = u[1]  # + (randn(N) * std[1])
    particles[:, 0] += np.cos(u[0]) * dist
    particles[:, 1] += np.sin(u[0]) * dist


#####################################################
def update(particles, weights, z, R, landmarks):
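    # Weight each particle by the likelihood of the measured landmark
    # distances z under Gaussian sensor noise with std R.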
    for i, landmark in enumerate(landmarks):
        distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
        weights *= scipy.stats.norm(distance, R).pdf(z[i])

    weights += 1.e-300      # avoid round-off to zero
    weights /= sum(weights) # normalize


#####################################################   
def estimate(particles, weights):
    #returns mean and variance of the weighted particles

    pos = particles[:, 0:2]
    mean = np.average(pos, weights=weights, axis=0)
    var  = np.average((pos - mean)**2, weights=weights, axis=0)
    return mean, var

#####################################################
def simple_resample(particles, weights):
    N = len(particles)
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1. # avoid round-off error
    indexes = np.searchsorted(cumulative_sum, random(N))

    # resample according to indexes
    particles[:] = particles[indexes]
    weights.fill(1.0 / N)
#####################################################
def neff(weights):
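    # Effective sample size: 1 / sum(w_i^2). Small values mean the
    # weights have degenerated onto a few particles.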
    return 1. / np.sum(np.square(weights))

#####################################################
def resample_from_index(particles, weights, indexes):
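    # Keep only the particles chosen by the resampling indexes and
    # reset all weights to uniform.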
    particles[:] = particles[indexes]
    weights[:] = weights[indexes]
    weights.fill(1.0 / len(weights))

#####################################################
def read_pickle(pkl_file, f, j):
    # Load the detections and pull keypoint set 1 out of the pickle file.
    with open(pkl_file, 'rb') as res:
        dets = pickle.load(res, encoding='latin1')
    all_keyps = dets['all_keyps']
    keyps_t = np.array(all_keyps[1])
    keyps = np.zeros((keyps_t.shape[0], 4, 17))
    for k in range(keyps.shape[0]):
        if len(keyps_t[k]) != 0:   # frame has a detection
            keyps[k] = keyps_t[k][0]
    keyps = keyps[:, :2, :]  # keep only the x and y rows

    # Rescale the coordinates to the 256 x 256 frame.
    for i in range(keyps.shape[0]):
        keyps[i][0] = keyps[i][0] / 480 * 256
        keyps[i][1] = keyps[i][1] / 640 * 256

    # Position of keypoint j in frame f, and the control input
    # (heading and distance to the same keypoint in frame f+1).
    x0 = keyps[f][0][j]
    y0 = keyps[f][1][j]
    x1 = keyps[f + 1][0][j]
    y1 = keyps[f + 1][1][j]

    cord = np.array([x0, y0])
    orientation = atan2((y1 - y0), (x1 - x0))
    dist = sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
    u = np.array([orientation, dist])
    return (cord, u)

#####################################################
def run_pf1(N, iters=298, sensor_std_err=.1, 
            do_plot=True, plot_particles=False,
            xlim=(-256, 256), ylim=(-256, 256),
            initial_x=None):
    landmarks = np.array([[0, 0], [0, 256], [256,0], [256,256]])
    NL = len(landmarks)

    plt.figure()

    # create particles and weights
    if initial_x is not None:
        particles = create_gaussian_particles(
            mean=initial_x, std=(5, 5, np.pi/4), N=N)
    else:
        particles = create_uniform_particles((0,20), (0,20), (0, 6.28), N)
    weights = np.ones(N) / N

    if plot_particles:
        alpha = .20
        if N > 5000:
            alpha *= np.sqrt(5000)/np.sqrt(N)           
        plt.scatter(particles[:, 0], particles[:, 1], 
                    alpha=alpha, color='g')

    xs = []
    #robot_pos, u = read_pickle('.pkl',1,0)
    for x in range(iters):
        robot_pos, uv = read_pickle('.pkl',x,0)
        print("orignal: ", robot_pos,)
        # distance from robot to each landmark
        zs = (norm(landmarks - robot_pos, axis=1) + 
              (randn(NL) * sensor_std_err))

        # move particles according to the control input (heading, distance)
        # from the pickle file; process noise is disabled here
        predict(particles, u=uv, std=(0, .0))

        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err, 
               landmarks=landmarks)

        # resample if too few effective particles
        if neff(weights) < N/2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            assert np.allclose(weights, 1/N)
        mu, var = estimate(particles, weights)
        #mu +=(120,10)
        xs.append(mu)
        print("expected: ",mu)
        if plot_particles:
            plt.scatter(particles[:, 0], particles[:, 1], 
                        color='k', marker=',', s=1)
        p1 = plt.scatter(robot_pos[0], robot_pos[1], marker='+',
                         color='k', s=180, lw=3)
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')
    xs = np.array(xs)
    #plt.plot(xs[:, 0], xs[:, 1])
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    plt.xlim(*xlim)
    plt.ylim(*ylim)
    print('final position error, variance:\n\t', mu - robot_pos, var)
    plt.show()
    return xs

###############################
run_pf1(N=5000)

From the particle filter I expect a set of 300 coordinate estimates, so that I can replace the missing values in the original file with these predictions.
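A minimal sketch of that final step, assuming run_pf1 returns the array xs of per-frame estimates (as in the return statement above) and coords is the original (300, 2) track from the sketch near the top:

coords_filled = coords.copy()
xs = run_pf1(N=5000)  # (iters, 2) array of PF position estimates
for f in range(len(xs)):
    # replace only the frames that were missing, i.e. stored as (0, 0)
    if np.all(coords_filled[f] == 0.0):
        coords_filled[f] = xs[f]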

0 Answers