简述ExtendKalmanFilter
想必大家在看扩展卡尔曼滤波时候都已经熟悉或者了解卡尔曼滤波算法。这里主要对扩展卡尔曼滤波进行简单概述,文末链接都有对扩展卡尔曼滤波详细讲解。
首先,为什么需要扩展卡尔曼滤波?卡尔曼滤波算法不能够解决什么,需要对其优化。
解答这个问题,其实文末链接扩展卡尔曼滤波EKF解释的比较充分。其实主要就是卡尔曼滤波解决是线性系统问题,一旦遇到非线性系统情况下,卡尔曼滤波算法就无法解决了。通常现实生活中很多都是非线性状态,因此需要EKF
来进一步优化。那么,EKF
是如何解决的呢?既然,你的前后状态变化是非线性的,那么我就想办法用线性来近似表示这种非线性变化。这里用到的是泰勒展开式,取一阶导数部分即可。也许还存在其它用线性近似表示非线性的方式,目前我尚未了解。
上面吧啦吧啦一堆文字,其实可以用下面几张图代表一下,参考扩展卡尔曼滤波EKF里面的图像,侵权请联系博主删除。
总结一下:扩展卡尔曼滤波原理与流程几乎与卡尔曼滤波一致,只是在处理非线性变换时使用泰勒一阶展开来近似非线性变化【原因在于正态分布的随机变量经过非线性系统后就不再是正态分布了】。取一阶泰勒展开就是近似的线性化表示,所有状态变量的偏导数组合在一起就是我们经常看到的雅可比矩阵。
代码部分
下面展示的python
代码来对比卡尔曼与扩展卡尔曼滤波的效果,代码原始路径为传送门我做了简单的修改来方便可视化与卡尔曼对比。
import numpy as np
import math
import matplotlib.pyplot as plt
# Estimation parameter of EKF
Q = np.diag([0.1, 0.1, np.deg2rad(1.0), 1.0])**2
R = np.diag([1.0, np.deg2rad(40.0)])**2
# Simulation parameter
Qsim = np.diag([0.5, 0.5])**2
Rsim = np.diag([1.0, np.deg2rad(30.0)])**2
dt = 0.1 # time tick [s]
similar_time = 60.0 # simulation time [s]
show_animation = True
def calc_input():
v = 1.0 # [m/s]
yawrate = 0.1 # [rad/s]
u = np.matrix([v, yawrate]).T
return u
def observation(xTrue, xd, u):
xTrue = motion_model(xTrue, u)
# add noise to gps x-y
zx = xTrue[0, 0] + np.random.randn() * Qsim[0, 0]
zy = xTrue[1, 0] + np.random.randn() * Qsim[1, 1]
z = np.matrix([zx, zy])
# add noise to input
ud1 = u[0, 0] + np.random.randn() * Rsim[0, 0]
ud2 = u[1, 0] + np.random.randn() * Rsim[1, 1]
ud = np.matrix([ud1, ud2]).T
xd = motion_model(xd, ud)
return xTrue, z, xd, ud
def motion_model(x, u):
F = np.matrix([[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 0]])
B = np.matrix([[dt * math.cos(x[2, 0]), 0],
[dt * math.sin(x[2, 0]), 0],
[0.0, dt],
[1.0, 0.0]])
# x_{4x1} = F_{4x4} * x_{4x1} + B_{4x2} * u_{2x1}
x = F * x + B * u
return x
def observation_model(x):
# Observation Model
H = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0]
])
# z_{2x1} = H_{2x4} * x_{4x1}
z = H * x
return z
def jacobF(x, u):
"""
Jacobian of Motion Model
motion model
x_{t+1} = x_t+v*dt*cos(yaw)
y_{t+1} = y_t+v*dt*sin(yaw)
yaw_{t+1} = yaw_t+omega*dt
v_{t+1} = v{t}
so that
dx/dyaw = -v*dt*sin(yaw)
dx/dv = dt*cos(yaw)
dy/dyaw = v*dt*cos(yaw)
dy/dv = dt*sin(yaw)
"""
yaw = x[2, 0]
v = u[0, 0]
jF = np.matrix([
[1.0, 0.0, -dt * v * math.sin(yaw), dt * math.cos(yaw)],
[0.0, 1.0, dt * v * math.cos(yaw), dt * math.sin(yaw)],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
return jF
def F(x, u):
yaw = x[2, 0]
v = u[0, 0]
F = np.matrix([
[1.0, 0.0, 0.0, dt*v*math.cos(yaw)],
[0.0, 1.0, 0.0, dt*v*math.sin(yaw)],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
# F = np.matrix([
# [1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [0.0, 0.0, 0.0, 1.0]])
return F
def jacobH(x):
# Jacobian of Observation Model
jH = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0]
])
return jH
def ekf_estimation(xEst, PEst, z, u):
# Predict
xPred = motion_model(xEst, u)
jF = jacobF(xPred, u)
PPred = jF * PEst * jF.T + Q
# Update
jH = jacobH(xPred)
zPred = observation_model(xPred)
y = z.T - zPred
S = jH * PPred * jH.T + R
K = PPred * jH.T * np.linalg.inv(S)
xEst = xPred + K * y
PEst = (np.eye(len(xEst)) - K * jH) * PPred
return xEst, PEst
def kf_estimation(xEst, PEst, z, u):
# Predict
xPred = motion_model(xEst, u)
kF = F(xPred, u)
PPred = kF * PEst * kF.T + Q
# Update
kH = jacobH(xPred)
zPred = observation_model(xPred)
y = z.T - zPred
S = kH * PPred * kH.T + R
K = PPred * kH.T * np.linalg.inv(S)
xEst = xPred + K * y
PEst = (np.eye(len(xEst)) - K * kH) * PPred
return xEst, PEst
def plot_covariance_ellipse(xEst, PEst, Algo="ekf"):
Pxy = PEst[0:2, 0:2]
eigval, eigvec = np.linalg.eig(Pxy)
if eigval[0] >= eigval[1]:
bigind = 0
smallind = 1
else:
bigind = 1
smallind = 0
t = np.arange(0, 2 * math.pi + 0.1, 0.1)
a = math.sqrt(eigval[bigind])
b = math.sqrt(eigval[smallind])
x = [a * math.cos(it) for it in t]
y = [b * math.sin(it) for it in t]
angle = math.atan2(eigvec[bigind, 1], eigvec[bigind, 0])
R = np.matrix([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
fx = R * np.matrix([x, y])
px = np.array(fx[0, :] + xEst[0, 0]).flatten()
py = np.array(fx[1, :] + xEst[1, 0]).flatten()
if Algo == "ekf":
plt.plot(px, py, "--r")
else:
plt.plot(px, py, "--k")
def main():
print(__file__ + " start!!")
time = 0.0
# State Vector [x y yaw v]'
xEst = np.matrix(np.zeros((4, 1)))
xTrue = np.matrix(np.zeros((4, 1)))
PEst = np.eye(4)
ekf_Est = np.matrix(np.zeros((4, 1)))
ekf_PEst = np.eye(4)
kf_Est = np.matrix(np.zeros((4, 1)))
kf_PEst = np.eye(4)
xDR = np.matrix(np.zeros((4, 1))) # Dead reckoning
# history
hxEst = xEst
kf_hxEst = xEst
hxTrue = xTrue
hxDR = xTrue
hz = np.zeros((1, 2))
while similar_time >= time:
time += dt
u = calc_input()
xTrue, z, xDR, ud = observation(xTrue, xDR, u)
ekf_Est, ekf_PEst = ekf_estimation(ekf_Est, ekf_PEst, z, ud)
kf_Est, kf_PEst = kf_estimation(kf_Est, kf_PEst, z, ud)
# store data history
hxEst = np.hstack((hxEst, ekf_Est))
kf_hxEst = np.hstack((kf_hxEst, kf_Est))
hxDR = np.hstack((hxDR, xDR))
hxTrue = np.hstack((hxTrue, xTrue))
hz = np.vstack((hz, z))
if show_animation:
plt.cla()
plt.plot(hz[:, 0], hz[:, 1], ".g")
plt.plot(np.array(hxTrue[0, :]).flatten(),
np.array(hxTrue[1, :]).flatten(), "-b")
# plt.plot(np.array(hxDR[0, :]).flatten(),
# np.array(hxDR[1, :]).flatten(), "-k")
plt.plot(np.array(hxEst[0, :]).flatten(),
np.array(hxEst[1, :]).flatten(), "-r")
plt.plot(np.array(kf_hxEst[0, :]).flatten(),
np.array(kf_hxEst[1, :]).flatten(), "-k")
plot_covariance_ellipse(kf_Est, kf_PEst, "kf")
plot_covariance_ellipse(ekf_Est, ekf_PEst, "ekf")
plt.axis("equal")
plt.grid(True)
plt.pause(0.001)
if __name__ == '__main__':
main()
结果示意图
上面实时跑出KF
与EKF
的对比状态估计结果如下:蓝色线为真值、黑线为卡尔曼滤波估计、红色线为扩展卡尔曼滤波估计。椭圆为对应的方差均值可视化。可以看出在非线性变化下,扩展卡尔曼滤波要优于卡尔曼滤波的状态估计。
参考
导航系统设计专题(五)—扩展卡尔曼滤波(2)
(三十九)通俗易懂理解——卡尔曼滤波与扩展卡尔曼滤波
扩展卡尔曼滤波EKF
【卡尔曼滤波器】_Kalman Filter_全网最详细数学推导
DR_CAN的EKF视频讲解