EKF-Based Fusion of IMU and Wheel-Speed Sensor Data

Fusing IMU data and encoder data with an EKF – Zhihu

Automatic parking (APA) requires short-range, high-accuracy localization. Constrained by the platform's compute budget, we settled on an EKF-based method for fusing the IMU and the wheel-speed sensor.

Quantities to estimate: yaw angle, velocity, and position (the accelerations and yaw rate measured by the IMU enter as control inputs).

Observations: speed, the yaw-angle change between consecutive frames, and the distance travelled between them.

The state-transition model is computed with the following equations, where the state is $\mathbf{x}=[q,\;v_x,\;v_y,\;p_x,\;p_y]^T$ and the control input is $\mathbf{u}=[a_x,\;a_y,\;\omega]^T$:

$$
\begin{aligned}
\omega_m &= \tfrac{1}{2}(\omega_k + \omega_{k-1}) \\
q_k &= q_{k-1} + \omega_m\,\Delta t \\
\mathbf{a}_m &= \tfrac{1}{2}\bigl(R(q_{k-1})\,\mathbf{a}_{k-1} + R(q_k)\,\mathbf{a}_k\bigr) \\
\mathbf{v}_k &= \mathbf{v}_{k-1} + \mathbf{a}_m\,\Delta t \\
\mathbf{p}_k &= \mathbf{p}_{k-1} + \mathbf{v}_{k-1}\,\Delta t + \tfrac{1}{2}\mathbf{a}_m\,\Delta t^2
\end{aligned}
$$

where $R(q)$ is the 2-D rotation from the body frame to the world frame and $\mathbf{a}$ is the body-frame acceleration from the IMU.
Observation model, with $\mathbf{z}=[s,\;\Delta\theta,\;v]^T$ (travelled distance, yaw increment, and speed, taken from the wheel speed and gyro as $s=v_{\mathrm{wheel}}\,\Delta t$, $\Delta\theta=\omega\,\Delta t$, $v=|v_{\mathrm{wheel}}|$):

$$
h(\mathbf{x}_k) =
\begin{bmatrix}
\frac{(p_{x,k}-p_{x,k-1}) + (p_{y,k}-p_{y,k-1})}{\cos q_k + \sin q_k} \\
q_k - q_{k-1} \\
\sqrt{v_{x,k}^2 + v_{y,k}^2}
\end{bmatrix}
$$
The Jacobians F and H are obtained by differentiating the state-transition and observation models with respect to the state $[q,\;v_x,\;v_y,\;p_x,\;p_y]$.
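Before looking at the Jacobians in the code, the hand-derived F and H can be checked symbolically. The sketch below is not from the original post; it uses SymPy with a simplified transition model (acceleration rotated by the previous yaw only), purely as a sanity check:

    import sympy as sp

    # State and control symbols, mirroring q, vx, vy, px, py and ax, ay, w in the code below
    q, vx, vy, px, py = sp.symbols('q v_x v_y p_x p_y')
    ax, ay, w, dt = sp.symbols('a_x a_y omega dt')
    x = sp.Matrix([q, vx, vy, px, py])

    # Simplified transition: rotate the body acceleration by the previous yaw only.
    # (The code below averages the rotated accelerations of frames k-1 and k,
    # which is where its extra 1/2 factors come from.)
    ax_w = sp.cos(q)*ax - sp.sin(q)*ay
    ay_w = sp.sin(q)*ax + sp.cos(q)*ay
    f = sp.Matrix([q + w*dt,
                   vx + ax_w*dt,
                   vy + ay_w*dt,
                   px + vx*dt + ax_w*dt**2/2,
                   py + vy*dt + ay_w*dt**2/2])
    F = sp.simplify(f.jacobian(x))      # compare with F_k in the code

    # Observation model used in the code: [(dpx+dpy)/(cos q + sin q), q - q0, sqrt(vx^2+vy^2)]
    px0, py0, q0 = sp.symbols('p_x0 p_y0 q0')   # previous-frame values, treated as constants
    h = sp.Matrix([(px - px0 + py - py0)/(sp.cos(q) + sp.sin(q)),
                   q - q0,
                   sp.sqrt(vx**2 + vy**2)])
    H = sp.simplify(h.jacobian(x))      # compare with H_k in the code

    print(F)
    print(H)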
With these, one EKF iteration is computed as:

$$
\begin{aligned}
P_{k|k-1} &= F_k P_{k-1} F_k^{T} + Q_k \\
\mathbf{y}_k &= \mathbf{z}_k - h(\hat{\mathbf{x}}_{k|k-1}) \\
S_k &= H_k P_{k|k-1} H_k^{T} + R_k \\
K_k &= P_{k|k-1} H_k^{T} S_k^{-1} \\
\hat{\mathbf{x}}_k &= \hat{\mathbf{x}}_{k|k-1} + K_k \mathbf{y}_k \\
P_k &= (I - K_k H_k) P_{k|k-1}
\end{aligned}
$$

where $\hat{\mathbf{x}}_{k|k-1}$ is the predicted state from the transition model above.
The code is as follows (class methods; `numpy` is imported as `np` along with the standard `math` module):

    def ekf(self, z_k_observation_vector, state_estimate_k_minus_1, control_vector_k,
            control_vector_k_minus_1, P_k_minus_1, dt, s):
        # One EKF step: predict with the IMU inputs, then correct with the
        # wheel-odometry observation [s, delta_theta, v].
        # Unpack the previous state: yaw, world-frame velocity and position
        q_k_minus_1=state_estimate_k_minus_1[0]
        v_x_k_minus_1=state_estimate_k_minus_1[1]
        v_y_k_minus_1=state_estimate_k_minus_1[2]
        p_x_k_minus_1=state_estimate_k_minus_1[3]
        p_y_k_minus_1=state_estimate_k_minus_1[4]
        
        # IMU inputs at k: body-frame accelerations and yaw rate
        a_x_k=control_vector_k[0]
        a_y_k=control_vector_k[1]
        w_k=control_vector_k[2]
        
        # IMU inputs at k-1
        a_x_k_minus_1=control_vector_k_minus_1[0]
        a_y_k_minus_1=control_vector_k_minus_1[1]
        w_k_minus_1=control_vector_k_minus_1[2]
        
        # Body-to-world rotation at k-1
        R_k_minus_1=np.array([[math.cos(q_k_minus_1),-math.sin(q_k_minus_1)],
                    [math.sin(q_k_minus_1),math.cos(q_k_minus_1)]])
        
        # Midpoint yaw rate over the interval
        w_m=1/2*(w_k+w_k_minus_1)
        
        # Predicted yaw
        q_k=q_k_minus_1+w_m*dt+self.process_noise_v_k_minus_1[0]
        
        # Body-to-world rotation at k
        R_k=np.array([[math.cos(q_k),-math.sin(q_k)],
                    [math.sin(q_k),math.cos(q_k)]])
        
        # Average of the accelerations at k-1 and k, rotated into the world frame
        a_m=1.0/2*(R_k_minus_1.dot(np.array([a_x_k_minus_1,a_y_k_minus_1]).T)+R_k.dot(np.array([a_x_k,a_y_k]).T))
        
        v_k_minus_1=np.array([v_x_k_minus_1,v_y_k_minus_1])
        
        p_k_minus_1=np.array([p_x_k_minus_1,p_y_k_minus_1])
        
        # Predicted velocity and position (constant-acceleration integration)
        v_k=v_k_minus_1+a_m.T*dt
        
        v_x_k=v_k[0]+self.process_noise_v_k_minus_1[1]
        v_y_k=v_k[1]+self.process_noise_v_k_minus_1[2]
        
        p_k=p_k_minus_1+v_k_minus_1*dt+1.0/2*a_m.T*dt*dt
        
        p_x_k=p_k[0]+self.process_noise_v_k_minus_1[3]
        p_y_k=p_k[1]+self.process_noise_v_k_minus_1[4]
        
        # Predicted (a priori) state
        state_estimate_k=np.array([q_k,v_x_k,v_y_k,p_x_k,p_y_k])


        # Jacobian of the state-transition model w.r.t. the state [q, vx, vy, px, py]
        F_k=np.array([[1,0,0,0,0],
                   [1/2*(-math.sin(q_k_minus_1)*a_x_k_minus_1-math.cos(q_k_minus_1)*a_y_k_minus_1)*dt,1,0,0,0],
                   [1.0/2*(math.cos(q_k_minus_1)*a_x_k_minus_1-math.sin(q_k_minus_1)*a_y_k_minus_1)*dt,0,1,0,0],
                   [1.0/4*(-math.sin(q_k_minus_1)*a_x_k_minus_1-math.cos(q_k_minus_1)*a_y_k_minus_1)*dt*dt,dt,0,1,0],
                   [1.0/4*(math.cos(q_k_minus_1)*a_x_k_minus_1-math.sin(q_k_minus_1)*a_y_k_minus_1)*dt*dt,0,dt,0,1]])
        
        # F_k=np.array([[1,0,0,0,0],
        #     [1/2*(-math.sin(q_k_minus_1)*a_x_k_minus_1-math.cos(q_k_minus_1)*a_y_k_minus_1)*dt,1,0,0,0],
        #     [1.0/2*(math.cos(q_k_minus_1)*a_x_k_minus_1-math.sin(q_k_minus_1)*a_y_k_minus_1)*dt,0,1,0,0],
        #     [0,dt,0,1,0],
        #     [0,0,dt,0,1]])
        
        theta_k=w_k*dt  # yaw increment over the interval (not used further below)
        
        # Jacobian of the observation model [s, delta_theta, v] w.r.t. the state
        H_k=np.array([[(((math.sin(q_k))-(math.cos(q_k)))*(p_x_k-p_x_k_minus_1+p_y_k-p_y_k_minus_1))/((math.cos(q_k)+math.sin(q_k))**2),0,0,1/(math.cos(q_k)+math.sin(q_k)),1/(math.cos(q_k)+math.sin(q_k))],
                   [1.0,0,0,0,0],
                   [0,v_x_k*math.pow((v_x_k**2+v_y_k**2),-1.0/2),v_y_k*math.pow((v_x_k**2+v_y_k**2),-1.0/2),0,0]])
        
        # H_k=np.array([[(((math.sin(q_k)))*(p_x_k-p_x_k_minus_1))/((math.cos(q_k))**2),0,0,1/(math.cos(q_k)),0],
        #     [1.0,0,0,0,0],
        #     [0,v_x_k*math.pow((v_x_k**2+v_y_k**2),-1.0/2),v_y_k*math.pow((v_x_k**2+v_y_k**2),-1.0/2),0,0]])
        
        # Predicted (a priori) covariance
        P_k=F_k.dot(P_k_minus_1).dot(F_k.T)+self.Q_k
        
               
        # if s==0:
        #     v=math.pow((v_x_k**2+v_y_k**2),1/2 )
        # else:
        #     v=-math.pow((v_x_k**2+v_y_k**2),1/2 )
        
        # Innovation: measured [s, delta_theta, v] minus the values predicted from the state
        measurement_residual_y_k=np.array([
            z_k_observation_vector[0]-(p_x_k-p_x_k_minus_1+p_y_k-p_y_k_minus_1)/(math.cos(q_k_minus_1)+math.sin(q_k_minus_1))+self.sensor_noise_w_k[0],
            z_k_observation_vector[1]-(q_k-q_k_minus_1)+self.sensor_noise_w_k[1],
            z_k_observation_vector[2]-math.pow((v_x_k**2+v_y_k**2),1/2)+self.sensor_noise_w_k[2]])
        
        # measurement_residual_y_k=np.array([z_k_observation_vector[0]-(p_x_k-p_x_k_minus_1)/(math.cos(q_k))+self.sensor_noise_w_k[0],z_k_observation_vector[1]-(q_k-q_k_minus_1)+self.sensor_noise_w_k[1],z_k_observation_vector[2]-math.pow((v_x_k**2+v_y_k**2),1/2 )+self.sensor_noise_w_k[2]])
        
        # Innovation covariance and Kalman gain
        S_k = H_k @ P_k @ H_k.T + self.R_k
        K_k = P_k @ H_k.T @ np.linalg.pinv(S_k)
        
        # Corrected (a posteriori) state and covariance
        state_estimate_k = state_estimate_k + (K_k @ measurement_residual_y_k)
        
        P_k = P_k - (K_k @ H_k @ P_k)
        return state_estimate_k, P_k
    

    # Additive process / measurement noise terms (kept at zero here)
    process_noise_v_k_minus_1=np.array([0,0,0,0,0])
    sensor_noise_w_k=np.array([0,0,0])
    # # Earlier tuning attempt, state order: q, vx, vy, px, py
    # Q_k=np.array([[10000,0,0,0,0],
    #               [0,1000,0,0,0],
    #               [0,0,1000,0,0],
    #               [0,0,0,0.00001,0],
    #               [0,0,0,0,0.00001]])
    # # Observation order: s, theta, v
    # R_k=np.array([[0.01,0,0],
    #               [0,0.01,0],
    #               [0,0,0.01]])
    
    # Process noise covariance, state order: q, vx, vy, px, py
    Q_k=np.array([[1,0,0,0,0],
                  [0,1,0,0,0],
                  [0,0,1,0,0],
                  [0,0,0,1,0],
                  [0,0,0,0,1]])
    # Measurement noise covariance, observation order: s, theta, v
    R_k=np.array([[1000,0,0],
                  [0,1000,0],
                  [0,0,1000]])
     
    def method_EKF(self):
        time=self.get_time()
        vline=self.vline_wheelspeed()
        yaw_rate=self.get_yaw_rate()
        lat=self.get_acc_lat()
        lgt=self.get_acc_lgt()
        state=self.get_moveing_state()
        n=len(vline)
        x=[]
        y=[]
        yaw=[]
        vline_all=[]
        # Initialization
        # F and H are recomputed inside ekf(); these are only placeholders
        F_k=np.array([[1.0,0,0,0,0],
                      [0,1.0,0,0,0],
                      [0,0,1.0,0,0],
                      [0,0,0,1.0,0],
                      [0,0,0,0,1.0]])
        
        H_k=np.array([[1.0,0,0,0,0],
                      [0,1.0,0,0,0],
                      [0,0,1.0,0,0],
                      [0,0,0,1.0,0],
                      [0,0,0,0,1.0]])
        
        # Initial state [q, vx, vy, px, py] and covariance
        state_estimate_k_minus_1 = np.array([0.0,0.0,0.0,0.0,0.0])
        
        P_k_minus_1=np.array([[1.0,0,0,0,0],
                              [0,1.0,0,0,0],
                              [0,0,1.0,0,0],
                              [0,0,0,1.0,0],
                              [0,0,0,0,1.0]])
        
    
        for i in range(1,n):
            s=state[i]
            dt=time[i]-time[i-1]
            # Sign the wheel speed according to the moving state
            if state[i]==0:
                v=vline[i]
            elif state[i]==1:
                v=-vline[i]
            else:
                v=0.0
            # Observation: s, theta, v
            obs_vector_z_k=np.array([v*dt,yaw_rate[i]*dt,math.fabs(v)])
            # Control inputs: ax, ay, w
            control_vector_k_minus_1=np.array([lgt[i-1],lat[i-1],yaw_rate[i-1]])
            control_vector_k=np.array([lgt[i],lat[i],yaw_rate[i]])
            
            # Run one EKF step to get the updated state and covariance
            optimal_state_estimate_k,covariance_estimate_k = self.ekf(obs_vector_z_k,state_estimate_k_minus_1, control_vector_k,
            control_vector_k_minus_1, P_k_minus_1, dt,s)
            
            # Carry the estimate and covariance forward as k-1 for the next step
            state_estimate_k_minus_1 = optimal_state_estimate_k
            P_k_minus_1 = covariance_estimate_k
            x.append(optimal_state_estimate_k[3])
            y.append(optimal_state_estimate_k[4])
            yaw.append(optimal_state_estimate_k[0])
            vline_all.append(math.pow(optimal_state_estimate_k[1]**2+optimal_state_estimate_k[2]**2,1.0/2))
        return x,y,yaw,vline_all
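
The post does not show how the outputs are inspected. As one possible convenience, a small plotting helper like the sketch below (a hypothetical `plot_trajectory`, not part of the original code) can be fed the four lists returned by `method_EKF()`:

    import matplotlib.pyplot as plt

    def plot_trajectory(x, y, yaw, vline_all):
        # Plot the outputs of method_EKF() for a quick visual check
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
        ax1.plot(x, y)
        ax1.set_aspect("equal")
        ax1.set_xlabel("x [m]")
        ax1.set_ylabel("y [m]")
        ax1.set_title("Fused trajectory")
        ax2.plot(vline_all)
        ax2.set_xlabel("sample")
        ax2.set_ylabel("speed [m/s]")
        ax2.set_title("Fused speed")
        plt.tight_layout()
        plt.show()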

The experimental results were only so-so: the Q and R matrices are hard to tune, and the wheel speed and accelerometer are themselves not very accurate. After studying EKF fusion further, I see the following possible improvements:

1. Stop using the wheel speed directly; instead use the wheel-speed pulses together with an accurately calibrated tire diameter (see the sketch after this list).

2. Add the accelerometer zero bias to the estimated state; it does not have to be updated every cycle and can be updated only after the pulse count changes.
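
As a rough illustration of the first idea, a distance increment from encoder pulses and a calibrated tire diameter could be computed as in the sketch below; the function, its parameters, and the example values are placeholders rather than anything from the original post.

    import math

    def pulses_to_distance(pulse_count, pulses_per_rev, tire_diameter_m):
        # Distance travelled by the wheel for the given number of encoder pulses;
        # pulses_per_rev and tire_diameter_m come from calibration (placeholder values below)
        return pulse_count / pulses_per_rev * math.pi * tire_diameter_m

    # Example with assumed calibration values
    ds = pulses_to_distance(pulse_count=42, pulses_per_rev=96, tire_diameter_m=0.65)
    print(ds)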

Because the follow-up project adopted a calibration-based method, this scheme has been shelved for the time being; I will come back to it with new experiments when I have time.
