Multi-Robot Formation (4): Trajectory Tracking Based on the Turtlebot3 Model


Preface

A while ago I replaced the UAV model in Fast-Planner with a Turtlebot3 model to do mapless trajectory planning. Recently I tried several methods for making the Turtlebot3 track the planned trajectory, so that the robot can avoid obstacles in real time even in an unknown environment.


I. PID-Based Trajectory Tracking

I expected this to be simple: I lightly adapted the PID tracking algorithm I had previously written for the e-puck2 robot, but the Turtlebot3 could not track the trajectory well. After some investigation, the cause turned out to be the following.

Printing out all of the path points generated by fast_planner (from the planning/bspline topic):
It turns out the planner publishes a series of scattered points. Plotting them in the environment makes this obvious, and it greatly increases the difficulty of trajectory tracking:

1. Segment-by-segment trajectory tracking

At first I solved the problem this way: although fast_planner generates the path segment by segment, each segment is itself continuous, so I save each newly generated path to a CSV file and overwrite the stored data whenever the next message arrives. This guarantees that the stored data always represents a single continuous path segment. The code is tracking_path_node.cpp:

#include <iostream>
#include <fstream>   // std::ofstream, needed for the CSV output below
#include <ros/ros.h>
#include "mpc_tracking/Bspline.h"

using namespace std;

struct Pose2D {
    double x;
    double y;
    double angle;

    Pose2D(double X, double Y, double YAW) : x(X), y(Y), angle(YAW) {}
};

void bsplineCallback(const mpc_tracking::BsplineConstPtr& msg) {
    std::ofstream csv_file;
    // Open the CSV file in write mode; any previous contents are overwritten.
    csv_file.open("/home/wjx/nryp_ws/src/swarm/waypoint_loader/waypoints/bspline_path.csv", std::ofstream::out);

    for (size_t i = 0; i < msg->pos_pts.size(); i++) {
        double x = msg->pos_pts[i].x;
        double y = msg->pos_pts[i].y;
        double yaw = 0.1; // placeholder yaw; only x and y matter to the loader

        // Write one waypoint per row
        csv_file << x << "," << y << "," << yaw << "\n";
    }

    csv_file.close(); // close the file
    ROS_INFO("B-spline path data has been written to bspline_path.csv");
}

int main(int argc, char **argv) {
    ros::init(argc, argv, "tracking_path_node");
    ros::NodeHandle nh;

    // Subscribe to the B-spline path
    ros::Subscriber bspline_sub = nh.subscribe("/planning/bspline", 10, bsplineCallback);

    ros::spin();
    return 0;
}

I then watch this file: whenever bspline_path.csv (which stores the path) is modified, the watcher re-runs the trajectory-generation launch file written earlier (waypoint_loader.launch) to generate the trajectory in the environment.

monitor_csv.py watches bspline_path.csv:

#!/usr/bin/env python

import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import subprocess

class Watcher:
    FILE_TO_WATCH = "/home/wjx/nryp_ws/src/swarm/waypoint_loader/waypoints/bspline_path.csv"

    def __init__(self):
        self.observer = Observer()

    def run(self):
        event_handler = Handler()
        self.observer.schedule(event_handler, self.FILE_TO_WATCH, recursive=False)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            self.observer.stop()
            print("Observer Stopped")

        self.observer.join()

class Handler(FileSystemEventHandler):

    @staticmethod
    def on_modified(event):
        if not event.is_directory and event.src_path.endswith('.csv'):
            print("Detected modification in bspline_path.csv")
            Handler.launch_ros()

    @staticmethod
    def launch_ros():
        print("Launching ROS waypoint loader with updated CSV")
        try:
            # Kill existing roslaunch if necessary
            subprocess.call(['pkill', '-f', 'waypoint_loader_1'])
            # Start new launch file
            cmd = "roslaunch waypoint_loader waypoint_loader.launch"
            subprocess.Popen(cmd, shell=True)
        except Exception as e:
            print(str(e))

if __name__ == '__main__':
    w = Watcher()
    w.run()

waypoint_loader.py generates the trajectory to track:

#!/usr/bin/env python

import os
import csv
import math
import tf
import rospy
import sys

from geometry_msgs.msg import Quaternion, PoseStamped

from styx_msgs.msg import Lane, Waypoint

from nav_msgs.msg import Path

CSV_HEADER = ['x', 'y', 'yaw']
MAX_DECEL = 1.0


class WaypointLoader(object):

    def __init__(self):
        rospy.init_node('waypoint_loader', log_level=rospy.DEBUG)

        self.pub = rospy.Publisher('/base_waypoints', Lane, queue_size=1, latch=True)
        self.pub_path = rospy.Publisher('/path', Path, queue_size=1, latch=True)
        
        self.velocity = self.kmph2mps(rospy.get_param('~velocity'))
        self.new_waypoint_loader(rospy.get_param('~path'))
        rospy.spin()

    def new_waypoint_loader(self, path):
        if os.path.isfile(path):
            waypoints, base_path = self.load_waypoints(path)
            self.publish(waypoints, base_path)
            rospy.loginfo('Waypoint Loaded')
        else:
            rospy.logerr('%s is not a file', path)

    def quaternion_from_yaw(self, yaw):
        return tf.transformations.quaternion_from_euler(0., 0., yaw)

    def kmph2mps(self, velocity_kmph):
        return (velocity_kmph * 1000.) / (60. * 60.)

    def load_waypoints(self, fname):
        waypoints = []
        base_path = Path()
        #base_path.header.frame_id = 'world'
        base_path.header.frame_id = 'odom'
        with open(fname) as wfile:
            reader = csv.DictReader(wfile, CSV_HEADER)
            for wp in reader:
                #print(wp)
                p = Waypoint()
                p.pose.pose.position.x = float(wp['x'])
                p.pose.pose.position.y = float(wp['y'])
                q = self.quaternion_from_yaw(float(wp['yaw']))
                p.pose.pose.orientation = Quaternion(*q)
                p.twist.twist.linear.x = float(self.velocity)
                p.forward = True
                waypoints.append(p)

                path_element = PoseStamped()
                path_element.pose.position.x = p.pose.pose.position.x
                path_element.pose.position.y = p.pose.pose.position.y
                base_path.poses.append(path_element)

                
        waypoints = self.decelerate(waypoints)
        return waypoints,base_path

    def distance(self, p1, p2):
        x, y, z = p1.x - p2.x, p1.y - p2.y, p1.z - p2.z
        return math.sqrt(x*x + y*y + z*z)

    def decelerate(self, waypoints):
        last = waypoints[-1]
        last.twist.twist.linear.x = 0.
        for wp in waypoints[:-1][::-1]:
            print(wp)
            dist = self.distance(wp.pose.pose.position, last.pose.pose.position)
            vel = math.sqrt(2 * MAX_DECEL * dist)
            print(vel)
            if vel < 1.0:
                vel = 0.0
            wp.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
        return waypoints

    def publish(self, waypoints, base_path):
        lane = Lane()
        lane.header.frame_id = 'odom'
        lane.header.stamp = rospy.Time(0)
        lane.waypoints = waypoints
        self.pub.publish(lane)
        self.pub_path.publish(base_path)
        rospy.loginfo('publishing......\n')


if __name__ == '__main__':
    try:
        WaypointLoader()
        print("waypoint loader closed.")
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint node.')
        

Path information (overwritten on each update)

Once the trajectory has been prepared, pure_pursuit is called to track it:

#include <ackermann_msgs/AckermannDriveStamped.h>
#include <geometry_msgs/TransformStamped.h>
#include <geometry_msgs/Twist.h>
#include <nav_msgs/Odometry.h>
#include <nav_msgs/Path.h>
#include <ros/ros.h>
#include <tf2_ros/transform_broadcaster.h>
#include <tf2_ros/transform_listener.h>

#include <algorithm>
#include <cmath>
#include <kdl/frames.hpp>
#include <string>

// TODO: Figure out how to use tf2 DataConversions
// for more elegant and compact code
//#include <tf2_kdl/tf2_kdl.h>
//#include <tf2_geometry_msgs/tf2_geometry_msgs.h>

#include <dynamic_reconfigure/server.h>
#include <pure_pursuit/PurePursuitConfig.h>

using std::string;

class PurePursuit
{
public:
  //! Constructor
  PurePursuit();

  //! Compute velocity commands each time new odometry data is received.
  void computeVelocities(nav_msgs::Odometry odom);

  //! Receive path to follow.
  void receivePath(nav_msgs::Path path);

  //! Compute transform that transforms a pose into the robot frame (base_link)
  KDL::Frame transformToBaseLink(const geometry_msgs::Pose &pose,
                                 const geometry_msgs::Transform &tf);

  //! Helper function for computing Euclidean distances in the x-y plane.
  template <typename T1, typename T2>
  double distance(T1 pt1, T2 pt2)
  {
    return sqrt(pow(pt1.x - pt2.x, 2) + pow(pt1.y - pt2.y, 2) +
                pow(pt1.z - pt2.z, 2));
  }

  //! Run the controller.
  void run();

private:
  //! Dynamic reconfigure callback.
  void reconfigure(pure_pursuit::PurePursuitConfig &config, uint32_t level);

  // Vehicle parameters
  double L_;
  // Algorithm variables
  // Position tolerance is measured along the x-axis of the robot!
  double ld_, pos_tol_;
  // Generic control variables
  double v_max_, v_, w_max_;
  // Control variables for Ackermann steering
  // Steering angle is denoted by delta
  double delta_, delta_vel_, acc_, jerk_, delta_max_;
  nav_msgs::Path path_;
  unsigned idx_;
  bool goal_reached_;
  geometry_msgs::Twist cmd_vel_;
  ackermann_msgs::AckermannDriveStamped cmd_acker_;

  // Ros infrastructure
  ros::NodeHandle nh_, nh_private_;
  ros::Subscriber sub_odom_, sub_path_;
  ros::Publisher pub_vel_, pub_acker_;
  tf2_ros::Buffer tf_buffer_;
  tf2_ros::TransformListener tf_listener_;
  tf2_ros::TransformBroadcaster tf_broadcaster_;
  geometry_msgs::TransformStamped lookahead_;
  string map_frame_id_, robot_frame_id_, lookahead_frame_id_, acker_frame_id_;

  dynamic_reconfigure::Server<pure_pursuit::PurePursuitConfig>
      reconfigure_server_;
  dynamic_reconfigure::Server<pure_pursuit::PurePursuitConfig>::CallbackType
      reconfigure_callback_;
};

PurePursuit::PurePursuit()
    : ld_(0.01),
      v_max_(0.1),
      v_(v_max_),
      w_max_(1.0),
      pos_tol_(0.1),
      idx_(0),
      goal_reached_(true),
      nh_private_("~"),
      tf_listener_(tf_buffer_),
      map_frame_id_("map"),
      robot_frame_id_("base_link"),
      lookahead_frame_id_("lookahead")
{
  // Get parameters from the parameter server
  nh_private_.param<double>("wheelbase", L_, 0.2);
  nh_private_.param<double>("lookahead_distance", ld_, 0.01);
  // nh_private_.param<double>("linear_velocity", v_, 0.1);
  nh_private_.param<double>("max_rotational_velocity", w_max_, 1.0);
  nh_private_.param<double>("position_tolerance", pos_tol_, 0.1);
  nh_private_.param<double>("steering_angle_velocity", delta_vel_, 1.5);
  nh_private_.param<double>("acceleration", acc_, 1.0);
  nh_private_.param<double>("jerk", jerk_, 100.0);
  nh_private_.param<double>("steering_angle_limit", delta_max_, 1.57);
  nh_private_.param<string>("map_frame_id", map_frame_id_, "map");
  // Frame attached to midpoint of rear axle (for front-steered vehicles).
  nh_private_.param<string>("robot_frame_id", robot_frame_id_, "base_link");
  // Lookahead frame moving along the path as the vehicle is moving.
  nh_private_.param<string>("lookahead_frame_id", lookahead_frame_id_,
                            "lookahead");
  // Frame attached to midpoint of front axle (for front-steered vehicles).
  nh_private_.param<string>("ackermann_frame_id", acker_frame_id_,
                            "rear_axle_midpoint");

  // Populate messages with static data
  lookahead_.header.frame_id = robot_frame_id_;
  lookahead_.child_frame_id = lookahead_frame_id_;

  cmd_acker_.header.frame_id = acker_frame_id_;
  cmd_acker_.drive.steering_angle_velocity = delta_vel_;
  cmd_acker_.drive.acceleration = acc_;
  cmd_acker_.drive.jerk = jerk_;

  sub_path_ = nh_.subscribe("path_segment", 1, &PurePursuit::receivePath, this);
  sub_odom_ =
      nh_.subscribe("odometry", 1, &PurePursuit::computeVelocities, this);
  pub_vel_ = nh_.advertise<geometry_msgs::Twist>("cmd_vel", 1);
  pub_acker_ =
      nh_.advertise<ackermann_msgs::AckermannDriveStamped>("cmd_acker", 1);

  reconfigure_callback_ = boost::bind(&PurePursuit::reconfigure, this, _1, _2);
  reconfigure_server_.setCallback(reconfigure_callback_);
}

void PurePursuit::computeVelocities(nav_msgs::Odometry odom)
{
  ROS_INFO("start");
  // The velocity commands are computed, each time a new Odometry message is
  // received. Odometry is not used directly, but through the tf tree. Get the
  // current robot pose
  geometry_msgs::TransformStamped tf;
  try
  {
    tf = tf_buffer_.lookupTransform(map_frame_id_, robot_frame_id_,
                                    ros::Time(0));
    // We first compute the new point to track, based on our current pose,
    // path information and lookahead distance.
    for (; idx_ < path_.poses.size(); idx_++)
    {
      ROS_INFO("index is %d \n", idx_);
      // the distance between current pose and the path point must
      // larger than ld_
      ROS_INFO("distance is %f:", distance(path_.poses[idx_].pose.position, tf.transform.translation));
      ROS_INFO("ld_:%f ", ld_);
      std::cout << "postiton: " << path_.poses[idx_].pose.position << std::endl;

      tf.transform.translation.z = 0;
      std::cout << "transform: " << tf.transform.translation << std::endl;
      if (distance(path_.poses[idx_].pose.position, tf.transform.translation) >
          ld_)
      {
        // Transformed lookahead to base_link frame is lateral error
        KDL::Frame F_bl_ld =
            transformToBaseLink(path_.poses[idx_].pose, tf.transform);
        lookahead_.transform.translation.x = F_bl_ld.p.x();
        lookahead_.transform.translation.y = F_bl_ld.p.y();
        lookahead_.transform.translation.z = F_bl_ld.p.z();
        F_bl_ld.M.GetQuaternion(
            lookahead_.transform.rotation.x, lookahead_.transform.rotation.y,
            lookahead_.transform.rotation.z, lookahead_.transform.rotation.w);
        ROS_INFO("F_bl_ld %f", lookahead_.transform.rotation.w);

        // TODO: See how the above conversion can be done more elegantly
        // using tf2_kdl and tf2_geometry_msgs
        break;
      }
    }
    ROS_INFO("end for");
    // try to optimise the pose, although it already arrived at nearby the
    // specific point
    if (!path_.poses.empty() && idx_ >= path_.poses.size())
    {
      // We are approaching the goal,
      // which is closer than ld
      ROS_INFO("start if");
      // This is the pose of the goal w.r.t. the base_link frame
      KDL::Frame F_bl_end =
          transformToBaseLink(path_.poses.back().pose, tf.transform);

      if (fabs(F_bl_end.p.x()) <= pos_tol_)
      {
        // We have reached the goal
        goal_reached_ = true;

        // Reset the path
        path_ = nav_msgs::Path();
      }
      else
      {
        // We need to extend the lookahead distance
        // beyond the goal point.

        // Find the intersection between the circle of radius ld
        // centered at the robot (origin)
        // and the line defined by the last path pose
        double roll, pitch, yaw;
        F_bl_end.M.GetRPY(roll, pitch, yaw);
        double k_end = tan(yaw); // Slope of line defined by the last path pose
        double l_end = F_bl_end.p.y() - k_end * F_bl_end.p.x();
        double a = 1 + k_end * k_end;
        double b = 2 * l_end;
        double c = l_end * l_end - ld_ * ld_;
        double D = sqrt(b * b - 4 * a * c);
        double x_ld = (-b + copysign(D, v_)) / (2 * a);
        double y_ld = k_end * x_ld + l_end;

        lookahead_.transform.translation.x = x_ld;
        lookahead_.transform.translation.y = y_ld;
        lookahead_.transform.translation.z = F_bl_end.p.z();
        F_bl_end.M.GetQuaternion(
            lookahead_.transform.rotation.x, lookahead_.transform.rotation.y,
            lookahead_.transform.rotation.z, lookahead_.transform.rotation.w);
        ROS_INFO("F_bl_end %f", lookahead_.transform.rotation.w);
      }
      ROS_INFO("F_bl_end");
    }

    if (!goal_reached_)
    {
      // We are tracking.
      // Compute linear velocity.
      // Right now, this is not very smart :)
      v_ = copysign(v_max_, v_); // max linear speed

      // Compute the angular velocity.
      // Lateral error is the y-value of the lookahead point (in base_link
      // frame)
      double yt = lookahead_.transform.translation.y;
      double ld_2 = ld_ * ld_;
      cmd_vel_.angular.z = std::min(2 * v_ / ld_2 * yt, w_max_)*0.2;
      ROS_INFO("yt: %f", yt);
      ROS_INFO("ld_2: %f", ld_2);
      ROS_INFO("acmd_vel_.angular.z: %f", cmd_vel_.angular.z);
      // cmd_vel_.angular.z = std::min(2 * v_ / ld_2 * yt, w_max_);

      // Compute desired Ackermann steering angle
      cmd_acker_.drive.steering_angle =
          std::min(atan2(2 * yt * L_, ld_2), delta_max_);

      // Set linear velocity for tracking.
      cmd_vel_.linear.x = v_;
      cmd_acker_.drive.speed = v_;

      cmd_acker_.header.stamp = ros::Time::now();
      ROS_INFO("here if");
    }
    else
    {
      // We are at the goal!

      // Stop the vehicle

      // The lookahead target is at our current pose.
      lookahead_.transform = geometry_msgs::Transform();
      lookahead_.transform.rotation.w = 1.0;

      // Stop moving.
      cmd_vel_.linear.x = 0.0;
      cmd_vel_.angular.z = 0.0;

      cmd_acker_.header.stamp = ros::Time::now();
      cmd_acker_.drive.steering_angle = 0.0;
      cmd_acker_.drive.speed = 0.0;
    }

    // Publish the lookahead target transform.
    lookahead_.header.stamp = ros::Time::now();
    tf_broadcaster_.sendTransform(lookahead_);

    // Publish the velocities
    pub_vel_.publish(cmd_vel_);

    // Publish ackerman steering setpoints
    pub_acker_.publish(cmd_acker_);
    ROS_INFO("here!");
  }

  catch (tf2::TransformException &ex)
  {
    ROS_INFO("here");
    ROS_WARN_STREAM(ex.what());
  }
}

void PurePursuit::receivePath(nav_msgs::Path new_path)
{
  // When a new path received, the previous one is simply discarded
  // It is up to the planner/motion manager to make sure that the new
  // path is feasible.
  // Callbacks are non-interruptible, so this will
  // not interfere with velocity computation callback.
  ROS_INFO("new_path %s", new_path.header.frame_id.c_str());
  if (new_path.header.frame_id == map_frame_id_)
  {
    path_ = new_path;
    idx_ = 0;
    if (new_path.poses.size() > 0)
    {
      goal_reached_ = false;
      ROS_INFO("goal_reached_");
    }
    else
    {
      goal_reached_ = true;
      ROS_WARN_STREAM("Received empty path!");
    }
  }
  else
  {
    ROS_WARN_STREAM("The path must be published in the "
                    << map_frame_id_ << " frame! Ignoring path in "
                    << new_path.header.frame_id << " frame!");
  }
}

KDL::Frame PurePursuit::transformToBaseLink(
    const geometry_msgs::Pose &pose, const geometry_msgs::Transform &tf)
{
  // Pose in global (map) frame
  KDL::Frame F_map_pose(
      KDL::Rotation::Quaternion(pose.orientation.x, pose.orientation.y,
                                pose.orientation.z, pose.orientation.w),
      KDL::Vector(pose.position.x, pose.position.y, pose.position.z));

  // Robot (base_link) in global (map) frame
  KDL::Frame F_map_tf(
      KDL::Rotation::Quaternion(tf.rotation.x, tf.rotation.y, tf.rotation.z,
                                tf.rotation.w),
      KDL::Vector(tf.translation.x, tf.translation.y, tf.translation.z));

  // TODO: See how the above conversions can be done more elegantly
  // using tf2_kdl and tf2_geometry_msgs

  return F_map_tf.Inverse() * F_map_pose;
}

void PurePursuit::run() { ros::spin(); }

void PurePursuit::reconfigure(pure_pursuit::PurePursuitConfig &config,
                              uint32_t level)
{
  v_max_ = config.max_linear_velocity;
}

int main(int argc, char **argv)
{
  ros::init(argc, argv, "pure_pursuit");

  PurePursuit controller;
  controller.run();

  return 0;
}
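
For reference, the angular velocity computed in computeVelocities follows the standard pure pursuit law: the lookahead point, expressed in base_link, lies on an arc through the robot origin with curvature 2*y/ld^2, so omega = v * (2*y/ld^2). Below is a minimal standalone sketch of that law; the helper name and the symmetric saturation are my additions, not part of the node above.

#include <algorithm>

// Pure pursuit steering law: y_lookahead is the lateral (y) offset of the
// lookahead point in the base_link frame, lookahead_dist corresponds to ld_.
double purePursuitAngularVelocity(double v, double y_lookahead,
                                  double lookahead_dist, double w_max)
{
  const double curvature = 2.0 * y_lookahead / (lookahead_dist * lookahead_dist);
  const double omega = v * curvature;               // omega = v * kappa
  return std::max(-w_max, std::min(omega, w_max));  // saturate to the limit
}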

PID tracking (segment-by-segment)

2. Trajectory tracking with spline fitting

Later I learned that, once the non-uniform B-spline segments have been generated, the De Boor algorithm can be used to merge them into a single continuous and smooth curve.

For the derivation, see this article: 使用De Boor递推算法求B样条曲线上的点 (computing points on a B-spline curve with the recursive De Boor algorithm).


De Boor code:

Eigen::VectorXd NonUniformBspline::evaluateDeBoor(const double& u) {

  double ub = min(max(u_(p_), u), u_(m_ - p_));

  // determine which knot interval [u_i, u_i+1] ub lies in
  int k = p_;
  while (true) {
    if (u_(k + 1) >= ub) break;
    ++k;
  }

  /* deBoor's alg */
  vector<Eigen::VectorXd> d;
  for (int i = 0; i <= p_; ++i) {
    d.push_back(control_points_.row(k - p_ + i));
    // cout << d[i].transpose() << endl;
  }

  for (int r = 1; r <= p_; ++r) {
    for (int i = p_; i >= r; --i) {
      double alpha = (ub - u_[i + k - p_]) / (u_[i + 1 + k - r] - u_[i + k - p_]);
      // cout << "alpha: " << alpha << endl;
      d[i] = (1 - alpha) * d[i - 1] + alpha * d[i];
    }
  }

  return d[p_];
}

The algorithm blends adjacent control points into new ones; after p iterations (where p is the degree of the B-spline), we obtain the coordinates of the desired point on the curve, and the segments merge into one continuous, smooth curve.
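
Once a single NonUniformBspline is available, the PID tracker only needs a densely sampled version of it. Here is a minimal sketch of that sampling step, assuming fast_planner's NonUniformBspline interface (the same getTimeSum() and evaluateDeBoorT() calls used in mpc_node.cpp below):

#include <vector>
#include <Eigen/Dense>
#include "bspline/non_uniform_bspline.h"

// Sample the fitted B-spline at a fixed time step to obtain one dense,
// continuous list of points for the tracker to follow.
std::vector<Eigen::VectorXd> samplePath(fast_planner::NonUniformBspline& traj,
                                        double dt = 0.05) {
  std::vector<Eigen::VectorXd> points;
  const double duration = traj.getTimeSum();
  for (double t = 0.0; t <= duration; t += dt) {
    points.push_back(traj.evaluateDeBoorT(t));  // point on the curve at time t
  }
  return points;
}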

De Boor trajectory generation

PID trajectory tracking (De Boor fitting)

II. MPC-Based Trajectory Tracking

1. Implementation

mpc_node.cpp

#include <ros/ros.h>
#include <geometry_msgs/Twist.h>
#include <nav_msgs/Odometry.h>
#include <nav_msgs/Path.h>
#include <geometry_msgs/PoseStamped.h>
#include <visualization_msgs/Marker.h>
#include <visualization_msgs/MarkerArray.h>
#include <tf2/utils.h>   // tf2::getYaw
#include <memory>        // std::unique_ptr
#include "bspline/non_uniform_bspline.h"
#include "mpc_tracking/Bspline.h"
#include "std_msgs/Empty.h"

#include "mpc_tracking/mpc.h"


using namespace std;
using fast_planner::NonUniformBspline;

ros::Publisher cmd_vel_pub, motion_path_pub, predict_path_pub;
nav_msgs::Path predict_path, motion_path;
nav_msgs::Odometry odom;

bool receive_traj = false;
vector<NonUniformBspline> traj;
double traj_duration;
ros::Time start_time;

ros::Timer control_cmd_pub, path_pub;

const int N = 15;
const double dt = 0.1;
const double u_max = 0.5;
const double w_max =1;
vector<double> weights = {10,10,1,1,1,0}; //Q,R
Eigen::Vector3d current_state;

unique_ptr<Mpc> mpc_ptr;


void bsplineCallback(mpc_tracking::BsplineConstPtr msg) {
  // parse pos traj
  Eigen::MatrixXd pos_pts(msg->pos_pts.size(), 3);
  for (int i = 0; i < msg->pos_pts.size(); ++i) {
    pos_pts(i, 0) = msg->pos_pts[i].x;
    pos_pts(i, 1) = msg->pos_pts[i].y;
    pos_pts(i, 2) = msg->pos_pts[i].z;
  }

  Eigen::VectorXd knots(msg->knots.size());
  for (int i = 0; i < msg->knots.size(); ++i) {
    knots(i) = msg->knots[i];
  }

  NonUniformBspline pos_traj(pos_pts, msg->order, 0.1);
  pos_traj.setKnot(knots);

  // parse yaw traj
  Eigen::MatrixXd yaw_pts(msg->yaw_pts.size(), 1);
  for (int i = 0; i < msg->yaw_pts.size(); ++i) {
    yaw_pts(i, 0) = msg->yaw_pts[i];
  }

  NonUniformBspline yaw_traj(yaw_pts, msg->order, msg->yaw_dt);

  start_time = msg->start_time;

  traj.clear();
  traj.push_back(pos_traj);
  traj.push_back(traj[0].getDerivative());
  traj.push_back(traj[1].getDerivative());
  traj.push_back(yaw_traj);
  traj.push_back(yaw_traj.getDerivative());

  traj_duration = traj[0].getTimeSum();
  std::cout<<"-----wjx-----"<<std::endl;
  std::cout<<"traj_duration: "<<traj_duration<<std::endl;
  std::cout<<"-----wjx-----"<<std::endl;

  receive_traj = true;
}

void replanCallback(std_msgs::Empty msg) {
  /* reset duration */
  const double time_out = 0.01;
  ros::Time time_now = ros::Time::now();
  double t_stop = (time_now - start_time).toSec() + time_out;
  traj_duration = min(t_stop, traj_duration);
}

void odomCallback(const nav_msgs::Odometry &msg) {
    odom = msg;
    current_state(0) = msg.pose.pose.position.x;
    current_state(1) = msg.pose.pose.position.y;
    current_state(2) = tf2::getYaw(msg.pose.pose.orientation);

}

void publish_control_cmd(const ros::TimerEvent &e) {
    if (!receive_traj) return;
    
    ros::Time time_now = ros::Time::now();
    double t_cur = (time_now - start_time).toSec();
    Eigen::Vector3d pos, vel, acc, pos_f;
    double yaw, yawdot;

    Eigen::MatrixXd desired_state = Eigen::MatrixXd::Zero(N+1, 3);

    if (t_cur + (N-1) * dt <= traj_duration && t_cur > 0) {
      // The whole prediction horizon lies within the trajectory:
      // sample the reference state for every step from the B-spline.
      for (int i = 0; i <= N; ++i) {
        pos = traj[0].evaluateDeBoorT(t_cur + i * dt);
        vel = traj[1].evaluateDeBoorT(t_cur + i * dt);
        acc = traj[2].evaluateDeBoorT(t_cur + i * dt);
        yaw = traj[3].evaluateDeBoorT(t_cur + i * dt)[0];
        yawdot = traj[4].evaluateDeBoorT(t_cur + i * dt)[0];

        desired_state(i, 0) = pos[0];
        desired_state(i, 1) = pos[1];
        desired_state(i, 2) = yaw;
      }

    } else if (t_cur + (N-1) * dt > traj_duration && t_cur < traj_duration) {
        // The horizon runs past the end of the trajectory: sample what is
        // left, then hold the final state for the remaining steps.
        int more_num = (t_cur + (N-1) * dt - traj_duration) / dt;
        for (int i = 0; i < N - more_num; ++i) {
          pos = traj[0].evaluateDeBoorT(t_cur + i * dt);
          vel = traj[1].evaluateDeBoorT(t_cur + i * dt);
          acc = traj[2].evaluateDeBoorT(t_cur + i * dt);
          yaw = traj[3].evaluateDeBoorT(t_cur + i * dt)[0];
          yawdot = traj[4].evaluateDeBoorT(t_cur + i * dt)[0];

          desired_state(i, 0) = pos(0);
          desired_state(i, 1) = pos(1);
          desired_state(i, 2) = yaw;          
        }
        for (int i = N - more_num; i < N; ++i) {
          pos = traj[0].evaluateDeBoorT(traj_duration);
          vel.setZero();
          acc.setZero();
          yaw = traj[3].evaluateDeBoorT(traj_duration)[0];
          yawdot = traj[4].evaluateDeBoorT(traj_duration)[0];

          desired_state(i, 0) = pos(0);
          desired_state(i, 1) = pos(1);
          desired_state(i, 2) = yaw;
        }
    } else if (t_cur >= traj_duration)  {
      // Past the end of the trajectory: hold the final state.
      pos = traj[0].evaluateDeBoorT(traj_duration);
      vel.setZero();
      acc.setZero();
      yaw = traj[3].evaluateDeBoorT(traj_duration)[0];
      yawdot = traj[4].evaluateDeBoorT(traj_duration)[0];
      for (int i = 0; i <= N; ++i) {
          desired_state(i, 0) = pos(0);
          desired_state(i, 1) = pos(1);
          desired_state(i, 2) = yaw;
      }
    } else {
      //cout << "[Traj server]: invalid time." << endl;
  }
  
    Eigen::MatrixXd desired_state2 = desired_state.transpose();
    ros::Time solve_start = ros::Time::now();
    auto success = mpc_ptr->solve(current_state, desired_state2);
    cout<<"desired_state2:"<<desired_state2<<endl;
    //cout << "solve success" << endl;
    ros::Time solve_end = ros::Time::now();
    double t_cur1 = (solve_end - solve_start).toSec();
   // cout << "solve time:" << t_cur1 << endl;

    geometry_msgs::Twist cmd;
    auto control_cmd = mpc_ptr->getFirstU();
    //cout << "got cmd" << endl;

    //cout << "linear:" << control_cmd[0] << endl;
    //cout << "angular:" << control_cmd[1] << endl;

    cmd.linear.x = control_cmd[0];
    //cmd.linear.y = control_cmd[1];
    cmd.angular.z = control_cmd[1];



    cmd_vel_pub.publish(cmd);
    //cout << "u:" << result[0] << " " << "r:" << result[1] << endl;

    predict_path.header.frame_id = "odom";
    predict_path.header.stamp = ros::Time::now();
    geometry_msgs::PoseStamped pose_msg;
    geometry_msgs::Point pt;
    auto predict_states = mpc_ptr->getPredictX();
    //cout << "got predict x" << endl;
    for (int i = 0; i < predict_states.size(); i += 2) {
        pose_msg.pose.position.x = predict_states[i];
        pose_msg.pose.position.y = predict_states[i + 1];
        predict_path.poses.push_back(pose_msg);
    }
    predict_path_pub.publish(predict_path);
    
    predict_path.poses.clear();
}


int main(int argc, char **argv)
{
    ros::init(argc, argv, "mpc_tracking_node");
    ros::NodeHandle nh;
    mpc_ptr.reset(new Mpc());

    cmd_vel_pub = nh.advertise<geometry_msgs::Twist>("/cmd_vel", 1);
    predict_path_pub = nh.advertise<nav_msgs::Path>("/predict_path", 1);
    motion_path_pub = nh.advertise<nav_msgs::Path>("/motion_path", 1);
   
    ros::Subscriber odom_sub = nh.subscribe("/odom", 1, &odomCallback);
    ros::Subscriber bspline_sub = nh.subscribe("planning/bspline", 10, bsplineCallback);
    ros::Subscriber replan_sub = nh.subscribe("planning/replan", 10, replanCallback);

    control_cmd_pub = nh.createTimer(ros::Duration(0.1), publish_control_cmd);
    
    ros::spin();
    return 0;

}

mpc.cpp

#include "mpc_tracking/mpc.h"

Mpc::Mpc() {
    
    
    N_ = 15;
    dt_ = 0.1;
    
    u_max_ = 0.6;
    w_max_ =2.0;
    //vector<double> weights = {10,10,1,1,1}; //Q,R
   vector<double> weights = {20,20,1,1,1}; //Q,R
    u_min_ = - u_max_;
    w_min_ = - w_max_;
    
    Q_ = DM::zeros(3,3); // initialize the size before indexing
    R_ = DM::zeros(2,2);
    

    setWeights(weights);
    kinematic_equation_ = setKinematicEquation();
}
Mpc::~Mpc() {}

void Mpc::setWeights(vector<double> weights) {
    //cout << "setweights" << endl;
    Q_(0, 0) = weights[0];
    Q_(1, 1) = weights[1];
    Q_(2, 2) = weights[2];
    R_(0, 0) = weights[3];
    R_(1, 1) = weights[4];
    //R_(2, 2) = weights[5];
    //cout << "set weight finish" << endl;

}

Function Mpc::setKinematicEquation() {
    //cout << "set kinematic" << endl;
    MX x = MX::sym("x");
    MX y = MX::sym("y");
    MX theta = MX::sym("theta");
    MX state_vars = MX::vertcat({x, y, theta});

    MX v = MX::sym("v");
    MX w = MX::sym("w");
    MX control_vars = MX::vertcat({v, w});
    
    //rhs means right hand side
    MX rhs = MX::vertcat({v * MX::cos(theta), v * MX::sin(theta), w});
    return Function("kinematic_equation", {state_vars, control_vars}, {rhs});
}

bool Mpc::solve(Eigen::Vector3d current_states, Eigen::MatrixXd desired_states) {
    
    //cout << "jinqiujiele" << endl;
    Opti opti = Opti();

    Slice all;

    MX cost = 0;
    X = opti.variable(3, N_ + 1);
    U = opti.variable(2, N_);
    MX x = X(0, all);
    MX y = X(1, all);
    MX theta = X(2, all);
    MX v = U(0, all);
    MX w = U(1, all);
    //MX w = U(2, all);


    MX X_ref = opti.parameter(3, N_ + 1);
    MX X_cur = opti.parameter(3);
    DM x_tmp1 = {current_states[0], current_states[1], current_states[2]};

    opti.set_value(X_cur, x_tmp1);  //set current state
    cout << "set current state success" << endl;

    
    // copy the desired states column by column (column-major)
    vector<double> X_ref_v(desired_states.data(), desired_states.data() + desired_states.size());
    //auto tp1 = std::chrono::steady_clock::now();
    DM X_ref_d(X_ref_v);
    
    //X_ref_d.resize(3, N_ + 1);
    
    //cout << "desired_states:" << desired_states << endl;
    //cout << "X_ref_v:" << X_ref_v << endl;
    //cout << "X_ref_d:" << X_ref_d << endl;
    // DM x_tmp2(3, N_ + 1);
    // for (int i = 0; i < 3; ++i) {
    //     for (int j = 0; j <= N_; ++j) {
    //         x_tmp2(i, j) = desired_states(i, j);
    //     }
    // }
    X_ref = MX::reshape(X_ref_d, 3, N_ + 1);
    
    
    
    //opti.set_value(X_ref, X_ref_d);  //set reference traj

    // auto tp2 = std::chrono::steady_clock::now();
    // cout <<"set trajectory time:" 
    // << chrono::duration_cast<chrono::microseconds>(tp2 - tp1).count() 
    // << "microseconds" << endl;
    //cout << "set reference traj success" << endl;
    //cout << "x_ref:" << X_ref.size() << endl;

    //set costfunction
    for (int i = 0; i < N_; ++i) {
        MX X_err = X(all, i) - X_ref(all, i); 
        MX U_0 = U(all, i);
        //cout << "U_0 size:" << U_0.size() << endl;
        //cout << "cost size:" << cost_.size() << endl;
        cost += MX::mtimes({X_err.T(), Q_, X_err});
        //cout << "cost size:" << cost_.size() << endl; 
        cost += MX::mtimes({U_0.T(), R_, U_0});
        //cout << "cost size:" << cost_.size() << endl;
    }
    //cout << "cost size:" << cost_.size() << endl;
    cost += MX::mtimes({(X(all, N_) - X_ref(all, N_)).T(), Q_,
                        X(all, N_) - X_ref(all, N_)});
    //cout << "cost:" << cost << endl;
    opti.minimize(cost);
    //cout << "set cost success" << endl;

    //kinematic constrains
    for (int i = 0; i < N_; ++i) {
        vector<MX> input(2);
        input[0] = X(all, i);
        input[1] = U(all, i);
        MX X_next = kinematic_equation_(input)[0] * dt_ + X(all, i);
        opti.subject_to(X_next == X(all, i + 1));
    }

    //init value
    opti.subject_to(X(all, 0) == X_cur);

    // linear and angular velocity limits
    

    opti.subject_to(0 <= v <= u_max_);
    opti.subject_to(w_min_ <= w <= w_max_);

    //set solver
    casadi::Dict solver_opts;
    solver_opts["expand"] = true; //MX change to SX for speed up
    solver_opts["ipopt.max_iter"] = 100;
    solver_opts["ipopt.print_level"] = 0;
    solver_opts["print_time"] = 0;
    solver_opts["ipopt.acceptable_tol"] = 1e-6;
    solver_opts["ipopt.acceptable_obj_change_tol"] = 1e-6;

    opti.solver("ipopt", solver_opts);

    //auto sol = opti.solve();
    solution_ = std::make_unique<casadi::OptiSol>(opti.solve());

    return true;
}

vector<double> Mpc::getFirstU() {
    vector<double> res;
    
        auto first_v =  solution_->value(U)(0,0);
        auto first_w = solution_->value(U)(1,0);
        
        cout << "first_v" << first_v << " " << "first_w" << first_w << endl;

        // for (int i = 0; i < N_; i++)
        // {
        //      cout << "v_" << i <<"  "<<solution_->value(U)(0,i)  << " " << "w_" << i<<solution_->value(U)(1,i) << endl;
        // }
        

        res.push_back(static_cast<double>(first_v));
        res.push_back(static_cast<double>(first_w));
        
        return res;
    
    
}

vector<double> Mpc::getPredictX() {
    vector<double> res;
    auto predict_x = solution_->value(X);
    cout << "nomal" << endl;
    //cout << "predict_x size :" << predict_x.size() << endl;
    for (int i = 0; i <= N_; ++i) {
        res.push_back(static_cast<double>(predict_x(0, i)));
        res.push_back(static_cast<double>(predict_x(1, i)));
    }
    return res;
}
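
The kinematic constraints in solve() encode a forward-Euler discretization of the differential-drive (unicycle) model, x_{k+1} = x_k + f(x_k, u_k) * dt. For clarity only, the same step written in plain Eigen (this helper is not part of mpc.cpp):

#include <cmath>
#include <Eigen/Dense>

// One forward-Euler step of the unicycle model used in the MPC constraints.
// state = (x, y, theta), v = linear velocity, w = angular velocity.
Eigen::Vector3d unicycleStep(const Eigen::Vector3d& state,
                             double v, double w, double dt) {
  Eigen::Vector3d next;
  next(0) = state(0) + v * std::cos(state(2)) * dt;
  next(1) = state(1) + v * std::sin(state(2)) * dt;
  next(2) = state(2) + w * dt;
  return next;
}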

2. Notes

At first the robot could not keep up with the trajectory planned by fast_planner. I assumed the MPC controller was at fault, but the same problem appeared with several other controllers I tried. After some thought, the real cause turned out to be the Turtlebot3's kinematic limits: fast_planner plans velocities that are too high for this robot, so the velocities solved by the MPC are larger than the Turtlebot3 can execute. The fix was to lower the planning velocity in fast_planner (a clamp on the published command, sketched below, is also a cheap safety net).
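
A sketch of such a clamp is below. The default limits are the commonly quoted values for the TurtleBot3 Burger (roughly 0.22 m/s and 2.84 rad/s); treat them as assumptions, replace them with your model's values, and keep u_max_/w_max_ in mpc.cpp consistent with them.

#include <algorithm>
#include <geometry_msgs/Twist.h>

// Clamp the MPC output to what the platform can actually execute.
// Default limits are assumed TurtleBot3 Burger values; verify for your robot.
geometry_msgs::Twist clampCmd(geometry_msgs::Twist cmd,
                              double v_limit = 0.22, double w_limit = 2.84) {
  cmd.linear.x  = std::max(-v_limit, std::min(cmd.linear.x,  v_limit));
  cmd.angular.z = std::max(-w_limit, std::min(cmd.angular.z, w_limit));
  return cmd;
}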


fast_planner + MPC trajectory tracking


Summary

That covers trajectory tracking based on the Turtlebot3 model; later posts will extend it to multiple robots…
