Reposted from: https://blog.csdn.net/moyu123456789/article/details/100988989
1. The main entry function
The entry point of the feature_tracker node is the main function in feature_tracker_node.cpp. The code is as follows:
int main(int argc, char** argv)
{
    ros::init(argc, argv, "feature_tracker");
    ros::NodeHandle n("~");
    ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::level::Info);
    // read the parameters
    readParameters(n);
    // NUM_OF_CAM is the number of cameras, 1 here
    for (int i = 0; i < NUM_OF_CAM; i++)
        // read the camera intrinsics
        trackerData[i].readIntrinsicParameter(CAM_NAMES[i]);
    // check whether a fisheye camera is used
    if (FISHEYE)
    {
        for (int i = 0; i < NUM_OF_CAM; i++)
        {
            // load the fisheye mask
            trackerData[i].fisheye_mask = cv::imread(FISHEYE_MASK, 0);
            if (!trackerData[i].fisheye_mask.data)
            {
                ROS_INFO("load mask fail");
                ROS_BREAK();
            }
            else
                ROS_INFO("load mask success");
        }
    }
    // subscribe to the image topic; the callback is img_callback
    ros::Subscriber sub_img = n.subscribe(IMAGE_TOPIC, 100, img_callback);
    // publishers: pub_img for the tracked feature points, pub_match for the image with the features drawn on it
    pub_img = n.advertise<sensor_msgs::PointCloud>("feature", 1000);  // PointCloud message carrying the features tracked in the camera image
    pub_match = n.advertise<sensor_msgs::Image>("feature_img", 1000); // Image message with the tracked features marked
    pub_restart = n.advertise<std_msgs::Bool>("restart", 1000);       // system-restart message
    ros::spin();
    return 0;
}
As shown above, after the ROS initialization and node creation, main performs the following steps:
1) Parameter loading, including the camera intrinsics. The configuration files read here live under src/VINS-Mono/config/.
2) Fisheye check. For a fisheye camera, fisheye_mask.jpg is loaded to mask out edge noise in the image.
3) Topic subscription and advertisement. The node subscribes to IMAGE_TOPIC (/cam0/image_raw) and creates three publishers: pub_img (topic feature), pub_match (topic feature_img), and pub_restart (topic restart).
2. Reading the configuration parameters
Location: feature_tracker/src/parameters.cpp
The parameter-loading function readParameters looks like this:
/**
 * Read the configured parameters from the configuration file.
 */
void readParameters(ros::NodeHandle &n)
{
    std::string config_file;
    config_file = readParam<std::string>(n, "config_file");
    cv::FileStorage fsSettings(config_file, cv::FileStorage::READ);
    if (!fsSettings.isOpened())
    {
        std::cerr << "ERROR: Wrong path to settings" << std::endl;
    }
    std::string VINS_FOLDER_PATH = readParam<std::string>(n, "vins_folder");
    // IMAGE_TOPIC: "/cam0/image_raw"
    fsSettings["image_topic"] >> IMAGE_TOPIC;
    // IMU_TOPIC: "/imu0"
    fsSettings["imu_topic"] >> IMU_TOPIC;
    MAX_CNT = fsSettings["max_cnt"];         // max number of features to track; 150 here
    MIN_DIST = fsSettings["min_dist"];       // minimum distance between two features
    ROW = fsSettings["image_height"];        // image height
    COL = fsSettings["image_width"];         // image width
    FREQ = fsSettings["freq"];               // publish frequency of the tracking result; at least 10 Hz for good estimation
    F_THRESHOLD = fsSettings["F_threshold"]; // RANSAC threshold (pixels)
    SHOW_TRACK = fsSettings["show_track"];   // publish the tracking image as a topic
    EQUALIZE = fsSettings["equalize"];       // if the image is too dark or too bright, turn on equalization to find enough features
    FISHEYE = fsSettings["fisheye"];         // 0 here
    if (FISHEYE == 1)
        FISHEYE_MASK = VINS_FOLDER_PATH + "config/fisheye_mask.jpg";
    CAM_NAMES.push_back(config_file);
    WINDOW_SIZE = 20;
    STEREO_TRACK = false;
    FOCAL_LENGTH = 460;
    PUB_THIS_FRAME = false;
    if (FREQ == 0)
        FREQ = 100;
    fsSettings.release();
}
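The readParam<T> helper used above pulls a value off the ROS parameter server and aborts the node if it is missing; a minimal sketch of such a helper, consistent with the calls above:

#include <ros/ros.h>

template <typename T>
T readParam(ros::NodeHandle &n, std::string name)
{
    T ans;
    if (n.getParam(name, ans))
    {
        ROS_INFO_STREAM("Loaded " << name << ": " << ans);
    }
    else
    {
        // the node cannot run without this parameter, so shut it down
        ROS_ERROR_STREAM("Failed to load " << name);
        n.shutdown();
    }
    return ans;
}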
Assume the configuration file being read is src/VINS-Mono/config/euroc/euroc_config.yaml:
%YAML:1.0

#common parameters
imu_topic: "/imu0"
image_topic: "/cam0/image_raw"
output_path: "/home/shaozu/output/"

#camera calibration
model_type: PINHOLE
camera_name: camera
image_width: 752
image_height: 480
distortion_parameters:   # distortion coefficients
   k1: -2.917e-01
   k2: 8.228e-02
   p1: 5.333e-05
   p2: -1.578e-04
projection_parameters:   # camera intrinsics
   fx: 4.616e+02
   fy: 4.603e+02
   cx: 3.630e+02
   cy: 2.481e+02

# Extrinsic parameter between IMU and Camera.
estimate_extrinsic: 0   # 0  Have accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
                        # 1  Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
                        # 2  Don't know anything about extrinsic parameters. You don't need to give R,T. We will try to calibrate it. Do some rotation movement at the beginning.
#If you choose 0 or 1, you should write down the following matrix.
#Rotation from camera frame to imu frame, imu^R_cam
extrinsicRotation: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [0.0148655429818, -0.999880929698, 0.00414029679422,
          0.999557249008, 0.0149672133247, 0.025715529948,
          -0.0257744366974, 0.00375618835797, 0.999660727178]
#Translation from camera frame to imu frame, imu^T_cam
extrinsicTranslation: !!opencv-matrix
   rows: 3
   cols: 1
   dt: d
   data: [-0.0216401454975, -0.064676986768, 0.00981073058949]

#feature tracker parameters
max_cnt: 150            # max feature number in feature tracking
min_dist: 30            # min distance between two features
freq: 10                # frequency (Hz) at which the tracking result is published. At least 10 Hz for good estimation. If set to 0, the frequency equals the raw image rate
F_threshold: 1.0        # RANSAC threshold (pixel)
show_track: 1           # publish tracking image as topic
equalize: 1             # if the image is too dark or too bright, turn on equalization to find enough features
fisheye: 0              # if using a fisheye, turn this on; a circular mask will be loaded to remove edge noise

#optimization parameters
max_solver_time: 0.04   # max solver iteration time (ms), to guarantee real time
max_num_iterations: 8   # max solver iterations, to guarantee real time
keyframe_parallax: 10.0 # keyframe selection threshold (pixel)

#imu parameters       The more accurate parameters you provide, the better the performance
acc_n: 0.08             # accelerometer measurement noise standard deviation.      #0.2   0.04
gyr_n: 0.004            # gyroscope measurement noise standard deviation.          #0.05  0.004
acc_w: 0.00004          # accelerometer bias random walk noise standard deviation. #0.02
gyr_w: 2.0e-6           # gyroscope bias random walk noise standard deviation.     #4.0e-5
g_norm: 9.81007         # gravity magnitude

#loop closure parameters
loop_closure: 1               # start loop closure
load_previous_pose_graph: 0   # load and reuse previous pose graph; load from 'pose_graph_save_path'
fast_relocalization: 0        # useful in real-time and large projects
pose_graph_save_path: "/home/shaozu/output/pose_graph/" # save and load path

#unsynchronization parameters
estimate_td: 0          # online estimate time offset between camera and imu
td: 0.0                 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)

#rolling shutter parameters
rolling_shutter: 0      # 0: global shutter camera, 1: rolling shutter camera
rolling_shutter_tr: 0   # unit: s. rolling shutter readout time per frame (from data sheet)

#visualization parameters
save_image: 1                # save images in the pose graph for visualization purposes; set 0 to disable
visualize_imu_forward: 0     # output imu forward propagation to achieve low-latency, high-frequency results
visualize_camera_size: 0.4   # size of camera marker in RVIZ
The configuration file above contains the camera intrinsics and distortion coefficients, the IMU parameters, and the parameters needed by the various nodes of the system; while reading the code it is worth checking how each of these parameters is used.
3. The img_callback callback function
img_callback receives each raw image and tracks the feature points in the frame with optical flow. The code is as follows:
void img_callback(const sensor_msgs::ImageConstPtr &img_msg)
{
    // flag marking the first image
    if (first_image_flag)
    {
        first_image_flag = false;
        first_image_time = img_msg->header.stamp.toSec();
        last_image_time = img_msg->header.stamp.toSec();
        return;
    }
    // detect unstable camera stream
    if (img_msg->header.stamp.toSec() - last_image_time > 1.0 || img_msg->header.stamp.toSec() < last_image_time)
    {
        ROS_WARN("image discontinue! reset the feature tracker!");
        first_image_flag = true;
        last_image_time = 0;
        pub_count = 1;
        std_msgs::Bool restart_flag;
        restart_flag.data = true;
        // publish a message to restart the system
        pub_restart.publish(restart_flag);
        return;
    }
    last_image_time = img_msg->header.stamp.toSec();
    // frequency control
    // PUB_THIS_FRAME decides whether the detected features are packed into the /feature_tracker/feature topic (FREQ sets the interval).
    // What is computed here is the average number of published images per second, ensuring no more than FREQ frames are published per second.
    if (round(1.0 * pub_count / (img_msg->header.stamp.toSec() - first_image_time)) <= FREQ)
    {
        PUB_THIS_FRAME = true;
        // reset the frequency control once the running rate is within 1% of FREQ
        if (abs(1.0 * pub_count / (img_msg->header.stamp.toSec() - first_image_time) - FREQ) < 0.01 * FREQ)
        {
            first_image_time = img_msg->header.stamp.toSec();
            pub_count = 0;
        }
    }
    else
        PUB_THIS_FRAME = false;
    cv_bridge::CvImageConstPtr ptr;
    if (img_msg->encoding == "8UC1")
    {
        sensor_msgs::Image img;
        img.header = img_msg->header;
        img.height = img_msg->height;
        img.width = img_msg->width;
        img.is_bigendian = img_msg->is_bigendian;
        img.step = img_msg->step;
        img.data = img_msg->data;
        img.encoding = "mono8";
        /**
         * cv_bridge::toCvCopy obtains a copy of the image data from the ROS image message,
         * i.e. it extracts the image from the ROS sensor_msgs message.
         * cv_bridge is the bridge between ROS messages and OpenCV images, converting between the two.
         */
        ptr = cv_bridge::toCvCopy(img, sensor_msgs::image_encodings::MONO8);
    }
    else
        ptr = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::MONO8);
    // the image data
    cv::Mat show_img = ptr->image;
    TicToc t_r;
    for (int i = 0; i < NUM_OF_CAM; i++)
    {
        ROS_DEBUG("processing camera %d", i);
        // monocular case
        if (i != 1 || !STEREO_TRACK)
            // feed the image into trackerData
            trackerData[i].readImage(ptr->image.rowRange(ROW * i, ROW * (i + 1)), img_msg->header.stamp.toSec());
        else
        {
            // EQUALIZE is 1 in the config: if the image is too dark or too bright, turn on equalization to find enough features
            if (EQUALIZE)
            {
                cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
                clahe->apply(ptr->image.rowRange(ROW * i, ROW * (i + 1)), trackerData[i].cur_img);
            }
            else
                trackerData[i].cur_img = ptr->image.rowRange(ROW * i, ROW * (i + 1));
        }
        // SHOW_UNDISTORTION is defined as 0
#if SHOW_UNDISTORTION
        trackerData[i].showUndistortion("undistrotion_" + std::to_string(i));
#endif
    }
    for (unsigned int i = 0;; i++)
    {
        bool completed = false;
        for (int j = 0; j < NUM_OF_CAM; j++)
            if (j != 1 || !STEREO_TRACK)
                // update the feature ids
                completed |= trackerData[j].updateID(i);
        if (!completed)
            break;
    }
    if (PUB_THIS_FRAME)
    {
        pub_count++;
        sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud);
        // feature point ids
        sensor_msgs::ChannelFloat32 id_of_point;
        // u, v image coordinates
        sensor_msgs::ChannelFloat32 u_of_point;
        sensor_msgs::ChannelFloat32 v_of_point;
        sensor_msgs::ChannelFloat32 velocity_x_of_point;
        sensor_msgs::ChannelFloat32 velocity_y_of_point;
        feature_points->header = img_msg->header;
        feature_points->header.frame_id = "world";
        vector<set<int>> hash_ids(NUM_OF_CAM);
        for (int i = 0; i < NUM_OF_CAM; i++)
        {
            auto &un_pts = trackerData[i].cur_un_pts;
            auto &cur_pts = trackerData[i].cur_pts;
            auto &ids = trackerData[i].ids;
            auto &pts_velocity = trackerData[i].pts_velocity;
            for (unsigned int j = 0; j < ids.size(); j++)
            {
                if (trackerData[i].track_cnt[j] > 1)
                {
                    int p_id = ids[j];
                    hash_ids[i].insert(p_id);
                    geometry_msgs::Point32 p;
                    p.x = un_pts[j].x;
                    p.y = un_pts[j].y;
                    p.z = 1;
                    feature_points->points.push_back(p);
                    id_of_point.values.push_back(p_id * NUM_OF_CAM + i);
                    // pixel x, y coordinates
                    u_of_point.values.push_back(cur_pts[j].x);
                    v_of_point.values.push_back(cur_pts[j].y);
                    // velocity along the x axis
                    velocity_x_of_point.values.push_back(pts_velocity[j].x);
                    // velocity along the y axis
                    velocity_y_of_point.values.push_back(pts_velocity[j].y);
                }
            }
        }
        feature_points->channels.push_back(id_of_point);
        feature_points->channels.push_back(u_of_point);
        feature_points->channels.push_back(v_of_point);
        feature_points->channels.push_back(velocity_x_of_point);
        feature_points->channels.push_back(velocity_y_of_point);
        ROS_DEBUG("publish %f, at %f", feature_points->header.stamp.toSec(), ros::Time::now().toSec());
        // skip the first image, since there is no optical-flow velocity on the first image
        if (!init_pub)
        {
            init_pub = 1;
        }
        else
            pub_img.publish(feature_points); // publish the detected features on topic feature, which is consumed by vins_estimator
        if (SHOW_TRACK)
        {
            // convert ptr to a BGR8 image
            ptr = cv_bridge::cvtColor(ptr, sensor_msgs::image_encodings::BGR8);
            //cv::Mat stereo_img(ROW * NUM_OF_CAM, COL, CV_8UC3);
            cv::Mat stereo_img = ptr->image;
            for (int i = 0; i < NUM_OF_CAM; i++)
            {
                cv::Mat tmp_img = stereo_img.rowRange(i * ROW, (i + 1) * ROW);
                cv::cvtColor(show_img, tmp_img, CV_GRAY2RGB);
                for (unsigned int j = 0; j < trackerData[i].cur_pts.size(); j++)
                {
                    // color each feature by how long it has been tracked: the redder, the longer it has been seen;
                    // if most features in an image are blue, the front-end tracker is performing poorly
                    double len = std::min(1.0, 1.0 * trackerData[i].track_cnt[j] / WINDOW_SIZE);
                    /**
                     * cv::circle draws a circle:
                     * 1st argument: tmp_img, the image
                     * 2nd argument: trackerData[i].cur_pts[j], the center of the circle
                     * 3rd argument: 2, the radius
                     * 4th argument: the color; here len determines the color of the point
                     * 5th argument: the line thickness; larger values draw thicker outlines, negative values fill the circle
                     */
                    cv::circle(tmp_img, trackerData[i].cur_pts[j], 2, cv::Scalar(255 * (1 - len), 0, 255 * len), 2);
                    //draw speed line
                    /*
                    Vector2d tmp_cur_un_pts (trackerData[i].cur_un_pts[j].x, trackerData[i].cur_un_pts[j].y);
                    Vector2d tmp_pts_velocity (trackerData[i].pts_velocity[j].x, trackerData[i].pts_velocity[j].y);
                    Vector3d tmp_prev_un_pts;
                    tmp_prev_un_pts.head(2) = tmp_cur_un_pts - 0.10 * tmp_pts_velocity;
                    tmp_prev_un_pts.z() = 1;
                    Vector2d tmp_prev_uv;
                    trackerData[i].m_camera->spaceToPlane(tmp_prev_un_pts, tmp_prev_uv);
                    cv::line(tmp_img, trackerData[i].cur_pts[j], cv::Point2f(tmp_prev_uv.x(), tmp_prev_uv.y()), cv::Scalar(255 , 0, 0), 1 , 8, 0);
                    */
                    //char name[10];
                    //sprintf(name, "%d", trackerData[i].ids[j]);
                    //cv::putText(tmp_img, name, trackerData[i].cur_pts[j], cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
                }
            }
            //cv::imshow("vis", stereo_img);
            //cv::waitKey(5);
            // the topic published here is feature_img; it is shown in RViz as tracked_image, where the red dots mark the features
            pub_match.publish(ptr->toImageMsg());
        }
    }
    ROS_INFO("whole feature tracker processing costs: %f", t_r.toc());
}
4. The code in feature_tracker.cpp
img_callback calls into the functions in feature_tracker.cpp, shown below:
#include "feature_tracker.h"
int FeatureTracker::n_id = 0;
//判断pt这个点有没有在边缘之内
bool inBorder(const cv::Point2f &pt)
{
const int BORDER_SIZE = 1;
//cvRound():返回跟参数最接近的整数值,即四舍五入
int img_x = cvRound(pt.x);
int img_y = cvRound(pt.y);
/**
* 边缘长度为BORDER_SIZE,那么pt的x坐标的范围为[BORDER_SIZE,COL-BORDER_SIZE]
* pt的y坐标的范围为[BORDER_SIZE, ROW - BORDER_SIZE]
*/
return BORDER_SIZE <= img_x && img_x < COL - BORDER_SIZE && BORDER_SIZE <= img_y && img_y < ROW - BORDER_SIZE;
}
//将所有status数组中值为1的数组元素调整到前0~(j-1)的位置,此时数组大小设置为j
void reduceVector(vector<cv::Point2f> &v, vector<uchar> status)
{
int j = 0;
for (int i = 0; i < int(v.size()); i++)
if (status[i])
v[j++] = v[i];
v.resize(j);
}
void reduceVector(vector<int> &v, vector<uchar> status)
{
int j = 0;
for (int i = 0; i < int(v.size()); i++)
if (status[i])
v[j++] = v[i];
v.resize(j);
}
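// A quick illustration of reduceVector with hypothetical values:
//   v      = {10, 20, 30, 40}
//   status = { 1,  0,  1,  1}
//   after reduceVector(v, status): v == {10, 30, 40}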
FeatureTracker::FeatureTracker()
{
}

void FeatureTracker::setMask()
{
    if (FISHEYE)
        mask = fisheye_mask.clone();
    else
        mask = cv::Mat(ROW, COL, CV_8UC1, cv::Scalar(255));
    // prefer to keep features that have been tracked for a long time
    vector<pair<int, pair<cv::Point2f, int>>> cnt_pts_id;
    for (unsigned int i = 0; i < forw_pts.size(); i++)
        cnt_pts_id.push_back(make_pair(track_cnt[i], make_pair(forw_pts[i], ids[i])));
    sort(cnt_pts_id.begin(), cnt_pts_id.end(), [](const pair<int, pair<cv::Point2f, int>> &a, const pair<int, pair<cv::Point2f, int>> &b)
    {
        return a.first > b.first;
    });
    forw_pts.clear();
    ids.clear();
    // track_cnt stores, for each successfully tracked corner, how many times it has been tracked
    track_cnt.clear();
    for (auto &it : cnt_pts_id)
    {
        if (mask.at<uchar>(it.second.first) == 255)
        {
            // corner coordinates in the image
            forw_pts.push_back(it.second.first);
            // ids of the currently tracked corners; these ids are crucial, as they encode the corner matches between frames
            ids.push_back(it.second.second);
            // number of frames in which each currently tracked corner has been seen
            track_cnt.push_back(it.first);
            cv::circle(mask, it.second.first, MIN_DIST, 0, -1);
        }
    }
}

/**
 * Add the newly detected corners.
 */
void FeatureTracker::addPoints()
{
    for (auto &p : n_pts)
    {
        forw_pts.push_back(p);
        ids.push_back(-1);
        track_cnt.push_back(1);
    }
}
/**
 * _img       the image
 * _cur_time  the timestamp
 */
void FeatureTracker::readImage(const cv::Mat &_img, double _cur_time)
{
    cv::Mat img;
    TicToc t_r;
    cur_time = _cur_time;
    if (EQUALIZE)
    {
        /**
         * Contrast Limited Adaptive Histogram Equalization (CLAHE).
         * See e.g. https://blog.csdn.net/panda1234lee/article/details/52852765
         * It enhances the image contrast.
         */
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
        TicToc t_c;
        // in apply(), _img is the input and img the output
        clahe->apply(_img, img);
        ROS_DEBUG("CLAHE costs: %fms", t_c.toc());
    }
    else
        img = _img;
    // an empty forw_img means this is the first call to readImage
    if (forw_img.empty())
    {
        // on the first call, initialize everything; img is the CLAHE-processed image
        prev_img = cur_img = forw_img = img;
    }
    else
    {
        // forw_img holds the current frame
        forw_img = img;
    }
    // forw_pts holds the corner coordinates tracked by optical flow in the current image
    forw_pts.clear();
    // cur_pts holds the corners of the previous frame
    if (cur_pts.size() > 0)
    {
        TicToc t_o;
        vector<uchar> status;
        vector<float> err;
        /**
         * LK optical flow. Optical flow describes how the intensity at each pixel moves over time;
         * it uses the temporal variation and correlation of pixel intensities across an image
         * sequence to determine the "motion" of each pixel position.
         * forw_pts receives the corner coordinates tracked into the current image.
         * status: each element is set to 1 if the flow for the corresponding feature was found, 0 otherwise.
         */
        cv::calcOpticalFlowPyrLK(cur_img, forw_img, cur_pts, forw_pts, status, err, cv::Size(21, 21), 3);
        // for every corner found by the optical flow, clear its status if it falls outside the image border
        for (int i = 0; i < int(forw_pts.size()); i++)
            if (status[i] && !inBorder(forw_pts[i]))
                status[i] = 0;
        reduceVector(prev_pts, status);
        reduceVector(cur_pts, status);
        reduceVector(forw_pts, status);
        reduceVector(ids, status);
        reduceVector(cur_un_pts, status);
        reduceVector(track_cnt, status);
        ROS_DEBUG("temporal optical flow costs: %fms", t_o.toc());
    }
    // track_cnt stores the number of times each surviving corner has been tracked
    for (auto &n : track_cnt)
        n++;
    if (PUB_THIS_FRAME)
    {
        rejectWithF();
        ROS_DEBUG("set mask begins");
        TicToc t_m;
        // set the mask so that newly extracted corners do not duplicate existing ones
        setMask();
        ROS_DEBUG("set mask costs %fms", t_m.toc());
        ROS_DEBUG("detect feature begins");
        TicToc t_t;
        // MAX_CNT = 150 is the maximum number of corners tracked by optical flow
        int n_max_cnt = MAX_CNT - static_cast<int>(forw_pts.size());
        if (n_max_cnt > 0)
        {
            if (mask.empty())
                cout << "mask is empty " << endl;
            if (mask.type() != CV_8UC1)
                cout << "mask type wrong " << endl;
            if (mask.size() != forw_img.size())
                cout << "wrong size " << endl;
            /**
             * Extract new corners; MAX_CNT - forw_pts.size() is the maximum number to extract.
             * The newly extracted corner coordinates are stored in n_pts.
             * MIN_DIST = 30 enforces the minimum distance between two neighboring corners.
             *
             * 1st argument: the input image (8-bit or 32-bit single channel).
             * 2nd argument: the detected corners, a vector or array depending on the given type. For a vector it should hold cv::Point2f; for a cv::Mat each row is one corner, with the x and y positions in two columns.
             * 3rd argument: the maximum number of corners to detect.
             * 4th argument: the quality level of the detected corners (usually between 0.01 and 0.10, never greater than 1.0).
             * 5th argument: the minimum distance between two neighboring corners (corners closer than this are merged).
             * 6th argument: the mask; if given, it must have the same size as the input image, and no corners are detected where the mask is 0.
             * 7th argument: blockSize, the neighborhood size used when computing the corner response; 3 is common, but larger values may suit high-resolution images.
             * 8th argument: the detection method; true uses the Harris detector, false the Shi-Tomasi method.
             * 9th argument: only used with the Harris detector; the default of 0.04 is usually best.
             */
            cv::goodFeaturesToTrack(forw_img, n_pts, MAX_CNT - forw_pts.size(), 0.01, MIN_DIST, mask);
        }
        else
            n_pts.clear();
        ROS_DEBUG("detect feature costs: %fms", t_t.toc());
        ROS_DEBUG("add feature begins");
        TicToc t_a;
        // add the newly detected corners n_pts to forw_pts and ids
        addPoints();
        ROS_DEBUG("selectFeature costs: %fms", t_a.toc());
    }
    prev_img = cur_img;
    prev_pts = cur_pts;
    prev_un_pts = cur_un_pts;
    cur_img = forw_img;
    cur_pts = forw_pts;
    // undistort the corner image coordinates and compute each corner's velocity
    undistortedPoints();
    prev_time = cur_time;
}
void FeatureTracker::rejectWithF()
{
    // at least 8 tracked corners are required
    if (forw_pts.size() >= 8)
    {
        ROS_DEBUG("FM ransac begins");
        TicToc t_f;
        vector<cv::Point2f> un_cur_pts(cur_pts.size()), un_forw_pts(forw_pts.size());
        for (unsigned int i = 0; i < cur_pts.size(); i++)
        {
            Eigen::Vector3d tmp_p;
            // lift the point from the image plane into projective space; tmp_p is the output (a 2d --> 3d conversion)
            // cur_pts holds the corners of the previous frame
            m_camera->liftProjective(Eigen::Vector2d(cur_pts[i].x, cur_pts[i].y), tmp_p);
            // project onto a virtual pinhole image; FOCAL_LENGTH is 460
            tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
            tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
            un_cur_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
            // forw_pts holds the corners tracked by optical flow in the current image
            m_camera->liftProjective(Eigen::Vector2d(forw_pts[i].x, forw_pts[i].y), tmp_p);
            tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
            tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
            un_forw_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
        }
        vector<uchar> status;
        /**
         * Compute the fundamental matrix from the corresponding point pairs of the previous
         * frame and the current frame; outliers are marked 0 in status.
         */
        cv::findFundamentalMat(un_cur_pts, un_forw_pts, cv::FM_RANSAC, F_THRESHOLD, 0.99, status);
        int size_a = cur_pts.size();
        reduceVector(prev_pts, status);
        reduceVector(cur_pts, status);
        reduceVector(forw_pts, status);
        reduceVector(cur_un_pts, status);
        reduceVector(ids, status);
        reduceVector(track_cnt, status);
        ROS_DEBUG("FM ransac: %d -> %lu: %f", size_a, forw_pts.size(), 1.0 * forw_pts.size() / size_a);
        ROS_DEBUG("FM ransac costs: %fms", t_f.toc());
    }
}

bool FeatureTracker::updateID(unsigned int i)
{
    if (i < ids.size())
    {
        if (ids[i] == -1)
            ids[i] = n_id++;
        return true;
    }
    else
        return false;
}
/**
 * Read the camera calibration intrinsics.
 */
void FeatureTracker::readIntrinsicParameter(const string &calib_file)
{
    ROS_INFO("reading paramerter of camera %s", calib_file.c_str());
    m_camera = CameraFactory::instance()->generateCameraFromYamlFile(calib_file);
}

void FeatureTracker::showUndistortion(const string &name)
{
    cv::Mat undistortedImg(ROW + 600, COL + 600, CV_8UC1, cv::Scalar(0));
    vector<Eigen::Vector2d> distortedp, undistortedp;
    for (int i = 0; i < COL; i++)
        for (int j = 0; j < ROW; j++)
        {
            Eigen::Vector2d a(i, j);
            Eigen::Vector3d b;
            m_camera->liftProjective(a, b);
            distortedp.push_back(a);
            undistortedp.push_back(Eigen::Vector2d(b.x() / b.z(), b.y() / b.z()));
            //printf("%f,%f->%f,%f,%f\n)\n", a.x(), a.y(), b.x(), b.y(), b.z());
        }
    for (int i = 0; i < int(undistortedp.size()); i++)
    {
        cv::Mat pp(3, 1, CV_32FC1);
        pp.at<float>(0, 0) = undistortedp[i].x() * FOCAL_LENGTH + COL / 2;
        pp.at<float>(1, 0) = undistortedp[i].y() * FOCAL_LENGTH + ROW / 2;
        pp.at<float>(2, 0) = 1.0;
        //cout << trackerData[0].K << endl;
        //printf("%lf %lf\n", p.at<float>(1, 0), p.at<float>(0, 0));
        //printf("%lf %lf\n", pp.at<float>(1, 0), pp.at<float>(0, 0));
        if (pp.at<float>(1, 0) + 300 >= 0 && pp.at<float>(1, 0) + 300 < ROW + 600 && pp.at<float>(0, 0) + 300 >= 0 && pp.at<float>(0, 0) + 300 < COL + 600)
        {
            undistortedImg.at<uchar>(pp.at<float>(1, 0) + 300, pp.at<float>(0, 0) + 300) = cur_img.at<uchar>(distortedp[i].y(), distortedp[i].x());
        }
        else
        {
            //ROS_ERROR("(%f %f) -> (%f %f)", distortedp[i].y, distortedp[i].x, pp.at<float>(1, 0), pp.at<float>(0, 0));
        }
    }
    cv::imshow(name, undistortedImg);
    cv::waitKey(0);
}
/**
 * Undistort the corner image coordinates and compute each corner's velocity.
 */
void FeatureTracker::undistortedPoints()
{
    cur_un_pts.clear();
    cur_un_pts_map.clear();
    //cv::undistortPoints(cur_pts, un_pts, K, cv::Mat());
    for (unsigned int i = 0; i < cur_pts.size(); i++)
    {
        Eigen::Vector2d a(cur_pts[i].x, cur_pts[i].y);
        Eigen::Vector3d b;
        m_camera->liftProjective(a, b);
        cur_un_pts.push_back(cv::Point2f(b.x() / b.z(), b.y() / b.z()));
        cur_un_pts_map.insert(make_pair(ids[i], cv::Point2f(b.x() / b.z(), b.y() / b.z())));
        //printf("cur pts id %d %f %f", ids[i], cur_un_pts[i].x, cur_un_pts[i].y);
    }
    // calculate point velocities
    if (!prev_un_pts_map.empty())
    {
        double dt = cur_time - prev_time;
        pts_velocity.clear();
        for (unsigned int i = 0; i < cur_un_pts.size(); i++)
        {
            if (ids[i] != -1)
            {
                std::map<int, cv::Point2f>::iterator it;
                it = prev_un_pts_map.find(ids[i]);
                if (it != prev_un_pts_map.end())
                {
                    // the displacement along each axis divided by the time gives the velocity along that axis
                    double v_x = (cur_un_pts[i].x - it->second.x) / dt;
                    double v_y = (cur_un_pts[i].y - it->second.y) / dt;
                    pts_velocity.push_back(cv::Point2f(v_x, v_y));
                }
                else
                    pts_velocity.push_back(cv::Point2f(0, 0));
            }
            else
            {
                pts_velocity.push_back(cv::Point2f(0, 0));
            }
        }
    }
    else
    {
        for (unsigned int i = 0; i < cur_pts.size(); i++)
        {
            pts_velocity.push_back(cv::Point2f(0, 0));
        }
    }
    prev_un_pts_map = cur_un_pts_map;
}
The image-processing algorithms and API calls involved here are worth looking up to understand properly.
1) CLAHE image enhancement
The code uses cv::createCLAHE(3.0, cv::Size(8, 8)) to enhance the image contrast, which helps the subsequent feature detection.
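A minimal standalone sketch of the same API (the input file name is hypothetical):

#include <opencv2/opencv.hpp>

int main()
{
    // load a grayscale image (path is hypothetical)
    cv::Mat gray = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
    // clip limit 3.0 and an 8x8 tile grid, the same settings readImage uses
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
    cv::Mat enhanced;
    clahe->apply(gray, enhanced);   // gray is the input, enhanced the output
    cv::imshow("CLAHE", enhanced);
    cv::waitKey(0);
    return 0;
}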
2) LK optical-flow tracking
cv::calcOpticalFlowPyrLK wraps the optical-flow tracking algorithm, but it is still worth understanding how optical flow actually works; see the introduction in Chapter 8 of 《视觉SLAM十四讲》 (Fourteen Lectures on Visual SLAM) or other references online.
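A minimal sketch of pyramidal LK tracking between two grayscale frames, mirroring the call in readImage (the two image files are hypothetical):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat prev = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
    cv::Mat next = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);
    // pick corners to track in the first frame (same limits as the config above)
    std::vector<cv::Point2f> prev_pts, next_pts;
    cv::goodFeaturesToTrack(prev, prev_pts, 150, 0.01, 30);
    // track them into the second frame; status[i] == 1 iff point i was found
    std::vector<uchar> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(prev, next, prev_pts, next_pts, status, err,
                             cv::Size(21, 21), 3);   // 21x21 search window, 3 pyramid levels
    // draw the flow of each successfully tracked point
    for (size_t i = 0; i < status.size(); i++)
        if (status[i])
            cv::line(next, prev_pts[i], next_pts[i], cv::Scalar(255), 1);
    cv::imshow("LK flow", next);
    cv::waitKey(0);
    return 0;
}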
3) cv::findFundamentalMat in OpenCV
cv::findFundamentalMat estimates the fundamental matrix from corresponding 2D feature points in two images; rejectWithF uses its RANSAC inlier mask to discard bad tracks.
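A minimal sketch of that outlier-rejection pattern (pts1/pts2 stand in for the matched corner lists; the threshold and confidence follow the config above):

#include <opencv2/opencv.hpp>
#include <vector>

// keep only the inlier correspondences between two matched 2D point sets
void rejectOutliers(std::vector<cv::Point2f> &pts1, std::vector<cv::Point2f> &pts2)
{
    if (pts1.size() < 8)   // the underlying 8-point algorithm needs at least 8 pairs
        return;
    std::vector<uchar> status;
    // RANSAC with a 1-pixel threshold and 0.99 confidence, as in rejectWithF
    cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC, 1.0, 0.99, status);
    // compact both vectors in place, the same idea as reduceVector above
    size_t j = 0;
    for (size_t i = 0; i < pts1.size(); i++)
        if (status[i])   // 1 marks an inlier
        {
            pts1[j] = pts1[i];
            pts2[j] = pts2[i];
            j++;
        }
    pts1.resize(j);
    pts2.resize(j);
}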