1.准备工作先对相机进行内参标定
%YAML:1.0
---
model_type: KANNALA_BRANDT
camera_name: camera
scaling_ratio: 1
image_width: 640
image_height: 480
projection_parameters:
k2: -0.02059949
k3: -0.00272535
k4: -0.00411149
k5: 0.00134326
mu: 338.79350848
mv: 338.75216467
u0: 334.20126771
v0: 234.73275102
2.添加遮罩
3.下载相机模型文件: https://github.com/hengli/camodocal (使用其中的 camera_models 模块)
4.以下为函数实现
#include <unistd.h>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <iostream>
#include <list>
#include <map>
#include <string>
#include <vector>
#include <eigen3/Eigen/Dense>
#include <opencv2/opencv.hpp>
using namespace std;
#include "./include/tic_toc.h"
#include "./camera_models/include/camodocal/camera_models/CameraFactory.h"
#include "./camera_models/include/camodocal/camera_models/CataCamera.h"
#include "./camera_models/include/camodocal/camera_models/PinholeCamera.h"
typedef Eigen::Matrix<double, 7, 1> Feature_t;                 // one observation: <x y z u v vx vy>
typedef map<int, vector<pair<int, Feature_t>>> FeatureFrame_t; // feature id -> list of <camera id, observation>
typedef pair<double, FeatureFrame_t> FeatureFrameStamped;      // <timestamp, <feature id, <camera id, observation>>>
// Ground-mask quadrilateral dimensions in the camera frame (meters).
#define MASK_WIDTH 4.0
#define MASK_RANGE 3.0
using namespace std;
using namespace cv;
using namespace camodocal;
using namespace Eigen;
// Virtual focal length used when re-projecting undistorted rays for the F-matrix check.
const double FOCAL_LENGTH = 460;
vector<camodocal::CameraPtr> m_camera;                 // camera model(s); [0] is the mono camera
vector<cv::Point2f> prev_pts, cur_pts, cur_right_pts;  // tracked pixel positions per frame
vector<cv::Point2f> unstable_pts;                      // points whose tracking failed while still inside the image
vector<int> track_cnt;                                 // per-feature observation count (parallel to cur_pts/ids)
cv::Mat prev_img, cur_img;
bool hasPrediction;
vector<cv::Point2f> predict_pts;
vector<cv::Point2f> predict_pts_debug;
bool USE_REJECT_F;                                     // enable fundamental-matrix RANSAC outlier rejection
cv::Mat mask, ground_mask;
int FLOW_BACK;                                         // enable reverse optical-flow verification
int CONSIDER_UNSTABLE_PTS;
int row, col;                                          // current image size in pixels
std::vector<std::string> CAM_NAMES;
vector<cv::Point2f> reject_pts;                        // points discarded by the F-matrix check
double F_THRESHOLD;                                    // RANSAC reprojection threshold (pixels)
// Points prefixed with "un" are undistorted coordinates on the normalized image plane.
vector<cv::Point2f> prev_un_pts, cur_un_pts, cur_un_right_pts;
int FISHEYE_MASK;
bool GROUND_MASK;
cv::Mat fisheye_mask;
std::string FISHEYE_MASK_PATH;
std::string PROJECT_DIR="/media/jankin/5622A53322A518CF/linux/0805/FeatureTracker";
//PROJECT_DIR = ros::package::getPath("vins") + "/";
int MIN_DIST;                                          // minimum pixel distance enforced between features
bool walk_forward = false;
int MAX_CNT;                                           // maximum number of tracked features
std::vector<cv::KeyPoint> key_points;
int n_id;                                              // next feature id to assign
int fast_it = 0;                                       // FAST candidate counter used in trackFast()
double cur_time;
double prev_time;
bool stereo_cam=false;                                 // mono setup
cv::Mat imTrack, featureMatchImg;
map<int, cv::Point2f> cur_un_pts_map, prev_un_pts_map;
map<int, cv::Point2f> prevLeftPtsMap;
vector<cv::Point2f> pts_velocity, right_pts_velocity;
vector<int> ids, ids_right;
map<int, cv::Point2f> cur_un_right_pts_map, prev_un_right_pts_map;
int FAST_THRE;                                         // FAST detector threshold
// Scoped timers profiling the individual front-end stages.
TicToc frontend_process_time("frontend_process_time", 2);
TicToc optical_flow_time("optical_flow_time",2);
TicToc sorted_time("sorted_time",2);
TicToc reverse_flow_time("reverse_flow_time",2);
TicToc feature_tracking_time("feature_tracking_time", 2);
TicToc key_points_selection_time("key_points_selection_time",2);
TicToc reject_with_F_time("reject_with_F_time",2);
TicToc fast_corner_detection_time("fast_corner_detection_time",2);
TicToc feature_extracting_time("feature_extracting_time", 2);
// Read a value of type T from the YAML settings, falling back to a default
// when the key is absent.
// fsSettings:   opened cv::FileStorage (READ mode).
// name:         YAML key to look up.
// defaultValue: value returned (and logged) when the key is missing.
// Returns the parsed value, or defaultValue if the key does not exist.
template <typename T>
T readYaml(cv::FileStorage &fsSettings, std::string name, T defaultValue)
{
    T ans;
    if (fsSettings[name].empty()) {
        ans = defaultValue;
        // Key missing: report clearly that the default is used
        // (original message ran the key name into the text: "...nameyet use...").
        std::cout << "Read " << name << " failed, use default value: " << defaultValue << std::endl;
    }
    else {
        fsSettings[name] >> ans;
        std::cout << "Read " << name << ": " << ans << std::endl;
    }
    return ans;
}
/*
先将特征点按照观测次数多少进行排列
然后由高到低重新保存,每一个特征点保存后,在周围一定范围内设置mask,mask内部不再产生新的特征点
fillPoly(img,ppt,npt,1,Scalar(255,255,255),lineType);
函数参数:
1、多边形将被画到img上
2、多边形的顶点集为ppt
3、绘制的多边形顶点数目为npt
4、要绘制的多边形数量为1
5、多边形的颜色定义为Scalar(255,255,255),即RGB的值为白色
// 把追踪到的点进行标记
// 设置遮挡部分(鱼眼相机)
// 对检测到的特征点按追踪到的次数排序
// 在mask图像中将追踪到点的地方设置为0,否则为255,目的是为了下面做特征点检测的时候可以选择没有特征点的区域进行检测。
// 在同一区域内,追踪到次数最多的点会被保留,其他的点会被删除
*/
void set_ground_mask(){
Eigen::Vector3d pt1, pt2, pt3, pt4;
pt1 << -MASK_WIDTH/2, 1.5, 0;
pt2 << MASK_WIDTH/2, 1.5, 0;
pt3 << MASK_WIDTH/2, 1, MASK_RANGE;
pt4 << -MASK_WIDTH/2, 1, MASK_RANGE;
Eigen::Vector2d tmp_uv;
cv::Point root_points[1][4];
m_camera[0]->spaceToPlane(pt1, tmp_uv);//畸变模型的坐标通过相机内参投影到像素坐标系中,其中为相机在的焦距,为主点的坐标(像素坐标系原点相对于归一化平面原点的偏移)
root_points[0][0] = cv::Point2f(tmp_uv.x(), tmp_uv.y());
m_camera[0]->spaceToPlane(pt2, tmp_uv);
root_points[0][1] = cv::Point2f(tmp_uv.x(), tmp_uv.y());
m_camera[0]->spaceToPlane(pt3, tmp_uv);
root_points[0][2] = cv::Point2f(tmp_uv.x(), tmp_uv.y());
m_camera[0]->spaceToPlane(pt4, tmp_uv);
root_points[0][3] = cv::Point2f(tmp_uv.x(), tmp_uv.y());
ground_mask = cv::Mat(row, col, CV_8UC1, cv::Scalar(255));
const cv::Point* ppt[1] = { root_points[0] };
int npt[] = { 4 };
cv::fillPoly(ground_mask, ppt, npt, 1, cv::Scalar(0, 0, 0));
}
// 把追踪到的点进行标记
// 设置遮挡部分(鱼眼相机)
// 对检测到的特征点按追踪到的次数排序
// 在mask图像中将追踪到点的地方设置为0,否则为255,目的是为了下面做特征点检测的时候可以选择没有特征点的区域进行检测。
// 在同一区域内,追踪到次数最多的点会被保留,其他的点会被删除
void setMask()
{
mask = cv::Mat(row, col, CV_8UC1, cv::Scalar(255));
// prefer to keep features that are tracked for long time
vector<pair<int, pair<cv::Point2f, int>>> cnt_pts_id;
for (unsigned int i = 0; i < cur_pts.size(); i++)
cnt_pts_id.push_back(make_pair(track_cnt[i], make_pair(cur_pts[i], ids[i])));
sort(cnt_pts_id.begin(), cnt_pts_id.end(), [](const pair<int, pair<cv::Point2f, int>> &a, const pair<int, pair<cv::Point2f, int>> &b)
{
return a.first > b.first;
});
if(FISHEYE_MASK){
// fisheye_mask_time.tic();
cv::bitwise_and(fisheye_mask, mask, mask);
// fisheye_mask_time.toc_with_others();
// fisheye_mask_time.print_curr();
}
cur_pts.clear();
ids.clear();
track_cnt.clear();
for (auto &it : cnt_pts_id)
{
//说明追踪次数多的会先占用mask,
if (mask.at<uchar>(it.second.first) == 255)
{
cur_pts.push_back(it.second.first);
ids.push_back(it.second.second);
track_cnt.push_back(it.first);
cv::circle(mask, it.second.first, MIN_DIST, 0, -1);
}
}
// remove ground features after mask
if(GROUND_MASK){
cv::bitwise_and(ground_mask, mask, mask);
}
if (CONSIDER_UNSTABLE_PTS){
for (auto &bad_pt: unstable_pts){
cv::circle(mask, bad_pt, MIN_DIST, 0, -1);
}
}
cv::imshow("mask", mask);
cv::waitKey(1);
}
/*
Reject outliers by the image border: a point counts as inside only if it is
at least BORDER_SIZE pixels away from every image edge.
*/
bool inBorder(const cv::Point2f &pt)
{
    const int BORDER_SIZE = 1;
    // cvRound(): round to the nearest integer.
    const int x = cvRound(pt.x);
    const int y = cvRound(pt.y);
    const bool x_ok = (BORDER_SIZE <= x) && (x < col - BORDER_SIZE);
    const bool y_ok = (BORDER_SIZE <= y) && (y < row - BORDER_SIZE);
    return x_ok && y_ok;
}
// Euclidean distance between two pixel coordinates.
double distance(cv::Point2f pt1, cv::Point2f pt2)
{
    // std::hypot computes sqrt(dx*dx + dy*dy) without intermediate
    // overflow/underflow — more robust than the hand-rolled form.
    return std::hypot(pt1.x - pt2.x, pt1.y - pt2.y);
}
/*
Compact a point vector according to a per-element status vector using the
two-pointer scheme: entries with status 1 are kept in their original order,
entries with status 0 are appended to removed_pts, and v is shrunk in place.
Example:
  points: p0,p1,p2,p3,p4   status: 0,1,1,0,1   ->   kept: p1,p2,p4
*/
void reduceVector(vector<cv::Point2f> &v, vector<uchar> status, vector<cv::Point2f> &removed_pts)
{
    size_t keep = 0;
    for (size_t i = 0; i < v.size(); i++) {
        if (status[i])
            v[keep++] = v[i];
        else
            removed_pts.push_back(v[i]);
    }
    v.resize(keep);
}
// Keep only the points whose status flag is 1; shrink v in place
// (same two-pointer scheme, without collecting the removed points).
void reduceVector(vector<cv::Point2f> &v, vector<uchar> status)
{
    size_t keep = 0;
    for (size_t i = 0; i < v.size(); i++)
        if (status[i])
            v[keep++] = v[i];
    v.resize(keep);
}
// int overload: keep only the entries whose status flag is 1; shrink v in place.
void reduceVector(vector<int> &v, vector<uchar> status)
{
    size_t keep = 0;
    for (size_t i = 0; i < v.size(); i++)
        if (status[i])
            v[keep++] = v[i];
    v.resize(keep);
}
/*
Remove mismatched feature pairs with RANSAC:
m_camera holds the camera intrinsics. liftProjective() first undistorts the
matched points of the previous and the current frame into normalized
coordinates, which are re-projected through a virtual pinhole camera
(focal length FOCAL_LENGTH, principal point at the image center).
cv::findFundamentalMat with FM_RANSAC — run in both directions — then marks
outliers, and reduceVector() (two-pointer scheme) removes them from all
parallel arrays; the rejected points are collected in reject_pts.
NOTE(review): assumes prev_pts, cur_un_pts, ids and track_cnt all have the
same length as cur_pts — maintained by trackFast(); verify before reusing
this function elsewhere.
*/
void rejectWithF()
{
    if (cur_pts.size() >= 8)  // the fundamental matrix needs at least 8 correspondences
    {
        reject_with_F_time.tic();
        TicToc t_f;
        vector<cv::Point2f> un_cur_pts(cur_pts.size()), un_prev_pts(prev_pts.size());
        for (unsigned int i = 0; i < cur_pts.size(); i++)
        {
            Eigen::Vector3d tmp_p;
            m_camera[0]->liftProjective(Eigen::Vector2d(cur_pts[i].x, cur_pts[i].y), tmp_p);  // current-frame pixel -> undistorted viewing ray
            // Re-project onto the virtual pinhole image (principal point at image center).
            tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + col / 2.0;
            tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + row / 2.0;
            un_cur_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
            m_camera[0]->liftProjective(Eigen::Vector2d(prev_pts[i].x, prev_pts[i].y), tmp_p);  // previous-frame pixel -> undistorted viewing ray
            tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + col / 2.0;
            tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + row / 2.0;
            un_prev_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
        }
        vector<uchar> status;
        cv::findFundamentalMat(un_cur_pts, un_prev_pts, cv::FM_RANSAC, F_THRESHOLD, 0.99, status);  // current -> previous direction
        vector<uchar> reverse_status;
        cv::findFundamentalMat(un_prev_pts, un_cur_pts, cv::FM_RANSAC, F_THRESHOLD, 0.99, reverse_status);  // previous -> current direction
        for(size_t i = 0; i < status.size(); i++)
        {
            if(status[i] && reverse_status[i])  // keep only pairs that pass in both directions
                status[i] = 1;
            else
                status[i] = 0;
        }
        int size_a = cur_pts.size();  // number of points before rejection
        reject_pts.clear();           // receives the rejected (outlier) points
        reduceVector(prev_pts, status);
        reduceVector(cur_pts, status, reject_pts);  // outliers are collected in reject_pts
        reduceVector(cur_un_pts, status);
        reduceVector(ids, status);
        reduceVector(track_cnt, status);
        int size_b = cur_pts.size();  // number of points after rejection
        // cerr << "outliers: " << size_a - size_b << endl;
        // cerr << "reject_pts: " << reject_pts.size() << endl;
        /*cv::Mat test_img = cur_img.clone();
        cv::cvtColor(test_img, test_img, CV_GRAY2RGB);
        for(int i = 0;i < reject_pts.size(); i++)
        cv::circle(test_img, reject_pts[i], 2, cv::Scalar(0, 255, 0), 2);
        cv::imshow("reject", test_img);
        cv::waitKey(1);
        reject_with_F_time.toc_with_others();*/
    }
}
/*
Seed the optical-flow search with externally predicted feature positions.
predictPts maps feature id -> predicted 3D point in the camera frame; the
prediction is projected into pixel coordinates via the camera model. Ids
without a prediction fall back to their previous pixel position.
*/
void setPrediction(map<int, Eigen::Vector3d> &predictPts)
{
    hasPrediction = true;
    predict_pts.clear();
    predict_pts_debug.clear();
    for (size_t i = 0; i < ids.size(); i++)
    {
        auto itPredict = predictPts.find(ids[i]);
        if (itPredict == predictPts.end())
        {
            // No prediction for this feature: reuse its last observed position.
            predict_pts.push_back(prev_pts[i]);
            continue;
        }
        Eigen::Vector2d uv;
        // itPredict->second is the predicted point in the camera frame;
        // project it into the image.
        m_camera[0]->spaceToPlane(itPredict->second, uv);
        predict_pts.push_back(cv::Point2f(uv.x(), uv.y()));
        predict_pts_debug.push_back(cv::Point2f(uv.x(), uv.y()));
    }
}
/* trackFast — front-end feature tracking for one frame:
   1. track prev_pts into cur_img with pyramidal LK optical flow, optionally
      verified by a reverse flow pass (FLOW_BACK);
   2. drop failed/out-of-border tracks and optionally reject mismatches with
      the fundamental-matrix RANSAC check (USE_REJECT_F);
   3. build the detection mask (setMask) and top the feature set up to MAX_CNT
      with new FAST corners, enforcing MIN_DIST spacing via a coarse grid
      (same greedy scheme as cv::goodFeaturesToTrack). */
void trackFast()
{
    if (prev_pts.size() > 0)  // only track if the previous frame had features
    {
        vector<uchar> status;
        TicToc t_o;
        vector<float> err;
        optical_flow_time.tic();
        // Forward LK flow: locate prev_pts in cur_img; per-point success in status.
        cv::calcOpticalFlowPyrLK(prev_img, cur_img, prev_pts, cur_pts, status, err, cv::Size(21, 21), 3);
        optical_flow_time.toc_with_others();
        std::cout<<"光流的关键帧个数"<<cur_pts.size()<<std::endl;
        // reverse check
        if (FLOW_BACK)  // verify matches by tracking back from cur_img to prev_img
        {
            vector<uchar> reverse_status;
            vector<cv::Point2f> reverse_pts = prev_pts;  // initial guess for the reverse flow
            reverse_flow_time.tic();
            cv::calcOpticalFlowPyrLK(cur_img, prev_img, cur_pts, reverse_pts, reverse_status, err, cv::Size(21, 21), 1,
                                     cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01), cv::OPTFLOW_USE_INITIAL_FLOW);
            reverse_flow_time.toc_with_others();
            for (size_t i = 0; i < status.size(); i++)
            {
                // Keep a match only if both directions succeed and the round
                // trip lands within 0.5 px of the original position.
                if (status[i] && reverse_status[i] && distance(prev_pts[i], reverse_pts[i]) <= 0.5)
                    status[i] = 1;
                else
                    status[i] = 0;
            }
        }
        for (int i = 0; i < int(cur_pts.size()); i++) {
            if (!status[i] && inBorder(cur_pts[i])) {
                // Tracking failed although the point is inside the image:
                // remember it as unstable so setMask() blocks its neighbourhood.
                if (CONSIDER_UNSTABLE_PTS) {
                    unstable_pts.push_back(cur_pts[i]);
                    status[i] = 0;
                }
            }
            else if (status[i] && !inBorder(cur_pts[i])) {
                // BUGFIX: a successfully tracked point that drifted outside the
                // valid image area must be dropped. The original code set
                // status[i] = 1 here, keeping out-of-border points and making
                // the inBorder() check a no-op.
                status[i] = 0;
            }
        }
        reduceVector(prev_pts, status);
        reduceVector(cur_pts, status);
        reduceVector(ids, status);
        reduceVector(track_cnt, status);
        std::cout<<"反向光流删除后的关键帧个数"<<cur_pts.size()<<std::endl;
    }
    feature_tracking_time.toc_with_others();
    feature_extracting_time.tic();
    for (auto &n : track_cnt)
        n++;  // every surviving feature has been observed one more time
    // Remove mismatched points with the fundamental-matrix RANSAC check.
    if (USE_REJECT_F)
        rejectWithF();
    TicToc t_t;
    TicToc t_m;
    setMask();
    // Top the feature set up to MAX_CNT with newly detected corners.
    int n_max_cnt = MAX_CNT - static_cast<int>(cur_pts.size());
    std::cout << "tracking features num: " << cur_pts.size()<<std::endl;
    if (n_max_cnt > 0) {
        fast_corner_detection_time.tic();
        cv::Ptr<cv::FastFeatureDetector> fast_detector = cv::FastFeatureDetector::create(FAST_THRE, true); // NMS: true
        fast_detector->detect(cur_img, key_points, mask);  // FAST corners restricted to unmasked regions
        fast_corner_detection_time.toc_with_others();
        sorted_time.tic();
        if (key_points.size() > (unsigned int)n_max_cnt)  // more candidates than slots
        {
            // Strongest corners first: KeyPoint::response is the FAST score.
            sort(key_points.begin(), key_points.end(), [](cv::KeyPoint &a, cv::KeyPoint &b){ return a.response > b.response; });
        }
        sorted_time.toc_with_others();
    }
    else {
        key_points.clear();
    }
    std::cout << "fast detect num: " << key_points.size()<<std::endl;
    // Greedy minimum-distance selection on a MIN_DIST-sized grid (as in
    // cv::goodFeaturesToTrack): a candidate is accepted only if no previously
    // accepted corner in the 3x3 neighbouring cells is closer than MIN_DIST.
    int w = cur_img.cols, h = cur_img.rows;
    const int cell_size = cvRound(MIN_DIST);
    const int grid_width = (w + cell_size - 1) / cell_size;
    const int grid_height = (h + cell_size - 1) / cell_size;
    std::vector<std::vector<cv::Point2f> > grid(grid_width * grid_height);
    int minDistance = MIN_DIST * MIN_DIST;  // compare squared distances, no sqrt needed
    key_points_selection_time.tic();
    for (auto &p : key_points)
    {
        cv::Point2f c = p.pt;
        bool good = true;
        int x_cell = c.x / cell_size;
        int y_cell = c.y / cell_size;
        // Clamped 3x3 cell neighbourhood around the candidate.
        int x1 = std::max(0, x_cell - 1);
        int y1 = std::max(0, y_cell - 1);
        int x2 = std::min(grid_width - 1, x_cell + 1);
        int y2 = std::min(grid_height - 1, y_cell + 1);
        // Flagged double loop replaces the original goto: stop scanning as
        // soon as one accepted corner is too close.
        for (int yy = y1; yy <= y2 && good; yy++)
            for (int xx = x1; xx <= x2 && good; xx++)
            {
                std::vector<cv::Point2f> &m = grid[yy * grid_width + xx];
                for (size_t j = 0; j < m.size(); j++)
                {
                    float dx = c.x - m[j].x;
                    float dy = c.y - m[j].y;
                    if (dx * dx + dy * dy < minDistance)
                    {
                        good = false;
                        break;
                    }
                }
            }
        if (good)
        {
            // Accepted: register in the grid and as a brand-new feature.
            grid[y_cell * grid_width + x_cell].push_back(cv::Point2f((float)c.x, (float)c.y));
            cur_pts.push_back(p.pt);
            ids.push_back(n_id++);
            track_cnt.push_back(1);
        }
        // NOTE(review): fast_it counts examined candidates, not accepted ones,
        // so the loop can stop with fewer than n_max_cnt new features —
        // preserved as-is; confirm whether the limit should count acceptances.
        fast_it++;
        if (fast_it == n_max_cnt) {
            break;
        }
    }
    std::cout<<"TrackFast add 后的关键帧个数"<<cur_pts.size()<<std::endl;
    key_points_selection_time.toc_with_others();
    fast_it = 0;
    feature_extracting_time.toc_with_others();
}
// Undistort pixel coordinates into points on the normalized image plane (z = 1).
// pts: distorted pixel positions; cam: camera model with intrinsics.
// Returns one normalized-plane point per input pixel, in the same order.
vector<cv::Point2f> undistortedPts(vector<cv::Point2f> &pts, camodocal::CameraPtr cam)
{
    vector<cv::Point2f> un_pts;
    un_pts.reserve(pts.size());
    for (const auto &p : pts)
    {
        Eigen::Vector3d ray;
        // liftProjective maps a (distorted) pixel to its 3D viewing ray.
        cam->liftProjective(Eigen::Vector2d(p.x, p.y), ray);
        un_pts.push_back(cv::Point2f(ray.x() / ray.z(), ray.y() / ray.z()));
    }
    return un_pts;
}
// Per-feature x/y velocity of the current frame relative to the previous one,
// computed on the supplied (normalized-plane) coordinates.
// ids/pts:     current feature ids and positions (parallel arrays).
// cur_id_pts:  output map id -> position for the current frame (rebuilt here).
// prev_id_pts: map id -> position from the previous frame.
// Returns one velocity per entry of pts; (0,0) for features without history.
vector<cv::Point2f> ptsVelocity(vector<int> &ids, vector<cv::Point2f> &pts,
                                map<int, cv::Point2f> &cur_id_pts, map<int, cv::Point2f> &prev_id_pts)
{
    vector<cv::Point2f> pts_velocity;
    cur_id_pts.clear();
    for (unsigned int i = 0; i < ids.size(); i++)
    {
        cur_id_pts.insert(make_pair(ids[i], pts[i]));
    }
    // calculate points velocity
    if (!prev_id_pts.empty())
    {
        double dt = cur_time - prev_time;
        for (unsigned int i = 0; i < pts.size(); i++)
        {
            std::map<int, cv::Point2f>::iterator it = prev_id_pts.find(ids[i]);
            if (it != prev_id_pts.end())
            {
                // Displacement over elapsed time since the previous frame.
                double v_x = (pts[i].x - it->second.x) / dt;
                double v_y = (pts[i].y - it->second.y) / dt;
                pts_velocity.push_back(cv::Point2f(v_x, v_y));
            }
            else
                pts_velocity.push_back(cv::Point2f(0, 0));
        }
    }
    else
    {
        // BUGFIX: size the zero-velocity output by the pts parameter, not the
        // global cur_pts — the two differ when this function is called for any
        // point set other than the left camera's current points.
        for (unsigned int i = 0; i < pts.size(); i++)
        {
            pts_velocity.push_back(cv::Point2f(0, 0));
        }
    }
    return pts_velocity;
}
// Draw the tracked landmarks onto imTrack for visualization (BGR colors):
// - tracked points colored from blue-heavy to red by track length;
// - newly detected points (track_cnt == 1) in cyan;
// - F-check rejected points (reject_pts) in yellow;
// - green arrows from each current position back to its previous position.
// imRight/curRightPts are unused in the mono configuration but kept so the
// signature stays compatible with a stereo version.
void drawTrack(const cv::Mat &imLeft, const cv::Mat &imRight,
               vector<int> &curLeftIds,
               vector<cv::Point2f> &curLeftPts,
               vector<cv::Point2f> &curRightPts,
               map<int, cv::Point2f> &prevLeftPtsMap)
{
    imTrack = imLeft.clone();  // mono: copy the single input image
    cv::cvtColor(imTrack, imTrack, CV_GRAY2RGB);
    for (size_t j = 0; j < curLeftPts.size(); j++)
    {
        // len in [0,1]: saturates after 20 observations.
        double len = std::min(1.0, 1.0 * track_cnt[j] / 20);
        cv::circle(imTrack, curLeftPts[j], 2, cv::Scalar(255 * (1 - len), 0, 255 * len), 2);  // BGR order
        if (track_cnt[j] == 1) {
            // Brand-new feature: cyan (B=255, G=255, R=0).
            cv::circle(imTrack, curLeftPts[j], 2, cv::Scalar(255, 255, 0), 2);
        }
    }
    for (size_t j = 0; j < reject_pts.size(); j++)
    {
        // RANSAC-rejected match: yellow.
        cv::circle(imTrack, reject_pts[j], 2, cv::Scalar(0, 255, 255), 2);
    }
    map<int, cv::Point2f>::iterator mapIt;
    for (size_t i = 0; i < curLeftIds.size(); i++)
    {
        mapIt = prevLeftPtsMap.find(curLeftIds[i]);
        if (mapIt != prevLeftPtsMap.end())
        {
            // Motion arrow: current position -> previous position.
            cv::arrowedLine(imTrack, curLeftPts[i], mapIt->second, cv::Scalar(0, 255, 0), 1, 8, 0, 0.2);
        }
    }
}
// Draw matched features between the previous and the current frame side by
// side into featureMatchImg: both frames are concatenated horizontally, each
// matched feature is circled in both halves (colored by track length), and a
// green arrow connects the previous position to the current one (shifted by
// the left image's width).
void draw_matching_points(const cv::Mat &prevImg, const cv::Mat &curImg,
                          vector<int> &curIds, vector<cv::Point2f> &curPts,
                          map<int, cv::Point2f> &prev_id_pts) {
    int cols = prevImg.cols;
    if (curImg.empty() || prevImg.empty()) {
        // BUGFIX: the original only detected this case (commented-out
        // ROS_ERROR) and fell through to cv::hconcat, which fails on empty
        // input. Bail out instead.
        return;
    }
    if (curIds.empty() || curPts.empty()) {
        // No features this frame — the concatenated image is still produced,
        // just without annotations.
    }
    cv::hconcat(prevImg, curImg, featureMatchImg);  // previous | current
    cv::cvtColor(featureMatchImg, featureMatchImg, CV_GRAY2RGB);
    for (unsigned int i = 0; i < curPts.size(); i++)
    {
        std::map<int, cv::Point2f>::iterator it = prev_id_pts.find(curIds[i]);
        if (it != prev_id_pts.end())
        {
            cv::Point2f prevPt, curPt;
            prevPt.x = it->second.x;
            prevPt.y = it->second.y;
            curPt.x = curPts[i].x + cols;  // shift into the right half
            curPt.y = curPts[i].y;
            // Color by track length, saturating after 20 observations.
            double len = std::min(1.0, 1.0 * track_cnt[i] / 20);
            cv::circle(featureMatchImg, prevPt, 2, cv::Scalar(255 * (1 - len), 0, 255 * len), 2);
            cv::circle(featureMatchImg, curPt, 2, cv::Scalar(255 * (1 - len), 0, 255 * len), 2);
            cv::arrowedLine(featureMatchImg, prevPt, curPt, cv::Scalar(0, 255, 0), 2, 8, 0, 0.01);
        }
    }
}
// Run the front end on one mono frame: track existing features, extract new
// ones, undistort, compute per-feature velocities, update all "prev" state,
// and assemble the output map feature id -> <camera id, x y z u v vx vy>.
FeatureFrame_t trackImage(double _cur_time, const cv::Mat &_img)  // _img: (left) mono image
{
    TicToc t_r;
    cur_time = _cur_time;
    cur_img = _img;
    row = cur_img.rows;
    col = cur_img.cols;
    cur_pts.clear();
    unstable_pts.clear();
    frontend_process_time.tic();
    feature_tracking_time.tic();
    trackFast();
    cur_un_pts = undistortedPts(cur_pts, m_camera[0]);
    pts_velocity = ptsVelocity(ids, cur_un_pts, cur_un_pts_map, prev_un_pts_map);
    // Draw the (mono) tracking visualization.
    drawTrack(cur_img, cv::Mat(), ids, cur_pts, cur_right_pts, prevLeftPtsMap);
    prev_img = cur_img;
    prev_pts = cur_pts;
    prev_un_pts = cur_un_pts;
    prev_un_pts_map = cur_un_pts_map;
    prev_time = cur_time;
    hasPrediction = false;
    // NOTE(review): prev_img was just overwritten with cur_img above, so both
    // halves of the match image show the current frame (only prevLeftPtsMap
    // still holds the previous positions) — confirm whether this call should
    // happen before the state update.
    if(!prevLeftPtsMap.empty()){
        draw_matching_points(prev_img, cur_img, ids, cur_pts, prevLeftPtsMap);
    }
    else
    {
        std::cout<<"prevLeftPtsMap is empty"<<std::endl;
    }
    prevLeftPtsMap.clear();
    for(size_t i = 0; i < cur_pts.size(); i++)
        prevLeftPtsMap[ids[i]] = cur_pts[i];
    // ---------------- build featureFrame and insert every tracked feature
    FeatureFrame_t featureFrame;
    for (size_t i = 0; i < ids.size(); i++)
    {
        int feature_id = ids[i];
        double x, y ,z;
        x = cur_un_pts[i].x;  // normalized-plane coordinates (z = 1)
        y = cur_un_pts[i].y;
        z = 1;
        double p_u, p_v;
        p_u = cur_pts[i].x;   // raw pixel coordinates
        p_v = cur_pts[i].y;
        int camera_id = 0;    // mono: single camera
        double velocity_x, velocity_y;
        velocity_x = pts_velocity[i].x;
        velocity_y = pts_velocity[i].y;
        Feature_t xyz_uv_velocity;
        xyz_uv_velocity << x, y, z, p_u, p_v, velocity_x, velocity_y;
        featureFrame[feature_id].emplace_back(camera_id, xyz_uv_velocity);
    }
    frontend_process_time.toc_with_others();
    return featureFrame;
}
// Demo entry point: read tracker parameters and the camera calibration from a
// YAML configuration, then feed frames from a video file (0), an image folder
// (1) or a live USB camera (2) through trackImage() and display the result.
int main(int argc , char* argv[])
{
    int thread_value = 30;
    vector<String> files;   // image file paths collected by cv::glob (mode 1)
    vector<Mat> images;     // image buffer (currently unused)
    Mat after_fast_image,img,gray;
    std::vector<cv::KeyPoint> keypoints;
    // Load the configuration file and the camera calibration it references.
    std::string CONFIG_FILE="/media/jankin/5622A53322A518CF/linux/0805/FeatureTracker/config/bsd_0621";
    std::string config_all=CONFIG_FILE+"/bsd_0621.yaml";
    cv::FileStorage fsSettings(config_all, cv::FileStorage::READ);
    std::string cam0Calib;
    fsSettings["cam0_calib"] >> cam0Calib;
    std::string cam0Path = CONFIG_FILE + "/" + cam0Calib;
    CAM_NAMES.push_back(cam0Path);
    camodocal::CameraPtr camera = CameraFactory::instance()->generateCameraFromYamlFile(CAM_NAMES[0]);
    m_camera.push_back(camera);
    FAST_THRE=readYaml(fsSettings, "fast_thre", 30);
    cv::Ptr<cv::FastFeatureDetector> fast_detector = cv::FastFeatureDetector::create(FAST_THRE, true); //NMS: true
    row = fsSettings["image_height"];
    col = fsSettings["image_width"];
    FLOW_BACK = readYaml(fsSettings, "flow_back", 1);
    CONSIDER_UNSTABLE_PTS = readYaml(fsSettings, "consider_unstable_pts", 1);
    USE_REJECT_F = readYaml(fsSettings, "use_reject_f", false);
    double IMAGE_SCALING_RATIO = readYaml(fsSettings, "image_scaling_ratio", 1.0);
    F_THRESHOLD = readYaml(fsSettings, "F_threshold", 1.0);
    FISHEYE_MASK = readYaml(fsSettings, "fisheye_mask", 0);
    FISHEYE_MASK_PATH = PROJECT_DIR + "/config/mask/fisheye_mask_752_480.jpg";
    GROUND_MASK = readYaml(fsSettings, "ground_mask", 0.0);
    if(GROUND_MASK)
        set_ground_mask();
    if(FISHEYE_MASK){
        fisheye_mask = cv::imread(FISHEYE_MASK_PATH);
        cv::cvtColor(fisheye_mask, fisheye_mask, CV_BGR2GRAY);
        cv::resize(fisheye_mask, fisheye_mask, cv::Size(0, 0), IMAGE_SCALING_RATIO, IMAGE_SCALING_RATIO, cv::INTER_AREA);
    }
    MIN_DIST = readYaml(fsSettings, "min_dist", 30);
    MIN_DIST = static_cast<int>(MIN_DIST * IMAGE_SCALING_RATIO);
    MAX_CNT = readYaml(fsSettings, "max_cnt", 150);
    row = static_cast<int>(row * IMAGE_SCALING_RATIO);
    col = static_cast<int>(col * IMAGE_SCALING_RATIO);
    mask = cv::Mat(row, col, CV_8UC1, cv::Scalar(255));
    std::cout<<"row:"<<row<<" "<<"col"<<col<<std::endl;
    bool mask_bit=true;  // whether to apply the ground-removal mask (currently unused)
    // NOTE(review): set_ground_mask() is invoked and AND-ed into the mask here
    // unconditionally, regardless of GROUND_MASK / mask_bit — confirm intent.
    set_ground_mask();
    cv::bitwise_and(ground_mask, mask, mask);
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    double input_t=0;
    //int //video=0
    int video_image_camera=0;
    // if(argc>2)
    // video_image_camera=atoi(argv[1]);
    //else{
    std::cout<<"please inout:0-video;1-image;2-camera"<<std::endl;
    cin>>video_image_camera;
    //}
    if(video_image_camera==0)
    {
        // Mode 0: read frames from a recorded video file.
        VideoCapture capture;
        capture.open("/media/jankin/5622A53322A518CF/linux/0805/video_record/a.avi");
        if (!capture.isOpened()) cout << "video not open.." << endl;
        capture.read(img); // discard the first frame
        while (capture.read(img)) {
            cvtColor(img, img, COLOR_BGR2GRAY);
            //imshow("src", gray);
            if (img.empty())
            {
                std::cout<<"No Data ************"<<std::endl;
                continue;
            }
            chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
            chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>( t2-t1 );
            input_t=time_used.count();
            // NOTE(review): 'gray' is never assigned in this branch (frames are
            // converted in-place in 'img'), so gray.rows/gray.cols print 0 — verify.
            std::cout<<"time delt ="<<input_t<<"rol*col"<<gray.rows<<"*"<<gray.cols<<std::endl;
            trackImage(input_t,img);
            //prev_pts.clear();
            cv::imshow("imTrack", imTrack);
            //cv::imshow("Cur_img",img);
            usleep(1000);
            waitKey(200);
        }
    }
    if(video_image_camera==1)
    {
        // Mode 1: iterate over all matching images in a folder.
        glob("/media/jankin/5622A53322A518CF/linux/picture/15_new/*.jpg", files, true);
        size_t num = files.size();  // number of matching files found
        for (int i = 0; i < num; i++)
        {
            chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
            chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>( t2-t1 );
            img = imread(files[i], IMREAD_GRAYSCALE);  // load directly as grayscale
            if (img.empty())
            {
                cerr << files[i] << " can't be loaded!" << endl;
                continue;
            }
            input_t=time_used.count();
            std::cout<<"time delt ="<<input_t<<"rol*col"<<img.rows<<"*"<<img.cols<<std::endl;
            trackImage(input_t, img);
            cv::imshow("imTrack", imTrack);
            waitKey(100);  // 100 ms
        }
    }
    if(video_image_camera==2)
    {
        // Mode 2: capture live frames from a USB camera.
        cv::VideoCapture cap;
        int usb_number=0;
        if(cap.open(usb_number) == 0)
        {
            perror("Open dev error!");
        }
        while(1)
        {
            cap.read(img);
            cvtColor(img, gray, COLOR_BGR2GRAY);
            chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
            chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>( t2-t1 );
            input_t=time_used.count();
            std::cout<<"time delt ="<<input_t<<"rol*col"<<img.rows<<"*"<<img.cols<<std::endl;
            trackImage(input_t, gray);
            cv::imshow("imTrack", imTrack);
            usleep(1000);
            waitKey(100);  // 100 ms
        }
    }
    fsSettings.release();
    return 0;
}