ORB-SLAM2
代码
bool Tracking::TrackReferenceKeyFrame()
{
// Compute Bag of Words vector
mCurrentFrame.ComputeBoW();//如果词袋没有计算,需要计算计算词袋
ComputeBoW()
void KeyFrame::ComputeBoW()
{
if(mBowVec.empty() || mFeatVec.empty())//如果词袋是空的
{
vector<cv::Mat> vCurrentDesc = Converter::toDescriptorVector(mDescriptors);//计算输入描述子,转化词袋需要的描述子的格式
// Feature vector associate features with nodes in the 4th level (from leaves up)
// We assume the vocabulary tree has 6 levels, change the 4 otherwise
mpORBvocabulary->transform(vCurrentDesc,mBowVec,mFeatVec,4);//词袋模型库进行计算词袋,输入当前的描述子,输出磁带向量以及记录node
}
}
如何实现 transform
// Convert all feature descriptors of one image into a BowVector (word id ->
// accumulated weight) and a FeatureVector (node id -> indices of the features
// that fell under that node).
template<class TDescriptor, class F> // class template: descriptor type + descriptor-manipulation functions
void TemplatedVocabulary<TDescriptor,F>::transform(
const std::vector<TDescriptor>& features,
BowVector &v, FeatureVector &fv, int levelsup) const
{
v.clear();// start from empty output containers
fv.clear();
if(empty()) // safe for subclasses
{
return;
}
// normalize
LNorm norm;
bool must = m_scoring_object->mustNormalize(norm);// scoring type decides whether v must be normalized at the end
typename vector<TDescriptor>::const_iterator fit;
if(m_weighting == TF || m_weighting == TF_IDF)// term-frequency weighting: accumulate a weight per word
{
unsigned int i_feature = 0;// index of the current feature within `features`
for(fit = features.begin(); fit < features.end(); ++fit, ++i_feature)
{
WordId id;// leaf (word) id the descriptor maps to
NodeId nid;// ancestor node id recorded in the FeatureVector
WordValue w; // weight of that word
// w is the idf value if TF_IDF, 1 if TF
// Descend the vocabulary tree for this descriptor: yields the word id, the
// word weight, and the ancestor node `levelsup` levels above the leaves.
transform(*fit, id, w, &nid, levelsup);// single-descriptor overload
if(w > 0) // not stopped
{ // only keep words with positive weight ("stopped" words have w == 0)
v.addWeight(id, w);// accumulate weight under this word id
fv.addFeature(nid, i_feature);// record this feature index under node nid
}
}
if(!v.empty() && !must)
{
// unnecessary when normalizing
const double nd = v.size();
for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++)
vit->second /= nd;
}
}
else // IDF || BINARY
{
unsigned int i_feature = 0;
for(fit = features.begin(); fit < features.end(); ++fit, ++i_feature)
{
WordId id;
NodeId nid;
WordValue w;
// w is idf if IDF, or 1 if BINARY
transform(*fit, id, w, &nid, levelsup);
if(w > 0) // not stopped
{
v.addIfNotExist(id, w);// IDF/BINARY: set the weight once, do not accumulate
fv.addFeature(nid, i_feature);
}
}
} // if m_weighting == ...
if(must) v.normalize(norm);
}
词袋树
transform()
// Map one descriptor to its vocabulary word by descending the tree.
// Outputs: word_id/weight of the best-matching leaf; if nid is non-null, *nid
// receives the id of the ancestor node `levelsup` levels above the leaf level.
template<class TDescriptor, class F>
void TemplatedVocabulary<TDescriptor,F>::transform(const TDescriptor &feature,
WordId &word_id, WordValue &weight, NodeId *nid, int levelsup) const
{ // in: feature descriptor and levels-up; out: word id, word weight, optional ancestor node id
// propagate the feature down the tree
vector<NodeId> nodes;
typename vector<NodeId>::const_iterator nit;
// level at which the node must be stored in nid, if given
const int nid_level = m_L - levelsup;// total tree depth minus the requested levels-up
if(nid_level <= 0 && nid != NULL) *nid = 0; // root
NodeId final_id = 0; // root
int current_level = 0;// the root sits at level 0
do
{
++current_level;// descend one level per iteration
nodes = m_nodes[final_id].children;// children of the current best node
final_id = nodes[0];// seed the search with the first child
double best_d = F::distance(feature, m_nodes[final_id].descriptor);// distance to its descriptor
for(nit = nodes.begin() + 1; nit != nodes.end(); ++nit)
{// linear scan of the remaining children for the closest descriptor
NodeId id = *nit;
double d = F::distance(feature, m_nodes[id].descriptor);// compare against current best
if(d < best_d)
{
best_d = d;
final_id = id;
}
}
if(nid != NULL && current_level == nid_level)// reached the level the caller asked to record
*nid = final_id;// remember the ancestor node id
} while( !m_nodes[final_id].isLeaf() );// keep descending until a leaf (word) is reached
// turn node id into word id
word_id = m_nodes[final_id].word_id;// report the chosen word and its weight
weight = m_nodes[final_id].weight;
}