Parsing the haar feature detection XML models trained by OpenCV; works for both the OpenCV 1.0 and the 2.0+ model formats

1. The function call flow when OpenCV 1.0 performs haar feature detection is shown in the figure below.
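For reference, the call chain in the 1.0 C API is roughly the following (paraphrased from the OpenCV 1.x haar detection sources; internal helpers may differ slightly between releases):

	cvLoad("haarcascade_*.xml")                        // parse the xml into a CvHaarClassifierCascade
	    -> cvHaarDetectObjects(...)                    // top-level multi-scale detection entry point
	        -> cvSetImagesForHaarClassifierCascade()   // attach the integral images to the cascade
	        -> cvRunHaarClassifierCascade()            // evaluate the stages at one window position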

2. The data structure of a haar feature detection XML model trained by OpenCV 1.0 is shown in the figure below.
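In memory that model maps onto the legacy C structs below, reproduced (trimmed and re-commented) from OpenCV's legacy object-detection headers; the parser in section 3 reads exactly these fields:

#define CV_HAAR_FEATURE_MAX 3

typedef struct CvHaarFeature
{
	int tilted;							// 1 if the feature uses tilted (45-degree) rectangles
	struct
	{
		CvRect r;						// rectangle inside the detection window
		float weight;					// weight applied to the rectangle sum
	} rect[CV_HAAR_FEATURE_MAX];
} CvHaarFeature;

typedef struct CvHaarClassifier			// one weak classifier (decision tree)
{
	int count;							// number of internal nodes
	CvHaarFeature* haar_feature;		// one feature per node
	float* threshold;					// one threshold per node
	int* left;							// left child links; leaf outputs live in alpha
	int* right;							// right child links
	float* alpha;						// leaf output values (count + 1 entries)
} CvHaarClassifier;

typedef struct CvHaarStageClassifier	// one stage (strong classifier)
{
	int count;							// number of weak classifiers in the stage
	float threshold;					// stage rejection threshold
	CvHaarClassifier* classifier;
	int next, child, parent;			// tree-cascade links (unused for a plain cascade)
} CvHaarStageClassifier;

typedef struct CvHaarClassifierCascade
{
	int flags;
	int count;							// number of stages
	CvSize orig_window_size;			// training window size
	CvSize real_window_size;			// current scaled window size (set at run time)
	double scale;
	CvHaarStageClassifier* stage_classifier;
	CvHidHaarClassifierCascade* hid_cascade;	// internal "hidden" representation
} CvHaarClassifierCascade;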

3. The program code that parses the XML file is as follows.

// Read_Haar_XML_ALL.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"
#include <iostream>  
#include <fstream>       
#include <vector> 

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp> 
#include <opencv2/ml/ml.hpp>
#include <opencv2/objdetect/objdetect.hpp>

#include "cascadeclassifier.h"

struct DTree						// mirrors the DTree struct inside OpenCV 2.4.9's CascadeClassifier::Data
{
	int nodeCount;					// number of internal nodes in the weak tree
};

struct DTreeNode					// mirrors CascadeClassifier::Data::DTreeNode
{
	int featureIdx;					// index into the features list
	float threshold; // for ordered features only
	int left;
	int right;
};

struct Stage						// mirrors CascadeClassifier::Data::Stage
{
	int first;						// index of the stage's first weak tree
	int ntrees;						// number of weak trees in the stage
	float threshold;				// stage rejection threshold
};

#define THRESHOLD_EPS 0.0000099999997	// 1e-5f written out; OpenCV subtracts this from each stage threshold
#define RECT_NUM 3						// max rectangles per haar feature (CV_HAAR_FEATURE_MAX)


using namespace std;
using namespace cv;


//	Note: change "protected" to "public" at line 426 of objdetect.hpp (OpenCV 2.4.9), otherwise the oldCascade member used below is inaccessible (see note A at the end)
int _tmain(int argc, _TCHAR* argv[])
{
	CascadeClassifier detector;
	std::string cascade_filename = "cascade_10w_2.0.xml";
	detector.load(cascade_filename);
	if (detector.empty())
	{
		cout << "load classifier failed!" << endl;
		return -1;
	}

	Size window_size;							// original detection window size
	int stage_num;								// number of stages (strong classifiers)
	int classifier_flag;						// flags value of an OpenCV 1.0-format model
	vector<int> stage_classifier_count;			// number of weak classifiers in each stage
	vector<float> stage_classifier_threshold;	// rejection threshold of each stage
	vector<int> classifier_node_count;			// number of nodes in each weak classifier
	vector<int> feature_idx;					// featureIdx values of an OpenCV 2.4.9-format model
	vector<float> classifier_node_threshold;	// node threshold of each weak classifier
	vector<int> left;							// left value of each weak classifier node
	vector<int> right;							// right value of each weak classifier node
	vector<float> classifier_node_alpha;		// alpha (leaf) values of each weak classifier node
	vector<int> tilted;							// tilted flag of each haar feature
	vector<float> weight;						// weights of the haar feature rectangles
	vector<CvRect> rects;						// rectangles of the haar features

	if (detector.isOldFormatCascade())			// old-format model, i.e. the OpenCV 1.0 format
	{
		window_size.width = detector.oldCascade->orig_window_size.width;
		window_size.height = detector.oldCascade->orig_window_size.height;
		stage_num = detector.oldCascade->count;
		classifier_flag = detector.oldCascade->flags;

		for (int i = 0; i < stage_num; i++)
		{
			CvHaarStageClassifier* stage_classifier = detector.oldCascade->stage_classifier + i;
			stage_classifier_count.push_back(stage_classifier->count);
			stage_classifier_threshold.push_back(stage_classifier->threshold);

			for(int j = 0; j < stage_classifier->count; j++ )
			{
				CvHaarClassifier* classifier = stage_classifier->classifier + j;
				classifier_node_count.push_back(classifier->count);
				// the lines below assume stump trees (count == 1), which is what the
				// OpenCV training tools produce by default: only the first node's
				// threshold/left/right and the first two alpha (leaf) values are kept
				classifier_node_threshold.push_back(*classifier->threshold);
				left.push_back(*classifier->left);
				right.push_back(*classifier->right);
				classifier_node_alpha.push_back(*classifier->alpha);
				classifier_node_alpha.push_back(*(classifier->alpha + 1));

				for(int l = 0; l < classifier->count; l++ )
				{
					tilted.push_back(classifier->haar_feature[l].tilted);
					for(int k = 0; k < CV_HAAR_FEATURE_MAX; k++ )
					{
						weight.push_back(classifier->haar_feature[l].rect[k].weight);
						rects.push_back(classifier->haar_feature[l].rect[k].r);
					}
				}
			}
		}
	}
	else  // new-format model, i.e. the OpenCV 2.4.9 format
	{
		FileStorage fs;
		fs.open(cascade_filename,FileStorage::READ);
		if (!fs.isOpened())
		{
			cout <<"Read xml failed!"<<endl;
			return -1;
		}

		FileNode root = fs.getFirstTopLevelNode();	// get the root node


		string stageTypeStr = (string)root[CC_STAGE_TYPE];

		string stageType;
		if( stageTypeStr == CC_BOOST )
		{
			stageType = "BOOST";
		}
		else
		{
			stageType = "Unkown";
		}

		string featureTypeStr = (string)root[CC_FEATURE_TYPE];
		int featureType = 0;
		if( featureTypeStr == CC_HAAR )
			featureType = FeatureEvaluator::HAAR;
		else if( featureTypeStr == CC_LBP )
			featureType = FeatureEvaluator::LBP;
		else if( featureTypeStr == CC_HOG )
			featureType = FeatureEvaluator::HOG;

		int origWin_width = (int)root[CC_WIDTH];	// width of the original window
		int origWin_height = (int)root[CC_HEIGHT];	// height of the original window

		// load feature params
		FileNode fn = root[CC_FEATURE_PARAMS];
		if( fn.empty() )
			return -1;

		int ncategories = fn[CC_MAX_CAT_COUNT];
		int featSize = fn[CC_FEATURE_SIZE];
		int subsetSize = (ncategories + 31)/32,
			nodeStep = 3 + ( ncategories>0 ? subsetSize : 1 );

		// load stages
		fn = root[CC_STAGES];
		if( fn.empty() )
			return -1;

		vector<Stage> stages;					// temporarily holds each stage's threshold and weak-classifier count
		vector<DTree> classifiers;				// temporarily holds the number of haar-like features (nodes) in each weak classifier
		vector<DTreeNode> nodes;				// temporarily holds each weak classifier's threshold and its featureIdx
		vector<float> leaves;					// temporarily holds each weak classifier's alpha (leaf) values
		vector<int> subsets;
		vector<CvHaarFeature>  haar_features;	// temporarily holds the haar feature of each weak classifier

		stages.reserve(fn.size());
		classifiers.clear();
		nodes.clear();

		FileNodeIterator it = fn.begin(), it_end = fn.end();

		for( int si = 0; it != it_end; si++, ++it )
		{
			FileNode fns = *it;
			Stage stage;
			stage.threshold = (float)fns[CC_STAGE_THRESHOLD] - THRESHOLD_EPS;
			fns = fns[CC_WEAK_CLASSIFIERS];
			if(fns.empty())
				return -1;
			stage.ntrees = (int)fns.size();
			stage.first = (int)classifiers.size();
			stages.push_back(stage);
			classifiers.reserve(stages[si].first + stages[si].ntrees);

			FileNodeIterator it1 = fns.begin(), it1_end = fns.end();
			for( ; it1 != it1_end; ++it1 ) // weak trees
			{
				FileNode fnw = *it1;
				FileNode internalNodes = fnw[CC_INTERNAL_NODES];
				FileNode leafValues = fnw[CC_LEAF_VALUES];
				if( internalNodes.empty() || leafValues.empty() )
					return -1;

				DTree tree;
				tree.nodeCount = (int)internalNodes.size()/nodeStep;
				classifiers.push_back(tree);

				nodes.reserve(nodes.size() + tree.nodeCount);
				leaves.reserve(leaves.size() + leafValues.size());
				if( subsetSize > 0 )
					subsets.reserve(subsets.size() + tree.nodeCount*subsetSize);

				FileNodeIterator internalNodesIter = internalNodes.begin(), internalNodesEnd = internalNodes.end();

				for( ; internalNodesIter != internalNodesEnd; ) // nodes
				{
					DTreeNode node;
					node.left = (int)*internalNodesIter; ++internalNodesIter;
					node.right = (int)*internalNodesIter; ++internalNodesIter;
					node.featureIdx = (int)*internalNodesIter; ++internalNodesIter;
					if( subsetSize > 0 )
					{
						for( int j = 0; j < subsetSize; j++, ++internalNodesIter )
							subsets.push_back((int)*internalNodesIter);
						node.threshold = 0.f;
					}
					else
					{
						node.threshold = (float)*internalNodesIter; ++internalNodesIter;
					}
					nodes.push_back(node);
				}

				internalNodesIter = leafValues.begin(), internalNodesEnd = leafValues.end();

				for( ; internalNodesIter != internalNodesEnd; ++internalNodesIter ) // leaves
					leaves.push_back((float)*internalNodesIter);
			}
		}

		fn = root[CC_FEATURES];
		if( fn.empty() )
			return -1;
		
		FileNodeIterator iter = fn.begin();
		for (; iter != fn.end(); ++iter)
		{
			FileNode fn_features = *iter;
			FileNode rnode = fn_features[CC_RECTS];
			FileNodeIterator it = rnode.begin(), it_end = rnode.end();

			CvHaarFeature haar_feature;
			int ri;
			for( ri = 0; ri < RECT_NUM; ri++ )
			{
				haar_feature.rect[ri].r = Rect();
				haar_feature.rect[ri].weight = 0.f;
			}

			for( ri = 0; it != it_end && ri < RECT_NUM; ++it, ri++)
			{
				FileNodeIterator it2 = (*it).begin();
				it2 >> haar_feature.rect[ri].r.x >> haar_feature.rect[ri].r.y >>
					haar_feature.rect[ri].r.width >> haar_feature.rect[ri].r.height >> haar_feature.rect[ri].weight;
			}

			haar_feature.tilted = (int)fn_features[CC_TILTED] != 0;

			haar_features.push_back(haar_feature);
		}
		fs.release();


		window_size.width = origWin_width;
		window_size.height = origWin_height;
		stage_num = (int)stages.size();
		int classifier_index = 0;	// index of the first weak tree of the current stage
		int haar_feature_idx = 0;	// running index into feature_idx (one entry per stump)
		for (int i = 0; i < stage_num; i++)
		{
			stage_classifier_count.push_back(stages.at(i).ntrees);
			stage_classifier_threshold.push_back(stages.at(i).threshold);

			for (int j = 0; j < stages.at(i).ntrees; j++)
			{
				classifier_node_count.push_back(classifiers.at(classifier_index + j).nodeCount);

				// the indexing below assumes stump trees (nodeCount == 1, two leaves
				// per tree), which is what opencv_traincascade produces by default
				feature_idx.push_back(nodes.at(classifier_index + j).featureIdx);
				classifier_node_threshold.push_back(nodes.at(classifier_index + j).threshold);
				left.push_back(0);		// emulate the old-format stump layout
				right.push_back(-1);
				classifier_node_alpha.push_back(leaves.at(2 * (classifier_index + j)));
				classifier_node_alpha.push_back(leaves.at(2 * (classifier_index + j) + 1));

				for(int l = 0; l < classifiers.at(classifier_index + j).nodeCount; l++ )
				{
					int pos = feature_idx.at(haar_feature_idx);
					tilted.push_back(haar_features.at(pos).tilted);
					for(int k = 0; k < CV_HAAR_FEATURE_MAX; k++ )
					{
						weight.push_back(haar_features.at(pos).rect[k].weight);
						rects.push_back(haar_features.at(pos).rect[k].r);
					}
					haar_feature_idx++;	// one feature per node
				}
			}

			classifier_index += stages.at(i).ntrees;
		}
	}

	return 0;
}
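After either branch finishes, the vectors above hold the complete model. For a quick sanity check, something like the following could be placed just before the final return statement (illustrative code of ours, not part of the original program):

	cout << "window size: " << window_size.width << " x " << window_size.height << endl;
	cout << "stages: " << stage_num << endl;
	for (int i = 0; i < stage_num; i++)
	{
		cout << "stage " << i << ": " << stage_classifier_count[i]
			<< " weak classifiers, threshold = " << stage_classifier_threshold[i] << endl;
	}
	cout << "haar features: " << tilted.size() << ", rectangles: " << rects.size() << endl;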

4. Notes

A. In the OpenCV 2.4.9 source, change "protected" to "public" at line 426 of objdetect.hpp; otherwise the data cannot be accessed when parsing a 1.0-format haar feature XML model.
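If patching the OpenCV header is undesirable, a thin subclass gives the same access, because protected members are visible to derived classes. A minimal sketch (the HaarCascadeReader name is our own):

class HaarCascadeReader : public cv::CascadeClassifier
{
public:
	// oldCascade is a Ptr<CvHaarClassifierCascade> in 2.4.9; cv::Ptr converts
	// implicitly to the raw pointer type
	CvHaarClassifierCascade* getOldCascade() { return oldCascade; }
};

// usage: load as usual, then read through the accessor
// HaarCascadeReader detector;
// detector.load("cascade_10w_2.0.xml");
// CvHaarClassifierCascade* cascade = detector.getOldCascade();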

B. The following .cpp files from the OpenCV 2.4.9 source (the traincascade application, under apps/traincascade in the source tree) need to be added to the project:

boost.cpp
cascadeclassifier.cpp
features.cpp
haarfeatures.cpp
HOGfeatures.cpp
imagestorage.cpp
lbpfeatures.cpp

C. The following .h files from the OpenCV 2.4.9 source also need to be added to the project:

boost.h
cascadeclassifier.h
haarfeatures.h
HOGfeatures.h
imagestorage.h
lbpfeatures.h
traincascade_features.h
