I want to compute Haar features. Doing it by hand feels quite cumbersome (mainly enumerating the features at all the different positions and scales), and it may also be too slow.
OpenCV wraps all of this up, and since my online-boosting framework differs from its framework, I cannot use it directly. I have stared at the source for a long time and found an internal haar feature, a fast haar feature, and something called a THaar feature. The source has few comments and is hard to follow. Which part actually computes the Haar features? My main concern is the choice of scales and the mapping of feature positions.
Re: Where in the OpenCV source are Haar features actually computed?
//For an image of size winsize, compute the rects of all Haar features and return them in features, i.e. obtain the coordinates of every feature
CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,int mode,int symmetric )
//The training sample set
//Creates the CvHaarTrainigData structure and allocates its space; the sample data is not filled in yet
CvHaarTrainigData* icvCreateHaarTrainingData( CvSize winsize, int maxnumsamples )
//Compute the feature values of the training sample set; calls icvGetTrainingDataCallback
//Given the integral images of the training samples, compute the feature values valcache and the sorted index cache idxcache
void icvPrecalculate( CvHaarTrainingData* data, CvIntHaarFeatures* haarFeatures,int numprecalculated )//numprecalculated: the number of features to precompute
//userdata: contains the training data and the Haar features
//Compute num features starting at first; the results are stored in mat
//If sampleIdx is NULL, evaluate all samples; otherwise only the samples specified by sampleIdx
void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*,int first, int num, void* userdata )
//Compute one Haar feature of one sample (whose integral images are sum and tilted) and return the value
CV_INLINE float cvEvalFastHaarFeature( CvFastHaarFeature* feature,sum_type* sum, sum_type* tilted )
//Convert a feature's rects from coordinate form to pixel-index form
//Each Haar feature consists of at most 3 rects, and storage for all 3 is always kept
void icvConvertToFastHaarFeature( CvTHaarFeature* haarFeature,CvFastHaarFeature* fastHaarFeature,int size, int step )
Where in the OpenCV source are Haar features actually computed?
I think what I am looking for is inside CvIntHaarFeatures* icvCreateIntHaarFeatures
I have looked at it before, but did not seem to find the computation over different scales. Does OpenCV compute different scales?
The big loop inside that function iterates over the Haar features at all the different scales and positions
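To make that loop concrete, here is a minimal sketch (not the literal OpenCV code) of how icvCreateIntHaarFeatures enumerates every position and scale inside the training window; descriptor names like haar_x2 follow the ones used in the source:
for( x = 0; x < winsize.width; x++ )
    for( y = 0; y < winsize.height; y++ )
        for( dx = 1; dx <= winsize.width; dx++ )
            for( dy = 1; dy <= winsize.height; dy++ )
            {
                /* haar_x2: two dx-by-dy rectangles side by side */
                if( x + 2*dx <= winsize.width && y + dy <= winsize.height )
                {
                    /* the whole region cvRect(x,y,2*dx,dy) gets weight -1,
                       the right half cvRect(x+dx,y,dx,dy) gets weight +2,
                       so the value is sum(right half) - sum(left half) */
                }
                /* haar_y2, haar_x3, haar_y3, haar_x2_y2 and the tilted
                   variants are emitted the same way, each guarded by its
                   own fits-in-window test */
            }
Every feature is therefore stored at a fixed position and scale relative to winsize; detection at other image scales is handled later by scaling the window, not by re-enumerating features here.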
Part 2
cvCreateTreeCascadeClassifier
//CvTreeCascadeNode contains CvStageHaarClassifier* stage; in other words, the last stage is taken as the deepest leaf;
CV_CALL( leaves = icvFindDeepestLeaves( tcc ) );
CV_CALL( icvPrintTreeCascade( tcc->root ) );
//Generate the Haar features from the mode, symmetry and winsize; each feature is formed by adding and subtracting at most three rectangles,
//this yields all permitted features; the permitted features are a subset of all possible ones, with too-small-area features filtered out
haar_features = icvCreateIntHaarFeatures( winsize, mode, symmetric );
printf( "Number of features used : %d\n", haar_features->count );
//Allocate the training buffers, including the upright and tilted integral images of the positive and negative samples
// CvMat normfactor;
// CvMat cls;
// CvMat weights;
training_data = icvCreateHaarTrainingData( winsize, npos + nneg );
sprintf( stage_name, "%s/", dirname );
suffix = stage_name + strlen( stage_name );
//Obtain the background information: read the file names listed in the background file and index that file,
consumed = 0;
//Read the positive samples and count how many pass all previous stages, from which the detection rate can be computed
//Call graph:
//icvGetHaarTrainingDataFromVec internally calls icvGetHaarTrainingData
//icvGetHaarTrainingData obtains data from the prescribed callback icvGetHaarTraininDataFromVecCallback,
//filters out a few positive samples with the previously trained classifiers, then computes the integral images,
//which are stored in the training_data structure
poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos,
(CvIntHaarClassifier*) tcc, vecfilename, &consumed );
proctime = -TIME( 0 );
//Read the negative samples and return the false-alarm rate
//Negatives are read from file and filtered with the previously trained stages to collect samples misclassified as positive; if fewer than nneg are found,
//the negatives are re-read repeatedly until nneg are collected, so if no negative is misclassified as positive after the current stage, an infinite loop occurs
//This can be detected via the reader's round counter.
//That situation really means training has converged: the false-alarm rate is 0, which satisfies if( leaf_fa_rate <= required_leaf_fa_rate ), so one could simply stop training
nneg = (int) (neg_ratio * poscount);
//icvGetHaarTrainingDataFromBG internally calls
//icvGetBackgroundImage to obtain the data and compute the integral images, stored in the memory allocated in training_data, in nneg slots starting at position poscount
//training_data allocates integral-image memory and weights for npos + nneg samples
negcount = icvGetHaarTrainingDataFromBG( training_data, poscount, nneg,
(CvIntHaarClassifier*) tcc, &false_alarm );
printf( "NEG: %d %g\n", negcount, false_alarm );
icvSetNumSamples( training_data, poscount + negcount );
posweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/poscount);
negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/negcount);
//Positives are labeled 1 and negatives 0; the labels are used later to tell the sample types apart when computing the detection and false-alarm rates
//The weights are also set here
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );
//Precompute the first numprecalculated features of every sample (positive and negative)
//Internally calls cvGetSortedIndices to sort all samples by each feature's value in ascending order; the feature values and indices are stored in
//training_data's *data->valcache and *data->idxcache respectively;
icvPrecalculate( training_data, haar_features, numprecalculated );
//Train a strong classifier that cascades several weak classifiers
single_cluster->stage =
(CvStageHaarClassifier*) icvCreateCARTStageClassifier
//Is cluster_idx large enough? CV_CALL( ptr = cvCreateMat( num, total, CV_32FC1 ) ); ptr is val; here total is the number of features and num is poscount
//while cluster_idx only holds npos + nneg entries
//CV_CALL( cluster_idx = cvCreateMat( 1, npos + nneg, CV_32SC1 ) );
//Could this be a problem with KMeans2? vals is poscount*featurecount,
//perhaps each positive sample's stage feature values are combined into one vector, one vector per sample; vals holds poscount such vectors as input, and the clustering result goes into cluster_idx
//Under that reading nothing overflows and cluster_idx is indeed large enough
CV_CALL( cvKMeans2( vals, k, cluster_idx, CV_TERM_CRITERIA() ) );
//Count the number of samples in each cluster
for( cluster = 0; cluster < k; cluster++ )
num[cluster] = 0;
//If minpos exceeds the size of any cluster, clustering is abandoned
for( cluster = 0; cluster < k; cluster++ )
//Here the positive-sample clusters that meet the requirement are obtained,
cur_num = 0;
cur_node = last_node = NULL;
//best_num holds the total number of weak-classifier features used by the current best strong classifier
for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ )
{
CvTreeCascadeNode* new_node;
int num_splits;
int last_pos;
int total_pos;
printf( "Cluster: %d\n", cluster );
last_pos = negcount;
//Combine the positives of each cluster with the negatives and train k nodes;
//unlike before, the positives are placed after the negatives
//Recompute the weights
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );
//Note that idx here differs from the one above: it is no longer NULL
new_node->stage = (CvStageHaarClassifier*)
static
CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
CvMat* sampleIdx,
CvIntHaarFeatures* haarFeatures,
float minhitrate,
float maxfalsealarm,
int symmetric,
float weightfraction,
int numsplits,
CvBoostType boosttype,
CvStumpError stumperror,
int maxsplits )
int num_splits;
//N is the index of the weak classifier
//%SMP is determined by weightfraction: the percentage of samples remaining after the small-weight ones are trimmed,
//weightfraction changes how many samples take part in the next weak classifier, namely the large-weight ones
//In AdaBoost, the weights of correctly classified samples are multiplied by a factor at each update, so they shrink; does that reduce the number of positives taking part in training?
//Since cvTrimWeights keeps weights equal to the threshold, the reduction is limited (consider the extreme case where everything classified correctly is removed)
//F marks the odd-numbered weak classifiers in the symmetric case; counting from 0 these are the even-indexed ones. Presumably it indicates that a symmetric feature is used?
#ifdef CV_VERBOSE
printf( "+----+----+-+---------+---------+---------+---------+\n" );
printf( "| N |%%SMP|F| ST.THR | HR | FA | EXP. ERR|\n" );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
#endif
//userdata contains the integral images and features used for training; its pointer is presumably the user parameter passed to the callback
userdata = cvUserdata( data, haarFeatures );
//The weights only play a role for a boosttype like LB (icvBoostStartTrainingLB).
//In the non-LB case discussed here, the function computes weakTrainVals from the cls values of the training samples; weakTrainVals is either +1 or -1
trainer = cvBoostStartTraining( &data->cls, weakTrainVals, &data->weights,
sampleIdx, boosttype );
//Trim the small-weight samples; the indices of the large-weight ones are placed in the trimmedIdx structure
trimmedIdx = cvTrimWeights( &data->weights, sampleIdx, weightfraction );
numtrimmed = (trimmedIdx) ? MAX( trimmedIdx->rows, trimmedIdx->cols ) : m;
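As a rough illustration of the trimming itself, a sketch of cvTrimWeights' assumed semantics (sorted_by_w, w and m are illustrative names, not the real ones):
/* drop the lightest samples while the dropped mass stays within
   (1 - weightfraction) of the total weight */
float total = 0.0F, acc = 0.0F;
int i, k = 0;
for( i = 0; i < m; i++ ) total += w[i];
/* sorted_by_w[]: sample indices ordered by ascending weight */
while( k < m && acc + w[sorted_by_w[k]] <= (1.0F - weightfraction) * total )
    acc += w[sorted_by_w[k++]];
/* the indices sorted_by_w[k..m-1] form trimmedIdx; samples whose weight
   equals the cut-off are kept, matching the comment above */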
//data->valcache holds the precomputed feature values
//flags is CV_COL_SAMPLE or CV_ROW_SAMPLE
//weakTrainVals?
//Obtain the mirrored (symmetric) features
if( classifier->feature[i].desc[0] == 'h' )
{//tilted (in the full source this handling sits in the else branch; see analysis 3 below)
int tmp = 0;
//Compute the feature values of the mirrored features
for( j = 0; j < numtrimmed; j++ )
//Call icvEvalCARTHaarClassifier to compute the trained weak classifier's response on every sample taking part in training
eval.data.fl[idx] = classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
//This should be where each sample's weight is updated
alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
&data->weights, trainer );
//Count the positives and compute each positive's response to the weak classifiers (one weak classifier corresponds to one feature)
//Are left and right in icvEvalCARTHaarClassifier numbered in some fixed order for the multi-feature case?
//The most suitable value (the threshold) is then found and returned
eval.data.fl[numpos] = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
eval.data.fl[numpos] += classifier->eval_r(
(CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
//Take the threshold according to the detection rate; the false-alarm rate is then obtained with this threshold
threshold = eval.data.fl[(int) ((1.0F - minhitrate) * numpos)];
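//e.g. with minhitrate = 0.995 and numpos = 1000, the index is (int)(0.005 * 1000) = 5, so the threshold is the 6th smallest stage sum and at least 99.5% of the positives score at or above it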
CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
//contains every sample's precomputed feature values; the index arrays sorted ascending by feature value are in trainParams' sortedIdx
int flags,
CvMat* trainClasses,//+1,-1
CvMat* typeMask,
CvMat* missedMeasurementsMask,
CvMat* compIdx,
CvMat* sampleIdx,//if not NULL, the sample order, sorted ascending by weight
CvMat* weights,//the sample weights
CvClassifierTrainParams* trainParams )
CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,//the precomputed feature values
int flags,
CvMat* trainClasses,//sample classes, +1 or -1
CvMat* typeMask,
CvMat* missedMeasurementsMask,
CvMat* compIdx,
CvMat* sampleIdx,//index array left after trimming the small-weight samples
CvMat* weights,//sample weights
CvClassifierTrainParams* trainParams )
//datan is the number of precomputed features; if new features can be obtained (getTrainData != NULL), n is the number of all obtainable features, so n >= datan always
//During symmetric training, getTrainData inside cvCreateMTStumpClassifier is NULL, and n is then the number of remaining features
assert( datan <= n );
//m is the number of all samples that could take part in training; if small-weight samples were trimmed, sampleIdx != NULL and l is the number actually taking part,
//sorting the samples by weight produces sampleIdx; sorting them by feature value produces sortedata
if( sampleIdx != NULL )
//filter != NULL means the small-weight samples were trimmed and the samples are sorted by feature value
if( filter != NULL || sortedn < n )
Q: Why run cluster analysis on the positive samples?
It is used to split them into clusters. Each cluster must hold at least minpos samples, and the code tries to create as many clusters as possible. I am not very clear on how k-means works or why it was chosen; my rough guess is that it groups positives with similar feature values into k classes, and if every class holds at least minpos members, the positives are split into k branches.
http://blog.sina.com.cn/s/blog_75e063c10100za53.html
opencv haartraining analysis 1: cvCreateTreeCascadeClassifier
void cvCreateTreeCascadeClassifier( const char* dirname,//directory holding the text data of the n stages, e.g. c:\data
const char* vecfilename,//location of the vec file holding the positive samples
const char* bgfilename, //location of the bg file listing the negative samples
int npos, int nneg, int nstages,
int numprecalculated,
int numsplits,
float minhitrate, float maxfalsealarm,
float weightfraction,
int mode, int symmetric,
int equalweights,
int winwidth, int winheight,
int boosttype, int stumperror,
int maxtreesplits, int minpos, bool bg_vecfile )//maxtreesplits – maximum number of nodes in tree. If maxtreesplits < nsplits, tree will not be built
{
CvTreeCascadeClassifier* tcc = NULL;
CvIntHaarFeatures* haar_features = NULL;
CvHaarTrainingData* training_data = NULL;
CvMat* vals = NULL;
CvMat* cluster_idx = NULL;
CvMat* idx = NULL;
CvMat* features_idx = NULL;
CV_FUNCNAME( "cvCreateTreeCascadeClassifier" );
__BEGIN__;
int i, k;
CvTreeCascadeNode* leaves;
int best_num, cur_num;
CvSize winsize;
char stage_name[PATH_MAX];
char buf[PATH_MAX];
char* suffix;
int total_splits;
int poscount;
int negcount;
int consumed;
double false_alarm;
double proctime;
int nleaves;
double required_leaf_fa_rate;
float neg_ratio;
int max_clusters;
max_clusters = CV_MAX_CLUSTERS;
neg_ratio = (float) nneg / npos;
nleaves = 1 + MAX( 0, maxtreesplits );
required_leaf_fa_rate = pow( (double) maxfalsealarm, (double) nstages ) / nleaves;//the maximum false-alarm rate raised to the nstages-th power, divided by the number of leaves, gives the leaf false-alarm rate
printf( "Required leaf false alarm rate: %g\n", required_leaf_fa_rate );
total_splits = 0;
winsize = cvSize( winwidth, winheight );
CV_CALL( cluster_idx = cvCreateMat( 1, npos + nneg, CV_32SC1 ) );
CV_CALL( idx = cvCreateMat( 1, npos + nneg, CV_32SC1 ) );
CV_CALL( tcc = (CvTreeCascadeClassifier*)
icvLoadTreeCascadeClassifier( dirname, winwidth + 1, &total_splits ) );
CV_CALL( leaves = icvFindDeepestLeaves( tcc ) );//the last stage serves as the deepest leaf
CV_CALL( icvPrintTreeCascade( tcc->root ) );
haar_features = icvCreateIntHaarFeatures( winsize, mode, symmetric );//this computes the number of Haar features and the parameters of each (the formula for evaluating every feature),
typedef struct CvTHaarFeature
{
char desc[CV_HAAR_FEATURE_DESC_MAX];
int tilted;
struct
{
CvRect r;
float weight;
} rect[CV_HAAR_FEATURE_MAX];
} CvTHaarFeature;
typedef struct CvFastHaarFeature
{
int tilted;
struct
{
int p0, p1, p2, p3;
float weight;
} rect[CV_HAAR_FEATURE_MAX];
} CvFastHaarFeature;
typedef struct CvIntHaarFeatures
{
CvSize winsize;
int count;
CvTHaarFeature* feature;
CvFastHaarFeature* fastfeature;
} CvIntHaarFeatures;
The difference between CvTHaarFeature and CvFastHaarFeature: CvTHaarFeature describes the windows the feature covers by their coordinates (CvRect r), while CvFastHaarFeature flattens the covered window region into linear pixel indices, which is what cvEvalFastHaarFeature then computes from.
CV_INLINE float cvEvalFastHaarFeature( const CvFastHaarFeature* feature,
const sum_type* sum, const sum_type* tilted )
{
const sum_type* img = feature->tilted ? tilted : sum;//img selects the tilted integral image when the feature is tilted; otherwise the upright one, in which every position already holds the cumulative pixel sum.
float ret = feature->rect[0].weight*
(img[feature->rect[0].p0] - img[feature->rect[0].p1] -
img[feature->rect[0].p2] + img[feature->rect[0].p3]) +
feature->rect[1].weight*
(img[feature->rect[1].p0] - img[feature->rect[1].p1] -
img[feature->rect[1].p2] + img[feature->rect[1].p3]);
if( feature->rect[2].weight != 0.0f )
ret += feature->rect[2].weight *
( img[feature->rect[2].p0] - img[feature->rect[2].p1] -
img[feature->rect[2].p2] + img[feature->rect[2].p3] );
return ret;
}
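The index form used by cvEvalFastHaarFeature comes from icvConvertToFastHaarFeature. A minimal sketch of the assumed conversion for one rect r (step is the integral-image row stride, winsize.width + 1):
int step = winsize.width + 1;
fast->rect[j].p0 = r.y * step + r.x;                          /* top-left */
fast->rect[j].p1 = r.y * step + (r.x + r.width);              /* top-right */
fast->rect[j].p2 = (r.y + r.height) * step + r.x;             /* bottom-left */
fast->rect[j].p3 = (r.y + r.height) * step + (r.x + r.width); /* bottom-right */
/* with these four indices, img[p0] - img[p1] - img[p2] + img[p3]
   is exactly the pixel sum over rect r */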
printf( "Number of features used : %d\n", haar_features->count );//输出32*32的子窗口所有特征的数目
training_data = icvCreateHaarTrainingData( winsize, npos + nneg );
sprintf( stage_name, "%s/", dirname );
suffix = stage_name + strlen( stage_name );
if (! bg_vecfile)
if( !icvInitBackgroundReaders( bgfilename, winsize ) && nstages > 0 )//icvInitBackgroundReaders creates the background reading pointers and returns 1 on success
CV_ERROR( CV_StsError, "Unable to read negative images" );
if( nstages > 0 )
{
do
{
CvSplit* first_split;
CvSplit* last_split;
CvSplit* cur_split;
CvTreeCascadeNode* parent;
CvTreeCascadeNode* cur_node;
CvTreeCascadeNode* last_node;
first_split = last_split = cur_split = NULL;
parent = leaves;
leaves = NULL;
do
{
int best_clusters;
float posweight, negweight;
double leaf_fa_rate;
if( parent ) sprintf( buf, "%d", parent->idx );
else sprintf( buf, "NULL" );
printf( "\nParent node: %s\n\n", buf );
printf( "*** 1 cluster ***\n" );
tcc->eval = icvEvalTreeCascadeClassifierFilter;//set here so the samples can be filtered later: only positives and negatives that pass the strong classifiers of all previous stages get through.
icvSetLeafNode( tcc, parent );//set the path from the root node to the leaf node
consumed = 0;
poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos,
(CvIntHaarClassifier*) tcc, vecfilename, &consumed );//load positives, with their integral images computed, from the vec file into training_data.
printf( "POS: %d %d %f\n", poscount, consumed, ((double) poscount)/consumed );
if( poscount <= 0 )
CV_ERROR( CV_StsError, "Unable to obtain positive samples" );
fflush( stdout );
proctime = -TIME( 0 );
nneg = (int) (neg_ratio * poscount);
negcount = icvGetHaarTrainingDataFromBG( training_data, poscount, nneg,
(CvIntHaarClassifier*) tcc, &false_alarm, bg_vecfile ? bgfilename : NULL );
printf( "NEG: %d %g\n", negcount, false_alarm );
printf( "BACKGROUND PROCESSING TIME: %.2f\n", (proctime + TIME( 0 )) );
if( negcount <= 0 )
CV_ERROR( CV_StsError, "Unable to obtain negative samples" );
leaf_fa_rate = false_alarm;
if( leaf_fa_rate <= required_leaf_fa_rate )//training ends once the required leaf false-alarm rate is reached.
{
printf( "Required leaf false alarm rate achieved. "
"Branch training terminated.\n" );
}
else if( nleaves == 1 && tcc->next_idx == nstages )
{
printf( "Required number of stages achieved. "
"Branch training terminated.\n" );//达到设定的stages也结束。
}
else
{
CvTreeCascadeNode* single_cluster;
CvTreeCascadeNode* multiple_clusters;
CvSplit* cur_split;
int single_num;
icvSetNumSamples( training_data, poscount + negcount );
posweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/poscount);
negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/negcount);
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );//set the weight and class of every sample.
fflush( stdout );
proctime = -TIME( 0 );
icvPrecalculate( training_data, haar_features, numprecalculated );//computes the first numprecalculated Haar feature values of all loaded positive and negative samples (done via icvGetTrainingDataCallback),
for( i = 0; i < num_samples; i++ )
{
for( j = 0; j < num; j++ )//num=numprecalculated
{
val = cvEvalFastHaarFeature(
( haar_features->fastfeature
+ first + j ),
(sum_type*) (training_data->sum.data.ptr
+ i * training_data->sum.step),
(sum_type*) (training_data->tilted.data.ptr
+ i * training_data->tilted.step) );
normfactor = training_data->normfactor.data.fl[i];
val = ( normfactor == 0.0F ) ? 0.0F : (val / normfactor);
#ifdef CV_COL_ARRANGEMENT
CV_MAT_ELEM( *mat, float, j, i ) = val;
#else
CV_MAT_ELEM( *mat, float, i, j ) = val;//store the computed feature value in the memory the mat pointer designates.
#endif
}
}
For each feature, all sample indices are also sorted in ascending order of that feature's value (done by cvGetSortedIndices; each feature is sorted separately).
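This pre-sorting is what makes stump training cheap: visiting samples in ascending feature value lets a single pass find the best threshold for a feature. A hedged sketch (sorted_idx, val, w, cls, total_neg and m are illustrative names, not the OpenCV ones; FLT_MAX is from <float.h>):
float wl_pos = 0.0F, wl_neg = 0.0F;   /* weight mass left of the cut */
float err, best_err = FLT_MAX, best_thr = 0.0F;
int i, id;
for( i = 0; i < m - 1; i++ )
{
    id = sorted_idx[i];
    if( cls[id] == 1.0F ) wl_pos += w[id]; else wl_neg += w[id];
    /* stump "predict +1 to the right of the cut": positives left of
       the cut are missed, negatives right of it fire */
    err = wl_pos + (total_neg - wl_neg);
    if( err < best_err )
    {
        best_err = err;
        best_thr = 0.5F * (val[id] + val[sorted_idx[i + 1]]);
    }
}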
printf( "Precalculation time: %.2f\n", (proctime + TIME( 0 )) );
CV_CALL( single_cluster = icvCreateTreeCascadeNode() );
fflush( stdout );
proctime = -TIME( 0 );
single_cluster->stage =
(CvStageHaarClassifier*) icvCreateCARTStageClassifier(
training_data, NULL, haar_features,
minhitrate, maxfalsealarm, symmetric,
weightfraction, numsplits, (CvBoostType) boosttype,
(CvStumpError) stumperror, 0 );//here the CART stage strong classifier is created; numsplits is the number of split nodes of each weak-classifier tree
printf( "Stage training time: %.2f\n", (proctime + TIME( 0 )) );
single_num = icvNumSplits( single_cluster->stage );//how many weak classifiers the strong classifier is built from
best_num = single_num;
best_clusters = 1;
multiple_clusters = NULL;
printf( "Number of used features: %d\n", single_num );
if( maxtreesplits >= 0 )
{
max_clusters = MIN( max_clusters, maxtreesplits - total_splits + 1 );
}
vals = NULL;
for( k = 2; k <= max_clusters; k++ )
{
int cluster;
int stop_clustering;
printf( "*** %d clusters ***\n", k );
stop_clustering = ( k * minpos > poscount );
if( !stop_clustering )
{
int num[CV_MAX_CLUSTERS];
if( k == 2 )
{
proctime = -TIME( 0 );
CV_CALL( vals = icvGetUsedValues( training_data, 0, poscount,
haar_features, single_cluster->stage ) );
printf( "Getting values for clustering time: %.2f\n", (proctime + TIME(0)) );
printf( "Value matirx size: %d x %d\n", vals->rows, vals->cols );
fflush( stdout );
cluster_idx->cols = vals->rows;
for( i = 0; i < negcount; i++ ) idx->data.i[i] = poscount + i;
}
proctime = -TIME( 0 );
CV_CALL( cvKMeans2( vals, k, cluster_idx, CV_TERM_CRITERIA() ) );
printf( "Clustering time: %.2f\n", (proctime + TIME( 0 )) );
for( cluster = 0; cluster < k; cluster++ ) num[cluster] = 0;
for( i = 0; i < cluster_idx->cols; i++ )
num[cluster_idx->data.i[i]]++;
for( cluster = 0; cluster < k; cluster++ )
{
if( num[cluster] < minpos )
{
stop_clustering = 1;
break;
}
}
}
if( stop_clustering )
{
printf( "Clusters are too small. Clustering aborted.\n" );
break;
}
cur_num = 0;
cur_node = last_node = NULL;
for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ )
{
CvTreeCascadeNode* new_node;
int num_splits;
int last_pos;
int total_pos;
printf( "Cluster: %d\n", cluster );
last_pos = negcount;
for( i = 0; i < cluster_idx->cols; i++ )
{
if( cluster_idx->data.i[i] == cluster )
{
idx->data.i[last_pos++] = i;
}
}
idx->cols = last_pos;
total_pos = idx->cols - negcount;
printf( "# pos: %d of %d. (%d%%)\n", total_pos, poscount,
100 * total_pos / poscount );
CV_CALL( new_node = icvCreateTreeCascadeNode() );
if( last_node ) last_node->next = new_node;
else cur_node = new_node;
last_node = new_node;
posweight = (equalweights)
? 1.0F / (total_pos + negcount) : (0.5F / total_pos);
negweight = (equalweights)
? 1.0F / (total_pos + negcount) : (0.5F / negcount);
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );
fflush( stdout );
proctime = -TIME( 0 );
new_node->stage = (CvStageHaarClassifier*)
icvCreateCARTStageClassifier( training_data, idx, haar_features,
minhitrate, maxfalsealarm, symmetric,
weightfraction, numsplits, (CvBoostType) boosttype,
(CvStumpError) stumperror, best_num - cur_num );
printf( "Stage training time: %.2f\n", (proctime + TIME( 0 )) );
if( !(new_node->stage) )
{
printf( "Stage training aborted.\n" );
cur_num = best_num + 1;
}
else
{
num_splits = icvNumSplits( new_node->stage );
cur_num += num_splits;
printf( "Number of used features: %d\n", num_splits );
}
}
if( cur_num < best_num )
{
icvReleaseTreeCascadeNodes( &multiple_clusters );
best_num = cur_num;
best_clusters = k;
multiple_clusters = cur_node;
}
else
{
icvReleaseTreeCascadeNodes( &cur_node );
}
}
cvReleaseMat( &vals );
CV_CALL( cur_split = (CvSplit*) cvAlloc( sizeof( *cur_split ) ) );
CV_ZERO_OBJ( cur_split );
if( last_split ) last_split->next = cur_split;
else first_split = cur_split;
last_split = cur_split;
cur_split->single_cluster = single_cluster;
cur_split->multiple_clusters = multiple_clusters;
cur_split->num_clusters = best_clusters;
cur_split->parent = parent;
cur_split->single_multiple_ratio = (float) single_num / best_num;
}
if( parent ) parent = parent->next_same_level;
} while( parent );
do
{
float max_single_multiple_ratio;
cur_split = NULL;
max_single_multiple_ratio = 0.0F;
last_split = first_split;
while( last_split )
{
if( last_split->single_cluster && last_split->multiple_clusters &&
last_split->single_multiple_ratio > max_single_multiple_ratio )
{
max_single_multiple_ratio = last_split->single_multiple_ratio;
cur_split = last_split;
}
last_split = last_split->next;
}
if( cur_split )
{
if( maxtreesplits < 0 ||
cur_split->num_clusters <= maxtreesplits - total_splits + 1 )
{
cur_split->single_cluster = NULL;
total_splits += cur_split->num_clusters - 1;
}
else
{
icvReleaseTreeCascadeNodes( &(cur_split->multiple_clusters) );
cur_split->multiple_clusters = NULL;
}
}
} while( cur_split );
leaves = last_node = NULL;
last_split = first_split;
while( last_split )
{
cur_node = (last_split->multiple_clusters)
? last_split->multiple_clusters : last_split->single_cluster;
parent = last_split->parent;
if( parent ) parent->child = cur_node;
for( ; cur_node; cur_node = cur_node->next )
{
FILE* file;
if( last_node ) last_node->next_same_level = cur_node;
else leaves = cur_node;
last_node = cur_node;
cur_node->parent = parent;
cur_node->idx = tcc->next_idx;
tcc->next_idx++;
sprintf( suffix, "%d/%s", cur_node->idx, CV_STAGE_CART_FILE_NAME );
file = NULL;
if( icvMkDir( stage_name ) && (file = fopen( stage_name, "w" )) != 0 )
{
cur_node->stage->save( (CvIntHaarClassifier*) cur_node->stage, file );
fprintf( file, "\n%d\n%d\n",
((parent) ? parent->idx : -1),
((cur_node->next) ? tcc->next_idx : -1) );
}
else
{
printf( "Failed to save classifier into %s\n", stage_name );
}
if( file ) fclose( file );
}
if( parent ) sprintf( buf, "%d", parent->idx );
else sprintf( buf, "NULL" );
printf( "\nParent node: %s\n", buf );
printf( "Chosen number of splits: %d\n\n", (last_split->multiple_clusters)
? (last_split->num_clusters - 1) : 0 );
cur_split = last_split;
last_split = last_split->next;
cvFree( &cur_split );
}
printf( "Total number of splits: %d\n", total_splits );
if( !(tcc->root) ) tcc->root = leaves;
CV_CALL( icvPrintTreeCascade( tcc->root ) );
} while( leaves );
{
char xml_path[1024];
int len = (int)strlen(dirname);
CvHaarClassifierCascade* cascade = 0;
strcpy( xml_path, dirname );
if( xml_path[len-1] == '\\' || xml_path[len-1] == '/' )
len--;
strcpy( xml_path + len, ".xml" );
cascade = cvLoadHaarClassifierCascade( dirname, cvSize(winwidth,winheight) );
if( cascade )
cvSave( xml_path, cascade );
cvReleaseHaarClassifierCascade( &cascade );
}
}
printf( "\nCascade performance\n" );
tcc->eval = icvEvalTreeCascadeClassifier;
consumed = 0;
poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos,
(CvIntHaarClassifier*) tcc, vecfilename, &consumed );
printf( "POS: %d %d %f\n", poscount, consumed,
(consumed > 0) ? (((float) poscount)/consumed) : 0 );
if( poscount <= 0 )
fprintf( stderr, "Warning: unable to obtain positive samples\n" );
proctime = -TIME( 0 );
negcount = icvGetHaarTrainingDataFromBG( training_data, poscount, nneg,
(CvIntHaarClassifier*) tcc, &false_alarm, bg_vecfile ? bgfilename : NULL );
printf( "NEG: %d %g\n", negcount, false_alarm );
printf( "BACKGROUND PROCESSING TIME: %.2f\n", (proctime + TIME( 0 )) );
if( negcount <= 0 )
fprintf( stderr, "Warning: unable to obtain negative samples\n" );
__END__;
if (! bg_vecfile)
icvDestroyBackgroundReaders();
if( tcc ) tcc->release( (CvIntHaarClassifier**) &tcc );
icvReleaseIntHaarFeatures( &haar_features );
icvReleaseHaarTrainingData( &training_data );
cvReleaseMat( &cluster_idx );
cvReleaseMat( &idx );
cvReleaseMat( &vals );
cvReleaseMat( &features_idx );
}
I. The tree classifier
1. Building a decision tree, CvCARTClassifier (the tree classifier)
//Hierarchy: CvCARTClassifier -> CvCARTNode -> CvStumpClassifier
//A CvCARTClassifier consists of count CvCARTNodes, and each CvCARTNode holds one CvStumpClassifier,
CvClassifier* cvCreateCARTClassifier( CvMat* trainData,//all feature values of all samples
int flags, //whether the matrix is organized by row or by column
CvMat* trainClasses,
CvMat* typeMask,
CvMat* missedMeasurementsMask,
CvMat* compIdx,
CvMat* sampleIdx,//the sample indices when only a subset of the samples is used
CvMat* weights,
CvClassifierTrainParams* trainParams )
#define CV_CLASSIFIER_FIELDS() \
int flags; \
float(*eval)( struct CvClassifier*, CvMat* ); \
void (*tune)( struct CvClassifier*, CvMat*, int flags, CvMat*, CvMat*, CvMat*, \
CvMat*, CvMat* ); \
int (*save)( struct CvClassifier*, const char* file_name ); \
void (*release)( struct CvClassifier** );
typedef struct CvClassifier
{
CV_CLASSIFIER_FIELDS()
} CvClassifier;
typedef struct CvCARTNode
{
CvMat* sampleIdx;
CvStumpClassifier* stump;
int parent; //index of the parent node
int leftflag; //1 for a left child; 0 for a right child
float errdrop;//the remaining error
} CvCARTNode;
//A weak classifier: the index of the feature it uses, its threshold, and which side holds the positives
typedef struct CvStumpClassifier
{
CV_CLASSIFIER_FIELDS()
int compidx; //index of the corresponding feature
float lerror;
float rerror;
float threshold; //threshold on this feature
float left; //mean, or the fraction of positives on the left, left = p(y=1)/(p(y=1)+p(y=-1)); for a classification stump it is 1 if the left side is positive, 0 otherwise
float right;
} CvStumpClassifier;
typedef struct CvCARTClassifier
{
CV_CLASSIFIER_FIELDS()
int count;
int* compidx;
float* threshold;
int* left;//if the current node's left child is a leaf, stores the negated leaf index (leaves counted from 0); for a non-leaf, stores that node's index
int* right;//if the current node's right child is a leaf, stores the negated leaf index (leaves counted from 0); for a non-leaf, stores that node's index
float* val;//stores each leaf's stump->left or stump->right, the positive fraction p(y=1)/(p(y=1)+p(y=-1))
} CvCARTClassifier;
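Given this encoding, evaluating a sample is a short loop. A sketch of the assumed traversal (this is what cvEvalCARTClassifier below amounts to):
int idx = 0;
do
{
    if( sample[cart->compidx[idx]] < cart->threshold[idx] )
        idx = cart->left[idx];
    else
        idx = cart->right[idx];
} while( idx > 0 );
return cart->val[-idx]; /* a non-positive idx encodes leaf number -idx */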
typedef struct CvCARTTrainParams
{
CV_CLASSIFIER_TRAIN_PARAM_FIELDS()
int count;//number of nodes
CvClassifierTrainParams* stumpTrainParams;
CvClassifierConstructor stumpConstructor;
//declares a function pointer named splitIdx that splits the samples idx into left and right by the threshold on feature compidx
void (*splitIdx)( int compidx, float threshold,
CvMat* idx, CvMat** left, CvMat** right,
void* userdata );
void* userdata;
} CvCARTTrainParams;
2. Detection with the tree classifier
//sample is a single sample; determine which leaf of the CvCARTClassifier tree it lands on
//returns the positive fraction p(y=1)/(p(y=1)+p(y=-1)) of that leaf
float cvEvalCARTClassifier( CvClassifier* classifier, CvMat* sample )
//walk the tree classifier to find where the sample sits in the tree, i.e. which leaf; returns the leaf index
float cvEvalCARTClassifierIdx( CvClassifier* classifier, CvMat* sample )
II. Boosting
1. The boosting flow. The various types are folded into one pair of functions, cvBoostStartTraining/cvBoostNextWeakClassifier,
with the boosting type distinguished by a parameter. Both assume the best weak classifier is already known and every sample's response to it has been computed into weakEvalVals
typedef struct CvBoostTrainer
{
CvBoostType type;
int count;
int* idx;//either NULL, or the sample indices
float* F;//stores LogitBoost's F
} CvBoostTrainer;
Call chain: cvBoostStartTraining -> startTraining[type] -> icvBoostStartTraining, etc.
//definition of cvBoostStartTraining
CvBoostTrainer* cvBoostStartTraining( ...,CvBoostType type )
{
return startTraining[type]( trainClasses, weakTrainVals, weights, sampleIdx, type );
}
//the array variable startTraining[4] of function-pointer type
CvBoostStartTraining startTraining[4] = {
icvBoostStartTraining,
icvBoostStartTraining,
icvBoostStartTrainingLB,
icvBoostStartTraining
};
//the function-pointer type CvBoostStartTraining
typedef CvBoostTrainer* (*CvBoostStartTraining)( CvMat* trainClasses,
CvMat* weakTrainVals,
CvMat* weights,
CvMat* sampleIdx,
CvBoostType type );
Call chain: cvBoostNextWeakClassifier -> nextWeakClassifier[trainer->type] -> icvBoostNextWeakClassifierLB, etc.
//definition of cvBoostNextWeakClassifier
float cvBoostNextWeakClassifier( ..., CvBoostTrainer* trainer )
{
return nextWeakClassifier[trainer->type]( weakEvalVals, trainClasses,weakTrainVals, weights, trainer);
}
//the array variable nextWeakClassifier[4] of function-pointer type
CvBoostNextWeakClassifier nextWeakClassifier[4] = {
icvBoostNextWeakClassifierDAB,
icvBoostNextWeakClassifierRAB,
icvBoostNextWeakClassifierLB,
icvBoostNextWeakClassifierGAB
};
//the function-pointer type CvBoostNextWeakClassifier
typedef float (*CvBoostNextWeakClassifier)( CvMat* weakEvalVals,
CvMat* trainClasses,
CvMat* weakTrainVals,
CvMat* weights,
CvBoostTrainer* data );
2. The concrete startTraining and NextWeakClassifier functions
//y* = 2y - 1 maps the class labels from {0,1} to {-1,1} and writes them into weakTrainVals
//returns a CvBoostTrainer in which F = NULL;
CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,//class labels {0,1},
CvMat* weakTrainVals,//class labels {-1,1},
CvMat* weights,
CvMat* sampleIdx,//either NULL, or the sample indices
CvBoostType type )
//Update the weights. The responses weakEvalVals are known, i.e. the classifier is fixed and its outputs sit in weakEvalVals
float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,//responses {1,-1}
CvMat* trainClasses,//class labels {0,1},
CvMat* weakTrainVals,//unused; would be {1,-1}
CvMat* weights, //updated in place
CvBoostTrainer* trainer )//identifies the samples whose weights get updated
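For reference, the Discrete AdaBoost update this corresponds to, as a standard-textbook sketch (f and y in {-1,+1}; m, w, f, y are illustrative names; assumes 0 < err < 1; logf/expf from <math.h>):
float err = 0.0F, alpha;
int i;
for( i = 0; i < m; i++ )
    if( f[i] != y[i] ) err += w[i];           /* weighted training error */
alpha = logf( (1.0F - err) / err );           /* weak-classifier weight */
for( i = 0; i < m; i++ )
    if( f[i] != y[i] ) w[i] *= expf( alpha ); /* up-weight the mistakes */
/* w is then renormalized to sum to 1; alpha is what
   cvBoostNextWeakClassifier returns to the caller */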
//Update the Real AdaBoost weights; the responses weakEvalVals are known, i.e. the classifier is fixed
float icvBoostNextWeakClassifierRAB( CvMat* weakEvalVals,//responses; should be the accuracy = (correctly classified / total)
CvMat* trainClasses,//class labels {0,1},
CvMat* weakTrainVals,//unused
CvMat* weights, //updated: w = w*exp(-1/2*log(evaldata/(1-evaldata)))
CvBoostTrainer* trainer )//identifies the samples to update
//sample count, weights, class labels, responses F, z values, sample indices
//compute LogitBoost's w and z from F; z is returned through traindata
void icvResponsesAndWeightsLB( int num, uchar* wdata, int wstep,
uchar* ydata, int ystep, //class labels
uchar* fdata, int fstep, //responses F
uchar* traindata, int trainstep,//used to store z
int* indices ) //sample indices
//initially F = 0, hence p = 1/2; compute w and z
CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,//class labels {0,1},
CvMat* weakTrainVals, //stores the Z values
CvMat* weights,
CvMat* sampleIdx,//either NULL, or the sample indices
CvBoostType type )
//given f, first compute F = F + f, then p = 1/(1+exp(-F)), then z and w
float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals,//f, a regression on z
CvMat* trainClasses,//class labels {0,1}
CvMat* weakTrainVals,//stores the Z values
CvMat* weights,
CvBoostTrainer* trainer )
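In formulas, the assumed per-sample LB step (y in {0,1}; F, f, w, z, y, num are illustrative names; real implementations clamp z for numerical stability):
float p;
int i;
for( i = 0; i < num; i++ )
{
    F[i] += f[i];                              /* accumulate the regressor */
    p = 1.0F / (1.0F + expf( -F[i] ));         /* current P(y=1|x) */
    w[i] = p * (1.0F - p);                     /* new sample weight */
    z[i] = ( y[i] == 1 ) ? 1.0F / p            /* working response, i.e. */
                         : -1.0F / (1.0F - p); /* (y - p) / (p*(1 - p)) */
}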
//Gentle AdaBoost: given f, compute w = w*exp(-y*f)
CV_BOOST_IMPL
float icvBoostNextWeakClassifierGAB( CvMat* weakEvalVals,//f = p(y=1|x)-p(y=-1|x)
CvMat* trainClasses,//类别标签{0,1}
CvMat* weakTrainVals,//unused
CvMat* weights,
CvBoostTrainer* trainer )
typedef struct CvTreeCascadeNode
{
CvStageHaarClassifier* stage; //the classifier attached to this node
struct CvTreeCascadeNode* next;
struct CvTreeCascadeNode* child;
struct CvTreeCascadeNode* parent;
struct CvTreeCascadeNode* next_same_level;
struct CvTreeCascadeNode* child_eval;
int idx;
int leaf;
} CvTreeCascadeNode;
opencv haartraining analysis 2: obtaining the positive and negative samples for each stage
The function poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos, (CvIntHaarClassifier*) tcc, vecfilename, &consumed ) loads count (npos) positive samples from the *.vec positive-sample file. The first time the program reaches this point (before the first classifier is trained), count positives can always be obtained as long as the positive set holds count samples. On later passes it may fail to obtain count samples, because
only samples that the existing cascade of strong classifiers ((CvIntHaarClassifier*) tcc) classifies as positive (i.e. correctly classified samples) are taken as training samples for the next strong classifier; see icvGetHaarTrainingData and icvEvalTreeCascadeClassifierFilter for details.
For training the negatives, see icvGetHaarTrainingDataFromBG and icvEvalTreeCascadeClassifierFilter.
int icvGetHaarTrainingDataFromBG( CvHaarTrainingData* data, int first, int count,
CvIntHaarClassifier* cascade, double* acceptance_ratio, const char * filename = NULL )
The acceptance_ratio output parameter records the ratio of the number of negatives actually taken to the number of negatives queried (when few negatives pass the preceding cascade stages, the program repeatedly re-reads the negatives, counting with thread_consumed_count), i.e. acceptance_ratio = ((double) count) / consumed_count. This is the false-alarm rate, used to decide whether the trained cascade has met its target; once it has, training stops.
Note the main For loop in icvGetHaarTrainingData:
for( i = first; i < first + count; i++ ) //reads count negatives in total; when that many
{ //cannot be found, this becomes an infinite loop!
The comment above deserves elaboration: the infinite loop only occurs when the preceding strong classifiers classify every sample in the negative set correctly. As long as even one sample is misclassified as positive, count negatives can be collected by scanning the whole negative set count times, although those count negatives are then count copies of that one sample. To avoid such situations, the negative set must hold enough samples.
When the negative images have exactly the same size as the positives, if the final classifier's target false-alarm rate is falsealarm and count negatives take part in training, the total number of negatives needed can be estimated as: TotalCount = count / falsealarm
Taking parameters from Rainer Lienhart's paper as an example: falsealarm = 0.5^20 = 9.6e-07, count = 3000,
so TotalCount = 3000/(0.5^20) = 3,145,728,000, about 3.1 billion.
The function icvGetHaarTrainingDataFromBG() loads count negatives from the negative set. The first time the program reaches it (before the first classifier is trained), count negatives can always be obtained as long as the negative set holds count samples. On later passes it may fail, because only samples that the existing cascade classifies as positive (i.e. misclassified samples) are taken as negative input for the next strong classifier.
对于int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count, CvIntHaarClassifier* cascade, CvGetHaarTrainingDataCallback callback, void* userdata,
int* consumed, double* acceptance_ratio )
An explanation of this function:
It is the common routine for reading both positive and negative samples; the difference lies in the callback invoked. Inside it, the variable thread_getcount counts the samples classified as positive (regardless of whether a sample is actually positive or negative).
The consumed output parameter is the total number of positives queried in order to collect count positives. For negatives it is NULL, so nothing is returned through it.
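Putting the pieces together, the assumed shape of the main loop in icvGetHaarTrainingData (a simplified sketch, not the literal source; img and the callback signature are abbreviated illustrations):
for( i = first; i < first + count; )
{
    /* pull the next candidate via the callback (the vec reader for
       positives, the background reader for negatives) */
    if( !callback( img, userdata ) ) break;   /* img: buffer for slot i */
    if( consumed ) (*consumed)++;
    /* compute sum, tilted and normfactor for slot i, then keep the
       sample only if it passes every previously trained stage */
    if( cascade->eval( cascade, sum, tilted, normfactor ) != 0.0F )
        i++;   /* slot filled; otherwise the next candidate overwrites it */
}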
opencv haartraining analysis 3: icvCreateCARTStageClassifier
CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
CvMat* sampleIdx,
CvIntHaarFeatures* haarFeatures,
float minhitrate,
float maxfalsealarm,
int symmetric,
float weightfraction,
int numsplits,
CvBoostType boosttype,
CvStumpError stumperror,
int maxsplits )
{
#ifdef CV_COL_ARRANGEMENT
int flags = CV_COL_SAMPLE;
#else
int flags = CV_ROW_SAMPLE;
#endif
CvStageHaarClassifier* stage = NULL;
CvBoostTrainer* trainer;
CvCARTClassifier* cart = NULL;
CvCARTTrainParams trainParams;
CvMTStumpTrainParams stumpTrainParams;
//CvMat* trainData = NULL;
//CvMat* sortedIdx = NULL;
CvMat eval;
int n = 0;
int m = 0;
int numpos = 0;
int numneg = 0;
int numfalse = 0;
float sum_stage = 0.0F;
float threshold = 0.0F;
float falsealarm = 0.0F;
//CvMat* sampleIdx = NULL;
CvMat* trimmedIdx;
//float* idxdata = NULL;
//float* tempweights = NULL;
//int idxcount = 0;
CvUserdata userdata;
int i = 0;
int j = 0;
int idx;
int numsamples;
int numtrimmed;
CvCARTHaarClassifier* classifier;
CvSeq* seq = NULL;
CvMemStorage* storage = NULL;
CvMat* weakTrainVals;
float alpha;
float sumalpha;
int num_splits;
#ifdef CV_VERBOSE
printf( "+----+----+-+---------+---------+---------+---------+\n" );
printf( "| N |%%SMP|F| ST.THR | HR | FA | EXP. ERR|\n" );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
#endif
n = haarFeatures->count;//the number of Haar features; for a 32*32 sub-window there are more than 260,000
m = data->sum.rows;
numsamples = (sampleIdx) ? MAX( sampleIdx->rows, sampleIdx->cols ) : m;
userdata = cvUserdata( data, haarFeatures );
stumpTrainParams.type = ( boosttype == CV_DABCLASS )
? CV_CLASSIFICATION_CLASS : CV_REGRESSION;
stumpTrainParams.error = ( boosttype == CV_LBCLASS || boosttype == CV_GABCLASS )
? CV_SQUARE : stumperror;
stumpTrainParams.portion = CV_STUMP_TRAIN_PORTION;
stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
stumpTrainParams.numcomp = n;
stumpTrainParams.userdata = &userdata;
stumpTrainParams.sortedIdx = data->idxcache;//this sets up the parameters of the one-level stump decision tree at each CART node
trainParams.count = numsplits;
trainParams.stumpTrainParams = (CvClassifierTrainParams*) &stumpTrainParams;
trainParams.stumpConstructor = cvCreateMTStumpClassifier;
trainParams.splitIdx = icvSplitIndicesCallback;
trainParams.userdata = &userdata;//this sets up the parameters of the CART weak classifier
eval = cvMat( 1, m, CV_32FC1, cvAlloc( sizeof( float ) * m ) );
storage = cvCreateMemStorage();
seq = cvCreateSeq( 0, sizeof( *seq ), sizeof( classifier ), storage );
weakTrainVals = cvCreateMat( 1, m, CV_32FC1 );
trainer = cvBoostStartTraining( &data->cls, weakTrainVals, &data->weights,
sampleIdx, boosttype );//weakTrainVals is computed from data->cls here: weakTrainVals = 2*cls - 1, so cls in {0,1} maps to weakTrainVals in {-1,1}
num_splits = 0;
sumalpha = 0.0F;
do
{
#ifdef CV_VERBOSE
int v_wt = 0;
int v_flipped = 0;
#endif
trimmedIdx = cvTrimWeights( &data->weights, sampleIdx, weightfraction );//trim away the small weights, controlled by weightfraction.
numtrimmed = (trimmedIdx) ? MAX( trimmedIdx->rows, trimmedIdx->cols ) : m;
#ifdef CV_VERBOSE
v_wt = 100 * numtrimmed / numsamples;
v_flipped = 0;
#endif
cart = (CvCARTClassifier*) cvCreateCARTClassifier( data->valcache,
flags,
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
(CvClassifierTrainParams*) &trainParams );//start building the CART tree weak classifier
classifier = (CvCARTHaarClassifier*) icvCreateCARTHaarClassifier( numsplits );
icvInitCARTHaarClassifier( classifier, cart, haarFeatures );
num_splits += classifier->count;
cart->release( (CvClassifier**) &cart );
if( symmetric && (seq->total % 2) )
{
float normfactor = 0.0F;
CvStumpClassifier* stump;
for( i = 0; i < classifier->count; i++ )
{
if( classifier->feature[i].desc[0] == 'h' )
{
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x -
classifier->feature[i].rect[j].r.width;
}
}
else
{
int tmp = 0;
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x;
CV_SWAP( classifier->feature[i].rect[j].r.width,
classifier->feature[i].rect[j].r.height, tmp );
}
}
}
icvConvertToFastHaarFeature( classifier->feature,
classifier->fastfeature,
classifier->count, data->winsize.width + 1 );
stumpTrainParams.getTrainData = NULL;
stumpTrainParams.numcomp = 1;
stumpTrainParams.userdata = NULL;
stumpTrainParams.sortedIdx = NULL;
for( i = 0; i < classifier->count; i++ )
{
for( j = 0; j < numtrimmed; j++ )
{
idx = icvGetIdxAt( trimmedIdx, j );
eval.data.fl[idx] = cvEvalFastHaarFeature( &classifier->fastfeature[i],
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step) );
normfactor = data->normfactor.data.fl[idx];
eval.data.fl[idx] = ( normfactor == 0.0F )
? 0.0F : (eval.data.fl[idx] / normfactor);
}
stump = (CvStumpClassifier*) trainParams.stumpConstructor( &eval,
CV_COL_SAMPLE,
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
trainParams.stumpTrainParams );
classifier->threshold[i] = stump->threshold;
if( classifier->left[i] <= 0 )
{
classifier->val[-classifier->left[i]] = stump->left;
}
if( classifier->right[i] <= 0 )
{
classifier->val[-classifier->right[i]] = stump->right;
}
stump->release( (CvClassifier**) &stump );
}
stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
stumpTrainParams.numcomp = n;
stumpTrainParams.userdata = &userdata;
stumpTrainParams.sortedIdx = data->idxcache;
#ifdef CV_VERBOSE
v_flipped = 1;
#endif
}
if( trimmedIdx != sampleIdx )
{
cvReleaseMat( &trimmedIdx );
trimmedIdx = NULL;
}
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
eval.data.fl[idx] = classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
&data->weights, trainer );
sumalpha += alpha;
for( i = 0; i <= classifier->count; i++ )
{
if( boosttype == CV_RABCLASS )
{
classifier->val[i] = cvLogRatio( classifier->val[i] );
}
classifier->val[i] *= alpha;
}
cvSeqPush( seq, (void*) &classifier );
numpos = 0;
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
if( data->cls.data.fl[idx] == 1.0F )
{
eval.data.fl[numpos] = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
eval.data.fl[numpos] += classifier->eval_r(
(CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
numpos++;
}
}
icvSort_32f( eval.data.fl, numpos, 0 );
threshold = eval.data.fl[(int) ((1.0F - minhitrate) * numpos)];
numneg = 0;
numfalse = 0;
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
if( data->cls.data.fl[idx] == 0.0F )
{
numneg++;
sum_stage = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
sum_stage += classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
if( sum_stage >= (threshold - CV_THRESHOLD_EPS) )
{
numfalse++;
}
}
}
falsealarm = ((float) numfalse) / ((float) numneg);
#ifdef CV_VERBOSE
{
float v_hitrate = 0.0F;
float v_falsealarm = 0.0F;
float v_experr = 0.0F;
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
sum_stage = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
sum_stage += classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
if( sum_stage >= (threshold - CV_THRESHOLD_EPS) )
{
if( data->cls.data.fl[idx] == 1.0F )
{
v_hitrate += 1.0F;
}
else
{
v_falsealarm += 1.0F;
}
}
if( ( sum_stage >= 0.0F ) != (data->cls.data.fl[idx] == 1.0F) )
{
v_experr += 1.0F;
}
}
v_experr /= numsamples;
printf( "|M|=%%|%c|�|�|�|�|\n",
seq->total, v_wt, ( (v_flipped) ? '+' : '-' ),
threshold, v_hitrate / numpos, v_falsealarm / numneg,
v_experr );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
fflush( stdout );
}
#endif
} while( falsealarm > maxfalsealarm && (!maxsplits || (num_splits < maxsplits) ) );
cvBoostEndTraining( &trainer );
if( falsealarm > maxfalsealarm )
{
stage = NULL;
}
else
{
stage = (CvStageHaarClassifier*) icvCreateStageHaarClassifier( seq->total,
threshold );
cvCvtSeqToArray( seq, (CvArr*) stage->classifier );
}
cvReleaseMemStorage( &storage );
cvReleaseMat( &weakTrainVals );
cvFree( &(eval.data.ptr) );
return (CvIntHaarClassifier*) stage;
}