近年来,基于局部二值模式(local binary pattern,简称LBP)的人脸识别方法受到人们的关注,该方法来源于纹理分析领域.它首先计算图像中每个像素与其局部邻域点在亮度上的序关系,然后对二值序关系进行编码形成局部二值模式,最后采用多区域直方图作为图像的特征描述.该算法具有计算量小、鲁棒性强、识别准确度高等优点。
本人在人脸检测的基础上,采用LBP算法成功在安霸A2S70平台上实现了身份识别的功能。该识别过程主要包括以下几个模块:
1. 训练模块
通过采集识别对象在不同光照和不同姿态下的若干样本(如每种情形下50张连续帧人脸样本),分别进行训练,得到训练集。
2. 识别模块
对识别对象先进行人脸检测,再经过LBP变换并计算LBP直方图,最后计算该对象与训练库中各组样本的ChiSquare距离,找出最匹配的一组样本;若该距离在事先设定的门限值范围内,即可确认出对象的身份。
3. UI界面部分
为方便测试,事先建立一个训练顺序表,将身份ID(如TONY)输入保存。在成功识别匹配后,可以显示出对方的ID号来。
主要程序代码如下:
/*
 * Compute the 8-neighbour LBP (local binary pattern) transform of image1
 * and store the per-pixel 8-bit codes in channel 0 of image2.
 *
 * For every pixel the eight neighbours are compared against the centre
 * value via getBIT() and packed MSB-first, starting at the upper-left
 * neighbour and proceeding clockwise (weights 128, 64, ..., 1).
 *
 * NOTE(review): border pixels produce neighbour coordinates of -1,
 * width or height; getBIT() is assumed to tolerate out-of-range
 * coordinates -- confirm against its definition.
 */
void createLBP(IplImage* image1,IplImage* image2)
{
    int i, j;
    CvScalar s, s1;
    double code;    /* packed LBP code; double matches getBIT()'s usage */

    printk("createLBP..\n");
    cvZero(image2);
    for (i = 0; i < image1->height; i++)
    {
        for (j = 0; j < image1->width; j++)
        {
            s = cvGet2D(image1, i, j);   /* centre pixel value in s.val[0] */
            /* Neighbours clockwise from upper-left, weighted 128..1. */
            code  = 128 * getBIT(image1, j-1, i-1, s.val[0]);
            code +=  64 * getBIT(image1, j,   i-1, s.val[0]);
            code +=  32 * getBIT(image1, j+1, i-1, s.val[0]);
            code +=  16 * getBIT(image1, j+1, i,   s.val[0]);
            code +=   8 * getBIT(image1, j+1, i+1, s.val[0]);
            code +=   4 * getBIT(image1, j,   i+1, s.val[0]);
            code +=   2 * getBIT(image1, j-1, i+1, s.val[0]);
            code +=   1 * getBIT(image1, j-1, i,   s.val[0]);
            s1.val[0] = code;
            s1.val[1] = 0;
            s1.val[2] = 0;
            s1.val[3] = 0;
            cvSet2D(image2, i, j, s1);
        }
    }
    printk("createLBP..down\n");
}
/*
 * Build a Hist_bin-bin histogram of the LBP codes in the current
 * LBP_HIST_WIN_X x LBP_HIST_WIN_Y window of img, and copy it into the
 * flat output buffer HistData at the slot for region (row, col) of
 * training image img_num.
 *
 * NOTE(review): the caller sets an ROI before invoking this function;
 * confirm that cvGet2D honours the ROI origin on this OpenCV build --
 * if it indexes the full image instead, every window reads the same
 * top-left region.
 */
void CalHist(IplImage* img,int row,int col,int* HistData,int img_num)
{
    int i, j;
    int HistVal;
    int hist[Hist_bin];
    CvScalar s;

    for (i = 0; i < Hist_bin; i++)
    {
        hist[i] = 0;
    }
    for (i = 0; i < LBP_HIST_WIN_Y; i++)
    {
        for (j = 0; j < LBP_HIST_WIN_X; j++)
        {
            s = cvGet2D(img, i, j);
            HistVal = (int)s.val[0];   /* LBP code, 0..255 */
            /*
             * Bin width is 256/Hist_bin, so the bin index is
             * HistVal*Hist_bin/256.  The previous HistVal/Hist_bin form
             * only maps codes into [0, Hist_bin) when Hist_bin == 16;
             * for smaller Hist_bin it indexes out of bounds.
             */
            hist[(HistVal * Hist_bin) / 256]++;
        }
    }
    /* Flattened output layout: [img_num][row][col][bin]. */
    for (i = 0; i < Hist_bin; i++)
    {
        HistData[img_num*LBP_region_row*LBP_region_col*Hist_bin
                 + row*LBP_region_col*Hist_bin
                 + col*Hist_bin + i] = hist[i];
    }
}
/*
 * Split the LBP image img1 into an LBP_region_row x LBP_region_col grid
 * of LBP_HIST_WIN_X x LBP_HIST_WIN_Y windows and compute one histogram
 * per window into LbpHistData; num selects which training-image slot of
 * the flat buffer receives the histograms.
 */
void CalLbpHist(IplImage* img1,int* LbpHistData,int num)
{
    int m, n;

    printk("CalLbpHist..\n");
    for (m = 0; m < LBP_region_row; m++)
    {
        for (n = 0; n < LBP_region_col; n++)
        {
            /* Restrict processing to the (m, n) grid cell. */
            cvSetImageROI(img1, cvRect(LBP_HIST_WIN_X*n, m*LBP_HIST_WIN_Y,
                                       LBP_HIST_WIN_X, LBP_HIST_WIN_Y));
            CalHist(img1, m, n, LbpHistData, num);
            cvResetImageROI(img1);
        }
    }
    printk("CalLbpHist..down\n");
}
/*
 * Chi-square distance between two histograms of length len:
 *     sum_i (h1[i]-h2[i])^2 / (h1[i]+h2[i])
 * Bins where both histograms are zero contribute nothing.
 *
 * Fixes a bug in the original: when a bin's denominator was zero, the
 * previous iteration's term was left in the accumulator variable and
 * added to the sum again, inflating the distance.
 *
 * len keeps its original double type for interface compatibility; it is
 * treated as an element count.
 */
double CalHistChiSquare(int* hist1,int* hist2,double len)
{
    double chiSquareSum = 0;
    int i;
    int n = (int)len;

    for (i = 0; i < n; i++)
    {
        int sum  = hist1[i] + hist2[i];
        int diff = hist1[i] - hist2[i];
        /* Skip empty bins instead of re-adding the stale previous term. */
        if (sum != 0)
        {
            chiSquareSum += (double)(diff * diff) / sum;
        }
    }
    return chiSquareSum;
}
/*
 * Train the LBP recogniser: for each of the last TrainNum sample images
 * (raw files "d://trainN.raw", 80x88), compute the LBP transform and its
 * region histograms into LBPtrainHistData, then persist the whole
 * training buffer to pfile.
 *
 * NOTE(review): Train_Img returned by LoadImage() is never released
 * here -- if LoadImage() allocates a fresh image per call this leaks one
 * image per sample; confirm LoadImage()'s ownership contract.
 */
void LbpTrain(FILE *pfile)
{
    int i, nsize, offset = 0;
    IplImage* Train_Img;
    char p_file_name[50], pUniName[50];

    printk("lbptrain..\n");
    nsize = TrainNum*LBP_region_row*LBP_region_col*Hist_bin;
    for (i = nTrainFaces-TrainNum; i < nTrainFaces; i++)
    {
        IplImage* LBPimg = cvCreateImage(cvSize(80, 88), 8, 1);
        sprintf(p_file_name, "d://train%1d.raw", i);
        asc_to_uni(p_file_name, pUniName);   /* platform loader wants a Unicode path */
        printk("loading %s..", p_file_name);
        Train_Img = LoadImage(pUniName, 80, 88);
        if (Train_Img)
        {
            createLBP(Train_Img, LBPimg);
            dly_tsk(20);                     /* yield to other RTOS tasks */
            /* Map absolute sample index i to a slot in 0..TrainNum-1. */
            offset = i < TrainNum ? i : i - ((nTrainFaces/TrainNum)-1)*TrainNum;
            CalLbpHist(LBPimg, LBPtrainHistData, offset);
            dly_tsk(20);
        }
        cvReleaseImage(&LBPimg);
    }
    dly_tsk(100);
    writeData(LBPtrainHistData, nsize, pfile);
    printk("lbptrain..down\n");
    dly_tsk(100);
}
/*
 * Scan num consecutive training-histogram blocks in trainData and return
 * the 1-based index of the block with the smallest chi-square distance
 * to testdata.  Returns 0 when either pointer is NULL (so 0 can mean
 * "no match" to callers).
 */
int FindTheBest( int* trainData, int* testdata,int num)
{
    double chiSquare;
    int iTrain, iBest = 0;
    double leastDistSq = DBL_MAX;
    int nsize = LBP_region_row*LBP_region_col*Hist_bin;  /* ints per sample */
    int* train_ptr = trainData;

    /* %p is the only portable conversion for pointers; %x with a
     * pointer argument is undefined behaviour. */
    printk("trainData pointer:%p\n", (void*)trainData);
    printk("testdata pointer:%p\n", (void*)testdata);
    if (!train_ptr || !testdata)
    {
        printk("data pointer error!\n");
        return iBest;
    }
    for (iTrain = 0; iTrain < num; iTrain++)
    {
        chiSquare = CalHistChiSquare(train_ptr, testdata, nsize);
        if (chiSquare < leastDistSq)
        {
            leastDistSq = chiSquare;
            iBest = iTrain + 1;   /* 1-based so 0 stays "none" */
        }
        train_ptr += nsize;
        printk("ChiSquare=%8.4f\n", chiSquare);
    }
    return iBest;
}
/*
 * Recognise a detected face image: compute its LBP transform and region
 * histograms, then match against the trained database.  Returns the
 * 1-based index of the best-matching training sample, or 0 on buffer
 * error / no training data.
 */
int LbpRecognize(IplImage* recimg,int train_num)
{
    int RecNum = 0;

    printk("Lbp Recognize..\n");
    printk("train num=%d\n", train_num);
    createLBP(recimg, LBPimg);            /* LBPimg: shared scratch image */
    dly_tsk(20);
    CalLbpHist(LBPimg, LBPHistData, 0);   /* test histogram goes in slot 0 */
    dly_tsk(50);
    if (TrainData)
        RecNum = FindTheBest(TrainData, LBPHistData, train_num);
    else
        printk("TrainData buffer error!\n");
    dly_tsk(50);
    printk("Lbp Recognize..down\n");
    return RecNum;
}
实验结果表明,在正面人脸(偏转不超过±10度)的情况下,识别准确度比较高。但在侧面或姿态变化(大表情变化)时,识别效果很差,这是我后续需要努力改进的地方。不管怎样,在安霸A2S70平台上实现身份识别的实验效果是有意义的,为实现智能图像识别产品化迈出了重要的一步。