与上面训练过程相对应的训练函数如下:
// Train the face-detector offset model: run the cascade over every annotated
// training image, measure where the annotated shape's centre of mass and scale
// sit relative to the detection window, and store the median (x, y, scale)
// offset, each normalised by the detection width so it is scale invariant.
//
// @param data          annotated training set (images + landmark points)
// @param fname         path of the cascade classifier XML file to load
// @param ref           reference shape (2n x 1 float matrix); cloned and kept
// @param mirror        if true, also train on the mirrored images/points
// @param visi          if true, visualise each detection during training
// @param frac          minimum fraction of landmarks that must lie inside a
//                      detection for the sample to contribute
// @param scaleFactor   cascade scale step (see detectMultiScale)
// @param minNeighbours cascade neighbour threshold (see detectMultiScale)
// @param minSize       smallest face size the cascade will report
void
face_detector::
train(ft_data &data,
const string fname,
const Mat &ref,
const bool mirror,
const bool visi,
const float frac,
const float scaleFactor,
const int minNeighbours,
const Size minSize)
{
detector.load(fname.c_str()); detector_fname = fname; reference = ref.clone();
vector<float> xoffset(0),yoffset(0),zoffset(0);
for(int i = 0; i < data.n_images(); i++){
//flip = 0: original image, flip = 1: mirrored image (only when enabled)
const int nflips = mirror ? 2 : 1;
for(int flip = 0; flip < nflips; flip++){
Mat im = data.get_image(i,flip);
//an empty image skips this sample entirely (matches skipping to next i)
if(im.empty())break;
vector<Point2f> p = data.get_points(i,flip != 0); int n = p.size();
Mat pt = Mat(p).reshape(1,2*n);
vector<Rect> faces; Mat eqIm; equalizeHist(im,eqIm);
detector.detectMultiScale(eqIm,faces,scaleFactor,minNeighbours,0
|CV_HAAR_FIND_BIGGEST_OBJECT
|CV_HAAR_SCALE_IMAGE,minSize);
if(faces.size() >= 1){
if(visi){
//draw landmarks (green) and the detection rectangle (red)
Mat I; cvtColor(im,I,CV_GRAY2RGB);
for(int j = 0; j < n; j++)circle(I,p[j],1,CV_RGB(0,255,0),2,CV_AA);
rectangle(I,faces[0].tl(),faces[0].br(),CV_RGB(255,0,0),3);
imshow("face detector training",I); waitKey(10);
}
//check if enough points are in detected rectangle
if(this->enough_bounded_points(pt,faces[0],frac)){
//offsets are normalised by the detection width w
Point2f center = this->center_of_mass(pt); float w = faces[0].width;
xoffset.push_back((center.x - (faces[0].x+0.5*faces[0].width ))/w);
yoffset.push_back((center.y - (faces[0].y+0.5*faces[0].height))/w);
zoffset.push_back(this->calc_scale(pt)/w);
}
}
}
}
//no usable detections at all: fall back to a zero offset instead of
//indexing into an empty matrix below
if(xoffset.empty() || yoffset.empty() || zoffset.empty()){
detector_offset = Vec3f(0,0,0); return;
}
//choose median value (robust to outlier detections)
Mat X = Mat(xoffset),Xsort,Y = Mat(yoffset),Ysort,Z = Mat(zoffset),Zsort;
cv::sort(X,Xsort,CV_SORT_EVERY_COLUMN|CV_SORT_ASCENDING); int nx = Xsort.rows;
cv::sort(Y,Ysort,CV_SORT_EVERY_COLUMN|CV_SORT_ASCENDING); int ny = Ysort.rows;
cv::sort(Z,Zsort,CV_SORT_EVERY_COLUMN|CV_SORT_ASCENDING); int nz = Zsort.rows;
detector_offset = Vec3f(Xsort.fl(nx/2),Ysort.fl(ny/2),Zsort.fl(nz/2));
return;
}
在总体流程中的第二步:为第一帧(或下一帧)初始化人脸特征点。
其实就是先用上面的训练算法得到三个偏移参数(x、y、尺度),
然后用下面的算法对新的图像计算特征点的基本初始位置。
对应的代码是:
// Detect the largest face in an image and place the reference shape on it
// using the (x, y, scale) offsets learned by train().
//
// @param im            input image (greyscale or RGB)
// @param scaleFactor   cascade scale step (see detectMultiScale)
// @param minNeighbours cascade neighbour threshold (see detectMultiScale)
// @param minSize       smallest face size the cascade will report
// @return initial landmark positions, or an empty vector if no face is found
vector<Point2f>
face_detector::
detect(const Mat &im,
const float scaleFactor,
const int minNeighbours,
const Size minSize)
{
//work on a greyscale, histogram-equalised copy
Mat grey;
if(im.channels() == 1)grey = im;
else cvtColor(im,grey,CV_RGB2GRAY);
Mat eqIm; equalizeHist(grey,eqIm);
//keep only the biggest detection
vector<Rect> faces;
detector.detectMultiScale(eqIm,faces,scaleFactor,minNeighbours,0
|CV_HAAR_FIND_BIGGEST_OBJECT
|CV_HAAR_SCALE_IMAGE,minSize);
if(faces.size() < 1){return vector<Point2f>();}
//predict face placement: de-normalise the learned offsets by the
//detection width; scale holds the (x, y, scale) components in pixels
const Rect face = faces[0];
const Vec3f scale = detector_offset*face.width;
const int npts = reference.rows/2;
vector<Point2f> pts(npts);
for(int j = 0; j < npts; j++){
pts[j].x = scale[2]*reference.fl(2*j ) + face.x + 0.5 * face.width + scale[0];
pts[j].y = scale[2]*reference.fl(2*j+1) + face.y + 0.5 * face.height + scale[1];
}
return pts;
}
返回值就是检测到的人脸上各特征点的初始位置。