// Search for facial landmarks with a (possibly stacked) Active Shape Model.
// Runs the face detector to get a start shape, then runs the ASM search for
// each model in turn, combining the results.  Function-level statics cache
// the models and accumulate timing stats across calls (so not thread safe).

SHAPE                         // results returned as a SHAPE
AsmSearch (
SHAPE &StartShape,            // out: start shape returned in here
DET_PARAMS &DetParams,        // out: face detector parameters
double &MeanTime,             // out: mean time per image (face det failed excluded)
const RgbImage &RgbImg,       // in: find face features in this image
const char sImage[],          // in: file path for RgbImg, for err msgs
const bool fRowley,           // in: true to use Rowley detector, else VJ
const char sConfFile0[],      // in: 1st config filename
const char sConfFile1[],      // in: 2nd config filename, "" if none
const char sDataDir[],        // in: data directory
const char sShapeFile[],      // in: if not NULL then use face detector in here
bool fIssueWarnings)          // in: true to issue warnings if needed
{
static ASM_MODEL Models[NMODELS_MAX];
const int nInitializedModels = nInitAsmModels(Models, sConfFile0, sConfFile1);

// nModels will be less than nInitializedModels if we previously
// called nInitAsmModels with two models but now are not supplying sConfFile1.
// This works because nInitAsmModels only actually initializes the models once,
// and thereafter keeps track of how many models are initialized.

const int nModels = (nInitializedModels == 2 && sConfFile1 && sConfFile1[0])? 2: 1;

static double TotalDetTime, TotalAsmTime;   // cumulative detector/search times
static int nTotalImages;
nTotalImages++;
static int nGoodImages;                     // images where the face detector succeeded
clock_t StartTime = clock();                // note that this excludes time in nInitAsmModels
SHAPE Shape;

// CombinedShape is created by combining shapes from each model search.
// It has the same number of points as Models[0].FileMeanShape, so if
// Models[1] has extra points they are discarded after the Models[1] search.

SHAPE CombinedShape;
Image Img;
ConvertRgbImageToGray(Img, RgbImg);
unsigned DetAttr = FA_ViolaJones;           // specifies which face detector to use
SHAPE DetAv = Models[0].VjAv;
if (fRowley)
    {
    DetAttr = FA_Rowley;
    DetAv = Models[0].RowleyAv;
    }
// Estimate StartShape from the face detector output and the start-shape
// estimation algorithm.  Note that the estimate embeds the alignment
// information M (scale, angle) and the translation Tx, Ty.
if (fGetStartShape(StartShape, DetParams, sImage, Models[0].FileMeanShape,
                   DetAttr, DetAv, sShapeFile, sDataDir,
                   CONF_fStasmSkipIfNotInShapeFile, fIssueWarnings))
    {
    TotalDetTime += double(clock() - StartTime) / CLOCKS_PER_SEC;
    StartTime = clock();
    nGoodImages++;
    Shape = StartShape;
    for (int iModel = 0; iModel < nModels; iModel++)
        {
        ASM_MODEL *pModel = &Models[iModel];
        if (iModel != 0)
            GetStartShapeFromPreviousSearch(Shape, CombinedShape, pModel->FileMeanShape);

        // Scale Shape and Img, so the face width is nStandardFaceWidth,
        // using the start shape to approximate the face width.

        double ImageScale = pModel->nStandardFaceWidth / xShapeExtent(Shape);
        SHAPE Shape1(Shape * ImageScale);   // working shape
        Image Img1(Img);                    // working Img
        int nNewWidth = iround(Img1.width * ImageScale);
        int nNewHeight = iround(Img1.height * ImageScale);
        ScaleImage(Img1, nNewWidth, nNewHeight, IM_BILINEAR);

        // dimKeep is needed when this model has different number
        // of landmarks from previous model

        Shape1.dimKeep(pModel->nPoints, 2);
        int nStartLev = pModel->nStartLev;
        Shape1 /= GetPyrScale(nStartLev, pModel->PyrRatio);
        for (int iLev = nStartLev; iLev >= 0; iLev--)   // for each lev in image pyr
            {
            double PyrScale = GetPyrScale(iLev, pModel->PyrRatio);
            SEARCH_IMAGES SearchImgs;       // the images used during search
            SearchImgs.Img = Img1;          // SearchImgs.Img gets scaled to this pyr lev
            ReduceImage(SearchImgs.Img, PyrScale, pModel->PyrReduceMethod);
            InitGradsIfNeeded(SearchImgs.Grads,         // get SearchImgs.Grads
                pModel->AsmLevs[iLev].ProfSpecs, SearchImgs.Img, Shape1.nrows());
            AsmLevSearch(Shape1, SearchImgs, Models[iModel], iLev, gLandTab);
            if (iLev != 0)  // use best shape from this iter as starting point for next
                Shape1 *= pModel->PyrRatio;
            ReleaseProcessor();             // give others a chance
            }
        CombinedShape.assign(Shape1);       // use assign not "=" because size may differ
        CombinedShape = CombinedShape / ImageScale;     // descale back to original size
        }
    TotalAsmTime += double(clock() - StartTime) / CLOCKS_PER_SEC;
    }
// NOTE(review): if the detector has never succeeded (nGoodImages == 0) the
// divisions below divide by zero -- confirm callers only read MeanTime after
// at least one successful detection.
MeanTime = (TotalDetTime + TotalAsmTime) / nGoodImages;
logprintf("\n[nTotalImages %d nGoodImages %d "
          "Mean times: FaceDet %.3fAsmSearch %.3f Both %.3f secs]\n",
          nTotalImages, nGoodImages,
          TotalDetTime / nGoodImages, TotalAsmTime / nGoodImages, MeanTime);
return CombinedShape;
}

//-----------------------------------------------------------------------------
// One Active Shape Model search pass at a single image-pyramid level:
// alternate profile matching (suggest a new shape) with conforming the
// suggestion to the shape model, until enough landmarks qualify as "good"
// or the iteration limit is reached.

static void AsmLevSearch (SHAPE &Shape,                // io
                          SEARCH_IMAGES &SearchImgs,   // in
                          const ASM_MODEL &Model,      // in
                          int iLev,                    // in
                          const LANDMARK LandTab[])    // in
{
int iter = 0, nGoodLandmarks = 0;
SHAPE SuggestedShape(Shape);    // shape after profile matching

// The shape params, initialized to 0.  The original formulation called for
// this to be set to 0 each time we run the model but we get slightly
// better results if we remember the shape params from the previous run.
// Thus this is outside the loop.

Vec b(Model.EigVecs.nrows());

int nPoints = Shape.nrows();
while ((iter < Model.AsmLevs[iLev].nMaxSearchIters) &&
       (nGoodLandmarks <= (Model.AsmLevs[iLev].nQualifyingDisp * nPoints)/100))
    {
    // estimate the best SuggestedShape by profile matching the landmarks in Shape
    nGoodLandmarks = GetSuggestedShape(SuggestedShape, Shape,
                        SearchImgs, Model.AsmLevs[iLev], LandTab,
                        Model.nPixSearch, Model.nPixSearch2d,
                        Model.SigmoidScale, Model.fExplicitPrevNext);

    // align SuggestedShape to the shape model, put result in Shape
    bool fFinalIter = (iter == Model.AsmLevs[iLev].nMaxSearchIters - 1);
    Shape = ConformShapeToModel(b, SuggestedShape, Model, iLev, fFinalIter);
    iter++;
    }
}

//-----------------------------------------------------------------------------
// Return a copy of Shape conformed to the model.  In other words, generate a
// model shape that is as close as possible to Shape.  Shape itself isn't changed.
// // To match the model to Shape we need tofind Pose and b in the following equation: // // Shape = Pose * (MeanShape + EigVecs *b) (where = actually means approxequal) SHAPE ConformShapeToModel (Vec &b, // io const SHAPE&Shape, // in const ASM_MODEL&Model, // in int iLev, // in bool fShapeModelFinalIter)// in { Vec MeanShape(Model.AsmLevs[iLev].MeanShape); // For calculations below we need to seeshapes (nrows x 2) as vectors (1 x 2*nrows). // Note that this is a "view" soif you change MeanShapeAsVec you // are changing MeanShape too, and viceversa. VecView MeanShapeAsVec(MeanShape.viewAsCol()); // find y, the model shape that best fitsShape SHAPE OutShape(Shape); int nEigs = Model.AsmLevs[iLev].nEigs; double BMax = Model.AsmLevs[iLev].BMax; if (fShapeModelFinalIter) { // final iter in main ASM search loop so loosen up the model nEigs = Model.AsmLevs[iLev].nEigsFinal; BMax = Model.AsmLevs[iLev].BMaxFinal; } ASSERT(BMax > 0); ASSERT(nEigs > 0); SHAPE x(Shape.nrows(), 2); x.viewAsCol() = MeanShapeAsVec + Model.EigVecs * b; // generate a modelshape x //估计x到shape的M(scale,angle),Tx,Ty,信息,注意这里其实就是固定b参数,来估算姿态参数信息 Mat Pose(AlignShape(x, Shape)); //还原到局部坐标系(模型坐标系) SHAPE y(TransformShape(Shape, Pose.inverse())); // project Shape intomodel space // update model params b to match y // We limit b to ensure we stay within model limits //在模型空间计算得到更新的b参数 b= Model.EigInverse * (y.viewAsCol() - MeanShapeAsVec); LimitB(b, Model.AsmLevs[iLev].EigVals, nEigs, BMax); //generate OutShape from the model using our calculated b, // and align OutShape to Shape OutShape.viewAsCol() = Model.EigVecs * b; //映射会图片坐标系 OutShape = TransformShape(Model.AsmLevs[iLev].MeanShape + OutShape,Pose); return OutShape; }
Key stasm shape-alignment source code (annotated).
Most recent recommended article published 2019-04-18 10:56:27.