关于系统人脸检测,之前我用opencv,opencv太大了,一个framework就一百到二百M,本来项目的ipa包10来兆,用上opencv倒好一下30多兆了。然后就研究了一下系统的CoreImage。
首先从摄像头获取图像什么的不说了,你获取的图像是视频流,也就是一秒多少帧。我用的是一秒5帧。
// Sample the camera stream at 5 fps (0.20 s interval), invoking the face-detection pass each tick.
// NOTE(review): a repeating NSTimer retains its target, so this controller will not dealloc
// until the timer is invalidated — confirm the timer is invalidated on teardown/pop elsewhere.
self.timer = [NSTimer scheduledTimerWithTimeInterval:0.20f target:self selector:@selector(addfaceFeature) userInfo:nil repeats:YES];
下面是人脸检测具体的方法,我就直接把我项目里的方法复制粘贴了。
/// Face-detection pass, invoked ~5x/second by a repeating timer.
/// Counts deliberate eye blinks; after the third blink it reports success to the
/// delegate (with the captured upload image) and pops this view controller.
/// Also captures the 5th open-eyed frame as the image to upload.
- (void)addfaceFeature {
    // Guard: the camera may not have delivered a frame yet; imageWithCGImage:
    // is not meaningful with a NULL CGImage.
    UIImage *frame = self.getImage;
    CGImageRef cgImage = frame.CGImage;
    if (cgImage == NULL) {
        return;
    }

    // Create the context and detector once and reuse them — CIDetector creation
    // is expensive, and this method fires several times per second.
    // CIDetectorAccuracyLow trades precision for the speed a live stream needs.
    static CIContext *context;
    static CIDetector *faceDetector;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        context = [CIContext contextWithOptions:nil];
        faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace
                                          context:context
                                          options:@{CIDetectorAccuracy: CIDetectorAccuracyLow}];
    });

    CIImage *ciimage = [CIImage imageWithCGImage:cgImage];

    // CIDetectorTracking / CIDetectorEyeBlink / CIDetectorSmile are per-call
    // feature-detection keys, so they belong in featuresInImage:options:
    // (not in the detector-creation options).
    NSArray *features = [faceDetector featuresInImage:ciimage
                                              options:@{CIDetectorTracking: @YES,
                                                        CIDetectorEyeBlink: @YES,
                                                        CIDetectorSmile: @YES}];

    for (CIFaceFeature *faceFeature in features) {
        // Require a full face: mouth plus both eyes.
        if (faceFeature.hasMouthPosition && faceFeature.hasRightEyePosition && faceFeature.hasLeftEyePosition) {
            [self.view addSubview:self.eyeBlinkLabel];
            // Pop the controller after three successful blinks.
            if (faceFeature.rightEyeClosed && faceFeature.leftEyeClosed) {
                // isEyeBlink latches the eyes-closed state so one long blink is
                // counted once, not once per timer tick.
                if (!self.isEyeBlink) {
                    self.eyeBlinkNumber++;
                    self.eyeBlinkLabel.text = [NSString stringWithFormat:@"请第%ld次眨眼", (long)self.eyeBlinkNumber + 1];
                    if (self.eyeBlinkNumber > 2) {
                        [self.delegate faceDetectionResult:@"Success" andImage:self.uploadImage];
                        [self.navigationController popViewControllerAnimated:YES];
                    }
                }
                self.isEyeBlink = YES;
            } else {
                self.isEyeBlink = NO;
                // Eyes open: capture the 5th such frame as the upload image
                // and show it in the preview image view.
                self.number++;
                if (self.number == 5) {
                    self.uploadImage = frame;
                    self.imgView.image = self.uploadImage;
                    [self.view addSubview:self.imgView];
                }
            }
        }
    }
    // No face in this frame: hide the blink prompt.
    if (features.count < 1) {
        [self.eyeBlinkLabel removeFromSuperview];
    }
}
就这样了,这是根据我的工程写的,工程和工程肯定是不一样的,希望可以有你用到的代码。