[iOS] CIDetector: face detection with CIDetectorTypeFace

- (void)viewDidLoad
{
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.
    
    UIImageView *imageView = [[UIImageView alloc] initWithFrame:self.view.bounds];
    imageView.image = [UIImage imageNamed:@"timg.jpeg"];
    [self.view addSubview:imageView];
    
    CGFloat width = imageView.image.size.width;
    CGFloat height = imageView.image.size.height;
    
    CGFloat sWidth = imageView.bounds.size.width;
    CGFloat sHeight = imageView.bounds.size.height;
    
    CGFloat x = width/sWidth;
    CGFloat y = height/sHeight;
    
    // Adjust the image view's frame to aspect-fit the image
    if (x > y)
    {
        imageView.frame = CGRectMake(0, (sHeight-height/x)/2, sWidth, height/x);
    }else{
        imageView.frame = CGRectMake((sWidth-width/y)/2, 0, width/y, sHeight);
    }
    
    // Redraw the image at the image view's size
    UIGraphicsBeginImageContext(CGSizeMake(imageView.bounds.size.width , imageView.bounds.size.height));
    [imageView.image drawInRect:CGRectMake(0, 0, imageView.bounds.size.width, imageView.bounds.size.height)];
    UIImage *scaledImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    
    imageView.image = scaledImage;
    
    // Face detection (run off the main thread)
    dispatch_async(dispatch_get_global_queue(0, 0), ^{
        CGRect faceRect = CGRectZero; // frame of the last detected face, drawn as an overlay below
        CIImage *cImage = [CIImage imageWithCGImage:imageView.image.CGImage];
        // Set the detection accuracy
        NSDictionary *opts = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh
                                                         forKey:CIDetectorAccuracy];
        /* Lower accuracy, higher performance */
        //CORE_IMAGE_EXPORT NSString* const CIDetectorAccuracyLow NS_AVAILABLE(10_7, 5_0);
        
        /* Lower performance, higher accuracy */
        //CORE_IMAGE_EXPORT NSString* const CIDetectorAccuracyHigh NS_AVAILABLE(10_7, 5_0);
        
        CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                                  context:nil
                                                  options:opts];
        
        NSArray *features = [detector featuresInImage:cImage];
        
        if ([features count] == 0)
        {
            dispatch_async(dispatch_get_main_queue(), ^{
                NSLog(@"检测失败");
            });
            return ;
        }
        
        for (CIFaceFeature *feature in features)
        {
            // Smile detection
            BOOL smile = feature.hasSmile;
            
            NSLog(@"%@", smile ? @"Smiling" : @"Not smiling");
            
            // Eye state
            BOOL leftEyeClosed = feature.leftEyeClosed;
            BOOL rightEyeClosed = feature.rightEyeClosed;
            
            NSLog(@"%@", leftEyeClosed ? @"Left eye closed" : @"Left eye open");
            NSLog(@"%@", rightEyeClosed ? @"Right eye closed" : @"Right eye open");
            
            // Face bounds
            CGRect rect = feature.bounds;
            rect.origin.y = imageView.bounds.size.height - rect.size.height - rect.origin.y; // flip the Y axis
            faceRect = rect;
            NSLog(@"Face %@", NSStringFromCGRect(rect));
            
            // Left eye
            if (feature.hasLeftEyePosition)
            {
                CGPoint eye = feature.leftEyePosition;
                eye.y = imageView.bounds.size.height - eye.y; // flip the Y axis
                NSLog(@"Left eye %@", NSStringFromCGPoint(eye));
            }
            
            // Right eye
            if (feature.hasRightEyePosition)
            {
                CGPoint eye = feature.rightEyePosition;
                eye.y = imageView.bounds.size.height - eye.y; // flip the Y axis
                NSLog(@"Right eye %@", NSStringFromCGPoint(eye));
            }
            
            // Mouth
            if (feature.hasMouthPosition)
            {
                CGPoint mouth = feature.mouthPosition;
                mouth.y = imageView.bounds.size.height - mouth.y; // flip the Y axis
                NSLog(@"Mouth %@", NSStringFromCGPoint(mouth));
            }
        }
        
        dispatch_async(dispatch_get_main_queue(), ^{
            NSLog(@"检测完成");
            UIView *view = [[UIView alloc] initWithFrame:faceRect];
            view.backgroundColor = [UIColor blueColor];
            view.alpha = 0.3;
            [imageView addSubview:view];
        });
    });
}
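
Note that hasSmile, leftEyeClosed and rightEyeClosed are only filled in when smile and eye-blink analysis are requested at query time; the plain featuresInImage: call above leaves them at NO. A minimal sketch of the same call with those options enabled (same detector and cImage as in the code above):

```objc
// Pass CIDetectorSmile / CIDetectorEyeBlink so the per-face smile and
// eye-state flags are actually computed by the detector.
NSArray *features = [detector featuresInImage:cImage
                                      options:@{ CIDetectorSmile    : @YES,
                                                 CIDetectorEyeBlink : @YES }];
```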

About the image scaling: the image view's size and the image's size usually differ, but the detector reports coordinates in the image's own coordinate space. Redrawing the image at the image view's size keeps the two consistent, so the detected rects and points can be used directly as view coordinates.
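
If you would rather not do the aspect-fit math by hand, AVFoundation's AVMakeRectWithAspectRatioInsideRect computes the same frame. A minimal sketch of an equivalent setup, assuming AVFoundation is linked and keeping the scale-1.0 redraw so image coordinates line up with view coordinates:

```objc
#import <AVFoundation/AVFoundation.h>

// Aspect-fit frame of the image inside the view, equivalent to the manual x/y comparison above
CGRect fitFrame = AVMakeRectWithAspectRatioInsideRect(imageView.image.size, self.view.bounds);
imageView.frame = fitFrame;

// Redraw the image at the image view's size (scale 1.0, like UIGraphicsBeginImageContext)
// so the detector's image-space coordinates match the view's coordinates.
UIGraphicsBeginImageContextWithOptions(fitFrame.size, NO, 1.0);
[imageView.image drawInRect:(CGRect){CGPointZero, fitFrame.size}];
imageView.image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
```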

About flipping the Y axis: UIKit places the origin (0, 0) at the top-left corner of the screen, while Core Image reports feature coordinates with the origin at the bottom-left of the image, so every Y value has to be flipped before it is used in a view.
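
The manual `height - y` flips in the loop above can also be written as a single transform. A minimal sketch, assuming (as the redraw guarantees) that the image and the image view have the same size:

```objc
// Map Core Image's bottom-left-origin coordinates into UIKit's top-left-origin space.
CGAffineTransform flip = CGAffineTransformMakeScale(1, -1);
flip = CGAffineTransformTranslate(flip, 0, -imageView.bounds.size.height);

CGRect  uikitFaceRect = CGRectApplyAffineTransform(feature.bounds, flip);
CGPoint uikitMouth    = CGPointApplyAffineTransform(feature.mouthPosition, flip);
```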

Run result (screenshot)

Console output (screenshot)

Reposted from: https://www.cnblogs.com/EverNight/p/7067200.html

On iOS, detecting faces (and cutting them out of a photo) can also be done with Core Image's CIDetector class from Swift. The following code implements the detection step and marks each face with a red box:

```swift
func detectFace(withImage image: UIImage) {
    // Core Image works on CIImage, so convert the UIImage first
    guard let personCIImg = CIImage(image: image) else { return }
    // Detection accuracy
    let opts: [String: Any] = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    // Create the face detector
    let detector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: opts)
    let result = detector?.features(in: personCIImg, options: opts) as? [CIFaceFeature] ?? []
    for face in result {
        // Draw a red box at the face's position
        let faceBox = UIView(frame: face.bounds)
        faceBox.layer.borderWidth = 3
        faceBox.layer.borderColor = UIColor.red.cgColor
        faceBox.backgroundColor = UIColor.clear
        // Add the box on top of the image view
        imgView.addSubview(faceBox)
        print("Face bounds ------> \(faceBox.frame)")
    }
}
```

The code converts the UIImage into a CIImage, runs CIDetector on it, and marks each detected face by adding a red-bordered view on top of the image view (imgView). You can modify and extend it to fit your own needs.
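
The snippet above only highlights the face; the actual cropping step is not shown. A minimal Objective-C sketch of that step for the main example in this post, assuming faceRect is the flipped rect computed in viewDidLoad and scaledImage is the redrawn image (points match pixels because the context was created at scale 1.0):

```objc
// Cut the detected face region out of the image the detector saw.
CGImageRef faceCGImage = CGImageCreateWithImageInRect(scaledImage.CGImage, faceRect);
UIImage *faceImage = [UIImage imageWithCGImage:faceCGImage];
CGImageRelease(faceCGImage);
```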
