iOS 人脸识别

1.在控制器中添加如下属性

/** Capture device (camera) used as the input source */
@property (nonatomic,strong) AVCaptureDevice *device;
/** Camera input stream wrapping the capture device */
@property (nonatomic,strong) AVCaptureDeviceInput *deviceInput;
/** Camera output stream delivering raw video frames */
@property (nonatomic,strong) AVCaptureVideoDataOutput *videoDataOutput;
/** Capture session connecting the input and output */
@property (nonatomic,strong) AVCaptureSession *session;
/** Serial queue on which output sample buffers are delivered */
@property (nonatomic,strong) dispatch_queue_t videoDataOutputQueue;
/** Preview layer that renders the live camera feed */
@property (nonatomic,strong) AVCaptureVideoPreviewLayer *previewLayer;
/** Core Image face detector */
@property (nonatomic,strong) CIDetector *faceDetector;


2.懒加载

/**
 * Lazily creates the Core Image face detector.
 *
 * Low accuracy is chosen deliberately: the detector runs once per video
 * frame (four times, once per orientation), so speed matters more than
 * precision here.
 */
- (CIDetector *)faceDetector{
    if (_faceDetector == nil) {
        // Modern dictionary literal instead of legacy dictionaryWithObjectsAndKeys:.
        NSDictionary *faceDetectorOptions = @{CIDetectorAccuracy : CIDetectorAccuracyLow};
        _faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:faceDetectorOptions];
    }
    return _faceDetector;
}

3.初始化所有属性

/**
 * 初始化 人脸识别
 */
/**
 * Sets up the face-detection capture pipeline:
 * device -> input -> session -> output -> preview layer, then starts the session.
 *
 * Bails out early (with a log) when no camera is available or the input
 * stream cannot be created, instead of continuing with nil objects.
 */
- (void)setupDetector {
    // 1. Use the front-facing camera for face detection.
    self.devicePosition = AVCaptureDevicePositionFront;

    // 2. Find the capture device matching the desired position.
    for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
        if (device.position == self.devicePosition) {
            self.device = device;
            break;
        }
    }

    //   No matching camera (e.g. running on the Simulator) — nothing to set up.
    if (!self.device) {
        NSLog(@"设备不存在");
        return;
    }

    // 3. Input stream. AVCaptureDeviceInput returns nil on failure, so check
    //    the return value rather than the error pointer.
    NSError *error = nil;
    self.deviceInput = [[AVCaptureDeviceInput alloc] initWithDevice:self.device error:&error];
    if (!self.deviceInput) {
        NSLog(@"输入流初始化异常: %@", error);
        return;
    }

    // 4. Serial queue so sample buffers are delivered to the delegate in order.
    static const char *kVideoDataOutputQueueLabel = "VideoDataOutputQueue";
    self.videoDataOutputQueue = dispatch_queue_create(kVideoDataOutputQueueLabel, DISPATCH_QUEUE_SERIAL);

    // 5. Output stream: capture frames as 32BGRA pixel buffers (the format
    //    CIImage consumes directly).
    self.videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
    self.videoDataOutput.videoSettings = @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCMPixelFormat_32BGRA)};
    //   Drop frames that arrive while the delegate is still busy, so detection
    //   never falls behind the live feed.
    [self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
    [self.videoDataOutput setSampleBufferDelegate:self queue:self.videoDataOutputQueue];

    // 6. Session wiring.
    self.session = [[AVCaptureSession alloc] init];
    //   1> capture quality
    [self.session setSessionPreset:AVCaptureSessionPresetHigh];
    //   2> attach the input
    if ([self.session canAddInput:self.deviceInput]) {
        [self.session addInput:self.deviceInput];
    } else {
        NSLog(@"无法添加输入流");
    }
    //   3> attach the output
    if ([self.session canAddOutput:self.videoDataOutput]) {
        [self.session addOutput:self.videoDataOutput];
    } else {
        NSLog(@"无法添加输出流");
    }

    // 7. Preview layer showing the camera feed.
    self.previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.session];
    self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspect;
    [self.previewLayer.connection setVideoOrientation:AVCaptureVideoOrientationLandscapeRight];

    // 8. Insert the preview layer at the back of the view's layer hierarchy.
    CALayer *rootLayer = [self.view layer];
    [rootLayer setMasksToBounds:YES];
    self.previewLayer.frame = rootLayer.bounds;
    [rootLayer insertSublayer:self.previewLayer atIndex:0];

    // 9. Start capturing.
    [self.session startRunning];
}


4.实现代理,检测人脸

#pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate
/**
 * Called for every captured video frame; runs face detection on it.
 *
 * The frame's pixel buffer is wrapped in a CIImage (propagating the sample
 * buffer's attachments as options), then the detector is run once for each
 * of the four device orientations and all resulting features are pooled.
 */
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    // CVPixelBuffer: the in-memory image backing this frame.
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Copy the sample buffer's propagatable attachments (extra metadata any
    // CF object can carry) and hand them to CIImage as creation options.
    CFDictionaryRef attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate);
    CIImage *frameImage = [[CIImage alloc] initWithCVPixelBuffer:pixelBuffer options:(__bridge NSDictionary *)(attachments)];

    if (attachments) {
        CFRelease(attachments);
    }

    // Detect in every orientation, in the same order as before, and pool the hits.
    NSArray<NSNumber *> *candidateOrientations = @[@(UIDeviceOrientationPortrait),
                                                   @(UIDeviceOrientationPortraitUpsideDown),
                                                   @(UIDeviceOrientationLandscapeLeft),
                                                   @(UIDeviceOrientationLandscapeRight)];
    NSMutableArray *pooledFeatures = [NSMutableArray array];
    for (NSNumber *orientationValue in candidateOrientations) {
        UIDeviceOrientation deviceOrientation = (UIDeviceOrientation)orientationValue.integerValue;
        NSDictionary *detectorOptions = @{CIDetectorImageOrientation : [self getImageOrientationByDeviceOrientation:deviceOrientation]};
        [pooledFeatures addObjectsFromArray:[self.faceDetector featuresInImage:frameImage options:detectorOptions]];
    }

    if (pooledFeatures.count) {
        // Face(s) detected.

    } else {
        // No face detected.
    }
}

5.其他方法

/**
 *  根据设备方向获取图片的方向
 */
- (NSNumber *)getImageOrientationByDeviceOrientation:(UIDeviceOrientation)deviceOrientation {
    int imageOrientation;
    enum {
        PHOTOS_EXIF_0ROW_TOP_0COL_LEFT			= 1, //   1  =  0th row is at the top, and 0th column is on the left (THE DEFAULT).
        PHOTOS_EXIF_0ROW_TOP_0COL_RIGHT			= 2, //   2  =  0th row is at the top, and 0th column is on the right.
        PHOTOS_EXIF_0ROW_BOTTOM_0COL_RIGHT      = 3, //   3  =  0th row is at the bottom, and 0th column is on the right.
        PHOTOS_EXIF_0ROW_BOTTOM_0COL_LEFT       = 4, //   4  =  0th row is at the bottom, and 0th column is on the left.
        PHOTOS_EXIF_0ROW_LEFT_0COL_TOP          = 5, //   5  =  0th row is on the left, and 0th column is the top.
        PHOTOS_EXIF_0ROW_RIGHT_0COL_TOP         = 6, //   6  =  0th row is on the right, and 0th column is the top.
        PHOTOS_EXIF_0ROW_RIGHT_0COL_BOTTOM      = 7, //   7  =  0th row is on the right, and 0th column is the bottom.
        PHOTOS_EXIF_0ROW_LEFT_0COL_BOTTOM       = 8  //   8  =  0th row is on the left, and 0th column is the bottom.
    };
    
    switch (deviceOrientation) {
        case UIDeviceOrientationPortraitUpsideDown:  // Device oriented vertically, home button on the top
            imageOrientation = PHOTOS_EXIF_0ROW_LEFT_0COL_BOTTOM;
            break;
        case UIDeviceOrientationLandscapeLeft:       // Device oriented horizontally, home button on the right
            //			if (self.isUsingFrontFacingCamera)
            imageOrientation =  PHOTOS_EXIF_0ROW_BOTTOM_0COL_RIGHT;
            //			else
            //				exifOrientation = PHOTOS_EXIF_0ROW_TOP_0COL_LEFT;
            break;
        case UIDeviceOrientationLandscapeRight:      // Device oriented horizontally, home button on the left
            //			if (self.isUsingFrontFacingCamera)
            imageOrientation = PHOTOS_EXIF_0ROW_TOP_0COL_LEFT;
            //			else
            //				exifOrientation = PHOTOS_EXIF_0ROW_BOTTOM_0COL_RIGHT;
            break;
        case UIDeviceOrientationPortrait:            // Device oriented vertically, home button on the bottom
        default:
            imageOrientation = PHOTOS_EXIF_0ROW_RIGHT_0COL_TOP;
            break;
    }
    return [NSNumber numberWithInt:imageOrientation];
}

6.重置

/**
 * Tears down the capture pipeline: stops the session first, then detaches
 * its inputs/outputs, releases the capture objects, and removes the
 * preview layer from the layer hierarchy.
 */
- (void)teardownAVCapture {
    // Stop the session BEFORE dismantling it, so no sample buffers are
    // delivered while inputs/outputs are being removed.
    [self.session stopRunning];
    for (AVCaptureInput *input in self.session.inputs) {
        [self.session removeInput:input];
    }
    for (AVCaptureOutput *output in self.session.outputs) {
        [self.session removeOutput:output];
    }
    self.videoDataOutput = nil;
    self.videoDataOutputQueue = nil;
    self.device = nil;
    [self.previewLayer removeFromSuperlayer];
    self.previewLayer = nil;
}


  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
iOS上进行人脸识别并抠取人脸的方法可以使用Core Image框架中的CIDetector类。通过以下代码可以实现人脸识别和抠取人脸的功能: ```swift func detectFace(withImage image: UIImage) { // 将图像转为CIImage,使用Core Image需要使用CIImage guard let personCIImg = CIImage(image: image) else { return } // 设置识别精度 let opts: [String: Any] = [CIDetectorAccuracy: CIDetectorAccuracyHigh] // 初始化识别器 let detector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: opts) let result: [CIFaceFeature] = (detector?.features(in: personCIImg, options: opts) as? [CIFaceFeature])! if result.count > 0 { for face in result { let faceBox = UIView(frame: face.bounds) // 画一个红框画出面部位置 faceBox.layer.borderWidth = 3 faceBox.layer.borderColor = UIColor.red.cgColor faceBox.backgroundColor = UIColor.clear // 添加红框到图片上 imgView.addSubview(faceBox) print("面部坐标------> %d ", faceBox.frame) } } } ``` 这段代码会将传入的UIImage对象转换为CIImage对象,然后使用CIDetector进行人脸识别。识别到的人脸会通过在UIImageView上添加红色边框的方式进行标记。你可以根据需要对这段代码进行修改和扩展,以满足你的具体需求。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值