AVFoundation之高级功能


title: AVFoundation之高级功能
date: 2019-09-23 17:32:34
tags:

一、人脸识别

A、步骤

1.创建AVCaptureSession

// Create the capture session and opt into the high-quality preset when the
// current device supports it.
self.captureSession = AVCaptureSession()
if self.captureSession.canSetSessionPreset(.high) {
    self.captureSession.sessionPreset = .high
}


2.设置AVCaptureDeviceInput

// Wrap the default video camera in a device input and attach it to the session.
if let videoDevice = AVCaptureDevice.default(for: .video) {
    do {
        videoInput = try AVCaptureDeviceInput(device: videoDevice)
    } catch {
        // Don't swallow the failure silently — it leaves videoInput nil.
        print("Failed to create AVCaptureDeviceInput: \(error)")
    }
}
// Guard against a nil input (no camera, or input creation failed) before
// asking the session about it; calling canAddInput on a nil implicitly
// unwrapped optional would crash.
if videoInput != nil && self.captureSession.canAddInput(videoInput) {
    self.captureSession.addInput(videoInput)
}


3.创建AVCapturePhotoOutput和AVCaptureMetadataOutput

// Still-photo output for capturing frames.
self.photoOutput = AVCapturePhotoOutput()
if self.captureSession.canAddOutput(self.photoOutput) {
    self.captureSession.addOutput(self.photoOutput)
}

// Metadata output that performs the actual face detection.
self.metadataOutput = AVCaptureMetadataOutput()
if self.captureSession.canAddOutput(self.metadataOutput) {
    self.captureSession.addOutput(self.metadataOutput)

    // Restrict the emitted metadata to face objects only.
    self.metadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.face]

    // Newly detected metadata triggers the AVCaptureMetadataOutputObjectsDelegate
    // callback. A custom serial queue is allowed, but face detection is hardware
    // accelerated and the results drive UI work, so the main queue is used here.
    self.metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
}


4.设置AVCaptureVideoPreviewLayer

// Installs the camera preview layer plus the overlay layer used to mark faces.
private func setupPreviewLayer() {
    self.previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
    self.previewLayer.frame = self.view.bounds
    self.previewLayer.videoGravity = .resizeAspectFill
    self.view.layer.addSublayer(self.previewLayer)
    self.view.layer.masksToBounds = true


    // Layer that hosts the per-face marker sublayers.
    self.overlayLayer = CALayer()
    self.overlayLayer.frame = self.view.bounds
    // Perspective transform so rotated faces render with 3D depth.
    self.overlayLayer.sublayerTransform = self.THMakePerspectiveTransform(eyePosition: 10000)
    self.previewLayer.addSublayer(self.overlayLayer)
}

/// Builds a perspective transform whose vanishing point sits `eyePosition`
/// points away from the viewer (encoded in the transform's m34 entry).
private func THMakePerspectiveTransform(eyePosition: CGFloat) -> CATransform3D {
    var perspective = CATransform3DIdentity
    perspective.m34 = -1.0 / eyePosition
    return perspective
}


5.开始识别

// startRunning() blocks until capture actually begins, so dispatch it to a
// background queue to keep the main thread responsive.
if !self.captureSession.isRunning {
    DispatchQueue.global().async {
        self.captureSession.startRunning()
    }
}


6.后置摄像头转前置摄像头

// Switch from the back camera to the front camera, if one is available.
if self.videoInput.device.position == AVCaptureDevice.Position.back {
    // The discovery session is already restricted to front-facing wide-angle
    // cameras, so re-checking each device's position (as the original loop
    // did) is redundant — the first result, if any, is the one we want.
    let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .front)
    if let frontDevice = discoverySession.devices.first {
        var input: AVCaptureDeviceInput! = nil
        do {
            input = try AVCaptureDeviceInput(device: frontDevice)
        } catch {
            // Report the failure instead of silently ignoring it.
            print("Failed to create front-camera input: \(error)")
        }

        if input != nil {
            // Batch the swap so the session reconfigures atomically.
            self.captureSession.beginConfiguration()
            self.captureSession.removeInput(self.videoInput)
            self.captureSession.sessionPreset = .high
            if self.captureSession.canAddInput(input) {
                self.captureSession.addInput(input)
                self.videoInput = input
            } else {
                // Roll back to the previous input if the new one is rejected.
                self.captureSession.addInput(self.videoInput)
            }
            self.captureSession.commitConfiguration()
        }
    }
}


7.AVCaptureMetadataOutputObjectsDelegate

AVMetadataFaceObject

// private var faceLayers = [Int: CALayer]()

// Delegate callback for face metadata. Maintains one marker CALayer per
// detected face (keyed by faceID in self.faceLayers): adds layers for new
// faces, repositions layers for faces still visible, and removes layers for
// faces that disappeared.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Map the raw metadata into the preview layer's coordinate space.
    let transformFaces = self.transformFacesFromFaces(faces: metadataObjects)

    // Start with every currently tracked face ID; IDs still detected below
    // are removed from this list, leaving only the faces that vanished.
    var lastFaces = [Int]()
    for faceId in faceLayers.keys {
        lastFaces.append(faceId)
    }
    for face in transformFaces {
        if let tFace = face as? AVMetadataFaceObject {
            let faceId = tFace.faceID
            if let index = lastFaces.lastIndex(of: faceId) {
                lastFaces.remove(at: index)
            }

            var layer = self.faceLayers[faceId]
            if layer == nil {
                layer = self.makerLayer()
                // First sighting of this face: add a marker layer for it.
                self.overlayLayer.addSublayer(layer!)
                self.faceLayers[faceId] = layer
            }
            // Reset any previous transform before updating the frame.
            layer?.transform = CATransform3DIdentity
            layer?.frame = face.bounds
        }
    }

    // Remove marker layers for faces that are no longer detected.
    for id in lastFaces {
        let layer = self.faceLayers[id]
        layer?.removeFromSuperlayer()
        self.faceLayers.removeValue(forKey: id)
    }
}

//将取得的人脸元数据的坐标做相应转换
/// Converts face metadata objects from capture-device coordinates into the
/// preview layer's coordinate space so they can be drawn on screen.
func transformFacesFromFaces(faces: [AVMetadataObject]) -> [AVMetadataObject] {
    // transformedMetadataObject(for:) returns nil for objects the preview
    // layer cannot map, so compactMap drops those entries.
    return faces.compactMap { self.previewLayer.transformedMetadataObject(for: $0) }
}

//创建框着人脸的图层
/// Creates the bordered layer used to outline a detected face.
func makerLayer() -> CALayer {
    let faceLayer = CALayer()
    faceLayer.borderColor = UIColor(displayP3Red: 0.188, green: 0.517, blue: 0.877, alpha: 1.0).cgColor
    faceLayer.borderWidth = 5
    return faceLayer
}


B、参考链接

https://www.jianshu.com/p/0a9dfd0cb30c

https://blog.csdn.net/s12117719679/article/details/100853140

二、二维码识别

A、步骤

1.创建AVCaptureSession

*设置捕捉会话预设类型,可以使用任意类型

*苹果建议使用最低合理解决方案以提高性能

// Use a low-resolution preset for barcode scanning — Apple recommends the
// lowest reasonable resolution to improve detection performance.
self.captureSession = AVCaptureSession()
if self.captureSession.canSetSessionPreset(.vga640x480) {
     self.captureSession.sessionPreset = .vga640x480
}

2.设置AVCaptureDeviceInput

// Wrap the default video camera in a device input and attach it to the session.
if let videoDevice = AVCaptureDevice.default(for: .video) {
    do {
        videoInput = try AVCaptureDeviceInput(device: videoDevice)
    } catch {
        // Don't swallow the failure silently — it leaves videoInput nil.
        print("Failed to create AVCaptureDeviceInput: \(error)")
    }
}
// Guard against a nil input (no camera, or input creation failed) before
// asking the session about it; calling canAddInput on a nil implicitly
// unwrapped optional would crash.
if videoInput != nil && self.captureSession.canAddInput(videoInput) {
    self.captureSession.addInput(videoInput)
}


3.对焦

// Autofocus normally scans at any distance, but barcodes are usually held
// close to the camera, so restricting the focus range to .near speeds up and
// stabilizes scanning. Note: this property checks support for focus-range
// restriction, not autofocus itself.
if self.activeCamera.isAutoFocusRangeRestrictionSupported {
    do {
        try self.activeCamera.lockForConfiguration()
        // Only mutate the configuration while the lock is held; the original
        // code set this even when lockForConfiguration() threw.
        self.activeCamera.autoFocusRangeRestriction = .near
        self.activeCamera.unlockForConfiguration()
    } catch {
        print("activeCamera.lockForConfiguration error")
    }
}


4.创建AVCapturePhotoOutput和AVCaptureMetadataOutput

// Still-photo output for capturing frames.
self.photoOutput = AVCapturePhotoOutput()
if self.captureSession.canAddOutput(self.photoOutput) {
    self.captureSession.addOutput(self.photoOutput)
}

// Metadata output that performs the code recognition.
self.metadataOutput = AVCaptureMetadataOutput()
if self.captureSession.canAddOutput(self.metadataOutput) {
    self.captureSession.addOutput(self.metadataOutput)

    // Only report QR and Aztec codes.
    self.metadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr, AVMetadataObject.ObjectType.aztec]
    // Deliver results on the main queue so they can drive UI directly.
    self.metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
}


5.设置AVCaptureVideoPreviewLayer

/// Installs the capture preview layer as a full-screen sublayer of the
/// view controller's root view.
private func setupPreviewLayer() {
    let preview = AVCaptureVideoPreviewLayer(session: self.captureSession)
    preview.videoGravity = .resizeAspectFill
    preview.frame = self.view.bounds
    self.previewLayer = preview
    self.view.layer.addSublayer(preview)
    self.view.layer.masksToBounds = true
}


6.AVCaptureMetadataOutputObjectsDelegate

AVMetadataMachineReadableCodeObject
// Delegate callback: forwards every batch of detected machine-readable
// codes to didDetectCode(codes:) for handling.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    self.didDetectCode(codes: metadataObjects)
}

/// Handles detected machine-readable codes by logging each payload.
/// Typically a single code is delivered per batch, so callers often just
/// use the first element.
func didDetectCode(codes: [AVMetadataObject]) {
    for code in codes {
        if let readableCode = code as? AVMetadataMachineReadableCodeObject {
            // stringValue is optional; unwrap it so we log the raw payload
            // instead of an "Optional(...)" wrapper.
            if let stringValue = readableCode.stringValue {
                print(stringValue)
            }
        }
    }
}


三、音视频合成

A、AVComposition和AVCompositionTrack

*AVFoundation有关资源的组合功能源于AVAsset的子类AVComposition

*AVComposition中的轨道都是AVAssetTrack的子类AVCompositionTrack

*AVComposition和AVCompositionTrack都是不可变对象,提供对资源的只读操作。

创建自己的组合,就需要使用AVMutableComposition和AVMutableCompositionTrack所提供的可变子类。

B、步骤

1.获取资源

// Load the bundled source media.
// NOTE(review): the force-unwraps assume all three resources ship in the
// main bundle — a missing file crashes here. Acceptable for a demo, but
// verify the resources exist before shipping.
let videoUrl : NSURL =  NSURL(fileURLWithPath: Bundle.main.path(forResource: "haha", ofType: "mov")!)
let videoUrl1: NSURL = NSURL(fileURLWithPath: Bundle.main.path(forResource: "hubblecast", ofType: "m4v")!)
let audioUrl : NSURL = NSURL(fileURLWithPath: Bundle.main.path(forResource: "薛之谦-像风一样", ofType: "mp3")!)

let aVideoAsset : AVAsset = AVAsset(url: videoUrl as URL)
let aVideoAsset1: AVAsset = AVAsset(url: videoUrl1 as URL)
let aAudioAsset : AVAsset = AVAsset(url: audioUrl as URL)

2、创建AVMutableComposition和添加音视频轨道

// Mutable composition with one video track and one audio track.
// NOTE(review): addMutableTrack(withMediaType:preferredTrackID:) returns an
// optional; the force-unwraps assume track creation never fails — TODO confirm.
let mixComposition : AVMutableComposition = AVMutableComposition()
let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)!
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)!


3、插入音视频片段

// Source tracks: the first video track of each asset plus the audio track.
let aVideoAssetTrack : AVAssetTrack = aVideoAsset.tracks(withMediaType: AVMediaType.video)[0]
let aVideoAssetTrack1 : AVAssetTrack = aVideoAsset1.tracks(withMediaType: AVMediaType.video)[0]
let aAudioAssetTrack : AVAssetTrack = aAudioAsset.tracks(withMediaType: AVMediaType.audio)[0]

do{
    // First clip starts at time zero.
    try videoTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration), of: aVideoAssetTrack, at: CMTime.zero)
    // Second clip is appended immediately after the first one.
    try videoTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack1.timeRange.duration), of: aVideoAssetTrack1, at: aVideoAssetTrack.timeRange.duration)
    // Audio spans the combined duration of both video clips.
    try audioTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration + aVideoAssetTrack1.timeRange.duration), of: aAudioAssetTrack, at: CMTime.zero)

}catch{
    // Report insertion failures instead of silently leaving the
    // composition empty (the original catch block swallowed the error).
    print("Failed to build composition: \(error)")
}


4、导出(AVAssetExportSession)

// Destination: Documents/newVideo.mp4 inside the app sandbox.
print(NSHomeDirectory() + "/Documents/newVideo.mp4")
let savePathUrl : NSURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/newVideo.mp4")

// Configure the export session for an MP4 optimized for network streaming.
let assetExport: AVAssetExportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)!
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = savePathUrl as URL
assetExport.shouldOptimizeForNetworkUse = true

assetExport.exportAsynchronously { () -> Void in
    // Use modern dot-shorthand case names (AVAssetExportSession.Status),
    // consistent with the Swift 4+ API style used elsewhere in this code.
    switch assetExport.status {

    case .completed:

        // Uncomment this if you want to store the video in the photo library.

        //let assetsLib = ALAssetsLibrary()
        //assetsLib.writeVideoAtPathToSavedPhotosAlbum(savePathUrl, completionBlock: nil)

        print("success")
    case .failed:
        // error is optional; String(describing:) avoids the implicit
        // Optional-interpolation compiler warning.
        print("failed \(String(describing: assetExport.error))")
    case .cancelled:
        print("cancelled \(String(describing: assetExport.error))")
    default:
        print("complete")
    }
}


C、参考链接

https://stackoverflow.com/questions/31984474/swift-merge-audio-and-video-files-into-one-video?r=SearchResults

https://www.jianshu.com/p/f0d294395d6a

四、视频过渡效果

参考链接

https://www.jianshu.com/p/180bbd05006e

https://juejin.im/post/5bee688ae51d45313b1ac683

  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
提供的源码资源涵盖了安卓应用、小程序、Python应用和Java应用等多个领域,每个领域都包含了丰富的实例和项目。这些源码都是基于各自平台的最新技术和标准编写,确保了在对应环境下能够无缝运行。同时,源码中配备了详细的注释和文档,帮助用户快速理解代码结构和实现逻辑。 适用人群: 这些源码资源特别适合大学生群体。无论你是计算机相关专业的学生,还是对其他领域编程感兴趣的学生,这些资源都能为你提供宝贵的学习和实践机会。通过学习和运行这些源码,你可以掌握各平台开发的基础知识,提升编程能力和项目实战经验。 使用场景及目标: 在学习阶段,你可以利用这些源码资源进行课程实践、课外项目或毕业设计。通过分析和运行源码,你将深入了解各平台开发的技术细节和最佳实践,逐步培养起自己的项目开发和问题解决能力。此外,在求职或创业过程中,具备跨平台开发能力的大学生将更具竞争力。 其他说明: 为了确保源码资源的可运行性和易用性,特别注意了以下几点:首先,每份源码都提供了详细的运行环境和依赖说明,确保用户能够轻松搭建起开发环境;其次,源码中的注释和文档都非常完善,方便用户快速上手和理解代码;最后,我会定期更新这些源码资源,以适应各平台技术的最新发展和市场需求。
提供的源码资源涵盖了安卓应用、小程序、Python应用和Java应用等多个领域,每个领域都包含了丰富的实例和项目。这些源码都是基于各自平台的最新技术和标准编写,确保了在对应环境下能够无缝运行。同时,源码中配备了详细的注释和文档,帮助用户快速理解代码结构和实现逻辑。 适用人群: 这些源码资源特别适合大学生群体。无论你是计算机相关专业的学生,还是对其他领域编程感兴趣的学生,这些资源都能为你提供宝贵的学习和实践机会。通过学习和运行这些源码,你可以掌握各平台开发的基础知识,提升编程能力和项目实战经验。 使用场景及目标: 在学习阶段,你可以利用这些源码资源进行课程实践、课外项目或毕业设计。通过分析和运行源码,你将深入了解各平台开发的技术细节和最佳实践,逐步培养起自己的项目开发和问题解决能力。此外,在求职或创业过程中,具备跨平台开发能力的大学生将更具竞争力。 其他说明: 为了确保源码资源的可运行性和易用性,特别注意了以下几点:首先,每份源码都提供了详细的运行环境和依赖说明,确保用户能够轻松搭建起开发环境;其次,源码中的注释和文档都非常完善,方便用户快速上手和理解代码;最后,我会定期更新这些源码资源,以适应各平台技术的最新发展和市场需求。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值