title: AVFoundation之高级功能
date: 2019-09-23 17:32:34
tags:
一、人脸识别
A、步骤
1.创建AVCaptureSession
// Capture session that coordinates the camera input and the outputs.
self.captureSession = AVCaptureSession()
// Use the high-quality preset when the device supports it.
if self.captureSession.canSetSessionPreset(.high) {
self.captureSession.sessionPreset = .high
}
2.设置AVCaptureDeviceInput
// Wrap the default video device in a capture input.
// NOTE(review): `videoInput` is declared elsewhere (presumably an implicitly
// unwrapped optional) — confirm it is non-nil before canAddInput runs.
if let videoDevice = AVCaptureDevice.default(for: .video) {
    do {
        videoInput = try AVCaptureDeviceInput(device: videoDevice)
    } catch {
        // Surface the failure instead of silently swallowing it.
        print("AVCaptureDeviceInput error: \(error)")
    }
}
if self.captureSession.canAddInput(videoInput) {
    self.captureSession.addInput(videoInput)
}
3.创建AVCapturePhotoOutput和AVCaptureMetadataOutput
// Still-photo output.
self.photoOutput = AVCapturePhotoOutput()
if self.captureSession.canAddOutput(self.photoOutput) {
self.captureSession.addOutput(self.photoOutput)
}
// Metadata output that performs the hardware-accelerated face detection.
self.metadataOutput = AVCaptureMetadataOutput()
if self.captureSession.canAddOutput(self.metadataOutput) {
self.captureSession.addOutput(self.metadataOutput)
// Restrict the emitted metadata to face objects.
self.metadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.face]
// Newly detected metadata is delivered through the
// AVCaptureMetadataOutputObjectsDelegate callback on the queue given here.
// Face detection uses hardware acceleration and most of the follow-up work
// (layer updates) must run on the main thread, so the main queue is used.
self.metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
}
4.设置AVCaptureVideoPreviewLayer
/// Installs the camera preview layer plus the overlay layer that hosts
/// the per-face marker sublayers.
private func setupPreviewLayer() {
    // Live camera preview filling the whole view.
    let preview = AVCaptureVideoPreviewLayer(session: self.captureSession)
    preview.videoGravity = .resizeAspectFill
    preview.frame = self.view.bounds
    self.previewLayer = preview
    self.view.layer.addSublayer(preview)
    self.view.layer.masksToBounds = true
    // Layer on which face-marking rectangles are drawn; its sublayers get a
    // perspective transform so the markers can be tilted in 3D.
    let overlay = CALayer()
    overlay.frame = self.view.bounds
    overlay.sublayerTransform = self.THMakePerspectiveTransform(eyePosition: 10000)
    preview.addSublayer(overlay)
    self.overlayLayer = overlay
}
/// Builds a perspective transform; `eyePosition` is the distance of the
/// virtual eye from the z = 0 plane (larger values flatten the effect).
private func THMakePerspectiveTransform(eyePosition: CGFloat) -> CATransform3D {
    var perspective = CATransform3DIdentity
    // m34 supplies the perspective-projection term of the matrix.
    perspective.m34 = -1.0 / eyePosition
    return perspective
}
5.开始识别
// startRunning() blocks until the session is up, so it is kicked off on a
// background queue to keep the main thread responsive.
if !self.captureSession.isRunning {
DispatchQueue.global().async {
self.captureSession.startRunning()
}
}
6.后置摄像头转前置摄像头
// Switch from the back camera to the front camera.
if self.videoInput.device.position == AVCaptureDevice.Position.back {
// Discover front-facing built-in wide-angle cameras.
let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .front)
let devices = discoverySession.devices
for device in devices {
if device.position == .front {
var input: AVCaptureDeviceInput! = nil
do {
let deviceInput = try AVCaptureDeviceInput(device: device)
input = deviceInput
} catch {
// NOTE(review): the error is silently dropped here — consider logging it.
}
if input != nil {
// Swap inputs inside a begin/commitConfiguration pair so the session
// applies the whole change atomically.
self.captureSession.beginConfiguration()
self.captureSession.removeInput(self.videoInput)
self.captureSession.sessionPreset = .high
if self.captureSession.canAddInput(input) {
self.captureSession.addInput(input)
self.videoInput = input
} else {
// Could not add the new input; restore the previous one.
self.captureSession.addInput(self.videoInput)
}
self.captureSession.commitConfiguration()
}
}
}
}
7.AVCaptureMetadataOutputObjectsDelegate
AVMetadataFaceObject
// private var faceLayers = Int: CALayer
// Delegate callback (main queue): fires whenever the set of detected faces
// changes. Creates a marker layer for each new face, repositions markers of
// faces still visible, and removes markers of faces that disappeared.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
// Map the faces into the preview layer's coordinate space.
let transformFaces = self.transformFacesFromFaces(faces: metadataObjects)
// Start with every currently tracked face ID; IDs still present are removed
// below, leaving only the faces that left the frame.
var lastFaces = [Int]()
for faceId in faceLayers.keys {
lastFaces.append(faceId)
}
for face in transformFaces {
if let tFace = face as? AVMetadataFaceObject {
let faceId = tFace.faceID
// This face is still visible — keep its layer.
if let index = lastFaces.lastIndex(of: faceId) {
lastFaces.remove(at: index)
}
var layer = self.faceLayers[faceId]
if layer == nil {
// First sighting of this face: create and register a marker layer.
layer = self.makerLayer()
// Attach the marker to the overlay above the preview.
self.overlayLayer.addSublayer(layer!)
self.faceLayers[faceId] = layer
}
// Reset any previous 3D transform before repositioning the marker.
layer?.transform = CATransform3DIdentity
layer?.frame = face.bounds
}
}
// Tear down marker layers of faces that are no longer detected.
for id in lastFaces {
let layer = self.faceLayers[id]
layer?.removeFromSuperlayer()
self.faceLayers.removeValue(forKey: id)
}
}
//将取得的人脸元数据的坐标做相应转换
// Converts face metadata from capture-device coordinates into the preview
// layer's coordinate space so their bounds can be used for on-screen layers.
func transformFacesFromFaces(faces: [AVMetadataObject]) -> [AVMetadataObject] {
    // transformedMetadataObject(for:) returns nil for objects it cannot map,
    // so compactMap drops those entries.
    return faces.compactMap { self.previewLayer.transformedMetadataObject(for: $0) }
}
//创建框着人脸的图层
/// Builds the bordered layer used to frame a detected face.
func makerLayer() -> CALayer {
    let faceBox = CALayer()
    faceBox.borderColor = UIColor(displayP3Red: 0.188, green: 0.517, blue: 0.877, alpha: 1.0).cgColor
    faceBox.borderWidth = 5
    return faceBox
}
B、参考链接
https://www.jianshu.com/p/0a9dfd0cb30c
https://blog.csdn.net/s12117719679/article/details/100853140
二、二维码识别
A、步骤
1.创建AVCaptureSession
*设置捕捉会话预设类型,可以使用任意类型
*苹果建议使用满足需求的最低分辨率预设以提高性能
// Barcode scanning does not need full resolution; a low preset keeps
// detection fast (Apple recommends the lowest reasonable preset).
self.captureSession = AVCaptureSession()
if self.captureSession.canSetSessionPreset(.vga640x480) {
self.captureSession.sessionPreset = .vga640x480
}
2.设置AVCaptureDeviceInput
// Wrap the default video device in a capture input.
// NOTE(review): `videoInput` is declared elsewhere (presumably an implicitly
// unwrapped optional) — confirm it is non-nil before canAddInput runs.
if let videoDevice = AVCaptureDevice.default(for: .video) {
    do {
        videoInput = try AVCaptureDeviceInput(device: videoDevice)
    } catch {
        // Surface the failure instead of silently swallowing it.
        print("AVCaptureDeviceInput error: \(error)")
    }
}
if self.captureSession.canAddInput(videoInput) {
    self.captureSession.addInput(videoInput)
}
3.对焦
// Narrow the autofocus range: barcodes are normally scanned up close, so
// restricting focus to .near improves recognition speed and success rate.
// NOTE: isAutoFocusRangeRestrictionSupported reports support for the range
// restriction, not for autofocus itself.
if self.activeCamera.isAutoFocusRangeRestrictionSupported {
    do {
        try self.activeCamera.lockForConfiguration()
        // Mutate the property only while the configuration lock is held;
        // setting it after a failed lock raises a runtime exception.
        self.activeCamera.autoFocusRangeRestriction = .near
        self.activeCamera.unlockForConfiguration()
    } catch {
        print("activeCamera.lockForConfiguration error")
    }
}
4.创建AVCapturePhotoOutput和AVCaptureMetadataOutput
// Still-photo output.
self.photoOutput = AVCapturePhotoOutput()
if self.captureSession.canAddOutput(self.photoOutput) {
self.captureSession.addOutput(self.photoOutput)
}
// Metadata output configured to report QR and Aztec codes; the delegate
// callback is delivered on the main queue.
self.metadataOutput = AVCaptureMetadataOutput()
if self.captureSession.canAddOutput(self.metadataOutput) {
self.captureSession.addOutput(self.metadataOutput)
self.metadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr, AVMetadataObject.ObjectType.aztec]
self.metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
}
5.设置AVCaptureVideoPreviewLayer
/// Installs the full-screen camera preview layer on the view.
private func setupPreviewLayer() {
    let preview = AVCaptureVideoPreviewLayer(session: self.captureSession)
    preview.videoGravity = .resizeAspectFill
    preview.frame = self.view.bounds
    self.previewLayer = preview
    self.view.layer.addSublayer(preview)
    self.view.layer.masksToBounds = true
}
6.AVCaptureMetadataOutputObjectsDelegate
AVMetadataMachineReadableCodeObject
// AVCaptureMetadataOutputObjectsDelegate callback: forwards any detected
// machine-readable codes to didDetectCode(codes:).
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
self.didDetectCode(codes: metadataObjects)
}
/// Handles machine-readable code metadata emitted by the session.
/// - Parameter codes: Metadata objects from the metadata output; only
///   machine-readable code objects (QR, Aztec, …) are processed.
func didDetectCode(codes: [AVMetadataObject]) {
    for code in codes {
        // stringValue is String? — bind it so we never print Optional("…").
        if let readableCode = code as? AVMetadataMachineReadableCodeObject,
           let stringValue = readableCode.stringValue {
            // The decoded payload. Usually only one code is delivered per
            // callback, so the first element is typically the value wanted.
            print(stringValue)
        }
    }
}
三、音视频合成
A、AVComposition和AVCompositionTrack
*AVFoundation有关资源的组合功能源于AVAsset的子类AVComposition
*AVComposition中的轨道都是AVAssetTrack的子类AVCompositionTrack
*AVComposition和AVCompositionTrack都是不可变对象,提供对资源的只读操作。
创建自己的组合,就需要使用AVMutableComposition和AVMutableCompositionTrack所提供的可变子类。
B、步骤
1.获取资源
// Source media bundled with the app: two video clips and one audio track.
// NOTE(review): the force-unwrapped Bundle paths crash if a resource is
// missing — acceptable for a demo, not for production code.
let videoUrl : NSURL = NSURL(fileURLWithPath: Bundle.main.path(forResource: "haha", ofType: "mov")!)
let videoUrl1: NSURL = NSURL(fileURLWithPath: Bundle.main.path(forResource: "hubblecast", ofType: "m4v")!)
let audioUrl : NSURL = NSURL(fileURLWithPath: Bundle.main.path(forResource: "薛之谦-像风一样", ofType: "mp3")!)
let aVideoAsset : AVAsset = AVAsset(url: videoUrl as URL)
let aVideoAsset1: AVAsset = AVAsset(url: videoUrl1 as URL)
let aAudioAsset : AVAsset = AVAsset(url: audioUrl as URL)
2、创建AVMutableComposition和添加音视频轨道
// Mutable composition plus one video and one audio track to assemble into.
// NOTE(review): addMutableTrack(withMediaType:preferredTrackID:) can return
// nil; the force unwraps crash in that case — consider guard-let.
let mixComposition : AVMutableComposition = AVMutableComposition()
let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)!
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)!
3、插入音视频片段
// First video/audio track of each source asset.
let aVideoAssetTrack : AVAssetTrack = aVideoAsset.tracks(withMediaType: AVMediaType.video)[0]
let aVideoAssetTrack1 : AVAssetTrack = aVideoAsset1.tracks(withMediaType: AVMediaType.video)[0]
let aAudioAssetTrack : AVAssetTrack = aAudioAsset.tracks(withMediaType: AVMediaType.audio)[0]
do {
    // Insert the first clip at time zero.
    try videoTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration), of: aVideoAssetTrack, at: CMTime.zero)
    // Append the second clip immediately after the first one.
    try videoTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack1.timeRange.duration), of: aVideoAssetTrack1, at: aVideoAssetTrack.timeRange.duration)
    // Lay the audio under the combined duration of both clips.
    try audioTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration + aVideoAssetTrack1.timeRange.duration), of: aAudioAssetTrack, at: CMTime.zero)
} catch {
    // Surface insertion failures instead of swallowing them silently.
    print("insertTimeRange error: \(error)")
}
4、导出(AVAssetExportSession)
// Export the composition to Documents/newVideo.mp4.
print(NSHomeDirectory() + "/Documents/newVideo.mp4")
let savePathUrl : NSURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/newVideo.mp4")
// NOTE(review): the export fails if a file already exists at this path —
// consider removing any previous output first.
let assetExport: AVAssetExportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)!
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = savePathUrl as URL
assetExport.shouldOptimizeForNetworkUse = true
assetExport.exportAsynchronously { () -> Void in
    // Use modern dot-shorthand cases (AVAssetExportSessionStatus is the
    // deprecated spelling of AVAssetExportSession.Status).
    switch assetExport.status {
    case .completed:
        // Uncomment this if you want to store the video in the photo library.
        //let assetsLib = ALAssetsLibrary()
        //assetsLib.writeVideoAtPathToSavedPhotosAlbum(savePathUrl, completionBlock: nil)
        print("success")
    case .failed:
        // Describe the optional error instead of printing Optional(...).
        print("failed \(assetExport.error?.localizedDescription ?? "unknown error")")
    case .cancelled:
        print("cancelled \(assetExport.error?.localizedDescription ?? "unknown error")")
    default:
        // Covers .unknown / .waiting / .exporting.
        print("complete")
    }
}
C、参考链接
https://stackoverflow.com/questions/31984474/swift-merge-audio-and-video-files-into-one-video?r=SearchResults
https://www.jianshu.com/p/f0d294395d6a
四、视频过渡效果
参考链接
https://www.jianshu.com/p/180bbd05006e
https://juejin.im/post/5bee688ae51d45313b1ac683