Video capture and encoding reuse the code from the previous posts, so they won't be repeated here.
//
// KFMP4Muxer.swift
// VideoDemo
//
// Created by ricard.li on 2025/5/15.
//
import AVFoundation
/// Muxer error codes
enum KFMP4MuxerError: Int {
case addOutputError = 1000
}
/// Maximum queue capacity
let KFMP4MuxerMaxQueueCount: Int32 = 10000
/// Muxer status
enum KFMP4MuxerStatus: Int {
case unknown = 0 // Unknown
case running = 1 // Running
case failed = 2 // Failed
case completed = 3 // Completed
case cancelled = 4 // Cancelled
}
/// MP4 muxer
class KFMP4Muxer {
// MARK: - Properties
/// Configuration
private(set) var config: KFMuxerConfig
/// Writer instance
private var muxWriter: AVAssetWriter?
/// Video input
private var writerVideoInput: AVAssetWriterInput?
/// Audio input
private var writerAudioInput: AVAssetWriterInput?
/// Audio queue
private var audioQueue: CMSimpleQueue?
/// Video queue
private var videoQueue: CMSimpleQueue?
/// Muxing queue
private let muxerQueue: DispatchQueue
/// Semaphore
private let semaphore: DispatchSemaphore
/// Muxer status
private var muxerStatus: KFMP4MuxerStatus = .unknown
/// Error callback
var errorCallBack: ((Error) -> Void)?
// MARK: - Lifecycle
/// Initializes the muxer
/// - Parameter config: muxer configuration
init(config: KFMuxerConfig) {
self.config = config
self.muxerQueue = DispatchQueue(label: "com.KeyFrameKit.muxerQueue", qos: .default)
self.semaphore = DispatchSemaphore(value: 1)
// Create the audio queue
var audioQueueRef: CMSimpleQueue?
CMSimpleQueueCreate(allocator: kCFAllocatorDefault, capacity: KFMP4MuxerMaxQueueCount, queueOut: &audioQueueRef)
self.audioQueue = audioQueueRef
// Create the video queue
var videoQueueRef: CMSimpleQueue?
CMSimpleQueueCreate(allocator: kCFAllocatorDefault, capacity: KFMP4MuxerMaxQueueCount, queueOut: &videoQueueRef)
self.videoQueue = videoQueueRef
}
deinit {
semaphore.wait()
reset() // Clean up
semaphore.signal()
}
// MARK: - Public methods
/// Starts writing
func startWriting() {
muxerQueue.async { [weak self] in
guard let self = self else { return }
self.semaphore.wait()
self.reset() // Clean up any previous state
self.muxerStatus = .running // Update status
self.semaphore.signal()
}
}
/// Cancels writing
func cancelWriting() {
muxerQueue.async { [weak self] in
guard let self = self else { return }
self.semaphore.wait()
if let muxWriter = self.muxWriter, muxWriter.status == .writing {
muxWriter.cancelWriting()
}
self.muxerStatus = .cancelled // Update status
self.semaphore.signal()
}
}
/// Appends a sample buffer
/// - Parameter sampleBuffer: the sample buffer to mux
func appendSampleBuffer(_ sampleBuffer: CMSampleBuffer) {
// Validation: ignore the sample if its data isn't ready or the muxer isn't running.
if !CMSampleBufferDataIsReady(sampleBuffer) || muxerStatus != .running {
return
}
// Append the data asynchronously.
muxerQueue.async { [weak self] in
guard let self = self else { return }
self.semaphore.wait()
// 1. Put the sample into the matching queue.
self.enqueueSampleBuffer(sampleBuffer)
// 2. On the first sample, create the writer instance and start writing.
if self.muxWriter == nil {
// Check that the format descriptions are available.
if !self.checkFormatDescriptionLoadSuccess() {
self.semaphore.signal()
return
}
// Create the writer instance.
do {
try self.setupMuxWriter()
} catch {
self.muxerStatus = .failed
self.semaphore.signal()
self.callBackError(error)
return
}
// Start writing.
guard let muxWriter = self.muxWriter, muxWriter.startWriting() else {
self.muxerStatus = .failed
self.semaphore.signal()
if let error = self.muxWriter?.error {
self.callBackError(error)
}
return
}
// Start the session at the source time.
muxWriter.startSession(atSourceTime: self.sessionSourceTime())
}
// 3. Check the writer's status.
if self.muxWriter == nil || self.muxWriter?.status != .writing {
self.muxerStatus = .failed
self.semaphore.signal()
if let error = self.muxWriter?.error {
self.callBackError(error)
}
return
}
// 4. Interleave the audio and video data.
self.avInterLeavedSample()
self.semaphore.signal()
}
}
/// Stops writing
/// - Parameter completeHandler: completion callback
func stopWriting(completeHandler: @escaping (Bool, Error?) -> Void) {
muxerQueue.async { [weak self] in
guard let self = self else { return }
self.semaphore.wait()
self.stopWritingInternal { success, error in
self.muxerStatus = success ? .completed : .failed
self.semaphore.signal()
completeHandler(success, error)
}
}
}
// MARK: - Private methods
/// Sets up the writer
private func setupMuxWriter() throws {
// outputURL is non-optional, so no optional binding is needed.
let outputURL = config.outputURL
// 1. Remove any file already at the output path.
if FileManager.default.fileExists(atPath: outputURL.path) {
try? FileManager.default.removeItem(atPath: outputURL.path)
}
// 2. Create the writer instance.
if muxWriter != nil {
return
}
// Use AVAssetWriter as the muxer, with the .mp4 file type.
muxWriter = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
muxWriter?.movieTimeScale = CMTimeScale(1_000_000_000)
muxWriter?.shouldOptimizeForNetworkUse = true // Moves the MP4's moov box to the front of the file.
// 3. If the output contains video, create the video input.
if config.muxerType.contains(.video) && writerVideoInput == nil,
let videoQueue = videoQueue,
let videoHead = CMSimpleQueueGetHead(videoQueue) {
// Peek at the head sample without consuming the queue's retain.
let sampleBuffer = Unmanaged<CMSampleBuffer>.fromOpaque(videoHead).takeUnretainedValue()
if let videoDescription = CMSampleBufferGetFormatDescription(sampleBuffer) {
writerVideoInput = AVAssetWriterInput(mediaType: .video, outputSettings: nil, sourceFormatHint: videoDescription)
writerVideoInput?.expectsMediaDataInRealTime = true // The input is fed from a real-time source.
writerVideoInput?.transform = config.preferredTransform // Video transform
if let videoInput = writerVideoInput, let writer = muxWriter, writer.canAdd(videoInput) {
writer.add(videoInput)
} else {
throw NSError(
domain: String(describing: KFMP4Muxer.self),
code: KFMP4MuxerError.addOutputError.rawValue,
userInfo: nil
)
}
}
}
// 4. If the output contains audio, create the audio input.
if config.muxerType.contains(.audio) && writerAudioInput == nil,
let audioQueue = audioQueue,
let audioHead = CMSimpleQueueGetHead(audioQueue) {
// Peek at the head sample without consuming the queue's retain.
let sampleBuffer = Unmanaged<CMSampleBuffer>.fromOpaque(audioHead).takeUnretainedValue()
if let audioDescription = CMSampleBufferGetFormatDescription(sampleBuffer) {
writerAudioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: nil, sourceFormatHint: audioDescription)
writerAudioInput?.expectsMediaDataInRealTime = true // The input is fed from a real-time source.
if let audioInput = writerAudioInput, let writer = muxWriter, writer.canAdd(audioInput) {
writer.add(audioInput)
} else {
throw NSError(
domain: String(describing: KFMP4Muxer.self),
code: KFMP4MuxerError.addOutputError.rawValue,
userInfo: nil
)
}
}
}
}
/// Puts a sample buffer into the matching queue
private func enqueueSampleBuffer(_ sampleBuffer: CMSampleBuffer) {
// Route the sample into a queue based on its media type.
guard let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer) else {
return
}
let mediaType = CMFormatDescriptionGetMediaType(formatDescription)
// CMSimpleQueue stores raw pointers and does not retain its elements,
// so we retain the buffer manually via Unmanaged before enqueuing.
let sampleBufferRef = Unmanaged.passRetained(sampleBuffer)
let pointer = sampleBufferRef.toOpaque()
if mediaType == kCMMediaType_Audio, let audioQueue = audioQueue {
CMSimpleQueueEnqueue(audioQueue, element: pointer)
} else if mediaType == kCMMediaType_Video, let videoQueue = videoQueue {
CMSimpleQueueEnqueue(videoQueue, element: pointer)
} else {
// Not kept in any queue; balance the retain.
sampleBufferRef.release()
}
}
/// Drains the remaining queued samples into the writer
private func flushMuxer() {
appendAudioSample()
appendVideoSample()
}
/// Appends queued audio samples
private func appendAudioSample() {
guard let audioQueue = audioQueue else { return }
// Write queued audio into the muxer.
while let audioInput = writerAudioInput, audioInput.isReadyForMoreMediaData && CMSimpleQueueGetCount(audioQueue) > 0 {
guard let dequeuedItem = CMSimpleQueueDequeue(audioQueue) else { break }
// takeRetainedValue() consumes the +1 added when the sample was enqueued.
let audioSampleRef = Unmanaged<CMSampleBuffer>.fromOpaque(dequeuedItem)
let audioSample = audioSampleRef.takeRetainedValue()
audioInput.append(audioSample)
}
}
/// Appends queued video samples
private func appendVideoSample() {
guard let videoQueue = videoQueue else { return }
// Write queued video into the muxer.
while let videoInput = writerVideoInput, videoInput.isReadyForMoreMediaData && CMSimpleQueueGetCount(videoQueue) > 0 {
guard let dequeuedItem = CMSimpleQueueDequeue(videoQueue) else { break }
// takeRetainedValue() consumes the +1 added when the sample was enqueued.
let videoSampleRef = Unmanaged<CMSampleBuffer>.fromOpaque(dequeuedItem)
let videoSample = videoSampleRef.takeRetainedValue()
videoInput.append(videoSample)
}
}
/// Interleaves audio and video samples
private func avInterLeavedSample() {
let hasAudio = config.muxerType.contains(.audio)
let hasVideo = config.muxerType.contains(.video)
guard let audioQueue = audioQueue, let videoQueue = videoQueue else { return }
// When muxing both audio and video, the two streams must be interleaved.
if hasAudio && hasVideo {
while CMSimpleQueueGetCount(audioQueue) > 0 && CMSimpleQueueGetCount(videoQueue) > 0 {
if let audioInput = writerAudioInput, let videoInput = writerVideoInput,
audioInput.isReadyForMoreMediaData && videoInput.isReadyForMoreMediaData {
// Peek at both queue heads to compare timestamps.
guard let audioHeaderItem = CMSimpleQueueGetHead(audioQueue),
let videoHeaderItem = CMSimpleQueueGetHead(videoQueue) else { break }
let audioHeader = Unmanaged<CMSampleBuffer>.fromOpaque(audioHeaderItem).takeUnretainedValue()
let videoHeader = Unmanaged<CMSampleBuffer>.fromOpaque(videoHeaderItem).takeUnretainedValue()
let audioDtsTime = CMSampleBufferGetPresentationTimeStamp(audioHeader)
// Use the video's decode timestamp; fall back to its presentation timestamp if invalid.
var videoDtsTime = CMSampleBufferGetDecodeTimeStamp(videoHeader)
if CMTIME_IS_INVALID(videoDtsTime) {
videoDtsTime = CMSampleBufferGetPresentationTimeStamp(videoHeader)
}
// Compare the timestamps and write the earlier sample first.
if CMTimeCompare(audioDtsTime, videoDtsTime) >= 0 {
guard let dequeuedItem = CMSimpleQueueDequeue(videoQueue) else { break }
// takeRetainedValue() consumes the +1 added when the sample was enqueued.
let videoSample = Unmanaged<CMSampleBuffer>.fromOpaque(dequeuedItem).takeRetainedValue()
videoInput.append(videoSample)
} else {
guard let dequeuedItem = CMSimpleQueueDequeue(audioQueue) else { break }
// takeRetainedValue() consumes the +1 added when the sample was enqueued.
let audioSample = Unmanaged<CMSampleBuffer>.fromOpaque(dequeuedItem).takeRetainedValue()
audioInput.append(audioSample)
}
} else {
break
}
}
} else if hasAudio {
// Audio only.
appendAudioSample()
} else if hasVideo {
// Video only.
appendVideoSample()
}
}
/// Checks whether the needed format descriptions are available
private func checkFormatDescriptionLoadSuccess() -> Bool {
guard let audioQueue = audioQueue, let videoQueue = videoQueue else { return false }
// Before creating the writer, make sure we have data for every track we intend to mux.
if muxWriter == nil {
let hasAudio = config.muxerType.contains(.audio)
let hasVideo = config.muxerType.contains(.video)
if hasAudio && hasVideo {
return CMSimpleQueueGetCount(videoQueue) > 0 && CMSimpleQueueGetCount(audioQueue) > 0
} else if hasAudio {
return CMSimpleQueueGetCount(audioQueue) > 0
} else if hasVideo {
return CMSimpleQueueGetCount(videoQueue) > 0
}
}
return false
}
/// Returns the session's source time
private func sessionSourceTime() -> CMTime {
guard let audioQueue = audioQueue, let videoQueue = videoQueue else { return CMTime.invalid }
// The session starts at the smaller of the first audio and video PTS.
if CMSimpleQueueGetCount(audioQueue) > 0, CMSimpleQueueGetCount(videoQueue) > 0,
let audioHeaderItem = CMSimpleQueueGetHead(audioQueue),
let videoHeaderItem = CMSimpleQueueGetHead(videoQueue) {
let audioFirstBuffer = Unmanaged<CMSampleBuffer>.fromOpaque(audioHeaderItem).takeUnretainedValue()
let videoFirstBuffer = Unmanaged<CMSampleBuffer>.fromOpaque(videoHeaderItem).takeUnretainedValue()
let audioPtsTime = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(audioFirstBuffer))
let videoPtsTime = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(videoFirstBuffer))
return audioPtsTime >= videoPtsTime ?
CMSampleBufferGetPresentationTimeStamp(videoFirstBuffer) :
CMSampleBufferGetPresentationTimeStamp(audioFirstBuffer)
} else if CMSimpleQueueGetCount(audioQueue) > 0, let audioHeaderItem = CMSimpleQueueGetHead(audioQueue) {
let audioFirstBuffer = Unmanaged<CMSampleBuffer>.fromOpaque(audioHeaderItem).takeUnretainedValue()
return CMSampleBufferGetPresentationTimeStamp(audioFirstBuffer)
} else if CMSimpleQueueGetCount(videoQueue) > 0, let videoHeaderItem = CMSimpleQueueGetHead(videoQueue) {
let videoFirstBuffer = Unmanaged<CMSampleBuffer>.fromOpaque(videoHeaderItem).takeUnretainedValue()
return CMSampleBufferGetPresentationTimeStamp(videoFirstBuffer)
}
return CMTime.invalid
}
/// Stops writing (internal)
private func stopWritingInternal(completion: @escaping (Bool, Error?) -> Void) {
// 1. If the writer isn't in a writable state, report an error.
guard let muxWriter = muxWriter, muxWriter.status == .writing else {
let error = muxWriter?.error ?? NSError(
domain: String(describing: type(of: self)),
code: muxWriter?.status.rawValue ?? AVAssetWriter.Status.unknown.rawValue,
userInfo: nil
)
completion(false, error)
return
}
// 2. Consume the data remaining in the queues.
avInterLeavedSample() // Interleave first.
flushMuxer() // Then drain what's left.
// 3. Mark the inputs as finished.
markVideoAsFinished()
markAudioAsFinished()
// 4. Finish writing.
muxWriter.finishWriting { [weak self] in
guard let self = self else { return }
let complete = self.muxWriter?.status == .completed
completion(complete, complete ? nil : self.muxWriter?.error)
}
}
/// Marks the video input as finished
private func markVideoAsFinished() {
if let muxWriter = muxWriter, muxWriter.status == .writing, let videoInput = writerVideoInput {
videoInput.markAsFinished()
}
}
/// Marks the audio input as finished
private func markAudioAsFinished() {
if let muxWriter = muxWriter, muxWriter.status == .writing, let audioInput = writerAudioInput {
audioInput.markAsFinished()
}
}
/// Resets the muxer
private func reset() {
// Cancel any in-flight writing.
if let muxWriter = muxWriter, muxWriter.status == .writing {
muxWriter.cancelWriting()
}
// Drop the instances.
self.muxWriter = nil
self.writerVideoInput = nil
self.writerAudioInput = nil
// Empty the audio queue.
if let audioQueue = audioQueue {
while CMSimpleQueueGetCount(audioQueue) > 0 {
if let item = CMSimpleQueueDequeue(audioQueue) {
// Release the queue's retain on the element.
Unmanaged<CMSampleBuffer>.fromOpaque(item).release()
}
}
}
// Empty the video queue.
if let videoQueue = videoQueue {
while CMSimpleQueueGetCount(videoQueue) > 0 {
if let item = CMSimpleQueueDequeue(videoQueue) {
// Release the queue's retain on the element.
Unmanaged<CMSampleBuffer>.fromOpaque(item).release()
}
}
}
}
/// Reports an error to the caller
private func callBackError(_ error: Error) {
if let errorCallBack = errorCallBack {
DispatchQueue.main.async {
errorCallBack(error)
}
}
}
}
From the code you can see the following main parts:

1) Creating the muxer instance and its audio and video inputs.
- The muxer instance is only created on the first call to `appendSampleBuffer(_:)`, inside `setupMuxWriter()`.
- The audio and video inputs are `writerAudioInput` and `writerVideoInput` respectively.

2) Using two queues as buffers for the pending audio and video data.
- The two queues are `audioQueue` and `videoQueue`.
- Every external call to `appendSampleBuffer(_:)` actually goes through `enqueueSampleBuffer(_:)` first, which places the sample into one of the two queues so the audio and video data can later be interleaved as needed.

3) Interleaving the audio and video data when muxing both.
- The interleaving is implemented in `avInterLeavedSample()`. When the pending data contains both audio and video, they need to be interleaved by timestamp, which improves the playback experience.

4) Writing the samples into the container.
- When muxing both audio and video, the interleaved samples are appended to `writerAudioInput` and `writerVideoInput` inside `avInterLeavedSample()`.
- When muxing only audio or only video, the samples are written directly to the corresponding input, in `appendAudioSample()` and `appendVideoSample()` respectively.

5) Stopping the write.
- Implemented in `stopWriting(completeHandler:)` → `stopWritingInternal(completion:)`.
- Before stopping, the remaining data in `audioQueue` and `videoQueue` must be consumed by calling `avInterLeavedSample()` → `flushMuxer()`.
- The video and audio inputs are then marked as finished, in `markVideoAsFinished()` and `markAudioAsFinished()` respectively.

6) State-machine management spanning the whole muxing process.
- The `KFMP4MuxerStatus` enum defines the muxer's states, and the state machine is maintained throughout the muxing flow.

7) Error reporting.
- `callBackError(_:)` reports errors back to the caller.

8) Cleaning up the muxer instance and the data buffers.
- Implemented in `reset()`, which tears down the writer instance, the audio and video inputs, and the audio and video buffers.
Now for the concrete flow. The view controller for this demo initializes the muxer with its configuration, and tapping the Start button calls the muxer's `startWriting()` method. Everything from that point on is logic inside `KFMP4Muxer`, so let's follow its implementation.
During capture, each successfully captured frame triggers a callback; in that callback we take the raw captured data, encode it, and then call the muxer's `appendSampleBuffer(_:)` to mux the encoded output. A condensed sketch of that call order follows below.
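The following is a hedged sketch of the end-to-end call order; `muxerConfig` and `videoEncoder` are the instances defined in the controller code at the end of this post:

```swift
// Condensed call order, assuming the muxer/encoder classes from this post.
let muxer = KFMP4Muxer(config: muxerConfig)
muxer.startWriting()

// Every encoded frame produced by the encoder is handed to the muxer.
videoEncoder.sampleBufferOutputCallBack = { sampleBuffer in
    muxer.appendSampleBuffer(sampleBuffer)
}

// When recording ends, finish the file and get a completion callback.
muxer.stopWriting { success, error in
    print("muxer finished, success: \(success), error: \(String(describing: error))")
}
```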
About the `appendSampleBuffer(_:)` method:

- It validates that the sample data is ready and that the muxer's status allows writing.
- It runs asynchronously on the serial `muxerQueue`, using a semaphore for thread safety.
- It calls `enqueueSampleBuffer(_:)`, which routes the sample into the matching `CMSimpleQueue` (audio or video); note that `Unmanaged.passRetained` bumps the reference count before enqueuing, because `CMSimpleQueue` will not retain it (a small sketch of this round trip follows right after this list).
- On the first sample, it calls `setupMuxWriter()` to create and configure the writer, then starts the writing session.
- It calls `avInterLeavedSample()` to interleave the audio and video data.
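Because `CMSimpleQueue` stores raw pointers and never retains or releases its elements, every enqueue must be paired with a manual retain and every dequeue with a matching release, exactly as the muxer does. A minimal, self-contained sketch of that round trip (using a `CFString` as a stand-in for the `CMSampleBuffer`, and an arbitrary capacity of 8):

```swift
import CoreMedia

// Create a small CMSimpleQueue (capacity chosen arbitrarily for the demo).
var queueOut: CMSimpleQueue?
CMSimpleQueueCreate(allocator: kCFAllocatorDefault, capacity: 8, queueOut: &queueOut)
guard let queue = queueOut else { fatalError("CMSimpleQueue creation failed") }

// Stand-in CF object; in the muxer this is a CMSampleBuffer.
let element = "hello" as CFString

// Enqueue: passRetained hands the queue its own +1 reference,
// since CMSimpleQueue will not retain the element itself.
CMSimpleQueueEnqueue(queue, element: Unmanaged.passRetained(element).toOpaque())

// Dequeue: takeRetainedValue() consumes that +1, balancing the retain.
if let pointer = CMSimpleQueueDequeue(queue) {
    let roundTripped = Unmanaged<CFString>.fromOpaque(pointer).takeRetainedValue()
    print(roundTripped) // "hello"
}
```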
About the `avInterLeavedSample()` method:

- While both queues are non-empty, it peeks at the head sample of each queue and compares their timestamps.
- The sample with the smaller timestamp is dequeued (`CMSimpleQueueDequeue`).
- `Unmanaged.fromOpaque(dequeuedItem).takeRetainedValue()` recovers the `CMSampleBuffer` while correctly balancing the earlier retain.
- The sample is appended to its track: `writerVideoInput.append(videoSample)` or `writerAudioInput.append(audioSample)`.
- The loop keeps alternating writes in timestamp order, which keeps the audio and video interleaved and in sync.
That is the overall execution order; the remaining details are covered by the code comments. The timestamp comparison driving the interleaving is illustrated below.
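As a worked illustration of the comparison rule (the timestamps are invented for the example): if the audio head sample sits at 0.023 s and the video head sample at 0.016 s, the audio timestamp is the larger one, so the video sample is written first. In code the decision reduces to:

```swift
import CoreMedia

// Invented timestamps: audio head at 0.023 s, video head at 0.016 s.
let audioDts = CMTime(value: 23, timescale: 1000)
let videoDts = CMTime(value: 16, timescale: 1000)

// Same rule as avInterLeavedSample(): write the earlier sample first.
if CMTimeCompare(audioDts, videoDts) >= 0 {
    print("video sample goes first") // printed here, since 0.016 s < 0.023 s
} else {
    print("audio sample goes first")
}
```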
Here is the corresponding controller code:
//
// KFVideoMuxerViewController.swift
// VideoDemo
//
// Created by ricard.li on 2025/5/15.
//
import UIKit
import AVFoundation
class KFVideoMuxerViewController: UIViewController {
private lazy var videoCaptureConfig: VideoCaptureConfig = {
return VideoCaptureConfig()
}()
private lazy var videoCapture: VideoCapture = {
let capture = VideoCapture(config: videoCaptureConfig)
capture.sessionInitSuccessCallBack = { [weak self] in
guard let self = self else { return }
DispatchQueue.main.async {
if let previewLayer = self.videoCapture.previewLayer {
previewLayer.backgroundColor = UIColor.black.cgColor
previewLayer.frame = self.view.bounds
self.view.layer.insertSublayer(previewLayer, at: 0)
}
}
}
capture.sampleBufferOutputCallBack = { [weak self] sampleBuffer in
guard let self = self, self.isWriting else { return }
if let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
self.videoEncoder.encode(pixelBuffer: imageBuffer, ptsTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
}
}
// Error callback
capture.sessionErrorCallBack = { error in
if let nsError = error as NSError? {
print("KFVideoCapture Error: \(nsError.code) \(error.localizedDescription)")
} else {
print("KFVideoCapture Error: \(error.localizedDescription)")
}
}
return capture
}()
private lazy var videoEncoderConfig: KFVideoEncoderConfig = {
return KFVideoEncoderConfig()
}()
private lazy var videoEncoder: KFVideoEncoder = {
let encoder = KFVideoEncoder(config: videoEncoderConfig)
encoder.sampleBufferOutputCallBack = { [weak self] sampleBuffer in
guard let self = self, self.isWriting else { return }
self.muxer.appendSampleBuffer(sampleBuffer)
}
return encoder
}()
private lazy var muxerConfig: KFMuxerConfig = {
let videoPath = (NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).last! as NSString).appendingPathComponent("test.mp4")
print("MP4 file path: \(videoPath)")
try? FileManager.default.removeItem(atPath: videoPath)
let config = KFMuxerConfig(outputURL: URL(fileURLWithPath: videoPath))
config.muxerType = .video
return config
}()
private lazy var muxer: KFMP4Muxer = {
return KFMP4Muxer(config: muxerConfig)
}()
private var isWriting = false
// MARK: - Lifecycle
override func viewDidLoad() {
super.viewDidLoad()
requestAccessForVideo()
setupUI()
}
override func viewWillLayoutSubviews() {
super.viewWillLayoutSubviews()
videoCapture.previewLayer?.frame = view.bounds
}
// MARK: - Actions
@objc private func start() {
if !isWriting {
muxer.startWriting()
isWriting = true
}
}
@objc private func stop() {
if isWriting {
videoEncoder.flush { [weak self] in
guard let self = self else { return }
self.isWriting = false
self.muxer.stopWriting { success, error in
print("muxer stop \(success ? "success" : "failed")")
}
}
}
}
@objc private func changeCamera() {
let newPosition: AVCaptureDevice.Position = videoCapture.config.position == .back ? .front : .back
videoCapture.changeDevicePosition(to: newPosition)
}
@objc private func singleTap(_ sender: UIGestureRecognizer) {
// Custom handling could go here.
}
@objc private func handleDoubleTap(_ sender: UIGestureRecognizer) {
let newPosition: AVCaptureDevice.Position = videoCapture.config.position == .back ? .front : .back
videoCapture.changeDevicePosition(to: newPosition)
}
// MARK: - Private Methods
private func requestAccessForVideo() {
let status = AVCaptureDevice.authorizationStatus(for: .video)
switch status {
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { [weak self] granted in
if granted {
self?.videoCapture.startRunning()
} else {
// The user denied access.
}
}
case .authorized:
videoCapture.startRunning()
default:
break
}
}
private func setupUI() {
edgesForExtendedLayout = .all
extendedLayoutIncludesOpaqueBars = true
title = "Video Muxer"
view.backgroundColor = .white
let singleTapGesture = UITapGestureRecognizer(target: self, action: #selector(singleTap(_:)))
singleTapGesture.numberOfTapsRequired = 1
singleTapGesture.numberOfTouchesRequired = 1
view.addGestureRecognizer(singleTapGesture)
let doubleTapGesture = UITapGestureRecognizer(target: self, action: #selector(handleDoubleTap(_:)))
doubleTapGesture.numberOfTapsRequired = 2
doubleTapGesture.numberOfTouchesRequired = 1
view.addGestureRecognizer(doubleTapGesture)
singleTapGesture.require(toFail: doubleTapGesture)
let startBarButton = UIBarButtonItem(title: "Start", style: .plain, target: self, action: #selector(start))
let stopBarButton = UIBarButtonItem(title: "Stop", style: .plain, target: self, action: #selector(stop))
let cameraBarButton = UIBarButtonItem(title: "Camera", style: .plain, target: self, action: #selector(changeCamera))
navigationItem.rightBarButtonItems = [stopBarButton, startBarButton, cameraBarButton]
}
}