// MARK: - Decoding section (解码部分)
//
// TestVideoBox.swift
// TestSwiftUIApp
//
// Created by bob bob on 2021/2/19.
//
import Foundation
import VideoToolbox
/// H.264 elementary-stream decoder built on VideoToolbox.
///
/// Feed it Annex-B NALUs (4-byte `00 00 00 01` start code + payload) via
/// `decode(data:)`; decoded `CVImageBuffer`s are delivered asynchronously
/// through `videoDecodeCallback` on `callbackQueue`.
class Decoder1: NSObject {
    var width: Int32 = 480
    var height: Int32 = 640
    /// Serial queue on which incoming NALUs are parsed and submitted for decode.
    var decodeQueue: DispatchQueue = DispatchQueue(label: "decode")
    /// Serial queue on which `videoDecodeCallback` is invoked.
    var callbackQueue: DispatchQueue = DispatchQueue(label: "callback")
    /// Format description built from the most recent SPS/PPS pair.
    var decodeDesc: CMVideoFormatDescription?
    /// Most recent SPS NALU, still prefixed with its 4-byte start code.
    var spsData: Data?
    /// Most recent PPS NALU, still prefixed with its 4-byte start code.
    var ppsData: Data?
    var decompressionSession: VTDecompressionSession?
    /// C-style output callback handed to VideoToolbox (set up in `setCallback()`).
    var callback: VTDecompressionOutputCallback?
    /// Client callback receiving each decoded image buffer.
    var videoDecodeCallback: ((CVImageBuffer?) -> Void?)?

    func setVideoDecodeCallback(block: @escaping (CVImageBuffer?) -> Void?) {
        videoDecodeCallback = block
    }

    init(width: Int32, height: Int32) {
        self.width = width
        self.height = height
        super.init()
    }

    /// Enqueue one Annex-B NALU (start code + payload) for decoding.
    func decode(data: Data) {
        decodeQueue.async {
            self.decodeByte(data: data, size: UInt32(data.count))
        }
    }

    /// Build the format description from the stored SPS/PPS and create the
    /// decompression session. Returns `false` when SPS/PPS are missing or
    /// any VideoToolbox call fails.
    func initDecoder() -> Bool {
        // BUG FIX: the original rebuilt the session for every frame, leaking
        // the previous VTDecompressionSession each time. Reuse an existing one.
        if decompressionSession != nil {
            return true
        }
        guard let spsData = spsData, let ppsData = ppsData else {
            return false
        }
        // Strip the 4-byte start codes; VideoToolbox wants raw parameter sets.
        let sps = [UInt8](spsData.suffix(from: 4))
        let pps = [UInt8](ppsData.suffix(from: 4))

        // BUG FIX: the original escaped `withUnsafeBufferPointer { $0 }.baseAddress!`,
        // which produces dangling pointers. Keep both buffers alive for the call.
        var descriptionState: OSStatus = noErr
        sps.withUnsafeBufferPointer { spsPtr in
            pps.withUnsafeBufferPointer { ppsPtr in
                let parameterSets: [UnsafePointer<UInt8>] = [spsPtr.baseAddress!,
                                                             ppsPtr.baseAddress!]
                let parameterSizes = [sps.count, pps.count]
                descriptionState = CMVideoFormatDescriptionCreateFromH264ParameterSets(
                    allocator: kCFAllocatorDefault,
                    parameterSetCount: 2,
                    parameterSetPointers: parameterSets,
                    parameterSetSizes: parameterSizes,
                    nalUnitHeaderLength: 4,          // length of the start code / AVCC length field
                    formatDescriptionOut: &decodeDesc)
            }
        }
        guard descriptionState == noErr, let formatDescription = decodeDesc else {
            print("create format description failed: \(descriptionState)")
            return false
        }

        // Install the C callback; `self` travels through the refCon pointer.
        setCallback()
        var callbackRecord = VTDecompressionOutputCallbackRecord(
            decompressionOutputCallback: callback,
            decompressionOutputRefCon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self))
        let imageBufferAttributes = [
            kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
            kCVPixelBufferWidthKey: width,
            kCVPixelBufferHeightKey: height,
        ] as [CFString: Any]

        let state = VTDecompressionSessionCreate(
            allocator: kCFAllocatorDefault,
            formatDescription: formatDescription,
            decoderSpecification: nil,
            imageBufferAttributes: imageBufferAttributes as CFDictionary,
            outputCallback: &callbackRecord,
            decompressionSessionOut: &decompressionSession)
        // BUG FIX: the original ignored a failing status here.
        guard state == noErr, let session = decompressionSession else {
            print("create decompression session failed: \(state)")
            return false
        }
        // Prefer low latency over throughput.
        VTSessionSetProperty(session, key: kVTDecompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        return true
    }

    /// Build the VideoToolbox output callback that forwards decoded frames
    /// to `videoDecodeCallback` on `callbackQueue`.
    private func setCallback() {
        callback = { decompressionOutputRefCon, _, _, _, imageBuffer, _, _ in
            guard imageBuffer != nil else { return }
            // Recover `self` from the refCon installed in `initDecoder()`.
            let decoder = unsafeBitCast(decompressionOutputRefCon, to: Decoder1.self)
            if let block = decoder.videoDecodeCallback {
                decoder.callbackQueue.async {
                    block(imageBuffer)
                }
            }
        }
    }

    /// Classify one Annex-B NALU and either stash it (SPS/PPS), skip it (SEI),
    /// or convert it to AVCC framing and decode it.
    private func decodeByte(data: Data, size: UInt32) {
        // BUG FIX: the original underflowed `size - 4` (UInt32) and crashed on
        // `bytes[4]` for inputs of 4 bytes or fewer.
        guard size > 4 else { return }
        // Replace the 4-byte start code with a 4-byte big-endian length
        // (AVCC framing, which VTDecompressionSession expects).
        let naluSize = size - 4
        var frameBytes: [UInt8] = [UInt8(truncatingIfNeeded: naluSize >> 24),
                                   UInt8(truncatingIfNeeded: naluSize >> 16),
                                   UInt8(truncatingIfNeeded: naluSize >> 8),
                                   UInt8(truncatingIfNeeded: naluSize)]
        frameBytes.append(contentsOf: data.suffix(from: 4))
        // Low 5 bits of the first payload byte are the NALU type:
        // 5 = IDR slice, 6 = SEI, 7 = SPS, 8 = PPS.
        let type = Int(frameBytes[4] & 0x1f)
        switch type {
        case 0x07:
            spsData = data
        case 0x08:
            ppsData = data
        case 0x06:
            break // SEI (supplemental enhancement info) — not decodable, skip.
        default:
            // IDR (0x05) and all other slice types: decode once SPS/PPS are known.
            if initDecoder() {
                decode(frame: frameBytes, size: size)
            }
        }
    }

    /// Wrap one AVCC-framed NALU in a CMSampleBuffer and hand it to the session.
    private func decode(frame: [UInt8], size: UInt32) {
        guard let session = decompressionSession else { return }

        // BUG FIX: the original pointed a CMBlockBuffer (blockAllocator:
        // kCFAllocatorNull, i.e. no copy) at a local array, so the sample
        // buffer referenced dead stack memory. Let the block buffer own its
        // storage and copy the frame into it.
        var blockBuffer: CMBlockBuffer?
        var status = CMBlockBufferCreateWithMemoryBlock(
            allocator: kCFAllocatorDefault,
            memoryBlock: nil,                       // allocate internally
            blockLength: Int(size),
            blockAllocator: kCFAllocatorDefault,
            customBlockSource: nil,
            offsetToData: 0,
            dataLength: Int(size),
            flags: kCMBlockBufferAssureMemoryNowFlag,
            blockBufferOut: &blockBuffer)
        guard status == kCMBlockBufferNoErr, let block = blockBuffer else {
            print("create block buffer failed: \(status)")
            return
        }
        status = frame.withUnsafeBytes { raw in
            CMBlockBufferReplaceDataBytes(with: raw.baseAddress!,
                                          blockBuffer: block,
                                          offsetIntoDestination: 0,
                                          dataLength: Int(size))
        }
        guard status == kCMBlockBufferNoErr else {
            print("copy into block buffer failed: \(status)")
            return
        }

        var sampleBuffer: CMSampleBuffer?
        var sampleSizes: [Int] = [Int(size)]
        status = CMSampleBufferCreateReady(
            allocator: kCFAllocatorDefault,
            dataBuffer: block,
            formatDescription: decodeDesc,
            sampleCount: 1,
            sampleTimingEntryCount: 0,              // no timing info for raw NALUs
            sampleTimingArray: nil,
            sampleSizeEntryCount: 1,
            sampleSizeArray: &sampleSizes,
            sampleBufferOut: &sampleBuffer)
        guard status == noErr, let sample = sampleBuffer else {
            print("create sample buffer failed: \(status)")
            return
        }

        var infoFlags = VTDecodeInfoFlags.asynchronous
        let decodeState = VTDecompressionSessionDecodeFrame(
            session,
            sampleBuffer: sample,
            flags: ._EnableAsynchronousDecompression,
            frameRefcon: nil,
            infoFlagsOut: &infoFlags)
        if decodeState != noErr {
            print("decode failed: \(decodeState)")
        }
    }

    deinit {
        if decompressionSession != nil {
            VTDecompressionSessionInvalidate(decompressionSession!)
            decompressionSession = nil
        }
    }
}
// MARK: - Encoding section (编码)
//
// VideoEncoder.swift
// TestSwiftUIApp
//
// Created by bob bob on 2021/2/19.
//
import Foundation
import VideoToolbox
/// H.264 encoder built on VTCompressionSession.
///
/// Feed it captured `CMSampleBuffer`s via `encodeVideo(sampleBuffer:)`.
/// Encoded NALUs are re-framed with a 4-byte Annex-B start code and delivered
/// through `videoEncodeCallback`; SPS/PPS are delivered once through
/// `videoEncodeCallbackSPSAndPPS`. Both callbacks run on `callBackQueue`.
class DQVideoEncoder: NSObject {
    /// Monotonic frame counter used to derive presentation timestamps.
    var frameID: Int64 = 0
    /// True once SPS/PPS have been extracted and delivered.
    var hasSpsPps = false
    var width: Int32 = 480
    var height: Int32 = 640
    /// Target average bit rate in bits per second.
    var bitRate: Int32 = 480 * 640 * 3 * 4
    /// Expected (not enforced) frame rate.
    var fps: Int32 = 10
    var encodeQueue = DispatchQueue(label: "encode")
    var callBackQueue = DispatchQueue(label: "callBack")
    var encodeSession: VTCompressionSession!
    /// C-style output callback handed to VideoToolbox (set up in `setCallBack()`).
    var encodeCallBack: VTCompressionOutputCallback?

    /// Receives each encoded NALU (start code + payload).
    var videoEncodeCallback: ((Data) -> Void)?
    func videoEncodeCallback(block: @escaping (Data) -> Void) {
        self.videoEncodeCallback = block
    }

    /// Receives the SPS and PPS (each start-code prefixed), once.
    var videoEncodeCallbackSPSAndPPS: ((Data, Data) -> Void)?
    func videoEncodeCallbackSPSAndPPS(block: @escaping (Data, Data) -> Void) {
        videoEncodeCallbackSPSAndPPS = block
    }

    init(width: Int32 = 480, height: Int32 = 640, bitRate: Int32? = nil, fps: Int32? = nil) {
        self.width = width
        self.height = height
        self.bitRate = bitRate ?? (480 * 640 * 3 * 4)
        self.fps = fps ?? 10
        super.init()
        setCallBack()
        initVideoToolBox()
    }

    /// Create and configure the H.264 compression session.
    func initVideoToolBox() {
        let state = VTCompressionSessionCreate(
            allocator: kCFAllocatorDefault,
            width: width,
            height: height,
            codecType: kCMVideoCodecType_H264,
            encoderSpecification: nil,
            imageBufferAttributes: nil,
            compressedDataAllocator: nil,
            outputCallback: encodeCallBack,
            // `self` travels through the refCon; recovered in the callback.
            refcon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self),
            compressionSessionOut: &self.encodeSession)
        guard state == noErr, encodeSession != nil else {
            print("create VTCompressionSession failed: \(state)")
            return
        }
        // Real-time output; baseline profile; no B-frames (low-latency streaming,
        // and B-frames are droppable on the decode side anyway).
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ProfileLevel, value: kVTProfileLevel_H264_Baseline_AutoLevel)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AllowFrameReordering, value: kCFBooleanFalse)
        // Keyframe (IDR) interval, in frames.
        // BUG FIX: this was an `Int` (8 bytes) passed as CFNumberType.intType
        // (4 bytes); declare it Int32 so CFNumberCreate reads the right width.
        var frameInterval: Int32 = 10
        let intervalNumber = CFNumberCreate(kCFAllocatorDefault, .intType, &frameInterval)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_MaxKeyFrameInterval, value: intervalNumber)
        // Desired frame rate — a hint, not a hard limit.
        let fpsNumber = CFNumberCreate(kCFAllocatorDefault, .intType, &fps)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ExpectedFrameRate, value: fpsNumber)
        // Average bit rate in bits per second; higher = sharper but larger output.
        let averageBitRate = CFNumberCreate(kCFAllocatorDefault, .intType, &bitRate)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AverageBitRate, value: averageBitRate)
        // Hard cap: DataRateLimits takes [bytes, seconds] pairs.
        // BUG FIX: the original passed bits (`bitRate * 2`); convert to bytes.
        let byteLimitPerSecond = Int(bitRate) * 2 / 8
        let bitRatesLimit = [byteLimitPerSecond, 1] as CFArray
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_DataRateLimits, value: bitRatesLimit)
    }

    /// Submit one captured frame for encoding. Work happens asynchronously on
    /// `encodeQueue`; results arrive via the output callback.
    func encodeVideo(sampleBuffer: CMSampleBuffer) {
        if self.encodeSession == nil {
            initVideoToolBox()
        }
        encodeQueue.async {
            guard let session = self.encodeSession,
                  let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
                return
            }
            // BUG FIX: frameID was never advanced, so every frame carried the
            // same presentation timestamp. Advance it once per submitted frame.
            self.frameID += 1
            let time = CMTime(value: self.frameID, timescale: 1000)
            let state = VTCompressionSessionEncodeFrame(
                session,
                imageBuffer: imageBuffer,
                presentationTimeStamp: time,
                duration: .invalid,
                frameProperties: nil,
                sourceFrameRefcon: nil,
                infoFlagsOut: nil)
            if state != noErr {
                print("encode failure: \(state)")
            }
        }
    }

    /// Build the VideoToolbox output callback: extract SPS/PPS on the first
    /// keyframe, then re-frame each AVCC NALU with a start code and forward it.
    private func setCallBack() {
        encodeCallBack = { outputCallbackRefCon, _, status, _, sampleBuffer in
            // Recover `self` from the refCon installed in `initVideoToolBox()`.
            let encoder = unsafeBitCast(outputCallbackRefCon, to: DQVideoEncoder.self)
            // BUG FIX: the original ignored a failing status.
            guard status == noErr, let sampleBuffer = sampleBuffer else { return }

            // Annex-B start code prepended to every emitted NALU.
            // BUG FIX: the original escaped `withUnsafeBufferPointer { $0 }.baseAddress`,
            // a dangling pointer; append from the array within each call instead.
            let startCode: [UInt8] = [0x00, 0x00, 0x00, 0x01]

            // A frame is a keyframe when its attachment dictionary LACKS
            // kCMSampleAttachmentKey_NotSync.
            var isKeyFrame = false
            if let attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: false),
               CFArrayGetCount(attachments) > 0 {
                let cfDic = unsafeBitCast(CFArrayGetValueAtIndex(attachments, 0), to: CFDictionary.self)
                let notSyncKey = unsafeBitCast(kCMSampleAttachmentKey_NotSync, to: UnsafeRawPointer.self)
                isKeyFrame = !CFDictionaryContainsKey(cfDic, notSyncKey)
            }

            // Extract SPS/PPS once, from the first keyframe's format description.
            if isKeyFrame && !encoder.hasSpsPps,
               let description = CMSampleBufferGetFormatDescription(sampleBuffer) {
                // BUG FIX: the original seeded one pointer with a leaked
                // zero-capacity allocation; nil is the correct "out" seed.
                var spsPointer: UnsafePointer<UInt8>? = nil
                var ppsPointer: UnsafePointer<UInt8>? = nil
                var spsSize = 0, ppsSize = 0
                var parameterCount = 0
                var nalHeaderLength: Int32 = 0
                let spsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
                    description, parameterSetIndex: 0,
                    parameterSetPointerOut: &spsPointer, parameterSetSizeOut: &spsSize,
                    parameterSetCountOut: &parameterCount, nalUnitHeaderLengthOut: &nalHeaderLength)
                let ppsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
                    description, parameterSetIndex: 1,
                    parameterSetPointerOut: &ppsPointer, parameterSetSizeOut: &ppsSize,
                    parameterSetCountOut: &parameterCount, nalUnitHeaderLengthOut: &nalHeaderLength)
                if spsStatus == noErr, ppsStatus == noErr,
                   let sps = spsPointer, let pps = ppsPointer {
                    var spsDataValue = Data(capacity: 4 + spsSize)
                    spsDataValue.append(contentsOf: startCode)
                    spsDataValue.append(sps, count: spsSize)
                    var ppsDataValue = Data(capacity: 4 + ppsSize)
                    ppsDataValue.append(contentsOf: startCode)
                    ppsDataValue.append(pps, count: ppsSize)
                    // BUG FIX: flag was checked but never set, so SPS/PPS
                    // re-fired on every keyframe despite the guard.
                    encoder.hasSpsPps = true
                    // BUG FIX: was force-unwrapped; crash when no client is set.
                    if let spsPpsBlock = encoder.videoEncodeCallbackSPSAndPPS {
                        encoder.callBackQueue.async {
                            spsPpsBlock(spsDataValue, ppsDataValue)
                        }
                    }
                } else {
                    print("get sps/pps failed: \(spsStatus) \(ppsStatus)")
                }
            }

            // Walk the AVCC-framed data buffer: each NALU is prefixed by a
            // 4-byte big-endian length, which we swap for a start code.
            guard let dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else { return }
            var dataPointer: UnsafeMutablePointer<Int8>? = nil
            var totalLength: Int = 0
            let blockState = CMBlockBufferGetDataPointer(
                dataBuffer, atOffset: 0, lengthAtOffsetOut: nil,
                totalLengthOut: &totalLength, dataPointerOut: &dataPointer)
            guard blockState == kCMBlockBufferNoErr, let base = dataPointer else {
                print("get data pointer failed: \(blockState)")
                return
            }

            let lengthInfoSize = 4
            var offset = 0
            while offset + lengthInfoSize < totalLength {
                // Read the big-endian NALU length and convert to host order.
                var naluLength: UInt32 = 0
                memcpy(&naluLength, base + offset, lengthInfoSize)
                naluLength = CFSwapInt32BigToHost(naluLength)
                // Re-frame: start code + payload.
                var naluData = Data(capacity: Int(naluLength) + lengthInfoSize)
                naluData.append(contentsOf: startCode)
                let payload = UnsafeRawPointer(base + offset + lengthInfoSize)
                    .assumingMemoryBound(to: UInt8.self)
                naluData.append(payload, count: Int(naluLength))
                // BUG FIX: was force-unwrapped; crash when no client is set.
                if let frameBlock = encoder.videoEncodeCallback {
                    encoder.callBackQueue.async {
                        frameBlock(naluData)
                    }
                }
                offset += lengthInfoSize + Int(naluLength)
            }
        }
    }

    deinit {
        if encodeSession != nil {
            // Drain pending frames before tearing the session down.
            VTCompressionSessionCompleteFrames(encodeSession, untilPresentationTimeStamp: .invalid)
            VTCompressionSessionInvalidate(encodeSession)
            encodeSession = nil
        }
    }
}