1、使用 VideoToolbox 解码空间视频
第一步: 创建解码会话
1、创建解码器方法介绍
VTDecompressionSessionCreate (
CFAllocatorRef allocator,
CMVideoFormatDescriptionRef videoFormatDescription,
CFDictionaryRef videoDecoderSpecification,
CFDictionaryRef destinationImageBufferAttributes,
const VTDecompressionOutputCallbackRecord * CM_NULLABLE outputCallback,
VTDecompressionSessionRef * CM_NONNULL decompressionSessionOut) API_AVAILABLE ( macosx ( 10.8 ) , ios ( 8.0 ) , tvos ( 10.2 ) ) ;
allocator(分配器): - 类型:CM_NULLABLE CFAllocatorRef - 描述:指定用于分配解压缩会话及其相关数据结构的内存分配器。可以传入 NULL,表示使用默认的内存分配器。 videoFormatDescription(视频格式描述): - 类型:CM_NONNULL CMVideoFormatDescriptionRef - 描述:包含有关视频数据格式的信息的CoreMedia对象,通常由视频流的元数据提供。此参数用于描述待解压缩的视频数据的格式。 videoDecoderSpecification(视频解码器规格): - 类型:CM_NULLABLE CFDictionaryRef - 描述:可选参数,允许您指定用于解码视频的特定解码器。传入 NULL 表示使用默认的解码器。 destinationImageBufferAttributes(目标图像缓冲区属性): - 类型:CM_NULLABLE CFDictionaryRef - 描述:可选参数,允许您指定输出图像缓冲区的属性。例如,可以指定图像的像素格式等信息。 outputCallback(输出回调): - 类型:const VTDecompressionOutputCallbackRecord * CM_NULLABLE - 描述:可选参数,如果不为 NULL,则在解压缩每帧后调用此回调函数以处理输出。可以为 NULL,表示不需要输出回调。 decompressionSessionOut(解压缩会话输出): - 类型:CM_RETURNS_RETAINED_PARAMETER CM_NULLABLE VTDecompressionSessionRef * CM_NONNULL - 描述:指向将由此函数创建的解压缩会话的指针的指针。如果函数成功,则此指针将包含对新创建的解压缩会话的引用。需要调用者负责释放这个引用。
2、创建解码器需要的参数-CMFormatDescriptionRef
/// Returns the format description of the first video track of the asset at `videoURL`,
/// or NULL if the asset has no video track / no format description.
/// The returned reference follows the "caller does not own" convention (autoreleased).
/// NOTE(review): -tracksWithMediaType: loads synchronously; on modern SDKs prefer the
/// async loadTracksWithMediaType: API — confirm against the deployment target.
- (CMFormatDescriptionRef)getVideoFormatDescriptionFromURL:(NSURL *)videoURL {
    AVURLAsset *videoAsset = [AVURLAsset assetWithURL:videoURL];
    AVAssetTrack *videoTrack = [[videoAsset tracksWithMediaType:AVMediaTypeVideo] firstObject];
    if (!videoTrack) {
        NSLog(@"Video track not found in the asset.");
        return NULL;
    }
    CMFormatDescriptionRef formatDescription =
        (__bridge CMFormatDescriptionRef)videoTrack.formatDescriptions.firstObject;
    if (!formatDescription) {
        NSLog(@"Format description not found for the video track.");
        return NULL;
    }
    // BUG FIX: the description is owned by the local (autoreleased) asset, so a bare
    // __bridge return could dangle once the asset is deallocated. Retain + autorelease
    // keeps it valid for the caller without changing the ownership convention.
    return (CMFormatDescriptionRef)CFAutorelease(CFRetain(formatDescription));
}
3、创建解码器需要的参数-CFDictionaryRef
/// Builds the decoder-specification dictionary handed to VTDecompressionSessionCreate,
/// requesting — and requiring — the hardware-accelerated video decoder.
/// NOTE(review): these keys are documented for macOS; on iOS hardware decoding is the
/// default — confirm they are needed for the target platform.
- (NSDictionary *)createDecoderSpecification {
    NSString *enableHardwareKey =
        (__bridge NSString *)kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder;
    NSString *requireHardwareKey =
        (__bridge NSString *)kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder;
    NSMutableDictionary *specification = [NSMutableDictionary dictionary];
    specification[enableHardwareKey] = @YES;
    specification[requireHardwareKey] = @YES;
    return [specification copy];
}
4、创建解码器
/// Creates a VideoToolbox decompression session for the given format description.
/// Returns NULL on failure. On success the caller owns the session and must call
/// VTDecompressionSessionInvalidate + CFRelease when done.
- (VTDecompressionSessionRef)createDecompressionSessionWithFormatDescription:(CMFormatDescriptionRef)videoFormatDescription {
    if (!videoFormatDescription) {
        NSLog(@"Invalid video format description.");
        return NULL;
    }
    NSDictionary *decoderSpecification = [self createDecoderSpecification];
    // BUG FIX: initialize the out variable — VT only writes it on success, so the
    // original could return garbage if creation failed before the NULL check.
    VTDecompressionSessionRef decompressionSession = NULL;
    OSStatus status = VTDecompressionSessionCreate(NULL,  // default allocator
                                                   videoFormatDescription,
                                                   (__bridge CFDictionaryRef)decoderSpecification,
                                                   NULL,  // default destination image buffer attributes
                                                   NULL,  // no callback record: frames decoded via output handlers
                                                   &decompressionSession);
    NSLog(@"status: %d", (int)status);  // OSStatus is SInt32; cast for %d
    if (status != noErr) {
        NSLog(@"Failed to create decompression session");
        return NULL;
    }
    return decompressionSession;
}
第二步:设置解码器属性
// Ask the session to emit both MV-HEVC video layers per frame (delivered together as a
// CMTaggedBufferGroup). Layer 0/1 are the base and secondary (left/right-eye) layers.
// NOTE(review): the IDs are hard-coded here — ideally read them from the format
// description's tag collections instead of assuming 0 and 1.
NSArray<NSNumber *> *videoLayerIDs = @[@0, @1];
OSStatus layerStatus = VTSessionSetProperty(sessionRef,
                                            kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs,
                                            (__bridge CFArrayRef)videoLayerIDs);
if (layerStatus == noErr) {
    NSLog(@"成功设置 kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs 属性");
} else {
    NSLog(@"设置 kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs 属性失败,错误代码:%d", (int)layerStatus);
}
第三步:解析视频
/// Reads every compressed video sample from the asset at `url` and submits it to the
/// decompression session (_decomSession); decoded multi-image frames arrive through
/// -processMultiImageBufferGroup:... via the output handler.
- (void)processVideoWithURLV2:(NSURL *)url {
    AVAsset *asset = [AVAsset assetWithURL:url];
    NSError *error = nil;
    AVAssetReader *assetReader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
    // BUG FIX: check the init result, not the error pointer — the error may be untouched.
    if (!assetReader) {
        NSLog(@"Error creating asset reader: %@", [error localizedDescription]);
        return;
    }
    AVAssetTrack *videoTrack = [[asset tracksWithMediaType:AVMediaTypeVideo] firstObject];
    if (!videoTrack) {
        NSLog(@"Video track not found in the asset.");
        return;
    }
    // nil outputSettings → samples are vended in their compressed form, as VT decode needs.
    AVAssetReaderTrackOutput *output = [[AVAssetReaderTrackOutput alloc] initWithTrack:videoTrack
                                                                        outputSettings:nil];
    [assetReader addOutput:output];
    [assetReader startReading];

    // BUG FIX: VTDecodeInfoFlags is an *output* parameter of the decode call; the
    // original pre-set kVTDecodeInfo_* bits on it, which has no effect and was dropped.
    VTDecodeFrameFlags decodeFrameFlags =
        kVTDecodeFrame_EnableAsynchronousDecompression | kVTDecodeFrame_1xRealTimePlayback;

    while (assetReader.status == AVAssetReaderStatusReading) {
        // Pool keeps autoreleased temporaries from piling up across a long video.
        @autoreleasepool {
            CMSampleBufferRef sampleBuffer = [output copyNextSampleBuffer];
            if (!sampleBuffer) {
                continue;  // reader status flips to completed/failed once samples run out
            }
            VTDecodeInfoFlags decodeInfoFlags = 0;  // written by VideoToolbox
            VTDecompressionMultiImageCapableOutputHandler handler =
                ^(OSStatus status, VTDecodeInfoFlags infoFlags, CVImageBufferRef _Nullable imageBuffer,
                  CMTaggedBufferGroupRef _Nullable taggedBufferGroup, CMTime presentationTimeStamp,
                  CMTime presentationDuration) {
                    [self processMultiImageBufferGroup:status
                                             infoFlags:infoFlags
                                           imageBuffer:imageBuffer
                                     taggedBufferGroup:taggedBufferGroup
                                 presentationTimeStamp:presentationTimeStamp
                                  presentationDuration:presentationDuration];
                };
            OSStatus status = VTDecompressionSessionDecodeFrameWithMultiImageCapableOutputHandler(
                _decomSession, sampleBuffer, decodeFrameFlags, &decodeInfoFlags, handler);
            NSLog(@"OutputHandler status: %d", (int)status);
            CFRelease(sampleBuffer);  // balance copyNextSampleBuffer
        }
    }
    if (assetReader.status == AVAssetReaderStatusCompleted) {
        NSLog(@"Finished reading asset.");
    } else {
        NSLog(@"Error reading asset: %@", [assetReader.error localizedDescription]);
        [assetReader cancelReading];  // only meaningful when reading did not complete
    }
}
第四步:获取到左右视图
/// Output handler target for decoded MV-HEVC frames: extracts the per-layer (left/right
/// eye) pixel buffers from the tagged buffer group, or logs a decode failure.
- (void)processMultiImageBufferGroup:(OSStatus)status
                           infoFlags:(VTDecodeInfoFlags)infoFlags
                         imageBuffer:(CVImageBufferRef)imageBuffer
                   taggedBufferGroup:(CMTaggedBufferGroupRef)taggedBufferGroup
               presentationTimeStamp:(CMTime)presentationTimeStamp
                presentationDuration:(CMTime)presentationDuration {
    NSLog(@"走到多图像帧回调 taggedBufferGroup :%@", taggedBufferGroup);
    // BUG FIX: the error log used to live on the "no tagged group" branch *inside* the
    // status == noErr path, so it could only ever print error code 0 and real decode
    // failures were silently dropped. Report failures first, then handle success.
    if (status != noErr) {
        NSLog(@"解码图像时发生错误,错误码:%d", (int)status);
        return;
    }
    if (taggedBufferGroup) {
        // Indices 0/1 correspond to the layers requested via
        // kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs.
        CVPixelBufferRef pixelBfLef = CMTaggedBufferGroupGetCVPixelBufferAtIndex(taggedBufferGroup, 0);
        CVPixelBufferRef pixelBfRit = CMTaggedBufferGroupGetCVPixelBufferAtIndex(taggedBufferGroup, 1);
        // TODO(review): the buffers are currently unused — forward them to rendering/writing.
        (void)pixelBfLef;
        (void)pixelBfRit;
    }
}
2、使用AVPlayer
1、播放视频,获取左右眼信息
/// Plays the bundled spatial (MV-HEVC) movie and taps its decoded frames through
/// AVPlayerVideoOutput, pulling the left/right-eye pixel buffers ~60x per second,
/// combining them side-by-side, and feeding the result to an MP4 writer.
@IBAction func check(_ sender: UIButton) {
    view.addSubview(imageView)
    guard let videoUrl = Bundle.main.url(forResource: "IMG_0056", withExtension: "MOV") else { return }
    let asset = AVAsset(url: videoUrl)
    let player = AVPlayer(playerItem: AVPlayerItem(asset: asset))
    // Ask the player to vend stereoscopic tagged-buffer groups (one buffer per eye).
    let outputSpecification = AVVideoOutputSpecification(tagCollections: [.stereoscopicForVideoOutput()])
    let videoOutput = AVPlayerVideoOutput(specification: outputSpecification)
    player.videoOutput = videoOutput
    let playerLayer = AVPlayerLayer(player: player)
    playerLayer.frame = CGRect(x: 0, y: 0, width: 100, height: 100)
    view.layer.addSublayer(playerLayer)
    // NOTE(review): `player` is a local and the observer token is discarded — confirm
    // something retains them past this method, or playback stops on deallocation.
    player.addPeriodicTimeObserver(forInterval: CMTime(value: 1, timescale: 60), queue: .main) { time in
        // NOTE(review): queries by current host time instead of the observer's `time`
        // parameter — presumably intentional ("frame closest to now"); confirm.
        guard let (taggedBufferGroup, presentationTime, activeConfiguration) = videoOutput.taggedBuffers(forHostTime: CMClockGetTime(.hostTimeClock)) else {
            return
        }
        for taggedBuffer in taggedBufferGroup {
            // Match each eye by its .stereoView tag, then unwrap the pixel buffers.
            guard let leftEyeBuffer = taggedBuffer.first(where: { $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye) })?.buffer,
                  let rightEyeBuffer = taggedBuffer.first(where: { $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye) })?.buffer,
                  case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
                  case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer else {
                continue
            }
            let leftEyeImage = self.imageFromPixelBuffer(pixelBuffer: leftEyePixelBuffer)
            let rightEyeImage = self.imageFromPixelBuffer(pixelBuffer: rightEyePixelBuffer)
            let combinedImage = self.combineImages(leftEyeImage: leftEyeImage, rightEyeImage: rightEyeImage)
            self.imageView.image = combinedImage
            // Lazily start the MP4 writer on the first decoded frame.
            if !self.isWritingStarted {
                self.isWritingStarted = true
                self.startVideoWriting()
            }
            // NOTE(review): force-unwrap crashes if image conversion fails — consider guarding.
            self.writeFrame(image: combinedImage!)
        }
    }
    player.play()
}
2、CVPixelBuffer转换成UIImage
/// Converts a CVPixelBuffer into a UIImage via Core Image, or returns nil if the
/// CGImage could not be rendered.
func imageFromPixelBuffer(pixelBuffer: CVPixelBuffer) -> UIImage? {
    let source = CIImage(cvPixelBuffer: pixelBuffer)
    let renderer = CIContext()
    guard let cgImage = renderer.createCGImage(source, from: source.extent) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}
3、CGImage转换成CVPixelBuffer
/// Renders a CGImage into a newly allocated 32ARGB CVPixelBuffer.
/// - Returns: the filled buffer, or nil if allocation or context creation fails.
private func pixelBufferFromCGImage(cgImage: CGImage) -> CVPixelBuffer? {
    let options: [String: Any] = [
        kCVPixelBufferCGImageCompatibilityKey as String: true,
        kCVPixelBufferCGBitmapContextCompatibilityKey as String: true
    ]
    var pixelBuffer: CVPixelBuffer?
    let status = CVPixelBufferCreate(
        kCFAllocatorDefault,
        cgImage.width,
        cgImage.height,
        kCVPixelFormatType_32ARGB,
        options as CFDictionary,
        &pixelBuffer
    )
    guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
        print("Error: Failed to create pixel buffer.")
        return nil
    }
    CVPixelBufferLockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
    // defer guarantees the unlock on every exit path, including the new failure return.
    defer { CVPixelBufferUnlockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0)) }
    // BUG FIX: the original ignored a nil CGContext and returned a buffer full of
    // uninitialized memory; fail explicitly instead.
    guard let context = CGContext(
        data: CVPixelBufferGetBaseAddress(buffer),
        width: cgImage.width,
        height: cgImage.height,
        bitsPerComponent: cgImage.bitsPerComponent,
        bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
        space: CGColorSpaceCreateDeviceRGB(),
        bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue
    ) else {
        print("Error: Failed to create CGContext for pixel buffer.")
        return nil
    }
    context.draw(cgImage, in: CGRect(origin: .zero, size: CGSize(width: cgImage.width, height: cgImage.height)))
    return buffer
}
4、左右眼合并
/// Draws the two eye images side-by-side (left | right) into a single image.
/// - Parameter size: output canvas size. Defaults to the previously hard-coded
///   1080x720, so existing call sites are unaffected.
/// - Returns: the combined image, or nil if the graphics context fails.
func combineImages(leftEyeImage: UIImage?, rightEyeImage: UIImage?,
                   size: CGSize = CGSize(width: 1080, height: 720)) -> UIImage? {
    // Explicit scale 1 keeps pixel dimensions equal to `size` regardless of screen
    // scale (matching the plain UIGraphicsBeginImageContext behavior, but explicit).
    UIGraphicsBeginImageContextWithOptions(size, false, 1.0)
    defer { UIGraphicsEndImageContext() }
    leftEyeImage?.draw(in: CGRect(x: 0, y: 0, width: size.width / 2, height: size.height))
    rightEyeImage?.draw(in: CGRect(x: size.width / 2, y: 0, width: size.width / 2, height: size.height))
    return UIGraphicsGetImageFromCurrentImageContext()
}
5、初始化 保存mp4文件
// Bridges raw CVPixelBuffers into the writer input.
var adaptor: AVAssetWriterInputPixelBufferAdaptor?

/// Sets up an AVAssetWriter recording 1080x720 H.264 frames to <tmp>/zyb103.mp4
/// and starts the writing session at time zero.
func startVideoWriting() {
    let outputURL = FileManager.default.temporaryDirectory.appendingPathComponent("zyb103.mp4")
    // BUG FIX: AVAssetWriter refuses to create a file that already exists, so a second
    // run would fail — clear any previous output first.
    try? FileManager.default.removeItem(at: outputURL)
    do {
        videoWriter = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
    } catch {
        print("Error initializing AVAssetWriter: \(error)")
        return
    }
    let videoSettings: [String: Any] = [
        AVVideoCodecKey: AVVideoCodecType.h264,
        AVVideoWidthKey: 1080,
        AVVideoHeightKey: 720
    ]
    // Local non-optional avoids the force-unwraps of the original.
    let writerInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
    writerInput.expectsMediaDataInRealTime = true
    videoWriterInput = writerInput
    let pixelBufferAttributes: [String: Any] = [
        kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
    ]
    adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput,
                                                   sourcePixelBufferAttributes: pixelBufferAttributes)
    guard let videoWriter = videoWriter, videoWriter.canAdd(writerInput) else {
        print("Error: Cannot add asset writer input.")
        return
    }
    videoWriter.add(writerInput)
    videoWriter.startWriting()
    videoWriter.startSession(atSourceTime: CMTime.zero)
}
6、开始写入mp4文件
// Monotonic frame counter; converted to Int64 for the 30 fps presentation timestamp.
var index = 0.0

/// Appends one UIImage as a video frame at 30 fps. Frames are dropped (with a log)
/// when the writer input is not ready or conversion fails.
func writeFrame(image: UIImage) {
    guard let cgImage = image.cgImage else {
        print("Error: Failed to get CGImage from UIImage.")
        return
    }
    guard videoWriterInput?.isReadyForMoreMediaData == true else {
        // Silent drop in the original; make the drop visible.
        print("Warning: writer input not ready, dropping frame.")
        return
    }
    guard let pixelBuffer = pixelBufferFromCGImage(cgImage: cgImage) else {
        print("Error: Failed to convert CGImage to pixel buffer.")
        return
    }
    let presentationTime = CMTimeMake(value: Int64(index), timescale: 30)
    // BUG FIX: the original ignored append's Bool result and advanced the timestamp
    // even when the frame was never written; only advance on success.
    if adaptor?.append(pixelBuffer, withPresentationTime: presentationTime) == true {
        index += 1.0
    } else {
        print("Error: Failed to append pixel buffer at frame \(Int64(index)).")
    }
}
7、监听视频播放完成,结束写入文件
// Observe end-of-playback so the MP4 writer can be finalized when the source finishes.
// NOTE(review): the observer is never removed — balance with removeObserver (e.g. in
// deinit). `object: nil` matches *any* player item; pass the specific AVPlayerItem
// to avoid reacting to unrelated players.
NotificationCenter.default.addObserver(self,
                                       selector: #selector(playerDidFinishPlaying),
                                       name: .AVPlayerItemDidPlayToEndTime,
                                       object: nil)

/// Notification handler: finalize the output file once playback reaches the end.
@objc func playerDidFinishPlaying() {
    finishWriting()
}
/// Marks the writer input finished, completes the write, and on success saves the
/// resulting MP4 to the Photos library.
func finishWriting() {
    videoWriterInput?.markAsFinished()
    // [weak self] avoids the writer's stored completion keeping the controller alive.
    videoWriter?.finishWriting { [weak self] in
        guard let self = self, let videoWriter = self.videoWriter else { return }
        // BUG FIX: finishWriting's completion also fires on failure; the original
        // printed "completed" unconditionally. Check the status first.
        guard videoWriter.status == .completed else {
            print("Error: video writing failed: \(String(describing: videoWriter.error))")
            return
        }
        print("Video writing completed.")
        let outputURL = videoWriter.outputURL
        if FileManager.default.fileExists(atPath: outputURL.path) {
            self.saveVideoToPhotosLibrary(videoURL: outputURL)
        } else {
            print("Error: Video file does not exist at path \(outputURL.path)")
        }
    }
}
8、将音频和无声视频合并
/// Muxes the silent video at path `temp` with the audio track of the bundled source
/// movie, exporting the result to <tmp>/<finalSource>.mp4 asynchronously.
func addAudioToVideo(_ temp: String, _ finalSource: String) {
    do {
        print("temp: \(temp) finalSource: \(finalSource)")
        let videoAsset = AVAsset(url: URL(fileURLWithPath: temp))
        // BUG FIX: the bundle URL was force-unwrapped; fail gracefully instead.
        guard let audioUrl = Bundle.main.url(forResource: "IMG_0056", withExtension: "MOV") else {
            print("Error: IMG_0056.MOV not found in bundle")
            return
        }
        let audioAsset = AVAsset(url: audioUrl)
        // BUG FIX: `[0]` indexing crashed when either asset lacked the track; guard it.
        guard let sourceVideoTrack = videoAsset.tracks(withMediaType: .video).first else {
            print("Error: no video track in \(temp)")
            return
        }
        guard let sourceAudioTrack = audioAsset.tracks(withMediaType: .audio).first else {
            print("Error: no audio track in source movie")
            return
        }
        let mixComposition = AVMutableComposition()
        let videoTrack = mixComposition.addMutableTrack(withMediaType: .video,
                                                        preferredTrackID: kCMPersistentTrackID_Invalid)
        try videoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: videoAsset.duration),
                                        of: sourceVideoTrack, at: CMTime.zero)
        let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio,
                                                        preferredTrackID: kCMPersistentTrackID_Invalid)
        try audioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: audioAsset.duration),
                                        of: sourceAudioTrack, at: CMTime.zero)
        let outputUrl = URL(fileURLWithPath: NSTemporaryDirectory() + finalSource + ".mp4")
        // BUG FIX: the export session fails if the destination already exists.
        try? FileManager.default.removeItem(at: outputUrl)
        guard let exportSession = AVAssetExportSession(asset: mixComposition,
                                                       presetName: AVAssetExportPresetHighestQuality) else {
            print("Error creating export session")
            return
        }
        exportSession.outputFileType = .mp4
        exportSession.outputURL = outputUrl
        exportSession.exportAsynchronously {
            if exportSession.status == .completed {
                print("Export successful: \(outputUrl)")
            } else if let error = exportSession.error {
                print("Export failed with error: \(error)")
            }
        }
    } catch {
        print("Error: \(error)")
    }
}