(This article implements H.264 hardware encoding by calling the underlying C APIs from Swift 3, so you will need some grounding in Swift 3, Objective-C, and C to follow along. A walkthrough of the code's execution flow is attached at the end.)
First, create a class that holds the H.264 encoder settings (parameters are passed as properties on an instance of this class):
//
// TGVTSessionSetProperty.h
// videocapture
//
// Created by targetcloud on 2017/3/31.
// Copyright © 2017 targetcloud. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface TGVTSessionSetProperty : NSObject
@property(nonatomic,assign) int width;
@property(nonatomic,assign) int height;
@property(nonatomic,assign) int expectedFrameRate;
@property(nonatomic,assign) int averageBitRate;
@property(nonatomic,assign) int maxKeyFrameInterval;
@end
//
// TGVTSessionSetProperty.m
// videocapture
//
// Created by targetcloud on 2017/3/31.
// Copyright © 2017 targetcloud. All rights reserved.
//
#import "TGVTSessionSetProperty.h"
@implementation TGVTSessionSetProperty
@end
Approach 1: create a fresh encoder for every capture session
//
// TGH264Encoder.h
// videocapture
//
// Created by targetcloud on 2017/3/30.
// Copyright © 2017 targetcloud. All rights reserved.
//
#import <UIKit/UIKit.h>
#import <VideoToolbox/VideoToolbox.h>
@class TGVTSessionSetProperty;
@interface TGH264Encoder : NSObject
- (instancetype)initWithProperty:(TGVTSessionSetProperty *)properties;
- (void)encodeSampleBuffer:(CMSampleBufferRef)sampleBuffer;
- (void)endEncode;
@end
//
// TGH264Encoder.m
// videocapture
//
// Created by targetcloud on 2017/3/30.
// Copyright © 2017 targetcloud. All rights reserved.
//
#import "TGH264Encoder.h"
#import "TGVTSessionSetProperty.h"
@interface TGH264Encoder()
@property (nonatomic, assign) NSInteger frameID;
@property (nonatomic, assign) VTCompressionSessionRef compressionSession;
@property (nonatomic, strong) NSFileHandle *fileHandle;
@property (nonatomic, strong) TGVTSessionSetProperty *properties;
@end
@implementation TGH264Encoder
- (instancetype)initWithProperty:(TGVTSessionSetProperty *)properties {
if (self = [super init]) {
self.properties = properties;
[self setupFileHandle];
[self setupVideoSession];
}
return self;
}
- (void)setupFileHandle {
NSString *file = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject]
stringByAppendingPathComponent:@"videoAudioCapture.h264"];
[[NSFileManager defaultManager] removeItemAtPath:file error:nil];
[[NSFileManager defaultManager] createFileAtPath:file contents:nil attributes:nil];
self.fileHandle = [NSFileHandle fileHandleForWritingAtPath:file];
}
- (void)setupVideoSession {
self.frameID = 0;
int width = self.properties.width;
int height = self.properties.height;
// Create the CompressionSession that performs the encoding. kCMVideoCodecType_H264 selects H.264; h264VTCompressionOutputCallback runs each time a frame finishes encoding, and is where the encoded data gets written to file
VTCompressionSessionCreate(NULL,
width,
height,
kCMVideoCodecType_H264,
NULL,
NULL,
NULL,
h264VTCompressionOutputCallback,
(__bridge void *)(self),
&_compressionSession);
// Real-time encoded output (required for live streaming; otherwise frames arrive late)
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_RealTime, (__bridge CFTypeRef _Nonnull)(@YES));// kCFBooleanTrue works equally well
// Expected frame rate (frames per second; too low and playback stutters)
int fps = self.properties.expectedFrameRate;
CFNumberRef fpsRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &fps);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, fpsRef);
CFRelease(fpsRef);// CFNumberCreate returns an owned reference, so balance it once the property is set
// Average bit rate (the higher the bit rate, the clearer the picture), in bits per second
int bitRate = self.properties.averageBitRate;
CFNumberRef bitRateRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bitRate);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitRateRef);
CFRelease(bitRateRef);
// Hard cap at 1.5x the average rate; DataRateLimits takes [bytes, seconds], hence the /8 (for a 921600 bps average: 921600 * 1.5 / 8 = 172800 bytes per second)
NSArray *limit = @[@(bitRate * 1.5/8), @(1)];
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_DataRateLimits, (__bridge CFArrayRef)limit);
// Keyframe (GOP size) interval
int frameInterval = self.properties.maxKeyFrameInterval;
CFNumberRef frameIntervalRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &frameInterval);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, frameIntervalRef);
CFRelease(frameIntervalRef);
// Configuration done; get ready to encode
VTCompressionSessionPrepareToEncodeFrames(self.compressionSession);
}
// Callback invoked when a frame has finished encoding
void h264VTCompressionOutputCallback(void *outputCallbackRefCon, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags, CMSampleBufferRef sampleBuffer) {
if (status != noErr) {
return;
}
TGH264Encoder* encoder = (__bridge TGH264Encoder*)outputCallbackRefCon;
// Determine whether this is a keyframe: sync samples carry no kCMSampleAttachmentKey_NotSync attachment
//bool isKeyframe = !CFDictionaryContainsKey( (CFArrayGetValueAtIndex(CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true), 0)), kCMSampleAttachmentKey_NotSync);
CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true);
CFDictionaryRef dict = CFArrayGetValueAtIndex(attachments, 0);
BOOL isKeyframe = !CFDictionaryContainsKey(dict, kCMSampleAttachmentKey_NotSync);
if (isKeyframe){// keyframes carry the SPS & PPS parameter sets, so extract them here
// Get the format description produced by the encoder
CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
// Get the SPS
size_t sparameterSetSize, sparameterSetCount;
const uint8_t *sparameterSet;
CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sparameterSet, &sparameterSetSize, &sparameterSetCount, NULL );
// Get the PPS
size_t pparameterSetSize, pparameterSetCount;
const uint8_t *pparameterSet;
CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, NULL );
// Wrap the SPS/PPS in NSData so they can be written to file
NSData *sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
NSData *pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
// Write them to file
[encoder gotSpsPps:sps pps:pps];
}
// Get the encoded data block
CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t length, totalLength;
char *dataPointer;
OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
if (statusCodeRet == noErr) {
size_t bufferOffset = 0;
static const int h264AVCCHeaderLength = 4;
// Walk the block buffer NALU by NALU
while (bufferOffset < totalLength - h264AVCCHeaderLength) {// a single encoded frame may be split across several NAL units (slices)
uint32_t NALUnitLength = 0;
memcpy(&NALUnitLength, dataPointer + bufferOffset, h264AVCCHeaderLength);// the first 4 bytes are the NALU's AVCC length prefix
NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);// the prefix is big-endian in the encoded stream; convert to host byte order
NSData* data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + h264AVCCHeaderLength) length:NALUnitLength];
[encoder gotEncodedData:data isKeyFrame:isKeyframe];
bufferOffset += h264AVCCHeaderLength + NALUnitLength;
}
}
}
- (void)gotSpsPps:(NSData*)sps pps:(NSData*)pps{
// Annex-B NALU start code
const char bytes[] = "\x00\x00\x00\x01";// the string literal carries an implicit trailing '\0', hence the -1 below
size_t length = (sizeof bytes) - 1;
NSData *byteHeader = [NSData dataWithBytes:bytes length:length];
[self.fileHandle writeData:byteHeader];
[self.fileHandle writeData:sps];
[self.fileHandle writeData:byteHeader];
[self.fileHandle writeData:pps];
}
- (void)gotEncodedData:(NSData*)data isKeyFrame:(BOOL)isKeyFrame{
NSLog(@" --- gotEncodedData %d --- ", (int)[data length]);
if (self.fileHandle != nil){
const char bytes[] = "\x00\x00\x00\x01";
size_t length = (sizeof bytes) - 1; //string literals have an implicit trailing '\0'
NSData *byteHeader = [NSData dataWithBytes:bytes length:length];
[self.fileHandle writeData:byteHeader];
[self.fileHandle writeData:data];
}
}
// Entry point: every captured frame passes through here and ends up in h264VTCompressionOutputCallback
- (void)encodeSampleBuffer:(CMSampleBufferRef)sampleBuffer {
CVImageBufferRef imageBuffer = (CVImageBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);// pull the raw image buffer out of the sample buffer
CMTime presentationTimeStamp = CMTimeMake(self.frameID++, self.properties.expectedFrameRate);// PTS/DTS: build a CMTime from the running frame count
VTEncodeInfoFlags flag;
// Encode this frame
OSStatus statusCode = VTCompressionSessionEncodeFrame(self.compressionSession,
imageBuffer,
presentationTimeStamp,
kCMTimeInvalid,
NULL,
(__bridge void * _Nullable)(self),//h264VTCompressionOutputCallback sourceFrameRefCon
&flag);//h264VTCompressionOutputCallback infoFlags
if (statusCode == noErr) {
NSLog(@" --- H264: VTCompressionSessionEncodeFrame Success --- ");
}
}
- (void)endEncode {
VTCompressionSessionCompleteFrames(self.compressionSession, kCMTimeInvalid);
VTCompressionSessionInvalidate(self.compressionSession);
CFRelease(self.compressionSession);
self.compressionSession = NULL;
}
@end
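Before looking at usage, note what the callback above is transforming: VideoToolbox hands back AVCC framing, where each NAL unit is preceded by a 4-byte big-endian length, while a raw .h264 file needs Annex-B start codes. A worked example with a hypothetical 1234-byte NALU (0x000004D2 = 1234):
AVCC (from the encoder)  : 00 00 04 D2 | 1234 bytes of NALU payload
Annex-B (written to file): 00 00 00 01 | 1234 bytes of NALU payload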
Usage
//
// TGVideoCapture.swift
// videocapture
//
// Created by targetcloud on 2017/3/30.
// Copyright © 2017 targetcloud. All rights reserved.
//
import UIKit
import AVFoundation
class TGVideoCapture: NSObject {
fileprivate lazy var videoQueue = DispatchQueue.global()
fileprivate lazy var audioQueue = DispatchQueue.global()
fileprivate lazy var session : AVCaptureSession = {
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSessionPreset1280x720
return session
}()
//MARK:- Create-per-session mode, step 1
fileprivate var encoder : TGH264Encoder?
fileprivate lazy var previewLayer : AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session)
fileprivate var connection : AVCaptureConnection?
fileprivate var videoOutput : AVCaptureVideoDataOutput?
fileprivate var videoInput : AVCaptureDeviceInput?
fileprivate var view : UIView
init(_ view : UIView){
self.view = view
super.init()
setupVideo()
setupAudio()
}
func startCapture() {
//MARK:- Create-per-session mode, step 1 (every start gets a brand-new encoder)
encoder = { () -> TGH264Encoder! in
let p = TGVTSessionSetProperty()
p.height = 1280
p.width = 720
p.expectedFrameRate = 30
p.averageBitRate = 1280*720 // in bps; typical choices: 1920*1080 1280*720 720*576 640*480 480*360
p.maxKeyFrameInterval = 30 // GOP size: the larger the value, the smaller the compressed output
return TGH264Encoder(property: p)
}()
if connection?.isVideoOrientationSupported ?? false {
connection?.videoOrientation = .portrait
}
connection?.preferredVideoStabilizationMode = .auto
previewLayer.frame = view.bounds
view.layer.insertSublayer(previewLayer, at: 0)
session.startRunning()
}
func endCapture() {
session.stopRunning()
previewLayer.removeFromSuperlayer()
//MARK:- Create-per-session mode, step 3
encoder?.endEncode()
}
func switchFrontOrBack() {
// CATransition
let rotationAnim = CATransition()
rotationAnim.type = "oglFlip"
rotationAnim.subtype = "fromLeft"
rotationAnim.duration = 0.5
view.layer.add(rotationAnim, forKey: nil)
// Check Current videoInput
guard let videoInput = videoInput else { return }
// Change Position
let position : AVCaptureDevicePosition = videoInput.device.position == .front ? .back : .front
// New DeviceInput
guard let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as? [AVCaptureDevice] else { return }
guard let newDevice = devices.filter({$0.position == position}).first else { return }
guard let newVideoInput = try? AVCaptureDeviceInput(device: newDevice) else { return }
// Remove videoInput & Add newVideoInput
session.beginConfiguration()
session.removeInput(videoInput)
session.addInput(newVideoInput)
session.commitConfiguration()
// Save Current videoInput
self.videoInput = newVideoInput
// portrait
connection = videoOutput?.connection(withMediaType: AVMediaTypeVideo)
if connection?.isVideoOrientationSupported ?? false {
connection?.videoOrientation = .portrait
}
connection?.preferredVideoStabilizationMode = .auto
}
}
extension TGVideoCapture {
fileprivate func setupVideo() {
//info.plist add Privacy - Camera Usage Description
guard let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as? [AVCaptureDevice] else {return}
guard let device = devices.filter({$0.position == .back}).first else {return}
guard let videoInput = try? AVCaptureDeviceInput(device: device) else {return}
if session.canAddInput(videoInput){
session.addInput(videoInput)
}
self.videoInput = videoInput
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue:videoQueue)
videoOutput.alwaysDiscardsLateVideoFrames = true
if session.canAddOutput(videoOutput){
session.addOutput(videoOutput)
}
connection = videoOutput.connection(withMediaType: AVMediaTypeVideo)
self.videoOutput = videoOutput
}
fileprivate func setupAudio() {
//info.plist add Privacy - Microphone Usage Description
guard let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio) else {return}
guard let audioInput = try? AVCaptureDeviceInput(device: device) else {return}
if session.canAddInput(audioInput){
session.addInput(audioInput)
}
let audioOutput = AVCaptureAudioDataOutput()
audioOutput.setSampleBufferDelegate(self, queue:audioQueue)
if session.canAddOutput(audioOutput){
session.addOutput(audioOutput)
}
}
}
extension TGVideoCapture : AVCaptureVideoDataOutputSampleBufferDelegate,AVCaptureAudioDataOutputSampleBufferDelegate{
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if connection == self.connection {
print("--- captured a video frame ---")
} else {
print("--- captured audio data ---")
}
//MARK:- Create-per-session mode, step 2
encoder?.encode(sampleBuffer)
}
}
Approach 2: create the encoder lazily (reused across capture sessions)
//
// TGH264Encoder.m
// videocapture
//
// Created by targetcloud on 2017/3/30.
// Copyright © 2017 targetcloud. All rights reserved.
//
#import "TGH264Encoder.h"
#import "TGVTSessionSetProperty.h"
@interface TGH264Encoder()
@property (nonatomic, assign) NSInteger frameID;
@property (nonatomic, assign) VTCompressionSessionRef compressionSession;
@property (nonatomic, strong) NSFileHandle *fileHandle;
@property (nonatomic, strong) TGVTSessionSetProperty *properties;
@end
@implementation TGH264Encoder
- (instancetype)initWithProperty:(TGVTSessionSetProperty *)properties {
if (self = [super init]) {
self.properties = properties;
[self setupFileHandle];
[self setupVideoSession];
}
return self;
}
- (void)setupFileHandle {
NSString *file = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject]
stringByAppendingPathComponent:@"videoAudioCapture.h264"];
[[NSFileManager defaultManager] removeItemAtPath:file error:nil];
[[NSFileManager defaultManager] createFileAtPath:file contents:nil attributes:nil];
self.fileHandle = [NSFileHandle fileHandleForWritingAtPath:file];
}
- (void)setupVideoSession {
self.frameID = 0;
int width = self.properties.width;
int height = self.properties.height;
// Create the CompressionSession that performs the encoding. kCMVideoCodecType_H264 selects H.264; h264VTCompressionOutputCallback runs each time a frame finishes encoding, and is where the encoded data gets written to file
VTCompressionSessionCreate(NULL,
width,
height,
kCMVideoCodecType_H264,
NULL,
NULL,
NULL,
h264VTCompressionOutputCallback,
(__bridge void *)(self),
&_compressionSession);
// Real-time encoded output (required for live streaming; otherwise frames arrive late)
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_RealTime, (__bridge CFTypeRef _Nonnull)(@YES));// kCFBooleanTrue works equally well
// Expected frame rate (frames per second; too low and playback stutters)
int fps = self.properties.expectedFrameRate;
CFNumberRef fpsRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &fps);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, fpsRef);
CFRelease(fpsRef);// CFNumberCreate returns an owned reference, so balance it once the property is set
// Average bit rate (the higher the bit rate, the clearer the picture), in bits per second
int bitRate = self.properties.averageBitRate;
CFNumberRef bitRateRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bitRate);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitRateRef);
CFRelease(bitRateRef);
// Hard cap at 1.5x the average rate; DataRateLimits takes [bytes, seconds], hence the /8 (for a 921600 bps average: 921600 * 1.5 / 8 = 172800 bytes per second)
NSArray *limit = @[@(bitRate * 1.5/8), @(1)];
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_DataRateLimits, (__bridge CFArrayRef)limit);
// Keyframe (GOP size) interval
int frameInterval = self.properties.maxKeyFrameInterval;
CFNumberRef frameIntervalRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &frameInterval);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, frameIntervalRef);
CFRelease(frameIntervalRef);
// Configuration done; get ready to encode
VTCompressionSessionPrepareToEncodeFrames(self.compressionSession);
}
// Callback invoked when a frame has finished encoding
void h264VTCompressionOutputCallback(void *outputCallbackRefCon, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags, CMSampleBufferRef sampleBuffer) {
if (status != noErr) {
return;
}
TGH264Encoder* encoder = (__bridge TGH264Encoder*)outputCallbackRefCon;
// Determine whether this is a keyframe: sync samples carry no kCMSampleAttachmentKey_NotSync attachment
//bool isKeyframe = !CFDictionaryContainsKey( (CFArrayGetValueAtIndex(CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true), 0)), kCMSampleAttachmentKey_NotSync);
CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true);
CFDictionaryRef dict = CFArrayGetValueAtIndex(attachments, 0);
BOOL isKeyframe = !CFDictionaryContainsKey(dict, kCMSampleAttachmentKey_NotSync);
if (isKeyframe){// keyframes carry the SPS & PPS parameter sets, so extract them here
// Get the format description produced by the encoder
CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
// Get the SPS
size_t sparameterSetSize, sparameterSetCount;
const uint8_t *sparameterSet;
CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sparameterSet, &sparameterSetSize, &sparameterSetCount, NULL );
// Get the PPS
size_t pparameterSetSize, pparameterSetCount;
const uint8_t *pparameterSet;
CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, NULL );
// Wrap the SPS/PPS in NSData so they can be written to file
NSData *sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
NSData *pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
// Write them to file
[encoder gotSpsPps:sps pps:pps];
}
// Get the encoded data block
CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t length, totalLength;
char *dataPointer;
OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
if (statusCodeRet == noErr) {
size_t bufferOffset = 0;
static const int h264AVCCHeaderLength = 4;
// Walk the block buffer NALU by NALU
while (bufferOffset < totalLength - h264AVCCHeaderLength) {// a single encoded frame may be split across several NAL units (slices)
uint32_t NALUnitLength = 0;
memcpy(&NALUnitLength, dataPointer + bufferOffset, h264AVCCHeaderLength);// the first 4 bytes are the NALU's AVCC length prefix
NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);// the prefix is big-endian in the encoded stream; convert to host byte order
NSData* data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + h264AVCCHeaderLength) length:NALUnitLength];
[encoder gotEncodedData:data isKeyFrame:isKeyframe];
bufferOffset += h264AVCCHeaderLength + NALUnitLength;
}
}
}
- (void)gotSpsPps:(NSData*)sps pps:(NSData*)pps{
// Annex-B NALU start code
const char bytes[] = "\x00\x00\x00\x01";// the string literal carries an implicit trailing '\0', hence the -1 below
size_t length = (sizeof bytes) - 1;
NSData *byteHeader = [NSData dataWithBytes:bytes length:length];
[self.fileHandle writeData:byteHeader];
[self.fileHandle writeData:sps];
[self.fileHandle writeData:byteHeader];
[self.fileHandle writeData:pps];
}
- (void)gotEncodedData:(NSData*)data isKeyFrame:(BOOL)isKeyFrame{
NSLog(@" --- gotEncodedData %d --- ", (int)[data length]);
if (self.fileHandle != nil){
const char bytes[] = "\x00\x00\x00\x01";
size_t length = (sizeof bytes) - 1; //string literals have an implicit trailing '\0'
NSData *byteHeader = [NSData dataWithBytes:bytes length:length];
[self.fileHandle writeData:byteHeader];
[self.fileHandle writeData:data];
}
}
// Entry point: every captured frame passes through here and ends up in h264VTCompressionOutputCallback
- (void)encodeSampleBuffer:(CMSampleBufferRef)sampleBuffer {
CVImageBufferRef imageBuffer = (CVImageBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);// pull the raw image buffer out of the sample buffer
CMTime presentationTimeStamp = CMTimeMake(self.frameID++, self.properties.expectedFrameRate);// PTS/DTS: build a CMTime from the running frame count
VTEncodeInfoFlags flag;
// Encode this frame
OSStatus statusCode = VTCompressionSessionEncodeFrame(self.compressionSession,
imageBuffer,
presentationTimeStamp,
kCMTimeInvalid,
NULL,
(__bridge void * _Nullable)(self),//h264VTCompressionOutputCallback sourceFrameRefCon
&flag);//h264VTCompressionOutputCallback infoFlags
if (statusCode == noErr) {
NSLog(@" --- H264: VTCompressionSessionEncodeFrame Success --- ");
}
}
- (void)endEncode {
VTCompressionSessionCompleteFrames(self.compressionSession, kCMTimeInvalid);
// After encoding ends, archive this recording under a timestamped name and reset videoAudioCapture.h264 to a fresh state; this suits the lazily-created encoder
NSString *path = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
NSDateFormatter *formatter = [[NSDateFormatter alloc] init];
[formatter setDateFormat:@"yyyy-MM-dd HH:mm:ss"];
NSString *dateStr = [formatter stringFromDate:[NSDate date]];
[[NSFileManager defaultManager] copyItemAtPath:[path stringByAppendingPathComponent:@"videoAudioCapture.h264"]
toPath:[path stringByAppendingPathComponent:[NSString stringWithFormat:@"%@.h264",dateStr]] error:NULL];
[self setupFileHandle];
// Because the outer layer creates TGH264Encoder lazily (only once), the session is not torn down here; if instead you create a new encoder every time, uncomment the three lines below
//VTCompressionSessionInvalidate(self.compressionSession);
//CFRelease(self.compressionSession);
//self.compressionSession = NULL;
}
@end
//
// TGVideoCapture.swift
// videocapture
//
// Created by targetcloud on 2017/3/30.
// Copyright © 2017 targetcloud. All rights reserved.
//
import UIKit
import AVFoundation
class TGVideoCapture: NSObject {
fileprivate lazy var videoQueue = DispatchQueue.global()
fileprivate lazy var audioQueue = DispatchQueue.global()
fileprivate lazy var session : AVCaptureSession = {
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSessionPreset1280x720
return session
}()
//MARK:- Lazy mode, step 1
fileprivate lazy var encoder : TGH264Encoder = {
let p = TGVTSessionSetProperty()
p.height = 1280
p.width = 720
p.expectedFrameRate = 30
p.averageBitRate = 1280*720 // in bps; typical choices: 1920*1080 1280*720 720*576 640*480 480*360
p.maxKeyFrameInterval = 30 // GOP size: the larger the value, the smaller the compressed output
return TGH264Encoder(property: p)
}()
fileprivate lazy var previewLayer : AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session)
fileprivate var connection : AVCaptureConnection?
fileprivate var videoOutput : AVCaptureVideoDataOutput?
fileprivate var videoInput : AVCaptureDeviceInput?
fileprivate var view : UIView
init(_ view : UIView){
self.view = view
super.init()
setupVideo()
setupAudio()
}
func startCapture() {
if connection?.isVideoOrientationSupported ?? false {
connection?.videoOrientation = .portrait
}
connection?.preferredVideoStabilizationMode = .auto
previewLayer.frame = view.bounds
view.layer.insertSublayer(previewLayer, at: 0)
session.startRunning()
}
func endCapture() {
session.stopRunning()
previewLayer.removeFromSuperlayer()
//MARK:- Lazy mode, step 3
encoder.endEncode()
}
func switchFrontOrBack() {
// CATransition
let rotationAnim = CATransition()
rotationAnim.type = "oglFlip"
rotationAnim.subtype = "fromLeft"
rotationAnim.duration = 0.5
view.layer.add(rotationAnim, forKey: nil)
// Check Current videoInput
guard let videoInput = videoInput else { return }
// Change Position
let position : AVCaptureDevicePosition = videoInput.device.position == .front ? .back : .front
// New DeviceInput
guard let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as? [AVCaptureDevice] else { return }
guard let newDevice = devices.filter({$0.position == position}).first else { return }
guard let newVideoInput = try? AVCaptureDeviceInput(device: newDevice) else { return }
// Remove videoInput & Add newVideoInput
session.beginConfiguration()
session.removeInput(videoInput)
session.addInput(newVideoInput)
session.commitConfiguration()
// Save Current videoInput
self.videoInput = newVideoInput
// portrait
connection = videoOutput?.connection(withMediaType: AVMediaTypeVideo)
if connection?.isVideoOrientationSupported ?? false {
connection?.videoOrientation = .portrait
}
connection?.preferredVideoStabilizationMode = .auto
}
}
extension TGVideoCapture {
fileprivate func setupVideo() {
//info.plist add Privacy - Camera Usage Description
guard let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as? [AVCaptureDevice] else {return}
guard let device = devices.filter({$0.position == .back}).first else {return}
guard let videoInput = try? AVCaptureDeviceInput(device: device) else {return}
if session.canAddInput(videoInput){
session.addInput(videoInput)
}
self.videoInput = videoInput
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue:videoQueue)
videoOutput.alwaysDiscardsLateVideoFrames = true
if session.canAddOutput(videoOutput){
session.addOutput(videoOutput)
}
connection = videoOutput.connection(withMediaType: AVMediaTypeVideo)
self.videoOutput = videoOutput
}
fileprivate func setupAudio() {
//info.plist add Privacy - Microphone Usage Description
guard let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio) else {return}
guard let audioInput = try? AVCaptureDeviceInput(device: device) else {return}
if session.canAddInput(audioInput){
session.addInput(audioInput)
}
let audioOutput = AVCaptureAudioDataOutput()
audioOutput.setSampleBufferDelegate(self, queue:audioQueue)
if session.canAddOutput(audioOutput){
session.addOutput(audioOutput)
}
}
}
extension TGVideoCapture : AVCaptureVideoDataOutputSampleBufferDelegate,AVCaptureAudioDataOutputSampleBufferDelegate{
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if connection == self.connection {
print("--- captured a video frame ---")
} else {
print("--- captured audio data ---")
}
//MARK:- Lazy mode, step 2
encoder.encode(sampleBuffer)
}
}
Because the encoder is written in Objective-C while the outer layer is written in Swift 3, a bridging header is also needed:
//
// Use this file to import your target's public headers that you would like to expose to Swift.
//
#import "TGH264Encoder.h"
#import "TGVTSessionSetProperty.h"
At the outermost layer, the UI (the view controller) drives TGVideoCapture, written in Swift 3:
//
// ViewController.swift
// videocapture
//
// Created by targetcloud on 2016/11/12.
// Copyright © 2016 targetcloud. All rights reserved.
//
import UIKit
class ViewController: UIViewController {
fileprivate lazy var videoCapture : TGVideoCapture = TGVideoCapture(self.view)
override func viewDidLoad() {
super.viewDidLoad()
}
@IBAction func startCapture(_ sender: Any) {
videoCapture.startCapture()
}
@IBAction func endCapture(_ sender: Any) {
videoCapture.endCapture()
}
@IBAction func switchFrontOrBack(_ sender: Any) {
videoCapture.switchFrontOrBack()
}
}
The overall execution flow:
1. ViewController creates a lazily-loaded videoCapture; capture starts with videoCapture.startCapture(), stops with videoCapture.endCapture(), and videoCapture.switchFrontOrBack() switches between the front and back cameras.
2. At init time videoCapture receives the view from step 1 (used for the preview layer) and configures capture via setupVideo and setupAudio.
3. For startCapture, two ways of wiring up the H.264 encoder are shown; choose whichever fits your needs. If a fresh encoder per capture session is unnecessary, use the lazy variant.
3.1. In the lazy variant, the H.264 encoder's properties are all configured up front; adjust them here as needed
fileprivate lazy var encoder : TGH264Encoder = {
let p = TGVTSessionSetProperty()
p.height = 1280
p.width = 720
p.expectedFrameRate = 30
p.averageBitRate = 1280*720 // in bps; typical choices: 1920*1080 1280*720 720*576 640*480 480*360
p.maxKeyFrameInterval = 30 // GOP size: the larger the value, the smaller the compressed output
return TGH264Encoder(property: p) // the key line
}()
3.2. Inside the initializer, the TGVTSessionSetProperty values are applied to the compression session; most importantly, this is where the h264VTCompressionOutputCallback callback gets registered
- (instancetype)initWithProperty:(TGVTSessionSetProperty *)properties;
- (instancetype)initWithProperty:(TGVTSessionSetProperty *)properties {
if (self = [super init]) {
self.properties = properties;
[self setupFileHandle];
[self setupVideoSession];
}
return self;
}
- (void)setupVideoSession {
self.frameID = 0;
int width = self.properties.width;
int height = self.properties.height;
// Create the CompressionSession that performs the encoding. kCMVideoCodecType_H264 selects H.264; h264VTCompressionOutputCallback runs each time a frame finishes encoding, and is where the encoded data gets written to file
VTCompressionSessionCreate(NULL,
width,
height,
kCMVideoCodecType_H264,
NULL,
NULL,
NULL,
h264VTCompressionOutputCallback,
(__bridge void *)(self),
&_compressionSession);
// Real-time encoded output (required for live streaming; otherwise frames arrive late)
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_RealTime, (__bridge CFTypeRef _Nonnull)(@YES));// kCFBooleanTrue works equally well
// Expected frame rate (frames per second; too low and playback stutters)
int fps = self.properties.expectedFrameRate;
CFNumberRef fpsRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &fps);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, fpsRef);
CFRelease(fpsRef);// CFNumberCreate returns an owned reference, so balance it once the property is set
// Average bit rate (the higher the bit rate, the clearer the picture), in bits per second
int bitRate = self.properties.averageBitRate;
CFNumberRef bitRateRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bitRate);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitRateRef);
CFRelease(bitRateRef);
// Hard cap at 1.5x the average rate; DataRateLimits takes [bytes, seconds], hence the /8 (for a 921600 bps average: 921600 * 1.5 / 8 = 172800 bytes per second)
NSArray *limit = @[@(bitRate * 1.5/8), @(1)];
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_DataRateLimits, (__bridge CFArrayRef)limit);
// Keyframe (GOP size) interval
int frameInterval = self.properties.maxKeyFrameInterval;
CFNumberRef frameIntervalRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &frameInterval);
VTSessionSetProperty(self.compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, frameIntervalRef);
CFRelease(frameIntervalRef);
// Configuration done; get ready to encode
VTCompressionSessionPrepareToEncodeFrames(self.compressionSession);
}
4. captureOutput fires once session.startRunning() is active, and each sample buffer then enters - (void)encodeSampleBuffer:(CMSampleBufferRef)sampleBuffer;
the triggering call is
encoder.encode(sampleBuffer)
5. encodeSampleBuffer hands the frame to the session created in step 3, and h264VTCompressionOutputCallback completes the encoding by writing the NAL units (preceded by SPS/PPS on keyframes) to the file; a small verification sketch follows the listing below.
// Entry point: every captured frame passes through here and ends up in h264VTCompressionOutputCallback
- (void)encodeSampleBuffer:(CMSampleBufferRef)sampleBuffer {
CVImageBufferRef imageBuffer = (CVImageBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);// pull the raw image buffer out of the sample buffer
CMTime presentationTimeStamp = CMTimeMake(self.frameID++, self.properties.expectedFrameRate);// PTS/DTS: build a CMTime from the running frame count
VTEncodeInfoFlags flag;
// Encode this frame
OSStatus statusCode = VTCompressionSessionEncodeFrame(self.compressionSession,
imageBuffer,
presentationTimeStamp,
kCMTimeInvalid,
NULL,
(__bridge void * _Nullable)(self),//h264VTCompressionOutputCallback sourceFrameRefCon
&flag);//h264VTCompressionOutputCallback infoFlags
if (statusCode == noErr) {
NSLog(@" --- H264: VTCompressionSessionEncodeFrame Success --- ");
}
}
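As a sanity check on the dump, here is a minimal Swift sketch (an addition, not part of the original project; it assumes the same file name used in setupFileHandle) that reads the file back and counts NAL units by scanning for the 0x00000001 Annex-B start code:
import Foundation

// Scan the Annex-B dump for 4-byte start codes and report each NAL unit type
let docs = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).last!
let url = URL(fileURLWithPath: docs).appendingPathComponent("videoAudioCapture.h264")
guard let data = try? Data(contentsOf: url) else { fatalError("dump file not found") }
let bytes = [UInt8](data)
var offsets = [Int]()
var i = 0
while i + 4 <= bytes.count {
    if bytes[i] == 0 && bytes[i+1] == 0 && bytes[i+2] == 0 && bytes[i+3] == 1 {
        offsets.append(i) // found a start code
        i += 4
    } else {
        i += 1
    }
}
for o in offsets where o + 4 < bytes.count {
    let nalType = bytes[o + 4] & 0x1F // low 5 bits of the NAL header byte
    print("NAL at \(o): type \(nalType)") // 7 = SPS, 8 = PPS, 5 = IDR (keyframe), 1 = non-IDR slice
}
print("total NAL units: \(offsets.count)")
Because the output is a plain Annex-B stream, the same file can also be copied off the device and played directly in players such as ffplay or VLC.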