iOS: simultaneous recording and playback with Audio Unit, solving the AudioQueue PCM playback latency problem

    Recording and playing back with AudioQueue, or recording with AudioQueue and playing back with OpenAL, both suffer from noticeable latency.


    Dropping down to the lower-level Audio Unit API improves the latency considerably; at the very least, recording and playing back at the same time now works well. There are quite a few third-party Audio Unit libraries; I haven't studied them closely yet, but I looked a few things up and adapted the code below. Note that recording and playback each need to run on their own thread. I was stuck on a problem for a whole day before it suddenly clicked, so I'll just post the code directly. I've deliberately left the redundant code and variables in place, partly as a reference for my future self.
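One thing the class below never does is configure the audio session. For simultaneous capture and playback the session category has to be PlayAndRecord, and asking for a small preferred IO buffer duration helps latency further. A minimal sketch of that setup, assuming it runs once before startAudio (the 0.005 s value is an assumption to tune, not something from the code below):

#import <AVFoundation/AVFoundation.h>

NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
// PlayAndRecord is required to record and play at the same time
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
// request a small IO buffer to keep latency down (assumed value, tune it)
[session setPreferredIOBufferDuration:0.005 error:&error];
[session setActive:YES error:&error];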

The .h file:

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

#define kOutputBus 0
#define kInputBus 1
#define  kSampleRate 8000
#define kFramesPerPacket 1
#define kChannelsPerFrame 1
#define kBitsPerChannel 16
#define BUFFER_SIZE 1024

@interface AudioController : NSObject

@property (readonly) AudioComponentInstance audioUnit;

@property (readonly) AudioBuffer audioBuffer;

@property (strong, readwrite) NSMutableData *mIn;

@property (strong, readwrite) NSMutableData *mOut;

@property (strong, readwrite) NSMutableData *mAllAudioData;

- (void)hasError:(int)statusCode file:(char*)file line:(int)line;

- (void)processBuffer: (AudioBufferList* )audioBufferList;

+ (AudioController *) sharedAudioManager;

- (void)clearDataArray;

- (void)startAudio;

@end
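A quick sanity check on these constants: 16-bit mono means 2 bytes per frame, so 8000 frames/s works out to 16,000 bytes/s; a 1024-byte block (BUFFER_SIZE) therefore holds 512 frames, or 64 ms of audio, which is roughly the buffering granularity of the loopback below.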


The .m file:

#import "AudioController.h"

static NSMutableData *mIn;
static NSMutableData *mOut;
static NSMutableData *mAllAudioData;
static bool mIsStarted;       // whether the audio unit is running
static bool mSendServerStart; // send-to-server loop flag (set but not read here)
static bool mRecServerStart;  // receive-from-server loop flag (set but not read here)
static bool mIsTele;          // telephone-call flag (set but not read here)

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    
    // the captured data gets rendered into this buffer
    AudioBuffer buffer;
    // a variable where we check the status
    OSStatus status;
    // this is the reference to the object that owns the callback
    AudioController *audioProcessor = (__bridge AudioController *)inRefCon;
    /**
     at this point we define the number of channels, which is mono
     for the iPhone; the number of frames is usually 512 or 1024.
     */
    buffer.mDataByteSize = inNumberFrames * 2; // 2 bytes per frame (16-bit mono)
    buffer.mNumberChannels = 1; // one channel
    buffer.mData = malloc( inNumberFrames * 2 ); // buffer size
    // we put our buffer into a bufferlist array for rendering
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;
    // render input and check for error
    status = AudioUnitRender([audioProcessor audioUnit], ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
//    [audioProcessor hasError:status file:__FILE__ line:__LINE__];
    // process the bufferlist in the audio processor
    [audioProcessor processBuffer: &bufferList];
    // clean up the buffer
    free(bufferList.mBuffers[0].mData);
    
    // hand the captured block to a background queue, which moves it from
    // mOut into mIn so the playback callback can pick it up
    dispatch_async(dispatch_get_global_queue(0, 0), ^{
        [audioProcessor clearDataArray];
    });
    
    return noErr;
    
}

#pragma mark Playback callback

static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {
    
    long len = [mIn length];
    len = len > BUFFER_SIZE ? BUFFER_SIZE : len;
    if (len <= 0)
    {
        // nothing buffered yet: emit silence rather than replaying stale data
        for (UInt32 i = 0; i < ioData->mNumberBuffers; i++)
        {
            memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }
        *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
        return noErr;
    }
    
    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++)
    {
        // note: AudioBuffer is copied by value here, but mData still points
        // at the real output buffer, so the memcpy below lands correctly
        AudioBuffer buffer = ioData->mBuffers[i];
        NSData *pcmBlock = [mIn subdataWithRange:NSMakeRange(0, len)];
        UInt32 size = (UInt32)MIN(buffer.mDataByteSize, [pcmBlock length]);
        memcpy(buffer.mData, [pcmBlock bytes], size);
        // zero-pad any remainder so a short block doesn't leave stale samples
        if (size < buffer.mDataByteSize)
        {
            memset((char *)buffer.mData + size, 0, buffer.mDataByteSize - size);
        }
        // consume the bytes we just played
        [mIn replaceBytesInRange:NSMakeRange(0, size) withBytes:NULL length:0];
    }
    return noErr;
}

@implementation AudioController

@synthesize audioUnit;

@synthesize audioBuffer;

/*
 * It's the Singleton pattern.
 * The flow: init (if no instance exists yet) -> initializeAudioConfig (set audio format, IO pipe and callback functions)
 *                                            -> recordingCallback -> processBuffer
 *                                            -> playbackCallback
 */

// singleton accessor

+ (AudioController *) sharedAudioManager{
    
    static AudioController *sharedAudioManager;
    @synchronized(self)
    {
        if (!sharedAudioManager) {
            sharedAudioManager = [[AudioController alloc] init];
        }
        return sharedAudioManager;
    }
}
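
// (a dispatch_once-based accessor would be the more idiomatic modern form:
//    static dispatch_once_t onceToken;
//    dispatch_once(&onceToken, ^{
//        sharedAudioManager = [[AudioController alloc] init];
//    });
//  but @synchronized works too, so it's left as-is)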

- (AudioController *)init {
    
    self = [super init];
    
    if (self) {
        
        [self initializeAudioConfig];
        
        mIn = [[NSMutableData alloc] init];
        mOut = [[NSMutableData alloc] init];
        mAllAudioData = [[NSMutableData alloc] init];
        
        mIsStarted = false;
        mSendServerStart = false;
        mRecServerStart = false;
        mIsTele = false;

    }
    return self;
}

- (void)initializeAudioConfig {
    
    OSStatus status;
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output; // we want an output unit
    desc.componentSubType = kAudioUnitSubType_RemoteIO; // RemoteIO handles both input and output
    desc.componentFlags = 0; // must be zero
    desc.componentFlagsMask = 0; // must be zero
    desc.componentManufacturer = kAudioUnitManufacturer_Apple; // select provider
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];
    // enable recording (IO) on the input bus
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO, // use io
                                  kAudioUnitScope_Input, // scope to input
                                  kInputBus, // select input bus (1)
                                  &flag, // set flag
                                  sizeof(flag));
    [self hasError:status file:__FILE__ line:__LINE__];
    // enable playback (IO) on the output bus
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO, // use io
                                  kAudioUnitScope_Output, // scope to output
                                  kOutputBus, // select output bus (0)
                                  &flag, // set flag
                                  sizeof(flag));
    [self hasError:status file:__FILE__ line:__LINE__];
    // specify the format we want to work with: 8 kHz, 16-bit signed, mono PCM
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate = kSampleRate;
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
    audioFormat.mFramesPerPacket = kFramesPerPacket;
    audioFormat.mChannelsPerFrame = kChannelsPerFrame;
    audioFormat.mBitsPerChannel = kBitsPerChannel;
    audioFormat.mBytesPerPacket = kBitsPerChannel * kChannelsPerFrame * kFramesPerPacket / 8; // 16 * 1 * 1 / 8 = 2
    audioFormat.mBytesPerFrame = kBitsPerChannel * kChannelsPerFrame / 8; // 2 bytes per frame
    // set our format on the output scope of the input bus (the data we read from the mic)
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    [self hasError:status file:__FILE__ line:__LINE__];
    // set our format on the input scope of the output bus (the data we feed to the speaker)
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    [self hasError:status file:__FILE__ line:__LINE__];
    /**
     We need to define a callback structure which holds
     a pointer to the recordingCallback and a reference to
     the audio processor object
     */
    AURenderCallbackStruct callbackStruct;
    // set recording callback struct
    callbackStruct.inputProc = recordingCallback; // recordingCallback pointer
    callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
    // set input callback to recording callback on the input bus
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  kInputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    [self hasError:status file:__FILE__ line:__LINE__];
    // set playback callback struct
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
    // set playbackCallback as callback on our renderer for the output bus
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global,
                                  kOutputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    [self hasError:status file:__FILE__ line:__LINE__];
    // reset flag to 0
    flag = 0;
    /*
     tell the audio unit NOT to allocate its own render buffer on the input
     bus; we malloc and hand in our own buffer inside recordingCallback.
     */
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    [self hasError:status file:__FILE__ line:__LINE__];
    /*
     we set the number of channels to mono and allocate a 1024-byte block:
     mono 16-bit frames are 2 bytes each, so 512 frames = 1024 bytes, which
     is also where BUFFER_SIZE comes from.
     */
    audioBuffer.mNumberChannels = kChannelsPerFrame;
    audioBuffer.mDataByteSize = 512 * 2;
    audioBuffer.mData = malloc( 512 * 2 );
    // Initialize the Audio Unit and cross fingers =)
    status = AudioUnitInitialize(audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];
}

- (void)processBuffer: (AudioBufferList* )audioBufferList {
    
    AudioBuffer sourceBuffer = audioBufferList -> mBuffers[0];
    // we check here if the input data byte size has changed
    if (audioBuffer.mDataByteSize != sourceBuffer.mDataByteSize)
    {
        // clear old buffer
        free(audioBuffer.mData);
        // assign the new byte size and allocate it on mData
        audioBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
        audioBuffer.mData = malloc(sourceBuffer.mDataByteSize);
    }
    // copy incoming audio data to the audio buffer
    
    memcpy(audioBuffer.mData, audioBufferList -> mBuffers[0].mData, audioBufferList -> mBuffers[0].mDataByteSize);
    NSData *pcmBlock = [NSData dataWithBytes:sourceBuffer.mData length:sourceBuffer.mDataByteSize];
    [mOut appendData: pcmBlock];
}

- (void)clearDataArray
{
    if ([mOut length] <= 0)
    {
        return;
    }
//    [mAllAudioData appendBytes:mOut.bytes length:mOut.length];
    // move the freshly recorded bytes from mOut into mIn, where
    // playbackCallback will consume them; note that mIn/mOut are touched
    // from both the render thread and this background queue, so production
    // code would want a lock or a ring buffer here
    [mIn appendBytes:mOut.bytes length:mOut.length];
    [mOut replaceBytesInRange: NSMakeRange(0, mOut.length) withBytes: NULL length: 0];
}

- (void)start
{
    if (mIsStarted)
    {
//        NSLog( @"-- already start --");
        return;
    }
//    NSLog( @"-- start --");
    mIsStarted = true;
    [mIn replaceBytesInRange: NSMakeRange(0, [mIn length]) withBytes: NULL length: 0];
    [mOut replaceBytesInRange: NSMakeRange(0, [mOut length]) withBytes: NULL length: 0];
    OSStatus status = AudioOutputUnitStart(audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];
}

- (void)stop {
    NSLog( @"-- stop --");
    OSStatus status = AudioOutputUnitStop(audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];
    mIsStarted = false;
    [mIn replaceBytesInRange: NSMakeRange(0, [mIn length]) withBytes: NULL length: 0];
    [mOut replaceBytesInRange: NSMakeRange(0, [mOut length]) withBytes: NULL length: 0];
}

#pragma mark Error handling

- (void)hasError:(int)statusCode file:(char*)file line:(int)line
{
    if (statusCode)
    {
        NSLog(@"Error Code responded %d in file %s on line %d", statusCode, file, line);
        exit(-1);
    }
}

- (void)startAudio
{
    // restart from a clean state: stop (which clears the buffers), then start
    [self stop];
    [self start];
}

@end
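
Usage is then just a matter of grabbing the singleton and kicking it off from a background queue, per the note at the top about keeping recording and playback off the main thread. A hypothetical call site (the queue priority is an arbitrary choice):

#import "AudioController.h"

// start the duplex record/playback unit off the main thread;
// startAudio stops any running unit first and then restarts it
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
    [[AudioController sharedAudioManager] startAudio];
});

// note: -stop exists in the .m but isn't declared in the .h, so add it to
// the header if you need to stop from outside the class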

