Capturing an audio stream with Audio Unit and Audio Queue
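Both approaches assume an active, record-capable AVAudioSession and microphone permission. The original code does not show this setup, so here is a minimal sketch to run before creating either recorder; the AVAudioSessionCategoryPlayAndRecord choice and the logging are assumptions, and requestRecordPermission: requires iOS 7 or later.

// Minimal session setup sketch (assumed; not part of the original post)
AVAudioSession *session = [AVAudioSession sharedInstance];
NSError *error = nil;
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
[session setActive:YES error:&error];
if (error) NSLog(@"audio session error: %@", error);
[session requestRecordPermission:^(BOOL granted) {
    if (!granted) NSLog(@"microphone permission denied");
}];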

1 audio queue

#import <Foundation/Foundation.h>

#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>
#import <AVFoundation/AVFoundation.h>

#define kNumberBuffers      3
#define t_sample             SInt16
#define kSamplingRate       20000
#define kNumberChannels     1
#define kBitsPerChannels    (sizeof(t_sample) * 8)
#define kBytesPerFrame      (kNumberChannels * sizeof(t_sample))
//#define kFrameSize          (kSamplingRate * sizeof(t_sample))
#define kFrameSize          1000
typedef struct AQCallbackStruct
{
    AudioStreamBasicDescription mDataFormat;
    AudioQueueRef               queue;
    AudioQueueBufferRef         mBuffers[kNumberBuffers];
    AudioFileID                 outputFile;
    unsigned long               frameSize;
    long long                   recPtr;
    int                         run;
    
} AQCallbackStruct;
@interface record : NSObject
{
    AQCallbackStruct aqc;
    AudioFileTypeID fileFormat;
}
- (id) initWithSampleRate:(NSInteger)sampleRate  atChannels:(UInt32)channels;
- (void) start;
- (void) stop;
- (void) pause;
- (void) processAudioBuffer:(AudioQueueBufferRef) buffer withQueue:(AudioQueueRef) queue;
@property (nonatomic, assign) AQCallbackStruct aqc;
@property (nonatomic, assign) long audioDataLength;

// Callback function (note: the queue registers AQInputCallback from the implementation file; this one is never actually used)
static void AudioPlayerAQInputCallback(void *input, AudioQueueRef inQ, AudioQueueBufferRef outQB);


@end

#import "record.h"
#import "ViewController.h"
@implementation record
@synthesize aqc;
@synthesize audioDataLength;

static void AQInputCallback (void *inUserData,
                             AudioQueueRef inAudioQueue,
                             AudioQueueBufferRef inBuffer,
                             const AudioTimeStamp *inStartTime,
                             UInt32 inNumPackets,
                             const AudioStreamPacketDescription *inPacketDesc)
{
    record * engine = (__bridge record *) inUserData;
    if (inNumPackets > 0)
    {
        [engine processAudioBuffer:inBuffer withQueue:inAudioQueue];
    }
    if (engine.aqc.run)
    {
        AudioQueueEnqueueBuffer(engine.aqc.queue, inBuffer, 0, NULL);
    }
}
- (id) initWithSampleRate:(NSInteger)sampleRate  atChannels:(UInt32)channels
{
    self = [super init];
    if (self)
    {
        aqc.mDataFormat.mSampleRate = sampleRate;
        aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM;
        aqc.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        aqc.mDataFormat.mFramesPerPacket = 1;
        aqc.mDataFormat.mChannelsPerFrame = channels;
        aqc.mDataFormat.mBitsPerChannel = kBitsPerChannels;
        aqc.mDataFormat.mBytesPerPacket = kBytesPerFrame;
        aqc.mDataFormat.mBytesPerFrame = kBytesPerFrame;
        aqc.frameSize = kFrameSize;
        // Create an audio queue object for recording
        AudioQueueNewInput(&aqc.mDataFormat, AQInputCallback, (__bridge void*)(self), NULL, kCFRunLoopCommonModes, 0, &aqc.queue);
        for (int i = 0; i < kNumberBuffers; i++)
        {
            // Ask the audio queue to allocate a buffer
            AudioQueueAllocateBuffer(aqc.queue, (UInt32)aqc.frameSize, &aqc.mBuffers[i]);
            // Enqueue the buffer so the queue can fill it with captured audio
            AudioQueueEnqueueBuffer(aqc.queue, aqc.mBuffers[i], 0, NULL);
        }
        aqc.recPtr = 0;
        aqc.run = 1;
        OSStatus status = AudioQueueStart(aqc.queue, NULL);
        NSLog(@"AudioQueueStart = %d", (int)status);
    }
    return self;
}
- (void) start
{
    AudioQueueStart(aqc.queue, NULL);
    NSLog(@"audio queue started");
}
- (void) stop
{
    AudioQueueStop(aqc.queue, true);
    
}

- (void) pause
{
    AudioQueuePause(aqc.queue);
}
- (void) dealloc
{
    AudioQueueStop(aqc.queue, true);
    aqc.run = 0;
    AudioQueueDispose(aqc.queue, true);
}
- (void) processAudioBuffer:(AudioQueueBufferRef) buffer withQueue:(AudioQueueRef) queue
{
    // mAudioDataByteSize is in bytes; dividing by mBytesPerPacket gives the sample count
    long samples = buffer->mAudioDataByteSize / aqc.mDataFormat.mBytesPerPacket;
    t_sample *data = (t_sample *) buffer->mAudioData;
    // Use the byte size, not the sample count, as the NSData length
    NSData *codeData = [[NSData alloc] initWithBytes:data length:buffer->mAudioDataByteSize];
    NSLog(@"captured %ld samples: %@", samples, codeData);
}
// Callback function (never registered with the queue in this example; AQInputCallback above is the one in use)
static void AudioPlayerAQInputCallback(void *input, AudioQueueRef inQ, AudioQueueBufferRef outQB)
{
    record *rec = (__bridge record *)input;
    if (rec.aqc.run == 0) {
        return;
    }
    // Copy the data out before re-enqueuing the buffer; once enqueued, the queue may overwrite it
    t_sample *data = (t_sample *) outQB->mAudioData;
    NSData *codeData = [[NSData alloc] initWithBytes:data length:outQB->mAudioDataByteSize];
    AudioQueueEnqueueBuffer(rec.aqc.queue, outQB, 0, NULL);
    
    // Process the data: here it is dumped to a file in Documents.
    // Note that writeToFile:atomically: replaces the file on every callback;
    // to accumulate the stream, append with an NSFileHandle instead.
    NSArray *path = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *filePath = [path.lastObject stringByAppendingPathComponent:@"testAudio"];
    NSLog(@"%@", filePath);
    [codeData writeToFile:filePath atomically:YES];
}


@end
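A minimal usage sketch (where you create the recorder, e.g. in a view controller, is your choice; note that initWithSampleRate:atChannels: already calls AudioQueueStart, so capture begins immediately):

record *recorder = [[record alloc] initWithSampleRate:kSamplingRate atChannels:kNumberChannels];
// ... buffers now arrive in AQInputCallback and are logged by processAudioBuffer:withQueue: ...
[recorder pause]; // suspend capture
[recorder start]; // resume capture
[recorder stop];  // stop the queue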

2 audio unit

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>


#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif

#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif

FILE *pFile;
@interface IosAudioController : NSObject {
    AudioComponentInstance audioUnit;
    AudioBuffer tempBuffer; // this will hold the latest data from the microphone
}

@property (readonly) AudioComponentInstance audioUnit;
@property (readonly) AudioBuffer tempBuffer;

- (void) start;
- (void) stop;
- (void) processAudio: (AudioBufferList*) bufferList;




@end

// setup a global iosAudio variable, accessible everywhere
extern IosAudioController* iosAudio;

#import "IosAudioController.h"
#import <AudioToolbox/AudioToolbox.h>
#import "faac.h"
#define kOutputBus 0
#define kInputBus 1

IosAudioController* iosAudio;

void checkStatus(int status){
    if (status) {
        printf("Status not 0! %d\n", status);
//        exit(1);
    }
}

/**
 This callback is called when new audio data from the microphone is
 available.
 */
static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    
    
    // Describe a one-shot buffer big enough for inNumberFrames of 16-bit mono samples
    AudioBuffer buffer;
    OSStatus status;
    buffer.mNumberChannels = 1;
    buffer.mDataByteSize = inNumberFrames * 2;  // 2 bytes per frame: 16-bit mono
    buffer.mData = malloc(inNumberFrames * 2);
    
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;
    
    // Pull the captured samples from the input bus into our buffer
    status = AudioUnitRender([iosAudio audioUnit], ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
    checkStatus(status);
    
    [iosAudio processAudio:&bufferList];
    NSLog(@"%u", (unsigned int)bufferList.mBuffers[0].mDataByteSize);
    
    // Append the raw PCM to the file opened in init
    fwrite(bufferList.mBuffers[0].mData, bufferList.mBuffers[0].mDataByteSize, 1, pFile);
    fflush(pFile);
    free(bufferList.mBuffers[0].mData);
    
    return noErr;
}

/**
 This callback would be called when the audioUnit needs new data to play
 through the speakers. The playback path is commented out in this
 capture-only example; a sketch of the callback it would register follows.
 */
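The commented-out registration near the bottom of init refers to a playbackCallback that never appears in this excerpt. Here is a sketch of what it would look like, following the RemoteIO tutorial listed in the references: copy the most recent microphone data (tempBuffer) into each output buffer. Treat it as an illustration, not part of the original code.

// Sketch of the playback callback the commented-out registration expects
static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {
    // Fill every output buffer with the latest microphone data held in tempBuffer
    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
        AudioBuffer *buffer = &ioData->mBuffers[i];
        UInt32 size = min(buffer->mDataByteSize, [iosAudio tempBuffer].mDataByteSize);
        memcpy(buffer->mData, [iosAudio tempBuffer].mData, size);
        buffer->mDataByteSize = size;
    }
    return noErr;
}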

@implementation IosAudioController

@synthesize audioUnit, tempBuffer;

/**
 Initialize the audioUnit and allocate our own temporary buffer.
 The temporary buffer will hold the latest data coming in from the microphone,
 and will be copied to the output when this is requested.
 */
- (id) init {
    self = [super init];
    
    NSArray *path = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *Pathes = path.lastObject;
    NSString *filePath = [Pathes stringByAppendingPathComponent:@"testaudio5.pcm2"];
    const char *str = [filePath UTF8String];
    pFile = fopen(str, "w");
    
    
    
    OSStatus status;
    
    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    
    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
    
    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    checkStatus(status);
    
    // Enable IO for recording
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);
    
//    // Enable IO for playback
//    status = AudioUnitSetProperty(audioUnit,
//                                  kAudioOutputUnitProperty_EnableIO,
//                                  kAudioUnitScope_Output,
//                                  kOutputBus,
//                                  &flag,
//                                  sizeof(flag));
//    checkStatus(status);
    
    // Describe format: 16-bit signed integer mono PCM at 44.1 kHz
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate       = 44100.00;
    audioFormat.mFormatID         = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket  = 1;
    audioFormat.mChannelsPerFrame = 1;
    audioFormat.mBitsPerChannel   = 16;
    audioFormat.mBytesPerPacket   = 2;
    audioFormat.mBytesPerFrame    = 2;
    
    // Apply format
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    checkStatus(status);
//    status = AudioUnitSetProperty(audioUnit,
//                                  kAudioUnitProperty_StreamFormat,
//                                  kAudioUnitScope_Input,
//                                  kOutputBus,
//                                  &audioFormat,
//                                  sizeof(audioFormat));
//    checkStatus(status);
    
    
    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  kInputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    checkStatus(status);
    
    // Set output callback
//    callbackStruct.inputProc = playbackCallback;
//    callbackStruct.inputProcRefCon = self;
//    status = AudioUnitSetProperty(audioUnit,
//                                  kAudioUnitProperty_SetRenderCallback,
//                                  kAudioUnitScope_Global,
//                                  kOutputBus,
//                                  &callbackStruct,
//                                  sizeof(callbackStruct));
//    checkStatus(status);
    
    // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
    flag = 0;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);
    
    // Allocate our own buffer (1 channel, 16 bits per sample, thus 2 bytes per frame).
    // In practice the buffers contain 512 frames; if that changes, processAudio resizes the temporary buffer.
    tempBuffer.mNumberChannels = 1;
    tempBuffer.mDataByteSize = 512 * 2;
    tempBuffer.mData = malloc( 512 * 2 );
    
    // Initialise
    status = AudioUnitInitialize(audioUnit);
    checkStatus(status);
    
    return self;
}

/**
 Start the audioUnit. This means data will be provided from
 the microphone, and requested for feeding to the speakers, by
 use of the provided callbacks.
 */
- (void) start {
    OSStatus status = AudioOutputUnitStart(audioUnit);
    checkStatus(status);
}

/**
 Stop the audioUnit
 */
- (void) stop {
    OSStatus status = AudioOutputUnitStop(audioUnit);
    checkStatus(status);
    
    
}

/**
 Change this function to decide what is done with incoming
 audio data from the microphone.
 Right now we copy it into our own temporary buffer.
 */
- (void) processAudio: (AudioBufferList*) bufferList{
    AudioBuffer sourceBuffer = bufferList->mBuffers[0];
    
    // fix tempBuffer size if it's the wrong size
    if (tempBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
        free(tempBuffer.mData);
        tempBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
        tempBuffer.mData = malloc(sourceBuffer.mDataByteSize);
    }
    // copy incoming audio data to temporary buffer
    memcpy(tempBuffer.mData, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
}

- (void) dealloc {
    AudioUnitUninitialize(audioUnit);
    free(tempBuffer.mData);
    [super dealloc]; // MRC only: [super dealloc] must come last; remove it entirely under ARC
}

@end
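Typical usage of the global iosAudio instance (calling this from application:didFinishLaunchingWithOptions: is an assumption; any early startup point works):

iosAudio = [[IosAudioController alloc] init];
[iosAudio start];  // recordingCallback now fires as microphone data arrives and appends PCM to the file
// ... later ...
[iosAudio stop];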

References

http://atastypixel.com/blog/using-remoteio-audio-unit/

http://www.csdn123.com/html/itweb/20131023/182961.htm
