We will use an uncompressed PCM data format, specifically 16-bit, 44.1 kHz, mono. This information is specified through three predefined values at the top of the program:
#define BYTES_PER_SAMPLE 2

16 bits equals two bytes.

#define SAMPLE_RATE 44100

A rate of 44,100 samples per second equals 44.1 kHz.
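Taken together, these constants fix the arithmetic the rest of the program relies on: one second of mono 16-bit audio occupies SAMPLE_RATE * BYTES_PER_SAMPLE bytes, and one queue buffer of FRAME_COUNT frames (735, as defined in the full listing below) covers 735/44100, about 1/60 of a second. The following standalone snippet is only a sanity check of that arithmetic, not part of the original program:

#include <stdio.h>

#define BYTES_PER_SAMPLE 2   // 16-bit samples
#define SAMPLE_RATE 44100    // 44.1 kHz
#define FRAME_COUNT 735      // frames per queue buffer, as in the listing below

int main(void)
{
    // Bytes needed for one second of mono 16-bit PCM:
    unsigned long bytesPerSecond = SAMPLE_RATE * BYTES_PER_SAMPLE;   // 88200

    // Duration covered by a single FRAME_COUNT-frame buffer:
    double bufferSeconds = (double)FRAME_COUNT / SAMPLE_RATE;        // ~0.0167 s

    printf("%lu bytes/s, %.4f s per buffer\n", bytesPerSecond, bufferSeconds);
    return 0;
}

The complete listing follows.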
#import <UIKit/UIKit.h>
#import "AppDelegate.h"
#include <AudioToolbox/AudioQueue.h>
#include "AKLib.h"

#define BYTES_PER_SAMPLE 2      // 16-bit samples are two bytes each
#define SAMPLE_RATE 44100       // 44,100 samples per second (44.1 kHz)

typedef short sampleFrame;

#define FRAME_COUNT 735         // frames per queue buffer (735/44100 s, about 1/60 s)
#define AUDIO_BUFFERS 3         // number of buffers rotated through the queue

typedef struct AQCallbackStruct {
    AudioQueueRef queue;                          // playback queue
    UInt32 FrameCount;                            // frames per audio buffer
    AudioQueueBufferRef mBuffers[AUDIO_BUFFERS];  // the queue's buffers
    AudioStreamBasicDescription mDataFormat;      // description of the PCM format
    UInt32 sampleLen;                             // total number of samples to play
    UInt32 playPtr;                               // index of the next sample to play
    sampleFrame *pcmBuffer;                       // the PCM data itself
} AQCallbackStruct;

int playbuffer(void *pcm, unsigned long len);
void AQBufferCallback(void *in, AudioQueueRef inQ, AudioQueueBufferRef outQB);
int main(int argc, char *argv[])
{
    unsigned char *pcmBuffer;
    unsigned char source[11] = {0x01, '9', '0', '0', '6', '0', '2', '3', '4', '1', '6'};
    printf("Length of the data to encode: %zu\n", sizeof(source));

    int outLen;
    pcmBuffer = CreatePacket(source, sizeof(source), &outLen);

    // FILE *stream;
    // if ((stream = fopen("/Users/weiwen/Desktop/encode.pcm", "wb+")) == NULL) {
    //     fprintf(stderr, "Cannot open output file.\n");
    //     return 1;
    // }
    // fwrite(pcmBuffer, 1, outLen, stream);
    // fclose(stream);

    printf("Length of the encoded data: %d\n", outLen);

    int i;
    for (i = 0; i < 1000; i++) {   // replay the encoded packet 1000 times
        printf("%d\n", i);
        playbuffer(pcmBuffer, outLen);
    }

    @autoreleasepool {
        return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class]));
    }
}
int playbuffer(void *pcmBuffer, unsigned long len)
{
    AQCallbackStruct agc;
    OSStatus err;
    UInt32 bufferSize;
    int i;

    // Describe the PCM format: 16-bit signed integer, interleaved stereo.
    // The mono source is duplicated into both channels by the callback.
    agc.mDataFormat.mSampleRate = SAMPLE_RATE;
    agc.mDataFormat.mFormatID = kAudioFormatLinearPCM;
    agc.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    agc.mDataFormat.mBytesPerPacket = 4;
    agc.mDataFormat.mFramesPerPacket = 1;
    agc.mDataFormat.mBytesPerFrame = 4;      // 2 channels * 2 bytes per sample
    agc.mDataFormat.mChannelsPerFrame = 2;
    agc.mDataFormat.mBitsPerChannel = 16;
    agc.FrameCount = FRAME_COUNT;
    agc.sampleLen = len / BYTES_PER_SAMPLE;
    agc.playPtr = 0;
    agc.pcmBuffer = (short *)pcmBuffer;

    err = AudioQueueNewOutput(&agc.mDataFormat, AQBufferCallback, &agc, NULL,
                              kCFRunLoopCommonModes, 0, &agc.queue);
    if (err) return err;

    // Allocate the buffers and prime each one by invoking the callback directly.
    bufferSize = agc.FrameCount * agc.mDataFormat.mBytesPerFrame;
    for (i = 0; i < AUDIO_BUFFERS; i++) {
        err = AudioQueueAllocateBuffer(agc.queue, bufferSize, &agc.mBuffers[i]);
        if (err) return err;
        AQBufferCallback(&agc, agc.queue, agc.mBuffers[i]);
    }

    err = AudioQueueStart(agc.queue, NULL);
    if (err) return err;

    // Block until the callback has consumed every sample, polling once per second.
    while (agc.playPtr < agc.sampleLen) {
        struct timeval tv = {1, 0};
        select(0, NULL, NULL, NULL, &tv);   // portable one-second sleep
    }

    // AudioQueueReset(agc.queue);
    AudioQueueStop(agc.queue, true);
    sleep(1);
    AudioQueueDispose(agc.queue, true);   // release the queue so repeated calls do not leak
    return 0;
}
void AQBufferCallback(void *pIn, AudioQueueRef inQ, AudioQueueBufferRef outQB)
{
    AQCallbackStruct *agc;
    short *audioBuf = NULL;
    short sample;
    int i;

    agc = (AQCallbackStruct *)pIn;
    audioBuf = (short *)(outQB->mAudioData);
    // printf("Sync: %lu / %lu\n", agc->playPtr, agc->sampleLen);

    if (agc->FrameCount > 0) {
        outQB->mAudioDataByteSize = 4 * agc->FrameCount;   // 4 bytes per stereo frame
        for (i = 0; i < agc->FrameCount; i++) {
            if (agc->playPtr >= agc->sampleLen) {
                sample = 0;                          // past the end: pad with silence
            } else {
                sample = agc->pcmBuffer[agc->playPtr];
            }
            audioBuf[i * 2]     = sample;   // left channel
            audioBuf[i * 2 + 1] = sample;   // right channel (duplicate of the mono sample)
            agc->playPtr++;
        }
        AudioQueueEnqueueBuffer(inQ, outQB, 0, NULL);
    }
}
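To exercise the playback path without AKLib's CreatePacket, a hypothetical helper like the one below fills a buffer with a 1 kHz sine tone and hands it to playbuffer(). This is only a sketch: the function name, tone frequency, amplitude, and duration are illustrative choices, not part of the original program, and it assumes the listing above is compiled into the same target.

#include <math.h>
#include <stdlib.h>

// Hypothetical test helper: play one second of a 1 kHz sine tone.
static void playTestTone(void)
{
    int sampleCount = SAMPLE_RATE;                        // one second of mono samples
    short *tone = malloc(sampleCount * BYTES_PER_SAMPLE);
    if (tone == NULL) return;
    for (int i = 0; i < sampleCount; i++) {
        // 10000 is an arbitrary amplitude well inside the 16-bit range.
        tone[i] = (short)(10000.0 * sin(2.0 * M_PI * 1000.0 * i / SAMPLE_RATE));
    }
    playbuffer(tone, sampleCount * BYTES_PER_SAMPLE);     // blocks until playback ends
    free(tone);
}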