Implementing the recording feature on iOS

A working, already-debugged project: http://download.csdn.net/detail/fzxy002763/4107786

I spent the last couple of days getting iOS recording to work. Original article: http://www.iphoneam.com/blog/index.php?title=using-the-iphone-to-record-audio-a-guide&more=1&c=1&tb=1&pb=1

Recording on iOS relies mainly on AVFoundation.framework and CoreAudio.framework.

Add these two frameworks to the project.

I named the project audio_text here.

The code in the generated audio_textViewController.h is as follows:

#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>
#import <CoreAudio/CoreAudioTypes.h>

@interface audio_textViewController : UIViewController <AVAudioRecorderDelegate> {

	IBOutlet UIButton *bthStart;
	IBOutlet UIButton *bthPlay;
	IBOutlet UITextField *freq;
	IBOutlet UITextField *value;
	IBOutlet UIActivityIndicatorView *actSpinner;
	BOOL toggle;
	
	//Variable setup for access in the class
	NSURL *recordedTmpFile;
	AVAudioRecorder *recorder;
	NSError *error;
}

@property (nonatomic,retain)IBOutlet UIActivityIndicatorView *actSpinner;
@property (nonatomic,retain)IBOutlet UIButton *bthStart;
@property (nonatomic,retain)IBOutlet UIButton *bthPlay;

-(IBAction)start_button_pressed;
-(IBAction)play_button_pressed;
@end
The code in audio_textViewController.m:

#import "audio_textViewController.h"

@implementation audio_textViewController
@synthesize actSpinner, bthStart, bthPlay;


- (void)viewDidLoad {
    [super viewDidLoad];
	
	//Start the toggle in true mode.
	toggle = YES;
	bthPlay.hidden = YES;
	
	//Instantiate the shared AVAudioSession object.
	AVAudioSession * audioSession = [AVAudioSession sharedInstance];
	//Set up the audio session for playback and record.
	//We could use record only and switch it to playback later, but
	//since we are going to do both, let's set it up once.
	[audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error: &error];
	//Activate the session
	[audioSession setActive:YES error: &error];
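	//Added note (not in the original): both calls above return a BOOL; production
	//code should check that return value and inspect "error" on failure instead of
	//ignoring it.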
	
}
/*
// The designated initializer. Override to perform setup that is required before the view is loaded.
- (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil {
    self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil];
    if (self) {
        // Custom initialization
    }
    return self;
}
*/

/*
// Implement loadView to create a view hierarchy programmatically, without using a nib.
- (void)loadView {
}
*/


/*
// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
- (void)viewDidLoad {
    [super viewDidLoad];
}
*/


/*
// Override to allow orientations other than the default portrait orientation.
- (BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation {
    // Return YES for supported orientations
    return (interfaceOrientation == UIInterfaceOrientationPortrait);
}
*/

- (IBAction)  start_button_pressed{
	
	if(toggle)
	{
		toggle = NO;
		[actSpinner startAnimating];
		[bthStart setTitle:@"停" forState: UIControlStateNormal ];	
		bthPlay.enabled = toggle;
		bthPlay.hidden = !toggle;
		
		//Begin the recording session.
		//Error handling removed.  Please add to your own code.
		
		//Set up the dictionary object with all the recording settings that this
		//recording session will use.
		//It is not entirely clear which of these keys are required and which are the bare minimum.
		//This is a good resource: http://www.totodotnet.net/tag/avaudiorecorder/
		NSMutableDictionary* recordSetting = [[NSMutableDictionary alloc] init];
		
		[recordSetting setValue :[NSNumber numberWithInt:kAudioFormatAppleIMA4] forKey:AVFormatIDKey];
		
		[recordSetting setValue:[NSNumber numberWithFloat:[freq.text floatValue]] forKey:AVSampleRateKey]; 
		[recordSetting setValue:[NSNumber numberWithInt: [value.text intValue]] forKey:AVNumberOfChannelsKey];
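		//Added note (not in the original): other keys from AVAudioSettings.h can be set
		//on this dictionary in the same way if more control over the encoder is needed,
		//for example:
		//[recordSetting setValue:[NSNumber numberWithInt:AVAudioQualityMedium] forKey:AVEncoderAudioQualityKey];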
		
		//Now that we have our settings, instantiate the recorder.
		//Generate a temp file for the recording.
		//This approach was found online and is a good way to create a tmp file name that
		//will not overwrite an existing one.
		//It collapses a lot into one call; it can be broken out if need be.
		//Retain the URL: fileURLWithPath: returns an autoreleased object and we keep it in an ivar.
		recordedTmpFile = [[NSURL fileURLWithPath:[NSTemporaryDirectory() stringByAppendingPathComponent: [NSString stringWithFormat: @"%.0f.%@", [NSDate timeIntervalSinceReferenceDate] * 1000.0, @"caf"]]] retain];
		NSLog(@"Using File called: %@",recordedTmpFile);
		//Setup the recorder to use this file and record to it.
		recorder = [[ AVAudioRecorder alloc] initWithURL:recordedTmpFile settings:recordSetting error:&error];
		//Configure the recorder before starting.
		//Setting the delegate to self lets this controller receive
		//AVAudioRecorderDelegate callbacks such as "recording finished"
		//(see the sketch after this listing).
		[recorder setDelegate:self];
		//prepareToRecord creates the file and primes the audio subsystems
		//so that when we actually say "record" it starts right away.
		[recorder prepareToRecord];
		//Start the actual Recording
		[recorder record];
		//There is an optional method for doing the recording for a limited time see 
		//[recorder recordForDuration:(NSTimeInterval) 10]
		
	}
	else
	{
		toggle = YES;
		[actSpinner stopAnimating];
		[bthStart setTitle:@"开始录音" forState:UIControlStateNormal ];
		bthPlay.enabled = toggle;
		bthPlay.hidden = !toggle;
		
		NSLog(@"Using File called: %@",recordedTmpFile);
		//Stop the recorder.
		[recorder stop];
	}
}

- (void)didReceiveMemoryWarning {
	// Releases the view if it doesn't have a superview.
    [super didReceiveMemoryWarning];
	
	// Release any cached data, images, etc that aren't in use.
}

-(IBAction) play_button_pressed{
	
	//The play button was pressed... 
	//Setup the AVAudioPlayer to play the file that we just recorded.
	AVAudioPlayer * avPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:recordedTmpFile error:&error];
	[avPlayer prepareToPlay];
	[avPlayer play];
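	//Added note (not in the original): under manual reference counting this player is
	//never released, so one instance leaks per tap. Keeping it in an ivar and releasing
	//it from an AVAudioPlayerDelegate callback such as
	//audioPlayerDidFinishPlaying:successfully: is one way to clean it up.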
	
}

- (void)viewDidUnload {
	// Release any retained subviews of the main view.
	// e.g. self.myOutlet = nil;
	//Clean up the temp file.
	NSFileManager * fm = [NSFileManager defaultManager];
	[fm removeItemAtPath:[recordedTmpFile path] error:&error];
	//Release the remaining objects (never call dealloc directly).
	[recorder release];
	recorder = nil;
	[recordedTmpFile release];
	recordedTmpFile = nil;
}


- (void)dealloc {
    [super dealloc];
}

@end
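Because the recorder's delegate is set to self in start_button_pressed, the controller can also be told when a recording stops. A minimal sketch of that callback, added here for illustration and assuming the header adopts AVAudioRecorderDelegate as shown above; place it inside the @implementation:

- (void)audioRecorderDidFinishRecording:(AVAudioRecorder *)aRecorder successfully:(BOOL)flag {
	//Called when the recording has stopped or has reached its time limit,
	//for example after recordForDuration: runs out.
	NSLog(@"Recording finished, success: %d", flag);
}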
Finally, lay out the interface in Interface Builder, connect the two buttons to start_button_pressed and play_button_pressed, connect the outlets, and set the buttons' properties.

That's basically it; you can start recording.

