前言: http://blog.csdn.net/AdamBieber/article/details/51112077
使用Unity自带的Microphone类进行录音并保存。录音本身比较简单,但保存时需要注意音频格式:Microphone录到的AudioClip中只有原始PCM采样数据,要保存成WAV文件必须自己写入44字节的文件头。
- 1
- 2
正文:
开始录音
// The most recently recorded clip, set by Recording() and consumed by
// PlayRecord()/SaveRecord().
private AudioClip clip;
// Recording sample rate in Hz.
const int samplingRate = 44100;
// Timer handle driving the 30-second recording countdown.
private TimerInfo timerInfo;
/// <summary>
/// Starts a 30-second recording on the default microphone device.
/// Logs and bails out when no microphone hardware is present.
/// </summary>
public void Recording()
{
    // Guard: no microphone available on this machine.
    if (Microphone.devices.Length == 0)
    {
        Util.Log("没有找到录音组件");
        UpdateMessage("没有找到录音组件");
        return;
    }

    Util.Log("录音时长为30秒");
    UpdateMessage("录音时长为30秒");

    // A null device name targets the default microphone driver; stop any
    // session that may still be running before starting a fresh one.
    Microphone.End(null);
    clip = Microphone.Start(null, false, 30, samplingRate);

    // Schedule the countdown that will eventually stop this recording.
    timerInfo = new TimerInfo(this);
    TimerManager.AddTimerEvent(timerInfo);
}
/// <summary>
/// Stops the current recording session, tears down its timer, and warns
/// when the captured audio is shorter than one second.
/// </summary>
public void StopRecord()
{
    // Cancel and discard the countdown timer.
    TimerManager.StopTimerEvent(timerInfo);
    TimerManager.RemoveTimerEvent(timerInfo);
    index = 0;

    // Sample the write position before ending the session; once the
    // microphone is stopped the position is no longer meaningful.
    int lastPos = Microphone.GetPosition(null);
    int audioLength = Microphone.IsRecording(null)
        ? lastPos / samplingRate // seconds captured so far (integer division)
        : 30;                    // recording already ran the full 30 seconds
    Microphone.End(null);

    if (audioLength < 1.0f)
    {
        Util.Log("录音时长短");
        UpdateMessage("录音时长短");
    }
}
/// <summary>
/// Finalizes the microphone session and plays back the recorded clip.
/// </summary>
public void PlayRecord()
{
    // Make sure recording has fully stopped before playback starts.
    StopRecord();
    // One-shot playback at the world origin.
    Vector3 playbackPosition = Vector3.zero;
    AudioSource.PlayClipAtPoint(clip, playbackPosition);
}
/// <summary>
/// Saves the recorded clip to disk as a WAV file, logging success or
/// any exception raised while writing.
/// </summary>
public void SaveRecord()
{
    try
    {
        string target = Util.DataPath + "testmodel/testproject/other/record.wav";
        Util.Save(clip, target);
        Util.Log("保存完毕");
        UpdateMessage("保存完毕");
    }
    catch (Exception ex)
    {
        // Surface the failure both to the log and to the UI.
        string detail = ex.Message + ex.StackTrace;
        Util.Log(detail);
        UpdateMessage(detail);
    }
}
/// <summary>
/// Writes <paramref name="clip"/> to <paramref name="path"/> as a
/// 16-bit PCM WAV file, creating the destination directory if needed.
/// </summary>
public static void Save(AudioClip clip, string path)
{
    // Ensure the target directory exists before opening the file.
    string directory = Path.GetDirectoryName(path);
    if (!Directory.Exists(directory))
    {
        Directory.CreateDirectory(directory);
    }

    // Reserve 44 zero bytes for the header, append the sample data, then
    // seek back and fill in the header once the final length is known.
    using (FileStream fileStream = CreateEmpty(path))
    {
        ConvertAndWrite(fileStream, clip);
        WriteHeader(fileStream, clip);
    }
}
/// <summary>
/// Appends the clip's audio data to the stream as little-endian 16-bit PCM.
/// </summary>
private static void ConvertAndWrite(FileStream fileStream, AudioClip clip)
{
    // AudioClip.samples counts frames per channel; GetData expects a buffer
    // of samples * channels floats of interleaved data. The original code
    // allocated only clip.samples floats, dropping data for stereo clips and
    // making the payload shorter than the "data" chunk size WriteHeader emits.
    float[] samples = new float[clip.samples * clip.channels];
    clip.GetData(samples, 0);

    const int rescaleFactor = 32767; // maps [-1, 1] floats onto Int16 range
    Byte[] bytesData = new Byte[samples.Length * 2];
    for (int i = 0; i < samples.Length; i++)
    {
        short value = (short)(samples[i] * rescaleFactor);
        BitConverter.GetBytes(value).CopyTo(bytesData, i * 2);
    }
    fileStream.Write(bytesData, 0, bytesData.Length);
}
/// <summary>
/// Creates (or truncates) the file at <paramref name="filepath"/> and
/// writes a 44-byte zero placeholder where the WAV header will later go.
/// </summary>
private static FileStream CreateEmpty(string filepath)
{
    FileStream fileStream = new FileStream(filepath, FileMode.Create);
    // 44 bytes is the size of the canonical RIFF/WAVE PCM header;
    // WriteHeader seeks back and overwrites this region afterwards.
    byte[] placeholder = new byte[44];
    fileStream.Write(placeholder, 0, placeholder.Length);
    return fileStream;
}
/// <summary>
/// Seeks to the start of the stream and writes the 44-byte canonical
/// RIFF/WAVE header describing 16-bit PCM data.
/// </summary>
private static void WriteHeader(FileStream stream, AudioClip clip)
{
    int hz = clip.frequency;
    int channels = clip.channels;
    int samples = clip.samples;

    stream.Seek(0, SeekOrigin.Begin);

    // RIFF chunk descriptor. ChunkSize = total file size minus the 8 bytes
    // of the "RIFF" tag and the size field itself. The original passed a
    // long (8 bytes) / int (4 bytes) buffer and wrote only the first 2 or 4
    // bytes, which worked only by little-endian accident; explicit casts
    // make the field widths intentional. The unused local `two` is removed.
    stream.Write(System.Text.Encoding.UTF8.GetBytes("RIFF"), 0, 4);
    stream.Write(BitConverter.GetBytes((int)(stream.Length - 8)), 0, 4);
    stream.Write(System.Text.Encoding.UTF8.GetBytes("WAVE"), 0, 4);

    // "fmt " sub-chunk: 16 bytes of format data for plain PCM.
    stream.Write(System.Text.Encoding.UTF8.GetBytes("fmt "), 0, 4);
    stream.Write(BitConverter.GetBytes(16), 0, 4);
    stream.Write(BitConverter.GetBytes((UInt16)1), 0, 2);              // AudioFormat = 1 (uncompressed PCM)
    stream.Write(BitConverter.GetBytes((UInt16)channels), 0, 2);       // NumChannels
    stream.Write(BitConverter.GetBytes(hz), 0, 4);                     // SampleRate
    stream.Write(BitConverter.GetBytes(hz * channels * 2), 0, 4);      // ByteRate = rate * channels * 2 bytes/sample
    stream.Write(BitConverter.GetBytes((UInt16)(channels * 2)), 0, 2); // BlockAlign
    stream.Write(BitConverter.GetBytes((UInt16)16), 0, 2);             // BitsPerSample

    // "data" sub-chunk: size of the interleaved 16-bit sample payload.
    stream.Write(System.Text.Encoding.UTF8.GetBytes("data"), 0, 4);
    stream.Write(BitConverter.GetBytes(samples * channels * 2), 0, 4);
}