unity3D实现录音功能,并将真实录音时长保存至本地(不能用可私信,附可执行文件下载地址)

  1. 项目实现功能:在unity3D中通过Microphone的API实现录音功能,并将真实时长的录音文件以“.wav”格式保存到本地。
  2. 环境:Win10     unity版本:2018.2.15f1             VS版本:2017
  3. 界面展示
  4.  

    说明:要提前了解.wav文件的格式

  5. 根据.wav文件格式,需要对录音的声音流进行重新编码。
  6. 代码
    using UnityEngine;
    using UnityEngine.UI;
    using System;
    using System.IO;
    
    /// <summary>
    /// Records audio from the default microphone, trims the clip to the
    /// length actually recorded, plays it back, and saves it as a 16-bit
    /// PCM ".wav" file under Application.persistentDataPath.
    /// </summary>
    public class TestMicro : MonoBehaviour {
        private bool micConnected = false;   // true once a microphone device was found in Start()
        private int minFreq, maxFreq;        // sample-rate capabilities reported by the device
        public AudioClip RecordedClip;       // the (trimmed) recording
        public AudioSource audioSource;      // source used to play the recording back
        public Text Infotxt;                 // UI label for status messages
        public Text Adress;                  // UI label showing the saved file path
        private string fileName;             // generated file name ("yyyyMMddHHmmssffff.wav")
        private byte[] data;                 // 16-bit little-endian PCM bytes of the trimmed clip

        private const int MaxRecordSeconds = 60;      // capacity passed to Microphone.Start
        private const int FallbackFrequency = 44100;  // used when GetDeviceCaps returns (0, 0) = "any rate"
        private const int WavHeaderSize = 44;         // canonical RIFF/WAVE PCM header length

        private void Start()
        {
            if (Microphone.devices.Length <= 0)
            {
                Infotxt.text = "缺少麦克风设备!";
            }
            else
            {
                // Microphone.devices[0] is already a string; no ToString() needed.
                Infotxt.text = "设备名称为:" + Microphone.devices[0] + "请点击Start开始录音!";
                micConnected = true;
                Microphone.GetDeviceCaps(null, out minFreq, out maxFreq);
                // (0, 0) means the device supports any sample rate; pick a standard one.
                if (minFreq == 0 && maxFreq == 0)
                {
                    maxFreq = FallbackFrequency;
                }
            }
        }

        /// <summary>
        /// Starts recording from the default microphone (up to MaxRecordSeconds).
        /// </summary>
        public void Begin()
        {
            if (micConnected)
            {
                if (!Microphone.IsRecording(null))
                {
                    RecordedClip = Microphone.Start(null, false, MaxRecordSeconds, maxFreq);
                    Infotxt.text = "开始录音!";
                }
                else
                {
                    Infotxt.text = "正在录音中,请勿重复点击Start!";
                }
            }
            else
            {
                Infotxt.text = "请确认麦克风设备是否已连接!";
            }
        }

        /// <summary>
        /// Stops recording and trims clip and PCM data to the real recorded length.
        /// </summary>
        public void Stop()
        {
            // Fix: pressing Stop before any recording passed a null clip to
            // GetRealAudio and threw NullReferenceException.
            if (RecordedClip == null)
            {
                Infotxt.text = "当前没有在录音!";
                return;
            }
            // The microphone position must be read BEFORE Microphone.End resets it.
            data = GetRealAudio(ref RecordedClip);
            Microphone.End(null);
            Infotxt.text = "录音结束!";
        }

        /// <summary>
        /// Plays the last recording back through the attached AudioSource.
        /// </summary>
        public void Player()
        {
            if (Microphone.IsRecording(null))
            {
                Infotxt.text = "正在录音中,请先停止录音!";
                return;
            }
            if (RecordedClip == null)
            {
                Infotxt.text = "当前没有在录音!";
                return;
            }
            audioSource.clip = RecordedClip;
            audioSource.Play();
            Infotxt.text = "正在播放录音!";
        }

        /// <summary>
        /// Saves the trimmed recording as a ".wav" file and shows its path.
        /// </summary>
        public void Save()
        {
            if (Microphone.IsRecording(null))
            {
                Infotxt.text = "正在录音中,请先停止录音!";
                return;
            }
            // Fix for the known bug: without a recording there is nothing to
            // save, yet the original still created a file and showed a path.
            if (RecordedClip == null || data == null || data.Length == 0)
            {
                Infotxt.text = "没有可保存的录音,请先录音!";
                return;
            }
            fileName = DateTime.Now.ToString("yyyyMMddHHmmssffff");
            if (!fileName.ToLower().EndsWith(".wav"))
            {
                // Append the ".wav" extension if missing.
                fileName += ".wav";
            }
            string path = Path.Combine(Application.persistentDataPath, fileName); // save location
            print(path);
            Adress.text = path;
            using (FileStream fs = CreateEmpty(path))
            {
                fs.Write(data, 0, data.Length);
                WriteHeader(fs, RecordedClip); // seeks back and fills the reserved 44-byte header
            }
        }

        /// <summary>
        /// Trims the clip to the samples actually recorded and converts them
        /// to 16-bit little-endian PCM bytes. Must be called while the device
        /// is still recording, because Microphone.End resets the position.
        /// </summary>
        /// <param name="recordedClip">Replaced by a new clip of the real length.</param>
        /// <returns>PCM16 byte array (2 bytes per sample per channel).</returns>
        public static byte[] GetRealAudio(ref AudioClip recordedClip)
        {
            int position = Microphone.GetPosition(null);
            if (position <= 0 || position > recordedClip.samples)
            {
                // Recording already ended or buffer full: keep the whole clip.
                position = recordedClip.samples;
            }
            float[] soundata = new float[position * recordedClip.channels];
            recordedClip.GetData(soundata, 0);
            recordedClip = AudioClip.Create(recordedClip.name, position,
                recordedClip.channels, recordedClip.frequency, false);
            recordedClip.SetData(soundata, 0);
            const int rescaleFactor = 32767; // map float [-1, 1] onto the short range
            byte[] outData = new byte[soundata.Length * 2];
            for (int i = 0; i < soundata.Length; i++)
            {
                // Clamp first so samples slightly outside [-1, 1] cannot overflow short.
                short temshort = (short)(Mathf.Clamp(soundata[i], -1f, 1f) * rescaleFactor);
                byte[] temdata = BitConverter.GetBytes(temshort);
                outData[i * 2] = temdata[0];
                outData[i * 2 + 1] = temdata[1];
            }
            Debug.Log("position=" + position + "  outData.leng=" + outData.Length);
            return outData;
        }

        /// <summary>
        /// Writes the canonical 44-byte RIFF/WAVE PCM header at the start of
        /// the stream. Field widths are made explicit with casts: the original
        /// wrote 2 bytes of a 4-byte int (channels) and 4 bytes of an 8-byte
        /// long (chunk size), which only worked by little-endian accident.
        /// </summary>
        /// <param name="stream">Stream already containing the PCM data after byte 44.</param>
        /// <param name="clip">Trimmed clip supplying frequency/channels/samples.</param>
        public static void WriteHeader(FileStream stream, AudioClip clip)
        {
            int hz = clip.frequency;
            int channels = clip.channels;
            int samples = clip.samples;

            stream.Seek(0, SeekOrigin.Begin);

            // RIFF chunk descriptor.
            stream.Write(System.Text.Encoding.UTF8.GetBytes("RIFF"), 0, 4);
            stream.Write(BitConverter.GetBytes((int)(stream.Length - 8)), 0, 4); // total size - 8

            stream.Write(System.Text.Encoding.UTF8.GetBytes("WAVE"), 0, 4);

            // "fmt " sub-chunk: 16 bytes describing the PCM format.
            stream.Write(System.Text.Encoding.UTF8.GetBytes("fmt "), 0, 4);
            stream.Write(BitConverter.GetBytes(16), 0, 4);                       // sub-chunk size
            stream.Write(BitConverter.GetBytes((ushort)1), 0, 2);                // audio format: 1 = PCM
            stream.Write(BitConverter.GetBytes((ushort)channels), 0, 2);         // channel count
            stream.Write(BitConverter.GetBytes(hz), 0, 4);                       // sample rate
            stream.Write(BitConverter.GetBytes(hz * channels * 2), 0, 4);        // byte rate
            stream.Write(BitConverter.GetBytes((ushort)(channels * 2)), 0, 2);   // block align
            stream.Write(BitConverter.GetBytes((ushort)16), 0, 2);               // bits per sample

            // "data" sub-chunk.
            stream.Write(System.Text.Encoding.UTF8.GetBytes("data"), 0, 4);
            stream.Write(BitConverter.GetBytes(samples * channels * 2), 0, 4);   // PCM payload size
        }

        /// <summary>
        /// Creates the output file and reserves space for the WAV header,
        /// which WriteHeader fills in after the PCM data has been written.
        /// </summary>
        /// <param name="filepath">Destination file path (overwritten if present).</param>
        /// <returns>The open stream, positioned just past the reserved header.</returns>
        private FileStream CreateEmpty(string filepath)
        {
            FileStream fileStream = new FileStream(filepath, FileMode.Create);
            for (int i = 0; i < WavHeaderSize; i++) // reserve the 44 header bytes
            {
                fileStream.WriteByte(0);
            }
            return fileStream;
        }
    }
  7. 可执行文件(有个bug目前还没修改,不过不影响使用):

bug:当没有录音的时候,点击Save也会出现保存文件的地址,可以在Save()方法中判断一下 if (RecordedClip != null)

可执行文件下载地址:https://download.csdn.net/download/qq_40878840/12942233

有问题欢迎私信交流!

 
  • 4
    点赞
  • 14
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
好的,以下是使用 WebRTC 实现录音并压缩为 Base64,然后传递给后端保存为本地文件的详细代码: 前端代码: ```javascript // 获取音频流 navigator.mediaDevices.getUserMedia({ audio: true }) .then(stream => { // 创建 MediaRecorder 对象 const options = { mimeType: 'audio/webm' }; const mediaRecorder = new MediaRecorder(stream, options); // 存储录音数据 const recordedChunks = []; // 开始录音 mediaRecorder.start(); // 监听录音数据 mediaRecorder.addEventListener('dataavailable', event => { recordedChunks.push(event.data); }); // 停止录音 mediaRecorder.addEventListener('stop', () => { // 将录音数据转换为 Blob const blob = new Blob(recordedChunks, { type: 'audio/webm' }); // 创建 FileReader 对象 const fileReader = new FileReader(); // 监听 FileReader 对象的 onload 事件 fileReader.onload = event => { // 将录音数据转换为 Base64 const base64Data = event.target.result.split(',')[1]; // 将 Base64 数据传给后端保存为本地文件 fetch('/api/save-audio', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ audioData: base64Data }) }) .then(response => response.json()) .then(data => { console.log(data); }); }; // 读取录音数据 fileReader.readAsDataURL(blob); }); // 录音 10 秒后停止 setTimeout(() => { mediaRecorder.stop(); }, 10000); }) .catch(error => { console.error(error); }); ``` 后端代码(使用 Node.js + Express): ```javascript const express = require('express'); const fs = require('fs'); const app = express(); const PORT = 3000; app.use(express.json()); app.post('/api/save-audio', (req, res) => { const { audioData } = req.body; // 将 Base64 数据转换为二进制数据 const binaryData = Buffer.from(audioData, 'base64'); // 生成文件名 const fileName = `recording_${Date.now()}.webm`; // 将二进制数据保存为文件 fs.writeFile(fileName, binaryData, (err) => { if (err) { console.error(err); res.status(500).json({ error: 'Failed to save audio file' }); } else { console.log(`Saved audio file: ${fileName}`); res.json({ success: true }); } }); }); app.listen(PORT, () => { console.log(`Server listening at http://localhost:${PORT}`); }); ``` 以上代码实现了以下功能: 1. 获取音频流 2. 创建 `MediaRecorder` 对象,并存储录音数据 3. 将录音数据转换为 `Blob`,再转换为 Base64 4. 
将 Base64 数据传给后端保存为本地文件 5. 在后端将 Base64 数据转换为二进制数据,并保存为本地文件 你只需要将 `/api/save-audio` 替换为你的后端保存接口即可。另外,由于 `MediaRecorder` 对象在不同浏览器中的实现可能有所不同,所以你需要对不同浏览器进行兼容性处理。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值