Loading External mp3/wav Audio at Runtime in Unity

This article covers how to load external audio files (mp3/wav) at runtime in Unity without going through WWW or UnityWebRequest; if you want the www-based approach, search for it yourself.

Reference libraries:

  1. NAudio: full-featured, but Windows-only
  2. NLayer: reads and decodes mp3 files, which is exactly what is needed here

GitHub repositories (both under the naudio organization):

  1. NAudio: https://github.com/naudio/NAudio - Audio and MIDI library for .NET
  2. NLayer: https://github.com/naudio/NLayer - MPEG 1 & 2 Decoder for Layers 1, 2 & 3

NAudio

Input: mp3 file

Output: AudioClip (a minimal usage sketch for calling these loaders follows the code below)

// Use NAudio
// file => mp3 byte[] => {NAudio} => wav byte[] => AudioClip

using System;
using System.IO;
using NAudio.Wave;
using UnityEngine;
// parse mp3 (NAudio): decode to PCM, wrap it in a wav container in memory, then reuse the wav loader
AudioClip LoadMp3Audio(byte[] audioBytes, string fileName)
{
    MemoryStream mp3stream = new MemoryStream(audioBytes);
    Mp3FileReader mp3audio = new Mp3FileReader(mp3stream);
    // Convert the mp3 frames into a raw PCM stream
    WaveStream waveStream = WaveFormatConversionStream.CreatePcmStream(mp3audio);
    MemoryStream outputStream = new MemoryStream();
    // Write the PCM data into a wav container held in memory
    using (WaveFileWriter waveFileWriter = new WaveFileWriter(outputStream, waveStream.WaveFormat))
    {
        byte[] bytes = new byte[waveStream.Length];
        waveStream.Position = 0;
        waveStream.Read(bytes, 0, Convert.ToInt32(waveStream.Length));
        waveFileWriter.Write(bytes, 0, bytes.Length);
        waveFileWriter.Flush();
    }
    byte[] wavBytes = outputStream.ToArray();
    return LoadWavAudio(wavBytes, fileName);
}
// parse wav
AudioClip LoadWavAudio(byte[] audioBytes, string fileName)
{
    // WAV is the custom wav-parsing helper class defined below
    WAV wav = new WAV(audioBytes);
    AudioClip audioClip;
    if (wav.ChannelCount == 2)
    {
        audioClip = AudioClip.Create(fileName, wav.SampleCount, 2, wav.Frequency, false);
        audioClip.SetData(wav.StereoChannel, 0);
    }
    else
    {
        audioClip = AudioClip.Create(fileName, wav.SampleCount, 1, wav.Frequency, false);
        audioClip.SetData(wav.LeftChannel, 0);
    }
    return audioClip;
}
// The parsing logic below is not complete (it assumes 16-bit PCM), but it works; as long as either you or the code can run, that's good enough.
public class WAV
{

    // convert two bytes to one float in the range -1 to 1
    static float bytesToFloat(byte firstByte, byte secondByte)
    {
        // convert two bytes to one short (little endian)
        short s = (short)((secondByte << 8) | firstByte);
        // convert to range from -1 to (just below) 1
        return s / 32768.0F;
    }

    static int bytesToInt(byte[] bytes, int offset = 0)
    {
        int value = 0;
        for (int i = 0; i < 4; i++)
        {
            value |= ((int)bytes[offset + i]) << (i * 8);
        }
        return value;
    }
    // properties
    public float[] LeftChannel { get; internal set; }
    public float[] RightChannel { get; internal set; }
    public float[] StereoChannel { get; internal set; }
    public int ChannelCount { get; internal set; }
    public int SampleCount { get; internal set; }
    public int Frequency { get; internal set; }


    public WAV(byte[] wav)
    {
        // Determine if mono or stereo
        ChannelCount = wav[22];     // Forget byte 23 as 99.999% of WAVs are 1 or 2 channels

        // Get the frequency
        Frequency = bytesToInt(wav, 24);

        // Get past all the other sub chunks to get to the data subchunk:
        int pos = 12;   // First Subchunk ID from 12 to 16

        // Keep iterating until we find the data chunk (i.e. 64 61 74 61 ...... (i.e. 100 97 116 97 in decimal))
        while (!(wav[pos] == 100 && wav[pos + 1] == 97 && wav[pos + 2] == 116 && wav[pos + 3] == 97))
        {
            pos += 4;
            int chunkSize = wav[pos] + wav[pos + 1] * 256 + wav[pos + 2] * 65536 + wav[pos + 3] * 16777216;
            pos += 4 + chunkSize;
        }
        pos += 8;

        // Pos is now positioned to start of actual sound data.
        SampleCount = (wav.Length - pos) / 2;     // 2 bytes per sample (16 bit sound mono)
        if (ChannelCount == 2) SampleCount /= 2;        // 4 bytes per sample (16 bit stereo)

        // Allocate memory (right will be null if only mono sound)
        LeftChannel = new float[SampleCount];
        if (ChannelCount == 2) RightChannel = new float[SampleCount];
        else RightChannel = null;

        // Write to the float array(s):
        int i = 0;
        while (pos < wav.Length)
        {
            LeftChannel[i] = bytesToFloat(wav[pos], wav[pos + 1]);
            pos += 2;
            if (ChannelCount == 2)
            {
                RightChannel[i] = bytesToFloat(wav[pos], wav[pos + 1]);
                pos += 2;
            }
            i++;
        }

        //Merge left and right channels for stereo sound
        if (ChannelCount == 2)
        {
            StereoChannel = new float[SampleCount * 2];
            //Current position in our left and right channels
            int channelPos = 0;
            //After we've changed two values for our Stereochannel, we want to increase our channelPos
            short posChange = 0;

            for (int index = 0; index < (SampleCount * 2); index++)
            {

                if (index % 2 == 0)
                {
                    StereoChannel[index] = LeftChannel[channelPos];
                    posChange++;
                }
                else
                {
                    StereoChannel[index] = RightChannel[channelPos];
                    posChange++;
                }
                //Two values have been changed, so update our channelPos
                if (posChange % 2 == 0)
                {
                    if (channelPos < SampleCount)
                    {
                        channelPos++;
                        //Reset the counter for next iterations
                        posChange = 0;
                    }
                }
            }
        }
        else
        {
            StereoChannel = null;
        }

        Debug.Log($"wav.Length={wav.Length}, ChannelCount={ChannelCount}, SampleCount={LeftChannel.Length}");
    }
}
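
A minimal usage sketch (not part of the original listing): it assumes a hypothetical file path and an AudioSource on the same GameObject, reads the file bytes with File.ReadAllBytes, and hands them to the LoadMp3Audio/LoadWavAudio methods above, which are assumed to be copied into this class.

// Usage sketch: read an external mp3 from disk and play the resulting AudioClip.
// LoadMp3Audio / LoadWavAudio are the methods shown above, assumed to be members of this class.
using System.IO;
using UnityEngine;

[RequireComponent(typeof(AudioSource))]
public class ExternalAudioPlayer : MonoBehaviour
{
    // Hypothetical path; point it at your own external file
    public string mp3Path = @"D:\audio\test.mp3";

    void Start()
    {
        byte[] bytes = File.ReadAllBytes(mp3Path);   // raw mp3 bytes from outside the project
        AudioClip clip = LoadMp3Audio(bytes, Path.GetFileNameWithoutExtension(mp3Path));

        var source = GetComponent<AudioSource>();
        source.clip = clip;
        source.Play();
    }

    // AudioClip LoadMp3Audio(byte[] audioBytes, string fileName) { ... }  // from the listing above
    // AudioClip LoadWavAudio(byte[] audioBytes, string fileName) { ... }  // from the listing above
}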

NLayer

// NLayer
// file => mp3 byte[] => {MpegFile} => {ReadSamples} => sample float[] => AudioClip
// The key is the ReadSamples call (a streaming variant is sketched after this block)

using NLayer;
using UnityEngine;

AudioClip LoadMp3Audio(byte[] audioBytes, string fileName)
{
    System.IO.Stream memStream = new System.IO.MemoryStream(audioBytes);
    var mpegFile = new MpegFile(memStream);
    // MpegFile.Length reports the decoded stream length in bytes, hence / sizeof(float) / Channels
    int lengthSamples = (int)(mpegFile.Length / sizeof(float) / mpegFile.Channels);
    float[] samples = new float[lengthSamples * mpegFile.Channels];
    int readCount = mpegFile.ReadSamples(samples, 0, lengthSamples * mpegFile.Channels);
    AudioClip ac = AudioClip.Create(fileName, lengthSamples, mpegFile.Channels, mpegFile.SampleRate, false);
    ac.SetData(samples, 0);
    return ac;
}
// wav loading is the same as in the NAudio section above
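
The version above decodes the whole file into one float[] before creating the clip. As an alternative (not shown in the original article), NLayer can also feed a streaming AudioClip through the PCMReaderCallback overload of AudioClip.Create, so decoding happens on demand. A rough sketch, assuming MpegFile.Position is expressed in decoded bytes:

// Streaming sketch: let Unity pull samples from NLayer as playback needs them.
AudioClip LoadMp3AudioStreaming(byte[] audioBytes, string fileName)
{
    var mpegFile = new MpegFile(new System.IO.MemoryStream(audioBytes));
    int lengthSamples = (int)(mpegFile.Length / sizeof(float) / mpegFile.Channels);

    return AudioClip.Create(fileName, lengthSamples, mpegFile.Channels, mpegFile.SampleRate,
        true,                                                 // stream = true: decode on demand
        data => mpegFile.ReadSamples(data, 0, data.Length),   // fill each buffer Unity requests
        position => mpegFile.Position = (long)position * sizeof(float) * mpegFile.Channels); // seek callback
}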

References:

  1. https://www.jianshu.com/p/bf20e69f2cc4
  2. Audio - Import mp3 at runtime? - Unity Forum
  3. unity-load-mp3-at-runtime/Assets/NLayer at master · greggman/unity-load-mp3-at-runtime · GitHub
  4. (Solved)Load a 16bit .Wav file WITHOUT using WWW (at runtime) - Unity Forum

To improve the loading experience (for example, to avoid blocking the main thread while decoding), consider UniTask:

https://github.com/Cysharp/UniTask
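
A rough sketch of how UniTask might be applied here (method names are illustrative, not from the article): run the NLayer decoding on a thread-pool thread, then switch back to the main thread before touching any UnityEngine objects, since AudioClip.Create and SetData must run on the main thread.

// Sketch only: offload mp3 decoding with UniTask, then build the AudioClip on the main thread.
using Cysharp.Threading.Tasks;
using NLayer;
using UnityEngine;

public static class AsyncAudioLoader
{
    public static async UniTask<AudioClip> LoadMp3Async(byte[] audioBytes, string fileName)
    {
        await UniTask.SwitchToThreadPool();   // heavy decoding off the main thread

        var mpegFile = new MpegFile(new System.IO.MemoryStream(audioBytes));
        int lengthSamples = (int)(mpegFile.Length / sizeof(float) / mpegFile.Channels);
        float[] samples = new float[lengthSamples * mpegFile.Channels];
        mpegFile.ReadSamples(samples, 0, samples.Length);
        int channels = mpegFile.Channels;
        int sampleRate = mpegFile.SampleRate;

        await UniTask.SwitchToMainThread();   // UnityEngine objects must be created on the main thread

        AudioClip clip = AudioClip.Create(fileName, lengthSamples, channels, sampleRate, false);
        clip.SetData(samples, 0);
        return clip;
    }
}

Calling it looks like AudioClip clip = await AsyncAudioLoader.LoadMp3Async(bytes, "song"); from any async UniTask method.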
