public void GetSpectrumData(float[] samples, int channel, FFTWindow window);
这是AudioSource类型的方法,获取频谱数据;
-
samples:存放频谱数据的输出数组,其长度决定采集频谱的精度,必须为2^n,最小64,最大8192。
-
channel:信道,默认0
-
window:采样方式(Enum),有下列几种
Rectangular W[n] = 1.0.
Triangle W[n] = TRI(2n/N).
Hamming W[n] = 0.54 - (0.46 * COS(n/N) ).
Hanning W[n] = 0.5 * (1.0 - COS(n/N) ).
Blackman W[n] = 0.42 - (0.5 * COS(n/N) ) + (0.08 * COS(2.0 * n/N) ).
BlackmanHarris W[n] = 0.35875 - (0.48829 * COS(1.0 * n/N)) + (0.14128 * COS(2.0 * n/N)) - (0.01168 * COS(3.0 * n/N)).
22050HZ的频率分在几个范围:(从小到大声音越来越尖锐)
20~60:
60~250:
250~500:
500~2000:
2000~4000:
4000~6000:
6000~20000:
22050采样512个数据,一个sample 43hz;
分成8段:
0: 2 86hz
1: 4 172hz
2: 8 344hz
3: 16 688hz
4: 32 1376hz
5: 64 2752hz
6: 128 5504hz
7: 256 11008hz
Unity中新建一个类,AudioVisable
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
[RequireComponent(typeof(AudioSource))]
public class AudioVisable : MonoBehaviour {

    AudioSource audioSource;

    // Raw FFT spectrum from the AudioSource.
    // 512 bins; at a 22050 Hz output range each bin is ~43 Hz wide.
    public float[] samples = new float[512];

    // 8 frequency bands built from the 512 bins (2, 4, 8, ..., 256(+2) bins each).
    float[] freqBand = new float[8];
    // Smoothed version of freqBand: rises instantly, falls gradually.
    float[] bandBuffer = new float[8];
    // Per-band fall speed; accelerates while the band keeps falling.
    float[] bufferDecrease = new float[8];
    // Running per-band maximum, used to normalize bands into 0..1.
    float[] _freqBandHighest = new float[8];

    // Normalized (0..1) band values consumed by other scripts (e.g. ParamCube).
    public static float[] _audioBand = new float[8];
    public static float[] _audioBandBuffer = new float[8];

    // Overall loudness (sum of normalized bands), raw and buffered, 0..1.
    public static float _Amplitude, _AmplitudeBuffer;
    float _AmplitudeHighest;

    void Start () {
        audioSource = GetComponent<AudioSource>();
    }

    void Update () {
        GetSpectrumAudioSource();
        MakeFrequencyBands();
        BandBuffer();
        CreateAudioBands();
        GetAmplitude();
    }

    // Fills `samples` with the current spectrum (channel 0, Blackman window).
    // (Fixed typo in the original method name "GetSpectrumAduioSource".)
    void GetSpectrumAudioSource()
    {
        audioSource.GetSpectrumData(samples, 0, FFTWindow.Blackman);
    }

    // Collapses the 512 spectrum bins into 8 bands of exponentially growing width.
    void MakeFrequencyBands()
    {
        int count = 0;
        for (int i = 0; i < 8; i++)
        {
            // Band i spans 2^(i+1) bins; the last band gets 2 extra bins so
            // the total is exactly 512 (2+4+...+256 = 510).
            int sampleCount = (int)Mathf.Pow(2, i + 1);
            if (i == 7)
            {
                sampleCount += 2;
            }

            float average = 0;
            for (int j = 0; j < sampleCount; j++)
            {
                // Weight by bin index so quieter high frequencies still register.
                average += samples[count] * (count + 1);
                count++;
            }
            // BUG FIX: divide by the number of bins in THIS band, not the
            // cumulative bin count (`count`), which suppressed higher bands.
            average /= sampleCount;
            freqBand[i] = average * 10;
        }
    }

    // Smooths each band: jumps up immediately, decays with accelerating speed.
    void BandBuffer()
    {
        for (int i = 0; i < 8; i++)
        {
            if (freqBand[i] > bandBuffer[i])
            {
                bandBuffer[i] = freqBand[i];
                bufferDecrease[i] = 0.005f; // reset decay speed
            }
            else if (freqBand[i] < bandBuffer[i])
            {
                // Clamp so the buffer never decays below the live value
                // (the original could undershoot and flicker).
                bandBuffer[i] = Mathf.Max(bandBuffer[i] - bufferDecrease[i], freqBand[i]);
                bufferDecrease[i] *= 1.2f; // decay accelerates each frame
            }
        }
    }

    // Normalizes each band to 0..1 against the highest value seen so far.
    void CreateAudioBands()
    {
        for (int i = 0; i < 8; i++)
        {
            if (freqBand[i] > _freqBandHighest[i])
            {
                _freqBandHighest[i] = freqBand[i];
            }
            // Guard: highest is 0 until the first non-silent frame; the
            // original divided by zero here, producing NaN.
            if (_freqBandHighest[i] > 0)
            {
                _audioBand[i] = freqBand[i] / _freqBandHighest[i];
                _audioBandBuffer[i] = bandBuffer[i] / _freqBandHighest[i];
            }
            else
            {
                _audioBand[i] = 0;
                _audioBandBuffer[i] = 0;
            }
        }
    }

    // Overall loudness = sum of normalized bands, itself normalized to 0..1.
    void GetAmplitude()
    {
        float currentAmplitude = 0;
        float currentAmplitudeBuffer = 0;
        for (int i = 0; i < 8; i++)
        {
            currentAmplitude += _audioBand[i];
            currentAmplitudeBuffer += _audioBandBuffer[i];
        }
        if (currentAmplitude > _AmplitudeHighest)
        {
            _AmplitudeHighest = currentAmplitude;
        }
        // Guard against division by zero during silence at startup.
        if (_AmplitudeHighest > 0)
        {
            _Amplitude = currentAmplitude / _AmplitudeHighest;
            _AmplitudeBuffer = currentAmplitudeBuffer / _AmplitudeHighest;
        }
    }
}
这个脚本挂在Camera上,可以看到采样的数据的变化;
Unity中,新建一个cube预制体开启,使用standard材质,打开hdr的外发光emission;
再新建一个脚本ParamCube,挂在cube预制体上;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class ParamCube : MonoBehaviour {

    // Index (0..7) of the frequency band this cube visualizes.
    public int band;
    // Base Y scale, and how strongly the band value stretches the cube.
    public float startScale, scaleMultiplier;

    Material material;

    void Start () {
        // Grab an instance material so emission changes affect only this cube.
        material = GetComponent<MeshRenderer>().materials[0];
    }

    void Update () {
        float level = AudioVisable._audioBandBuffer[band];

        // Stretch the cube vertically with the buffered band value.
        Vector3 scale = transform.localScale;
        scale.y = level * scaleMultiplier + startScale;
        transform.localScale = scale;

        // Drive the HDR emission brightness with the same value (grayscale).
        Color emission = new Color(level, level, level);
        material.SetColor("_EmissionColor", emission);
    }
}
然后在Scene中放8个cube预制体,把脚本中StartScale和scaleMultiplier调为1和10,band为0~7;
最后,在Camera的AudioSource组件上添加音频,运行~
参考:
国外Youtube上的大神,链接如下:
https://www.youtube.com/playlist?list=PL3POsQzaCw53p2tA6AWf7_AWgplskR0Vo