摘要
使用 AudioContext 实现调整音量、获取实时音量和频谱,还可以分离左右声道等等。
参考
https://developer.mozilla.org/zh-CN/docs/Web/API/AudioContext/createMediaElementSource
https://developer.mozilla.org/zh-CN/docs/Web/API/AudioContext/createAnalyser
https://developer.mozilla.org/zh-CN/docs/Web/API/AnalyserNode/getFloatFrequencyData
https://www.jianshu.com/p/12464a2595fb
https://blog.csdn.net/weixin_43946812/article/details/90755375
https://www.html5rocks.com/en/tutorials/webaudio/intro/
代码
// Via an <audio> element: wrap it in a MediaElementAudioSourceNode.
// Bug fixed: the original used `context` without ever defining it — create the
// AudioContext here (with the webkit fallback used elsewhere in this note).
var sound, audio = new Audio();
var context = new (window.AudioContext || window.webkitAudioContext)();
audio.addEventListener('canplay', function() {
  // `canplay` can fire more than once (e.g. after a seek), and calling
  // createMediaElementSource() twice on the same element throws an
  // InvalidStateError — so only build and connect the node once.
  if (!sound) {
    sound = context.createMediaElementSource(audio);
    sound.connect(context.destination);
  }
});
audio.src = '/audio.mp3';
// Use the first <video> element on the page as the audio source.
var v = document.getElementsByTagName('video')[0];
var aCtx = new (window.AudioContext || window.webkitAudioContext)();
var source = aCtx.createMediaElementSource(v);
// Alternative source/destination nodes, kept here for reference:
// var sourceNode = aCtx.createBufferSource();
// var destinationNode = aCtx.destination;
// Split the stream into its channels (2 = channel count; could instead be
// read from the media element's track metadata).
var splitterNode = aCtx.createChannelSplitter(2);
source.connect(splitterNode);
// One analyser per channel so left/right data can be sampled independently.
var leftAnalyserNode = aCtx.createAnalyser();
var rightAnalyserNode = aCtx.createAnalyser();
splitterNode.connect(leftAnalyserNode, 0);
splitterNode.connect(rightAnalyserNode, 1);
// Recombine both channels and route the result to the speakers.
var mergerNode = aCtx.createChannelMerger(2);
leftAnalyserNode.connect(mergerNode, 0, 0);
rightAnalyserNode.connect(mergerNode, 0, 1);
mergerNode.connect(aCtx.destination);
// Sample per-channel data from the analysers.
// Time-domain bytes: values range 0-255, mapped onto -1..+1, so 128 is zero
// (not volts — a unitless full-scale value).
var leftBuff = new Uint8Array(leftAnalyserNode.frequencyBinCount);
leftAnalyserNode.getByteTimeDomainData(leftBuff);
var rightBuff = new Uint8Array(rightAnalyserNode.frequencyBinCount);
rightAnalyserNode.getByteTimeDomainData(rightBuff);
// Bug fixed: the original re-declared `rightBuff` with a second `var`,
// clobbering the time-domain byte buffer. Use a distinct Float32Array for the
// frequency-domain read (values are in dB).
var rightFreqBuff = new Float32Array(rightAnalyserNode.frequencyBinCount);
rightAnalyserNode.getFloatFrequencyData(rightFreqBuff);
// Adjust playback volume by inserting a GainNode between source and output.
var gainNode = aCtx.createGain();
gainNode.gain.value = 0.1; // attenuate to 10% of full scale
source.connect(gainNode);
gainNode.connect(aCtx.destination);
// Measure live volume with a ScriptProcessorNode (buffer 4096, 2 in / 2 out).
// Bug fixed: `scriptProcessor` was assigned without declaration, creating an
// implicit global — declare it explicitly.
// NOTE(review): createScriptProcessor is deprecated; prefer AudioWorkletNode
// in new code.
var scriptProcessor = aCtx.createScriptProcessor(4096, 2, 2);
source.connect(scriptProcessor);
scriptProcessor.connect(aCtx.destination);
// Called once per rendered audio block.
scriptProcessor.onaudioprocess = (e) => {
  // Each channel is a Float32Array of PCM samples in the range -1..+1.
  const lBuffer = e.inputBuffer.getChannelData(0);
  const rBuffer = e.inputBuffer.getChannelData(1);
  // Peak sample value within this block.
  const lMaxVal = Math.max(...lBuffer);
  const rMaxVal = Math.max(...rBuffer);
  // Scale to 0-100 for display.
  const lVol = Math.round(lMaxVal * 100);
  const rVol = Math.round(rMaxVal * 100);
  console.log(lVol, rVol);
};