Preface
First, let's look at the official introduction to MMS (Massively Multilingual Speech):
The Massively Multilingual Speech (MMS) project expands speech technology from about 100 languages to over 1,000 by building a single multilingual speech-recognition model supporting more than 1,100 languages (over 10x more than before), language-identification models able to recognize over 4,000 languages (over 40x more than before), pretrained models supporting more than 1,400 languages, and text-to-speech models for over 1,100 languages. Our goal is to make it easier for people to access information and use devices in their preferred language.
ASR covering more than 1,100 languages (and language identification covering 4,000+) is honestly wild.
Supported languages: https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html
Model: https://huggingface.co/facebook/mms-1b-all
For all the languages it supports, though, the documentation is really sparse. It took me forever to figure out how to run local inference, so I'm writing this post to save time for anyone else who wants to try MMS. Without further ado, here's the code.
Inference
import librosa
import torch
from loguru import logger
from transformers import Wav2Vec2ForCTC, AutoProcessor

from common.error import ErrorCode


class MMSASRService:
    def __init__(self, config, options):
        self.asr_sampling_rate = 16_000
        self.model_path = "./data/models/mms-1b-all"
        # set device
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")

    def load(self):
        logger.info("loading mms asr model")
        self.processor = AutoProcessor.from_pretrained(self.model_path)
        self.model = Wav2Vec2ForCTC.from_pretrained(self.model_path)

    def get_asr_result(self, audio_fp, lang_code="cmn-script_simplified"):
        try:
            # resample to 16 kHz mono, the rate the model expects
            audio_samples = librosa.load(
                audio_fp, sr=self.asr_sampling_rate, mono=True
            )[0]
            logger.info(f"load {lang_code} adapter...")
            # switch the tokenizer vocab and adapter weights to the target language
            self.processor.tokenizer.set_target_lang(lang_code)
            self.model.load_adapter(lang_code)
            inputs = self.processor(
                audio_samples, sampling_rate=self.asr_sampling_rate, return_tensors="pt"
            )
            self.model.to(self.device)
            inputs = inputs.to(self.device)
            with torch.no_grad():
                outputs = self.model(**inputs).logits
            # greedy CTC decoding: argmax over the vocabulary at each frame
            ids = torch.argmax(outputs, dim=-1)[0]
            transcription = self.processor.decode(ids)
        except Exception as e:
            logger.error(f"mms asr failed, Error: {e}")
            return {
                "err": ErrorCode.FAILED.name.lower(),
                "result": None,
            }
        return {
            "err": ErrorCode.SUCCESS.name.lower(),
            "result": transcription,
        }
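For reference, a minimal usage sketch (assuming ErrorCode defines SUCCESS/FAILED members as above; the config and options arguments are unused placeholders in this class, and the wav path is just an example):

service = MMSASRService(config=None, options=None)
service.load()
res = service.get_asr_result("./test/asr/data/55.wav", lang_code="cmn-script_simplified")
if res["err"] == "success":
    print(res["result"])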
All available lang_code values can be found in vocab.json inside the model directory.
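If you just want to enumerate them, here is a quick sketch that reads vocab.json directly (assuming the same local model path as above, and that the file is a dict keyed by lang_code, which is how the multilingual tokenizer stores per-language vocabularies):

import json

# vocab.json maps each lang_code to that language's token vocabulary
with open("./data/models/mms-1b-all/vocab.json") as f:
    vocab = json.load(f)
print(len(vocab))        # number of supported languages
print(list(vocab)[:10])  # a few example lang_codes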
You can also pair mms-1b-all with its companion language models, mms-cclms, to improve decoding. Quite a few languages are not covered there, though; even Mandarin Chinese is missing. Here is the inference code:
import json

import librosa
import torch
from huggingface_hub import hf_hub_download
from torchaudio.models.decoder import ctc_decoder
from transformers import Wav2Vec2ForCTC, AutoProcessor

ASR_SAMPLING_RATE = 16_000
MODEL_ID = "./data/models/mms-1b-all"

processor = AutoProcessor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)

# fetch the decoding config that maps each language to its LM files
lm_decoding_configfile = hf_hub_download(
    repo_id="facebook/mms-cclms",
    filename="decoding_config.json",
    subfolder="mms-1b-all",
)
with open(lm_decoding_configfile) as f:
    lm_decoding_config = json.loads(f.read())

# use the language model decoding config for "eng"
decoding_config = lm_decoding_config["eng"]
# the config stores repo-relative paths; split each into subfolder and
# filename for hf_hub_download
lm_file = hf_hub_download(
    repo_id="facebook/mms-cclms",
    filename=decoding_config["lmfile"].rsplit("/", 1)[1],
    subfolder=decoding_config["lmfile"].rsplit("/", 1)[0],
)
token_file = hf_hub_download(
    repo_id="facebook/mms-cclms",
    filename=decoding_config["tokensfile"].rsplit("/", 1)[1],
    subfolder=decoding_config["tokensfile"].rsplit("/", 1)[0],
)
lexicon_file = None
if decoding_config["lexiconfile"] is not None:
    lexicon_file = hf_hub_download(
        repo_id="facebook/mms-cclms",
        filename=decoding_config["lexiconfile"].rsplit("/", 1)[1],
        subfolder=decoding_config["lexiconfile"].rsplit("/", 1)[0],
    )

beam_search_decoder = ctc_decoder(
    lexicon=lexicon_file,
    tokens=token_file,
    lm=lm_file,
    nbest=1,
    beam_size=500,
    beam_size_token=50,
    lm_weight=float(decoding_config["lmweight"]),
    word_score=float(decoding_config["wordscore"]),
    sil_score=float(decoding_config["silweight"]),
    blank_token="<s>",
)
audio_fp = "./test/asr/data/55.wav"
# the adapter language has to match the LM config chosen above, and
# mms-cclms has no Mandarin LM, so this example decodes "eng"
lang_code = "eng"
audio_samples = librosa.load(audio_fp, sr=ASR_SAMPLING_RATE, mono=True)[0]
processor.tokenizer.set_target_lang(lang_code)
model.load_adapter(lang_code)
inputs = processor(
    audio_samples, sampling_rate=ASR_SAMPLING_RATE, return_tensors="pt"
)

# set device
if torch.cuda.is_available():
    device = torch.device("cuda")
elif (
    hasattr(torch.backends, "mps")
    and torch.backends.mps.is_available()
    and torch.backends.mps.is_built()
):
    device = torch.device("mps")
else:
    device = torch.device("cpu")

model.to(device)
inputs = inputs.to(device)

with torch.no_grad():
    outputs = model(**inputs).logits

# torchaudio's ctc_decoder runs on CPU, so move the logits back first
beam_search_result = beam_search_decoder(outputs.to("cpu"))
transcription = " ".join(beam_search_result[0][0].words).strip()
print("----" * 20)
print(transcription)
print("----" * 20)
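Since LM coverage is spotty, it is worth checking which languages decoding_config.json actually contains before relying on it. A small sketch reusing the lm_decoding_config dict loaded above (the exact key naming is an assumption on my part):

# list every language that ships an n-gram LM in mms-cclms
print(sorted(lm_decoding_config.keys()))
# per the coverage gap noted above, this should print False
print("cmn-script_simplified" in lm_decoding_config)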
I tried it on Chinese, and the results are still a long way behind Whisper. Combined with the sparse documentation, it's no wonder so few people use this thing.