First, don't reload the model on every API call; load it once at startup and reuse it.
Second, run on the GPU whenever one is available instead of the CPU.
import os
import json
from tqdm import tqdm
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
import torch

# Pick the first GPU if one is available, otherwise fall back to the CPU
device = 0 if torch.cuda.is_available() else -1
def get_en_to_zh_model():
    # Load the model and tokenizer a single time; the returned pipeline is reused for every request
    model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-zh")
    tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-zh")
    translation = pipeline("translation_en_to_zh", model=model, tokenizer=tokenizer, device=device)
    return translation
# Loaded once at startup, not inside the request handler
translation = get_en_to_zh_model()
def en_to_ch(text):
    # Translate English text to Chinese using the pre-loaded pipeline
    # e.g. text = "Student accommodation centres, resorts"
    translated_text = translation(text, max_length=888)[0]['translation_text']
    return translated_text
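As a minimal usage sketch (the sentence list and loop below are illustrative, not from the original code), the pipeline loaded above can be reused across many calls, which is exactly where the one-time loading pays off:

# Illustrative usage: reuse the already-loaded pipeline for a batch of sentences
sentences = ["Student accommodation centres, resorts", "The weather is nice today"]
results = [en_to_ch(s) for s in tqdm(sentences)]
print(results)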
Also note that you need to replace your PyTorch install with the GPU (CUDA) build, otherwise the pipeline will silently stay on the CPU.
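A quick way to verify that the CUDA build is actually in use (assuming PyTorch is already installed):

import torch
print(torch.__version__)          # a CUDA build typically shows a "+cuXXX" suffix
print(torch.cuda.is_available())  # should print True once the GPU build and driver are set up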