import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

os.environ["HF_TOKEN"] = '*******'
# Set the environment variable that stores the Hugging Face access token
model = 'meta-llama/Meta-Llama-3-8B'
# Model name on the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained(model)
# Load the tokenizer that matches the pretrained checkpoint
llama = AutoModelForCausalLM.from_pretrained(model, device_map="cuda:1")
# Load the causal language model and place it on the specified GPU
llama.device
# device(type='cuda', index=1)
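By default, from_pretrained loads the weights in float32, which needs roughly 32 GB of GPU memory for an 8B-parameter model. A minimal lower-memory sketch, assuming an Ampere-or-newer GPU with bfloat16 support (the name llama_bf16 is illustrative, not from the original):

llama_bf16 = AutoModelForCausalLM.from_pretrained(
    model,
    torch_dtype=torch.bfloat16,  # roughly half the memory of the default float32
    device_map="cuda:1",
)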
2 Inference
import time
begin = time.time()
input_text = "Write me a poem about machine learning."
inputs = tokenizer(input_text, return_tensors="pt").to(llama.device)
# The tokenizer returns a dict of tensors (input_ids, attention_mask)
outputs = llama.generate(**inputs)
print(tokenizer.decode(outputs[0]))
end = time.time()
print(end - begin)
'''
<|begin_of_text|>Write me a poem about machine learning. I will use it for a project in my class. You can use whatever words you want. I will use it for a project in my class. You can use whatever words you want.<|end_of_text|>
1.718801736831665
'''
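The result is short and repetitive partly because Meta-Llama-3-8B is a base model, not an instruction-tuned one, so it simply continues the prompt text, and partly because generate falls back on the checkpoint's default generation settings. A minimal sketch of passing explicit generation parameters (the values shown are illustrative, not from the original):

inputs = tokenizer(input_text, return_tensors="pt").to(llama.device)
outputs = llama.generate(
    **inputs,
    max_new_tokens=128,  # upper bound on newly generated tokens
    do_sample=True,      # sample instead of greedy decoding
    temperature=0.7,     # lower values make output more deterministic
    top_p=0.9,           # nucleus sampling cutoff
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

For conversational output, the meta-llama/Meta-Llama-3-8B-Instruct checkpoint combined with tokenizer.apply_chat_template is usually the better fit.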