Method 1: load the model with LlamaForCausalLM and call generate() directly
from transformers import LlamaForCausalLM, AutoTokenizer
# path to the downloaded HF model
hf_model_path = './Llama-2-7b'
model = LlamaForCausalLM.from_pretrained(hf_model_path, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(hf_model_path)
prompt = "Hey, are you conscious? Can you talk to me?"
# tokenize the prompt and move the input ids onto the model's device
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# Generate
generate_ids = model.generate(inputs.input_ids, max_length=30)
res = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
print(res)
Method 2: use the transformers text-generation pipeline
import torch
import transformers
from transformers import AutoTokenizer
# path to the downloaded HF model
hf_model_path = './Llama-2-7b'
tokenizer = AutoTokenizer.from_pretrained(hf_model_path)
pipeline = transformers.pipeline(
    "text-generation",
    model=hf_model_path,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,
    device_map="auto",
)
# Generate
res = pipeline("Hey, are you conscious? Can you talk to me?", max_length=30)
print(res[0]["generated_text"])