# model_test.py — smoke test: load HF causal-LM checkpoints and tokenize a prompt
from transformers import AutoTokenizer, AutoModelForCausalLM
import warnings
import os
# Silence all Python warnings (e.g. HF deprecation chatter) so the test output stays clean.
warnings.filterwarnings("ignore")

# Checkpoint identifiers to smoke-test, one entry per model.
hf_path_list = ["hf_qwen2_1b"]
# Smoke-test each checkpoint: load it and tokenize a sample prompt.
# NOTE(review): loop body continues past this chunk (truncated at the
# attention-mask line below) — the model loaded here is presumably used there.
for hf_path in hf_path_list:
    print("[M.M]", hf_path)
    # Load the tokenizer and model; hf_path looks like a local checkpoint
    # directory rather than a hub id — TODO confirm.
    tokenizer = AutoTokenizer.from_pretrained(hf_path)
    model = AutoModelForCausalLM.from_pretrained(hf_path)
    # Encode the input text and build the attention mask.
    # Alternative prompt kept for quick swapping:
    # input_text = "Hey, are you conscious? Can you talk to me?"
    input_text = "There was a thick forest on the sides of a mountain. Many kinds of animals lived in the forest."
    # Tokenize to PyTorch tensors; .input_ids is shape (1, seq_len).
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
attention_mask = (input_ids != tokenizer.pad_t