GitHub source-code walkthrough: https://github.com/ArtificialZeng/Baichuan2-Explained
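The code below appears to be the model-initialization portion of Baichuan2's Streamlit chat demo (web_demo.py); it sets up the page and loads the baichuan-inc/Baichuan2-13B-Chat checkpoint from the Hugging Face Hub.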
import json
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
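
# Basic Streamlit page setup: browser tab title and on-page heading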
st.set_page_config(page_title="Baichuan 2")
st.title("Baichuan 2")
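
# Cache the loaded model as a resource so Streamlit reruns reuse it instead of reloading the weights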
@st.cache_resource
def init_model():
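    # Load Baichuan2-13B-Chat in float16 and let device_map="auto" spread the layers across available devices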
    model = AutoModelForCausalLM.from_pretrained(
        "baichuan-inc/Baichuan2-13B-Chat",
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True
    )
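    # Attach the default generation settings (sampling parameters, etc.) shipped with the checkpoint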
    model.generation_config = GenerationConfig.from_pretrained(
        "baichuan-inc/Baichuan2-13B-Chat"
    )