from transformers import AutoTokenizer, AutoModel, GPTQConfig
import time
from transformers import AutoModelForCausalLM, AutoTokenizer

# --- Load ChatGLM3-6B for a Gradio chat UI ---------------------------------
model_id = "THUDM/chatglm3-6b"

# device_map="auto" already places the weights on the available GPU(s) via
# accelerate, and torch_dtype="auto" picks the checkpoint's native dtype
# (fp16 for chatglm3-6b).  The previous trailing `.half().cuda()` was
# therefore redundant, and calling `.cuda()` on a device-mapped model can
# break accelerate's placement.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",
    trust_remote_code=True,
    device_map="auto",
    cache_dir="D:/chatglm3-6b/",
)
model = model.eval()  # inference mode: disables dropout etc. (was duplicated)

tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    trust_remote_code=True,
    cache_dir="D:/chatglm3-6b/",
)

# Sanity check: with trust_remote_code these are the model repo's custom classes.
print(type(model))
print(type(tokenizer))
import gradio as gr
def greet2(name):
    """Answer a single message with ChatGLM3 and return the reply text.

    Uses the model's custom ``chat`` API (provided by trust_remote_code).
    ``history=[]`` resets the conversation on every call, so each message
    is answered without any prior context; the returned history is unused.
    """
    response, _history = model.chat(tokenizer, name, history=[])
    return response
def alternatingly_agree(message, history):
    """Gradio ChatInterface callback.

    ``history`` is supplied by Gradio but deliberately ignored — every
    message is answered fresh via greet2 (which itself resets history).
    """
    return greet2(message)
# Launch the chat UI; this call blocks serving requests until interrupted.
gr.ChatInterface(alternatingly_agree).launch()
# Below is the equivalent usage within LangChain.
import gradio
from transformers import AutoTokenizer, AutoModel
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
import time
from langchain.prompts import PromptTemplate
from transformers import AutoModelForCausalLM
# from ChatGLM_new import zhipu_llm
# model = zhipu_llm
# model = HuggingFacePipeline.from_model_id(
# model_id="THUDM/chatglm3-6b",
# task="text-generation",
# device=0,
# model_kwargs={"trust_remote_code":True},
# pipeline_kwargs={"max_new_tokens": 5000},
# )
# --- Load ChatGLM3-6B and wrap it as a LangChain-compatible LLM ------------
model_id = "THUDM/chatglm3-6b"

# As in the first script: device_map="auto" + torch_dtype="auto" already
# handle GPU placement and fp16 dtype, so the former `.half().cuda()` calls
# were redundant and can conflict with accelerate's device map.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",
    trust_remote_code=True,
    device_map="auto",
    cache_dir="D:/chatglm3-6b/",
)
model = model.eval()  # inference mode

tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    trust_remote_code=True,
    cache_dir="D:/chatglm3-6b/",
)

# Standard HF text-generation pipeline so LangChain's HuggingFacePipeline
# can drive the model.  NOTE(review): max_new_tokens=8000 is close to
# ChatGLM3's full 8K context — confirm this is intended for your prompts.
pipe = pipeline(
    "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=8000
)
hf = HuggingFacePipeline(pipeline=pipe)
# Pass-through prompt: the chain forwards the raw user input to the model.
# (An earlier commented-out experiment with a system message was removed.)
prompt = ChatPromptTemplate.from_template("{user_input}")
output_parser = StrOutputParser()

# LCEL pipeline: format prompt -> run the local HF pipeline -> parse to str.
chain = prompt | hf | output_parser
def greet(name):
    """Run the user's text through the LangChain chain and return the model's reply."""
    return chain.invoke({"user_input": name})
# Simple text-in/text-out Gradio UI backed by the LangChain chain.
demo = gradio.Interface(fn=greet, inputs="text", outputs="text")
# Blocks serving requests until the server is stopped.
demo.launch()