下载Huggingface的model到本地的cache_dir

本文介绍了如何在 Python 中使用 HuggingFace Transformers 库创建一个基于预训练模型 THUDM/chatglm3-6b 的聊天机器人,通过 Gradio 接口与用户交互,进行文本生成任务。
摘要由CSDN通过智能技术生成
from transformers import AutoTokenizer, AutoModel,  GPTQConfig
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
# Download ChatGLM3-6B from the Hugging Face Hub into a local cache_dir and
# prepare model + tokenizer for inference.
model_id = "THUDM/chatglm3-6b"

# NOTE(review): the original chained `.half().cuda()` after from_pretrained.
# That is redundant (`torch_dtype="auto"` already loads the checkpoint's fp16
# weights) and conflicts with `device_map="auto"`, where accelerate owns the
# device placement — manually moving a dispatched model can raise at runtime.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",        # use the dtype stored in the checkpoint (fp16)
    trust_remote_code=True,    # ChatGLM3 ships its own modeling code
    device_map="auto",         # let accelerate place weights on the GPU
    cache_dir="D:/chatglm3-6b/",
)

# Inference only — disable dropout etc.
# (the original called `model = model.eval()` twice; once is enough)
model = model.eval()

tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    trust_remote_code=True,
    cache_dir="D:/chatglm3-6b/",
)

# Sanity check: show the concrete (remote-code) classes that got loaded.
print(type(model))

print(type(tokenizer))

import gradio as gr
def greet2(name):
    """Run one stateless chat turn with ChatGLM3 and return the model's reply."""
    reply, _history = model.chat(tokenizer, name, history=[])
    return reply

def alternatingly_agree(message, history):
    """Gradio chat callback: ignore the UI-side history and delegate to greet2."""
    reply = greet2(message)
    return reply


# Serve a minimal chat UI backed by the callback above.
gr.ChatInterface(alternatingly_agree).launch()



以下是在 LangChain 中的使用。

import gradio

from transformers import AutoTokenizer, AutoModel
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
import time
from langchain.prompts import PromptTemplate

from transformers import AutoModelForCausalLM



# from ChatGLM_new import zhipu_llm
# model  = zhipu_llm 


# model = HuggingFacePipeline.from_model_id(
#     model_id="THUDM/chatglm3-6b",
#     task="text-generation",
#     device=0,
#     model_kwargs={"trust_remote_code":True},
#     pipeline_kwargs={"max_new_tokens": 5000},
# )



# Load ChatGLM3-6B again, this time to expose it to LangChain through a
# Hugging Face text-generation pipeline.
model_id = "THUDM/chatglm3-6b"

# NOTE(review): the original chained `.half().cuda()` here; it was dropped
# because `torch_dtype="auto"` already yields the checkpoint's fp16 weights
# and `device_map="auto"` already places them — manually moving an
# accelerate-dispatched model can raise at runtime.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",        # checkpoint dtype (fp16)
    trust_remote_code=True,    # ChatGLM3 ships its own modeling code
    device_map="auto",         # accelerate handles GPU placement
    cache_dir="D:/chatglm3-6b/",
)

model = model.eval()  # inference mode
tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    trust_remote_code=True,
    cache_dir="D:/chatglm3-6b/",
)

# Wrap model + tokenizer in an HF pipeline so LangChain can drive it.
pipe = pipeline(
    "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=8000
)
hf = HuggingFacePipeline(pipeline=pipe)


# Pass-through prompt: the user's text becomes the prompt verbatim.
prompt = ChatPromptTemplate.from_template("{user_input}")


# Alternative variants kept from the original article (not active):
# prompt_template = PromptTemplate.from_template(template, **kwargs)
# message = HumanMessagePromptTemplate(prompt=prompt_template)
#         return cls.from_messages([message])
#
# prompt = ChatPromptTemplate.from_messages([
#                 ("system", "记住:对所有问题你只回答下面的4个字:我不知道,"),
#                 # ("human", "Hello, how are you doing?"),
#                 # ("ai", "I'm doing well, thanks!"),
#                 ("human", "{user_input}"),
#             ])


# LCEL chain: prompt -> HuggingFace pipeline -> plain string.
output_parser = StrOutputParser()
chain = prompt | hf | output_parser


def greet(name):
    """Send the user's text through the chain and return the generated reply."""
    return chain.invoke({"user_input": name})


# Simple single-textbox Gradio UI wired to the chain.
demo = gradio.Interface(fn=greet, inputs="text", outputs="text")
demo.launch() 
  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值