Using ChatGLM4

1. Token

GLM-4 is closed-source, so let's try the API instead. First log in to the Zhipu open platform; registering gives you 100,000 free tokens. Go to the API keys page to grab your own key, then install their library into your environment: pip install zhipuai.
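To avoid hard-coding the key, you can read it from an environment variable (a minimal sketch; the variable name ZHIPUAI_API_KEY is my own convention, not mandated by the SDK). The snippets below refer to this zhipuai_api_key variable:

import os
zhipuai_api_key = os.environ["ZHIPUAI_API_KEY"]  # set this in your shell beforehand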

2. Usage

from zhipuai import ZhipuAI

client = ZhipuAI(api_key=zhipuai_api_key)  # your API key
response = client.chat.completions.create(
    model="glm-4",  # name of the model to call
    messages=[
        {"role": "assistant", "content": "I am the AI assistant GLM-4"},
        {"role": "user", "content": "Write me a beautiful poem"},
    ],
)
print(response.choices[0].message.content)
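The response object also carries token accounting, assuming the SDK follows the OpenAI-style response schema it mirrors:

print(response.usage)  # prompt/completion token counts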

For streaming output, pass stream=True to create() (it is a top-level parameter of the call, not part of messages) and print the chunks in a loop.

response = client.chat.completions.create(
    model="glm-4",  # name of the model to call
    messages=[
        {"role": "assistant", "content": "I am the AI assistant GLM-4"},
        {"role": "user", "content": "Write me a beautiful poem"},
    ],
    stream=True,
)
for chunk in response:
    print(chunk.choices[0].delta.content, end="")
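Depending on the SDK version, the final chunk's delta.content can be None; a defensive variant (my addition, not from the original post):

for chunk in response:
    print(chunk.choices[0].delta.content or "", end="")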

3. Integrating with the LangChain framework

1. LLM

First, wrap the LLM:

from zhipuai import ZhipuAI
import json, os, yaml
from langchain.llms.base import LLM
from typing import List, Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
def tool_config_from_file(tool_name, directory="Tool/"):
    """search tool yaml and return json format"""
    for filename in os.listdir(directory):
        if filename.endswith('.yaml') and tool_name in filename:
            file_path = os.path.join(directory, filename)
            with open(file_path, encoding='utf-8') as f:
                return yaml.safe_load(f)
    return None
class ChatGLM4(LLM):
    max_token: int = 8192
    do_sample: bool = True
    temperature: float = 0.8
    top_p: float = 0.8  # annotate the type so pydantic treats this as a model field
    tokenizer: object = None
    model: object = None
    history: List = []
    tool_names: List = []
    has_search: bool = False
    client: object = None

    def __init__(self):
        super().__init__()
        self.client = ZhipuAI(api_key=zhipuai_api_key)

    @property
    def _llm_type(self) -> str:
        return "ChatGLM4"
    
    def stream(self, prompt: str, history: Optional[List] = None):
        # default to None rather than a mutable [] so the None check below actually runs
        if history is None:
            history = []

        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="glm-4",  # 填写需要调用的模型名称
            messages=history,
            stream=True,
        )
        for chunk in response:
            yield chunk.choices[0].delta.content
        
    def _tool_history(self, prompt: str):
        ans = []
        tool_prompts = prompt.split(
            "You have access to the following tools:\n\n")[1].split("\n\nUse a json blob")[0].split("\n")

        tool_names = [tool.split(":")[0] for tool in tool_prompts]
        self.tool_names = tool_names
        tools_json = []
        for i, tool in enumerate(tool_names):
            tool_config = tool_config_from_file(tool)
            if tool_config:
                tools_json.append(tool_config)
            else:
                raise ValueError(
                    f"Tool {tool} config not found! Its description is {tool_prompts[i]}"
                )

        ans.append({
            "role": "system",
            "content": "Answer the following questions as best as you can. You have access to the following tools:",
            "tools": tools_json
        })
        query = f"""{prompt.split("Human: ")[-1].strip()}"""
        return ans, query

    def _extract_observation(self, prompt: str):
        return_json = prompt.split("Observation: ")[-1].split("\nThought:")[0]
        self.history.append({
            "role": "observation",
            "content": return_json
        })
        return

    def _extract_tool(self):
        if len(self.history[-1]["metadata"]) > 0:
            metadata = self.history[-1]["metadata"]
            content = self.history[-1]["content"]
            if "tool_call" in content:
                for tool in self.tool_names:
                    if tool in metadata:
                        input_para = content.split("='")[-1].split("'")[0]
                        action_json = {
                            "action": tool,
                            "action_input": input_para
                        }
                        self.has_search = True
                        return f"""
Action: 
\```
{json.dumps(action_json, ensure_ascii=False)}
```"""
        final_answer_json = {
            "action": "Final Answer",
            "action_input": self.history[-1]["content"]
        }
        self.has_search = False
        return f"""
Action: 
\```
{json.dumps(final_answer_json, ensure_ascii=False)}
```"""

    def _call(self, prompt: str, history: Optional[List] = None, stop: Optional[List[str]] = ["<|user|>"]):
        if history is None:
            history = []

        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="glm-4",  # name of the model to call
            messages=history,
        )
        result = response.choices[0].message.content
        print(result)
        return result
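The _tool_history method expects one YAML file per tool under Tool/, and tool_config_from_file returns the parsed result as a dict. For illustration, a hypothetical Calculator tool could parse to something like this (the name and fields are my own example, loosely modeled on the ChatGLM tool-registration format, not taken from the official docs):

# hypothetical result of yaml.safe_load on Tool/Calculator.yaml
{
    "name": "Calculator",
    "description": "Evaluate a basic arithmetic expression",
    "parameters": {
        "type": "object",
        "properties": {
            "expression": {"type": "string", "description": "expression to evaluate"}
        },
        "required": ["expression"],
    },
}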

Then instantiate and use it:

llm = ChatGLM4()
template = """{question}"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "Give me an introduction to Beijing"
llm_chain.invoke(question)
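The wrapper's stream method can also be consumed directly (a quick sketch):

for token in llm.stream("Write me a short poem"):
    print(token, end="")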

2. Knowledge base

1. Knowledge base from a text file

This follows the FAISS example in the LangChain docs.

from langchain.text_splitter import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
query = "Where does Xiaoming live now?"
llm = ChatGLM4()
embedding_path = "C:/Users/dz/Desktop/bge-large-zh-v1.5"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)
loader = TextLoader("C:/Users/dz/Desktop/qu.txt", encoding="UTF-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)  # tiny chunks, fine for this toy file
docs = text_splitter.split_documents(documents)
vectorstore = FAISS.from_documents(docs, embeddings)
docs = vectorstore.similarity_search(query)  # find the best-matching chunks
context = docs[0].page_content  # text of the top chunk (not an actual retriever object)
print(context)
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()
print(vectorstore.similarity_search_with_score(query))
chain = prompt | llm | output_parser
chain.invoke({"context": context, "question": query})

2. Knowledge base from in-memory strings

vectorstore = FAISS.from_texts(
    ["Xiaoming lives in Los Angeles", "Xiaohuang lives in London", "Xiaolan lives in New York"],
    embedding=embeddings,
)
retriever = vectorstore.as_retriever()
print(retriever)
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
print(vectorstore.similarity_search_with_score(query))  # query is reused from the previous section
chain = setup_and_retrieval | prompt | llm | output_parser
chain.invoke(query)
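As written, the retriever feeds raw Document objects into the prompt. If you prefer plain text, you can pipe it through a small formatting function (my addition, not part of the original post; LCEL coerces the function into a runnable):

def format_docs(docs):
    # join the retrieved documents' text into a single context string
    return "\n\n".join(doc.page_content for doc in docs)

chain = (
    RunnableParallel({"context": retriever | format_docs, "question": RunnablePassthrough()})
    | prompt
    | llm
    | output_parser
)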

3. Defining and using a custom tool

1. Running a local Python script via the shell

Last year this worked when I tested it with GPT, but I never got it working with ChatGLM, so I ended up rewriting the agent inside LangChain (see my earlier post). Now that GLM-4 is compatible with LangChain, it is much easier to use. I wrote a local Python script that opens the camera:

# coding=utf-8
import cv2

cap = cv2.VideoCapture(0)  # open the default camera
success = cap.isOpened()
while success:
    ret, frame = cap.read()
    # note: cv2.imshow expects BGR, so the RGB-converted frame will display with swapped colors
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    cv2.imshow("window", frame_rgb)
    if cv2.waitKey(10) & 0xFF == ord("q"):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()
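The script assumes a working webcam and that opencv-python is installed in the environment (pip install opencv-python).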

Then define a custom tool that invokes the shell, and hand it to an agent.

from langchain.tools import BaseTool
from langchain.agents import initialize_agent
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.tools import ShellTool

shell_tool = ShellTool()
class Cam(BaseTool):
    name: str = "camera"
    description: str = "Use this tool to open my camera."

    def _run(self, expr: str):
        # run the local script inside the langchain conda env (Windows cmd syntax)
        return shell_tool.run({"commands": "conda activate langchain & python C:/Users/dz/Desktop/camera.py"})

    def _arun(self, query: str):
        raise NotImplementedError("Async operation not supported yet")

tools = [Cam()]
agent = initialize_agent(
    agent='chat-conversational-react-description',
    tools=tools,
    llm=llm,
    verbose=True,
    max_iterations=3,
    early_stopping_method='generate',
    memory=ConversationBufferWindowMemory(
        memory_key='chat_history',
        k=5,
        return_messages=True
    )
)
agent(f"打开我的相机?")

The test succeeds.
