First, the reference link: https://gitee.com/os-share/tiny-universe/tree/main/content/TinyAgent
What follows is essentially a study note. You can also refer to the CSDN post 【Datawhale-tinyuniverse】TinyAgent学习.
That article gives the registration address for the Google Search API.
The full code is given below and can be copied directly.
First, the code layout: judging from the imports used later, the project consists of three files, tools.py (the tool code), LLM.py (the model wrapper), and the Agent script.
Below is the tool code (tools.py):
import json
import requests

class Tools:
    def __init__(self) -> None:
        self.toolConfig = self._tools()

    def _tools(self):
        # Tool descriptions in the function-calling format used by TinyAgent
        tools = [
            {
                'name_for_human': 'Google Search',
                'name_for_model': 'google_search',
                'description_for_model': 'Google Search is a general-purpose search engine that can be used to access the internet, look up encyclopedic knowledge, and follow current events.',
                'parameters': [
                    {
                        'name': 'search_query',
                        'description': 'The search keywords or phrase',
                        'required': True,
                        'schema': {'type': 'string'},
                    }
                ],
            }
        ]
        return tools

    def google_search(self, search_query: str):
        # Call the Serper API and return the snippet of the top organic result
        url = "https://google.serper.dev/search"
        payload = json.dumps({"q": search_query})
        headers = {
            'X-API-KEY': 'replace with your own API key',
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=payload).json()
        return response['organic'][0]['snippet']
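Before wiring the tool into the agent, it is worth sanity-checking the Serper call on its own. A minimal smoke test, assuming a valid key has been set in the headers above (the query string here is just an example):

if __name__ == '__main__':
    tools = Tools()
    # Should print a one-sentence snippet from the top search result
    print(tools.google_search("weather tomorrow in Wuhan"))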
I use the llama3.1 model pulled locally with Ollama, which I introduced in an earlier post. (Since I don't know where on my machine the model was downloaded, I reference it by model name rather than by path.) The calling code (LLM.py) follows.
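Because the model is referenced by name, you can confirm Ollama actually has it before running anything. A minimal check, assuming the ollama Python package and a running Ollama server (the exact shape of the return value varies across library versions, so this just prints it):

import ollama

# Lists the locally pulled models; 'llama3.1' should appear among them.
# If it does not, pull it first with: ollama pull llama3.1
print(ollama.list())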
from typing import List, Tuple, Dict
import ollama

PROMPT_TEMPLATE = """
You are an AI assistant.
Question: {question}
···
{context}
···
Please give your answer:
"""

class BaseModel:
    def __init__(self, path: str = '') -> None:
        self.path = path

    def chat(self, prompt: str, history: List[dict], content: str) -> Tuple[str, List[dict]]:
        pass

    def load_model(self):
        pass

class OllamaChat(BaseModel):
    def __init__(self, model: str = "llama3.1") -> None:
        super().__init__()
        self.model = model

    def _build_messages(self, prompt: str, content: str):
        # Fill the prompt template with the question and the given context
        prompt_message = PROMPT_TEMPLATE.format(question=prompt, context=content)
        messages = [
            {"role": "system", "content": "You are an AI assistant"},
            {"role": "user", "content": prompt_message}
        ]
        return messages

    def chat(self, prompt: str, history: List[Dict], content: str) -> Tuple[str, List[Dict]]:
        # Send the request to the language model
        messages = self._build_messages(prompt, content)
        response = ollama.chat(
            model=self.model,
            messages=messages,
            stream=True
        )
        # Parse the streamed chunks and assemble the final response
        final_response = ''
        for chunk in response:
            if isinstance(chunk, str):
                final_response += chunk
            elif 'content' in chunk.get('message', {}):
                final_response += chunk['message']['content']
        # Update the chat history
        updated_history = history + messages
        return final_response, updated_history
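The wrapper can be exercised on its own before adding the agent layer. A minimal sketch, assuming the llama3.1 model is available locally (the question and context strings are just examples):

from LLM import OllamaChat

model = OllamaChat("llama3.1")
# `content` fills the {context} slot of PROMPT_TEMPLATE; the history starts empty
reply, history = model.chat("What is 2 + 2?", [], "No extra context.")
print(reply)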
Below is the Agent code:
from typing import Dict, List
import json5
from LLM import OllamaChat
from tools import Tools

class Agent:
    def __init__(self, model_name: str = "llama3.1"):
        self.model = OllamaChat(model_name)
        self.tools = Tools()
        self.history = []

    def build_system_input(self):
        # Assemble the tool descriptions and tool names for the system prompt
        tool_descs = []
        tool_names = []
        for tool in self.tools.toolConfig:
            desc = f"{tool['name_for_human']}: {tool['description_for_model']}"
            tool_descs.append(desc)
            tool_names.append(tool['name_for_model'])
        return "\n".join(tool_descs), ",".join(tool_names)

    def parse_latest_plugin_call(self, text: str) -> Dict[str, str]:
        # Extract the last Action / Action Input pair from the model output
        plugin_name = ''
        plugin_args = ''
        i = text.rfind('\nAction:')
        j = text.rfind('\nAction Input:')
        if 0 <= i < j:
            plugin_name = text[i + len('\nAction:'): j].strip()
            plugin_args = text[j + len('\nAction Input:'):].strip()
        return {'plugin_name': plugin_name, 'plugin_args': plugin_args}

    def call_plugin(self, plugin_name: str, plugin_args: str):
        # Dispatch to the matching tool; plugin_args is a JSON string
        if plugin_name == 'google_search':
            search_query = json5.loads(plugin_args)
            return self.tools.google_search(search_query['search_query'])
        return ''

    def text_completion(self, text: str) -> str:
        system_input, tool_names = self.build_system_input()
        full_prompt = f"Question: {text}\nTool Names: {tool_names}\n{system_input}"
        content = "You are an AI assistant designed to answer questions based on the given context and tools."
        response, self.history = self.model.chat(full_prompt, self.history, content)
        # While the model keeps emitting Action calls, run the tool and feed back the observation
        while '\nAction:' in response:
            plugin_call = self.parse_latest_plugin_call(response)
            observation = self.call_plugin(plugin_call['plugin_name'], plugin_call['plugin_args'])
            response += observation
            full_prompt = f"Observation: {observation}\nFinal Answer:"
            content = "Please provide the final answer based on the observation."
            response, self.history = self.model.chat(full_prompt, self.history, content)
        return response

if __name__ == '__main__':
    agent = Agent()
    query = "What is the weather like tomorrow in Wuhan?"
    answer = agent.text_completion(query)
    print(answer)
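The loop in text_completion hinges on parse_latest_plugin_call recognizing the Action / Action Input markers in the model output. A small illustration of the expected format (the sample text below is hypothetical, not actual model output):

sample = (
    "Thought: I need to look this up.\n"
    "Action: google_search\n"
    "Action Input: {\"search_query\": \"Wuhan weather tomorrow\"}"
)
agent = Agent()
print(agent.parse_latest_plugin_call(sample))
# -> {'plugin_name': 'google_search', 'plugin_args': '{"search_query": "Wuhan weather tomorrow"}'}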
Here is a sample of the output. Asking in English yields an English reply; I checked tomorrow's actual weather and the answer was fairly close.
Asking in Chinese, the returned data was noticeably less accurate.
Finally, a verification: when I queried llama3.1 directly in the terminal, the model alone did not give an answer, which further confirms that the tool call succeeded.
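For reference, that direct query can also be reproduced from Python instead of the terminal. A minimal sketch, assuming the ollama package and a local llama3.1 (without the search tool, the model has no way to know tomorrow's weather):

import ollama

response = ollama.chat(
    model="llama3.1",
    messages=[{"role": "user", "content": "What is the weather like tomorrow in Wuhan?"}]
)
# Without search access the model can only say it doesn't know, or guess
print(response['message']['content'])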