Getting Started with Agents

A hands-on walk-through of a simple agent example.

Code overview

  • Goal: build an AI Agent that can reason autonomously (think) and call tools (act), in order to answer questions that require up-to-date information (e.g., "the latest research progress on Agents").

  • Core components

    • LLM (a GPT-4-class model; gpt-4o-mini in the code below): handles reasoning and decision-making.

    • SerpAPI (search engine): used to look up external information when the LLM lacks the relevant knowledge.

    • ReAct framework: drives the Agent through the loop "think → act → observe → refine".

from dotenv import load_dotenv
load_dotenv()  # load_dotenv() loads the environment variables from the .env file (e.g., the OpenAI and SerpAPI API keys)

from langchain import hub
from langchain_openai import ChatOpenAI
from langchain_community.utilities import SerpAPIWrapper
from langchain_core.tools import Tool
from langchain.agents import create_react_agent, AgentExecutor

# Initialize prompt
prompt = hub.pull("hwchase17/react")
print(prompt)   # Pull the predefined ReAct prompt template (hwchase17/react) from the LangChain Hub; it contains the guiding text for Thought, tool calls, and the Final Answer.

# Initialize LLM - using ChatOpenAI instead of OpenAI for better compatibility
llm = ChatOpenAI(
    model="gpt-4o-mini",  # Use a known working model
    temperature=0
)

# Initialize search tool
search = SerpAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="当大模型没有相关知识时,用于搜索知识"
    ),
]

# Create agent
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Run the same query twice through the AgentExecutor to check how consistent the results are.
# verbose=True prints detailed logs of the tool calls and LLM outputs, which is handy for debugging.

# Run queries
print("第一次运行的结果:")
try:
    result1 = agent_executor.invoke({"input": "当前Agent最新研究进展是什么?"})
    print(result1)
except Exception as e:
    print(f"Error in first query: {e}")

print("第二次运行的结果:")
try:
    result2 = agent_executor.invoke({"input": "当前Agent最新研究进展是什么?"})
    print(result2)
except Exception as e:
    print(f"Error in second query: {e}")




Obtain the required API keys yourself.

.env file

OPENAI_API_KEY="sk-xxx"
OPENAI_API_BASE="https:xxx"
SERPAPI_API_KEY="1abd77afxxxxx"
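If the agent fails with an authentication error, the usual cause is that these variables were not picked up by load_dotenv(). A minimal sanity check (a sketch; the variable names match the .env above — ChatOpenAI reads OPENAI_API_KEY/OPENAI_API_BASE, and SerpAPIWrapper reads SERPAPI_API_KEY):

import os
from dotenv import load_dotenv

load_dotenv()
for key in ("OPENAI_API_KEY", "OPENAI_API_BASE", "SERPAPI_API_KEY"):
    # Only report whether each variable is present; never print the secret itself
    print(key, "set" if os.getenv(key) else "MISSING")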

Version with memory:

import sys
from dotenv import load_dotenv
load_dotenv()

from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.utilities import SerpAPIWrapper
from langchain_core.tools import Tool
from langchain.agents import create_react_agent, AgentExecutor
from langchain.memory import ConversationBufferMemory

# Initialize LLM 
llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0
)

# Enhanced search tool
search = SerpAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="Useful for searching latest AI research."
    ),
]

# Standard ReAct template (English)
react_template = """Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Conversation History:
{chat_history}

Begin!

Question: {input}
Thought:{agent_scratchpad}"""

custom_prompt = PromptTemplate.from_template(react_template)

# Create agent with memory
agent = create_react_agent(llm, tools, custom_prompt)
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True,
    input_key="input",
    output_key="output"
)

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=3,
    return_intermediate_steps=False
)

def clean_output(output):
    """Extract final answer if exists"""
    if "Final Answer:" in output:
        return output.split("Final Answer:")[-1].strip()
    return output

# First query
print("\n🔍 Query 1:")
result1 = agent_executor.invoke({"input": "What are the top 3 AI Agent research breakthroughs?"})
answer1 = clean_output(result1["output"])
print("\n💡 Answer:", answer1)

# Second query (with memory)
print("\n🔍 Query 2 (with memory):")
result2 = agent_executor.invoke({
    "input": "just list these 3 breakthroughs briefly",
    # Explicit memory injection
    "chat_history": [
        ("human", result1["input"]),
        ("ai", answer1)
    ]
})
print("\n💡 Answer:", clean_output(result2["output"]))

# Debug: Show full memory
print("\n🧠 Current Memory:")
print(memory.load_memory_variables({}))
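To see what ConversationBufferMemory does on its own, independent of the agent, here is a minimal sketch: save_context stores one input/output exchange, and load_memory_variables returns the accumulated history under the configured memory_key (as message objects, because return_messages=True):

from langchain.memory import ConversationBufferMemory

demo_memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
demo_memory.save_context({"input": "Hi"}, {"output": "Hello! How can I help?"})
# Prints {"chat_history": [HumanMessage(...), AIMessage(...)]}
print(demo_memory.load_memory_variables({}))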

Testing the chat completion API

from openai import OpenAI

client = OpenAI(
  api_key="sk-xxx28aD5",
  base_url = "https://xxxx"
)

completion = client.chat.completions.create(
  model="gpt-4o-mini",
  store=True,
  messages=[
    {"role": "user", "content": "write a haiku about ai"}
  ]
)

print(completion.choices[0].message)
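The line above prints the full ChatCompletionMessage object; if you only want the haiku text, the message's content field is enough:

print(completion.choices[0].message.content)  # just the text of the reply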

Testing the image model

from openai import OpenAI
import os
from dotenv import load_dotenv
import requests

# Load environment variables
load_dotenv()

# Initialize the client (the key is passed explicitly here; OpenAI() can also read OPENAI_API_KEY from the environment automatically)
client = OpenAI(
    api_key="sk-xxx",
    base_url="https://xxxxv1")

try:
    response = client.images.generate(
        model="dall-e-3",  # must be a DALL·E model
        prompt="E-commerce birthday-gift promotional poster",
        size="1024x1024",
        quality="standard",
        n=1,
    )

    # Safely extract the URL
    if response.data and len(response.data) > 0:
        image_url = response.data[0].url
        print("Image generated successfully, URL:", image_url)

        # Download the image
        image_data = requests.get(image_url).content
        with open("flower_poster.png", "wb") as f:
            f.write(image_data)
        print("Image saved as flower_poster.png")
    else:
        print("Error: no valid image URL received")

except Exception as e:
    print(f"An error occurred: {str(e)}")

Reference book: 《大模型应用开发动手做AI Agent》
 
