『MCP』First Hands-On Experience
Introduction
- MCP is essentially a unified interface protocol for Function Calling. There are plenty of introductions online, so I won't repeat them here; instead, this post records my first hands-on experience with MCP, which should help newcomers get started.
Install VS Code and an MCP Client
- Install VS Code yourself; there are plenty of guides online.
- Install the Cline extension.
Configure the LLM
- This walkthrough mainly uses OpenRouter.ai: https://openrouter.ai/
- Create an API key.
- For example, create a key named Default with a usage limit of 1 USD. Save the key somewhere safe: once you leave the page it cannot be viewed again, and you would have to create a new one.
- Configure the newly created key in Cline. I am using one of the free models here; other models are billed per use.
- Then try asking a question; if you get a normal reply, the setup works. (A quick way to sanity-check the key outside the editor is sketched below.)
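- A minimal sketch for verifying the key from Python, assuming OpenRouter's OpenAI-compatible chat-completions endpoint; the model id below is a placeholder, substitute the free model you actually selected in Cline:
import os

import httpx

OPENROUTER_API_KEY = os.environ["OPENROUTER_API_KEY"]  # the key created above
MODEL_ID = "your-free-model-id"                        # placeholder model id

resp = httpx.post(
    "https://openrouter.ai/api/v1/chat/completions",
    headers={"Authorization": f"Bearer {OPENROUTER_API_KEY}"},
    json={
        "model": MODEL_ID,
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    },
    timeout=30.0,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])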
Configure MCP
- The Model Context Protocol (MCP) supports two main transport mechanisms for communication between Cline and an MCP server: standard input/output (STDIO) and Server-Sent Events (SSE).
- This walkthrough uses the STDIO transport; the sketch after the environment setup below shows how the transport choice appears in code.
- Set up the environment:
# uv is an alternative package/project manager to pip
pip install uv
# initialize a new project
uv init
# add the MCP SDK (with its CLI extras) and httpx; httpx is used to call external HTTP services
uv add "mcp[cli]" httpx
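- For reference, the transport is just an argument to run() on the FastMCP server object; a minimal sketch, assuming the mcp Python SDK's FastMCP API used throughout this post:
# minimal_server.py - a bare-bones server, only to illustrate the transport choice
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("minimal")

@mcp.tool()
def ping() -> str:
    """Return a fixed string so the client has something to call."""
    return "pong"

if __name__ == "__main__":
    # STDIO: the client launches this script and talks over its stdin/stdout (used in this post).
    mcp.run(transport="stdio")
    # The SSE alternative would be mcp.run(transport="sse"), which serves over HTTP instead.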
- First, write a weather.py that exposes two tools:
- get_alerts(): fetch active weather alerts for a given US state
- get_forecast(): fetch the forecast for a given latitude/longitude
from typing import Any

import httpx
from mcp.server.fastmcp import FastMCP

# Initialize the FastMCP server object
mcp = FastMCP("weather", log_level="ERROR")

# Constants
NWS_API_BASE = "https://api.weather.gov"  # US National Weather Service API
USER_AGENT = "weather-app/1.0"            # identifies this client when requesting data


# Helper that fetches weather data; it takes the request URL as its only argument
# and uses httpx to return the parsed JSON response.
async def make_nws_request(url: str) -> dict[str, Any] | None:
    """Make a request to the NWS API with proper error handling."""
    headers = {
        "User-Agent": USER_AGENT,
        "Accept": "application/geo+json"
    }
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url, headers=headers, timeout=30.0)
            response.raise_for_status()
            return response.json()
        except Exception:
            return None


# Format a single alert into readable text
def format_alert(feature: dict) -> str:
    """Format an alert feature into a readable string."""
    props = feature["properties"]
    return f"""
Event: {props.get('event', 'Unknown')}
Area: {props.get('areaDesc', 'Unknown')}
Severity: {props.get('severity', 'Unknown')}
Description: {props.get('description', 'No description available')}
Instructions: {props.get('instruction', 'No specific instructions provided')}
"""


# First tool: get weather alerts for a US state.
# The parameter is the two-letter state code.
@mcp.tool()
async def get_alerts(state: str) -> str:
    """Get weather alerts for a US state.

    Args:
        state: Two-letter US state code (e.g. CA, NY)
    """
    url = f"{NWS_API_BASE}/alerts/active/area/{state}"
    data = await make_nws_request(url)      # call the helper above
    if not data or "features" not in data:  # request failed or response malformed
        return "Unable to fetch alerts or no alerts found."

    if not data["features"]:                # valid response, but no active alerts for this area
        return "No active alerts for this state."

    alerts = [format_alert(feature) for feature in data["features"]]
    return "\n---\n".join(alerts)


# Second tool: the parameters are latitude and longitude.
# The decorator registers the function as a tool; its name, parameters,
# and docstring are extracted and exposed to the model.
@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
    """Get weather forecast for a location.

    Args:
        latitude: Latitude of the location
        longitude: Longitude of the location
    """
    # First get the forecast grid endpoint (the responsible forecast office)
    points_url = f"{NWS_API_BASE}/points/{latitude},{longitude}"
    points_data = await make_nws_request(points_url)

    if not points_data:
        return "Unable to fetch forecast data for this location."

    # Get the forecast URL from the points response
    forecast_url = points_data["properties"]["forecast"]
    forecast_data = await make_nws_request(forecast_url)  # this fetches the actual forecast

    if not forecast_data:
        return "Unable to fetch detailed forecast."

    # Format the periods into a readable forecast
    periods = forecast_data["properties"]["periods"]
    forecasts = []
    for period in periods[:5]:  # Only show the next 5 periods
        forecast = f"""
{period['name']}:
Temperature: {period['temperature']}°{period['temperatureUnit']}
Wind: {period['windSpeed']} {period['windDirection']}
Forecast: {period['detailedForecast']}
"""
        forecasts.append(forecast)

    return "\n---\n".join(forecasts)


if __name__ == "__main__":
    # Run the server; transport='stdio' means the MCP server talks to Cline
    # over its own standard input and output.
    mcp.run(transport='stdio')
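- Before wiring the server into Cline, the two tools can be sanity-checked directly; a quick sketch, assuming @mcp.tool() returns the original function unchanged so they remain ordinary async functions (the coordinates are just an example, roughly New York City). Run it with uv run inside the project directory:
# quick_test.py - call the tools directly, bypassing the MCP transport entirely
import asyncio

from weather import get_alerts, get_forecast

async def main():
    print(await get_alerts("NY"))             # active alerts for New York state
    print(await get_forecast(40.71, -74.01))  # forecast near New York City

asyncio.run(main())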
- Configure the STDIO server in Cline's MCP settings:
{
  "mcpServers": {
    "weather": {
      "disabled": false,
      "timeout": 60,
      "command": "D:/DevelopEnv/Python/Scripts/uv.exe",
      "args": [
        "--directory",
        "D:/DevelopProject/PythonProject/weather",
        "run",
        "weather.py"
      ],
      "transportType": "stdio"
    }
  }
}
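- A malformed settings file (for example a stray trailing comma) is a common reason a server never shows up in Cline. A quick way to check it from Python; the file name below is an assumption, point it at wherever your Cline installation stores its MCP settings:
# check_settings.py - fail loudly if the MCP settings file is not valid JSON
import json

SETTINGS_PATH = "cline_mcp_settings.json"  # hypothetical path; adjust to your setup

with open(SETTINGS_PATH, encoding="utf-8") as f:
    settings = json.load(f)  # raises json.JSONDecodeError on syntax errors

print("Configured MCP servers:", list(settings.get("mcpServers", {})))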
- Then try asking it: "What will the weather be like in New York tomorrow?" You should see Cline call the tools from your weather.py.
Observing MCP Calls
- If you want to see exactly what flows between Cline and the MCP server, wrap the server with the script below: it launches the real server as a subprocess and logs everything passing through its stdin/stdout.
#!/usr/bin/env python3
import sys
import subprocess
import threading
import argparse
import os

# --- Configuration ---
LOG_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mcp_io.log")
# --- End Configuration ---

# --- Argument Parsing ---
parser = argparse.ArgumentParser(
    description="Wrap a command, passing STDIN/STDOUT verbatim while logging them.",
    usage="%(prog)s <command> [args...]"
)
# Capture the command and all subsequent arguments
parser.add_argument('command', nargs=argparse.REMAINDER,
                    help='The command and its arguments to execute.')

# Truncate any previous log so each run starts with a fresh file
open(LOG_FILE, 'w', encoding='utf-8').close()

if len(sys.argv) == 1:
    parser.print_help(sys.stderr)
    sys.exit(1)

args = parser.parse_args()

if not args.command:
    print("Error: No command provided.", file=sys.stderr)
    parser.print_help(sys.stderr)
    sys.exit(1)

target_command = args.command
# --- End Argument Parsing ---
# --- I/O Forwarding Functions ---
# These will run in separate threads

def forward_and_log_stdin(proxy_stdin, target_stdin, log_file):
    """Reads from proxy's stdin, logs it, writes to target's stdin."""
    try:
        while True:
            # Read line by line from the script's actual stdin
            line_bytes = proxy_stdin.readline()
            if not line_bytes:  # EOF reached
                break
            # Decode for logging (assuming UTF-8, adjust if needed)
            try:
                line_str = line_bytes.decode('utf-8')
            except UnicodeDecodeError:
                line_str = f"[Non-UTF8 data, {len(line_bytes)} bytes]\n"  # Log representation
            # Log with prefix
            log_file.write(f"STDIN: {line_str}")
            log_file.flush()  # Ensure log is written promptly
            # Write the original bytes to the target process's stdin
            target_stdin.write(line_bytes)
            target_stdin.flush()  # Ensure target receives it promptly
    except Exception as e:
        # Log errors happening during forwarding
        try:
            log_file.write(f"!!! STDIN Forwarding Error: {e}\n")
            log_file.flush()
        except: pass  # Avoid errors trying to log errors if log file is broken
    finally:
        # Important: Close the target's stdin when proxy's stdin closes.
        # This signals EOF to the target process.
        try:
            target_stdin.close()
            log_file.write("--- STDIN stream closed to target ---\n")
            log_file.flush()
        except Exception as e:
            try:
                log_file.write(f"!!! Error closing target STDIN: {e}\n")
                log_file.flush()
            except: pass


def forward_and_log_stdout(target_stdout, proxy_stdout, log_file):
    """Reads from target's stdout, logs it, writes to proxy's stdout."""
    try:
        while True:
            # Read line by line from the target process's stdout
            line_bytes = target_stdout.readline()
            if not line_bytes:  # EOF reached (process exited or closed stdout)
                break
            # Decode for logging
            try:
                line_str = line_bytes.decode('utf-8')
            except UnicodeDecodeError:
                line_str = f"[Non-UTF8 data, {len(line_bytes)} bytes]\n"
            # Log with prefix
            log_file.write(f"STDOUT: {line_str}")
            log_file.flush()
            # Write the original bytes to the script's actual stdout
            proxy_stdout.write(line_bytes)
            proxy_stdout.flush()  # Ensure output is seen promptly
    except Exception as e:
        try:
            log_file.write(f"!!! STDOUT Forwarding Error: {e}\n")
            log_file.flush()
        except: pass
    finally:
        try:
            log_file.flush()
        except: pass
        # Don't close proxy_stdout (sys.stdout) here
# --- Main Execution ---
process = None
log_f = None
exit_code = 1  # Default exit code in case of early failure

try:
    # Open log file in append mode ('a') for the threads
    log_f = open(LOG_FILE, 'a', encoding='utf-8')
    log_f.write(f"input target_command is {target_command}\n")

    # Start the target process with pipes for stdin/stdout/stderr.
    # We work with bytes (bufsize=0 for unbuffered binary I/O; readline() still works).
    process = subprocess.Popen(
        target_command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,  # Capture stderr too, good practice
        bufsize=0  # Use 0 for unbuffered binary I/O
    )

    # Pass binary streams to threads
    stdin_thread = threading.Thread(
        target=forward_and_log_stdin,
        args=(sys.stdin.buffer, process.stdin, log_f),
        daemon=True  # Allows main thread to exit even if this is stuck (e.g., waiting on stdin)
    )
    stdout_thread = threading.Thread(
        target=forward_and_log_stdout,
        args=(process.stdout, sys.stdout.buffer, log_f),
        daemon=True
    )

    # stderr gets its own forwarder so its lines are logged with a
    # "STDERR:" prefix instead of being mixed in with STDOUT.
    def forward_and_log_stderr(target_stderr, proxy_stderr, log_file):
        """Reads from target's stderr, logs it, writes to proxy's stderr."""
        try:
            while True:
                line_bytes = target_stderr.readline()
                if not line_bytes:
                    break
                try:
                    line_str = line_bytes.decode('utf-8')
                except UnicodeDecodeError:
                    line_str = f"[Non-UTF8 data, {len(line_bytes)} bytes]\n"
                log_file.write(f"STDERR: {line_str}")  # Use STDERR prefix
                log_file.flush()
                proxy_stderr.write(line_bytes)
                proxy_stderr.flush()
        except Exception as e:
            try:
                log_file.write(f"!!! STDERR Forwarding Error: {e}\n")
                log_file.flush()
            except: pass
        finally:
            try:
                log_file.flush()
            except: pass

    stderr_thread = threading.Thread(
        target=forward_and_log_stderr,
        args=(process.stderr, sys.stderr.buffer, log_f),
        daemon=True
    )

    # Start the forwarding threads
    stdin_thread.start()
    stdout_thread.start()
    stderr_thread.start()

    # Wait for the target process to complete
    process.wait()
    exit_code = process.returncode

    # Join the I/O threads so the last messages get flushed to the log.
    # process.wait() ensures the target is dead, so the pipes close and the
    # readline() loops terminate; the timeouts guard against a hung thread.
    stdin_thread.join(timeout=1.0)
    stdout_thread.join(timeout=1.0)
    stderr_thread.join(timeout=1.0)

except Exception as e:
    print(f"MCP Logger Error: {e}", file=sys.stderr)
    # Try to log the error too
    if log_f and not log_f.closed:
        try:
            log_f.write(f"!!! MCP Logger Main Error: {e}\n")
            log_f.flush()
        except: pass  # Ignore errors during final logging attempt
    exit_code = 1  # Indicate logger failure

finally:
    # Ensure the process is terminated if it's still running (e.g., if the logger crashed)
    if process and process.poll() is None:
        try:
            process.terminate()
            process.wait(timeout=1.0)  # Give it a moment to terminate
        except: pass  # Ignore errors during cleanup
        if process.poll() is None:  # Still running?
            try: process.kill()  # Force kill
            except: pass  # Ignore kill errors
    # Close the log file
    if log_f and not log_f.closed:
        try:
            log_f.close()
        except: pass

# Exit with the target process's exit code
sys.exit(exit_code)
- At the same time, update the MCP configuration: the command now invokes python.exe, which runs mcp_logger.py, and the logger in turn launches the original uv ... run weather.py server as its subprocess:
{
  "mcpServers": {
    "weather": {
      "disabled": false,
      "timeout": 60,
      "command": "D:/DevelopEnv/Python/python.exe",
      "args": [
        "D:/DevelopProject/PythonProject/weather/mcp_logger.py",
        "D:/DevelopEnv/Python/Scripts/uv.exe",
        "--directory",
        "D:/DevelopProject/PythonProject/weather",
        "run",
        "weather.py"
      ],
      "transportType": "stdio"
    }
  }
}
- The next time you ask the LLM a question, the whole call flow is recorded in mcp_io.log; a small helper for reading the log is sketched below.
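- Each logged line is a raw JSON-RPC message prefixed with STDIN:, STDOUT:, or STDERR:. A minimal sketch for printing the captured traffic in a more readable form; it only assumes the prefixes and log path used by mcp_logger.py above:
# pretty_log.py - pretty-print the JSON-RPC traffic captured in mcp_io.log
import json

with open("mcp_io.log", encoding="utf-8") as f:
    for line in f:
        prefix, _, payload = line.partition(": ")
        if prefix not in ("STDIN", "STDOUT"):
            continue  # skip stderr and the logger's own status lines
        try:
            message = json.loads(payload)
        except json.JSONDecodeError:
            continue  # not a complete JSON-RPC line
        direction = "client -> server" if prefix == "STDIN" else "server -> client"
        print(direction)
        print(json.dumps(message, indent=2, ensure_ascii=False))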