MCP之二_服务器与客户端实现
1 MCP Server
1.1 准备
pip install mcp
1.1.1 提供本地服务
# server.py
"""Minimal MCP server demo: one tool (add) and one dynamic resource (greeting)."""
from mcp.server.fastmcp import FastMCP

# Create an MCP server
mcp = FastMCP("Demo")


# Add an addition tool
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two numbers"""
    return a + b


# Add a dynamic greeting resource
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Get a personalized greeting"""
    return f"Hello, {name}!"


if __name__ == "__main__":
    # Start the server
    mcp.run()  # local (stdio transport)
    # mcp.run('sse')  # remote service (HTTP/SSE transport)
运行,或者直接通过 client.py 调用
python server.py
官网上展示了如何利用 FastMCP 提供的多种数据和工具。
1.1.2 提供远程服务
以 SSE 方式提供服务,当前的 Python SDK 已实现启动 HTTP 服务的功能,只需在运行时指定相应参数即可,详情见上例的最后一行:
mcp.run('sse')
访问时请加 sse,形如:
python client.py http://ip:port/sse
服务默认被启动在 8000 端口的 /sse 路径上,也可以手动在 Settings 中设置端口和路径,详见 API 的 server.py。
2 MCP Client
使用 Cursor 和 Claude 桌面版是常见选择,本文将介绍如何在自定义应用中使用 MCP。
注意事项:
- 运行前,请确保设置环境变量:OPENAI_API_KEY 和 OPENAI_BASE_URL。
- 本文仅为原理演示示范,不建议直接应用于生产环境!!!
# client.py
"""Minimal MCP client demo driven by an OpenAI chat model.

Connects to an MCP server (SSE URL, local script, or a default stdio
server), lists its tools, and runs an interactive loop where the LLM
may call those tools.

NOTE(review): demo code only — not hardened for production use.
Requires env vars OPENAI_API_KEY and OPENAI_BASE_URL.
"""
import os
import asyncio
from typing import Optional
from contextlib import AsyncExitStack
import json
import sys
from urllib.parse import urlparse

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.client.sse import sse_client
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()


class MCPClient:
    def __init__(self):
        # MCP session is created lazily in connect_to_server().
        self.session: Optional[ClientSession] = None
        # AsyncExitStack owns every async context (transport + session)
        # so cleanup() can close them all in one aclose() call.
        self.exit_stack = AsyncExitStack()
        self.openai = OpenAI(
            api_key=os.getenv("OPENAI_API_KEY"),
            base_url=os.getenv("OPENAI_BASE_URL"),
        )
        self.model = "gpt-4o"

    def get_response(self, messages: list, tools: list):
        """Send one chat-completion request with the given messages and tool schemas."""
        response = self.openai.chat.completions.create(
            model=self.model,
            max_tokens=1000,
            messages=messages,
            tools=tools,
        )
        return response

    async def get_tools(self):
        """List the server's tools, converted to OpenAI function-tool format."""
        response = await self.session.list_tools()
        available_tools = [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.inputSchema,
                },
            }
            for tool in response.tools
        ]
        return available_tools

    async def connect_to_server(self, server_path: str = None):
        """Connect to an MCP server.

        Args:
            server_path: one of three forms:
                1. HTTP(S) URL   - connect with the SSE client
                2. script path   - a .py or .js server script
                3. None          - launch the default configured server
        """
        try:
            if server_path and urlparse(server_path).scheme in ("http", "https"):
                # Remote server: SSE transport over HTTP.
                print(f"正在连接到 SSE 服务器: {server_path}")
                sse_transport = await self.exit_stack.enter_async_context(
                    sse_client(server_path)
                )
                self.stdio, self.write = sse_transport
            else:
                if server_path:
                    # Local server script: spawn it as a stdio subprocess.
                    is_python = server_path.endswith(".py")
                    is_js = server_path.endswith(".js")
                    if not (is_python or is_js):
                        raise ValueError("服务器脚本必须是 .py 或 .js 文件")
                    command = "python" if is_python else "node"
                    print(f"正在启动服务器: {command} {server_path}")
                    server_params = StdioServerParameters(
                        command=command, args=[server_path], env=None
                    )
                else:
                    # No path given: fall back to a hard-coded default server.
                    print("正在启动默认 MCP 服务器…")
                    server_params = StdioServerParameters(
                        # Alternative launch configurations kept for reference:
                        # command="npx",
                        # args=["/exports/git/tavily-mcp/build/index.js"],
                        # env={"TAVILY_API_KEY": "tvly-dev-xxx"}
                        #
                        # command="python",
                        # args=["-m", "mcp_server_fetch"],
                        #
                        # command="uvx",
                        # args=["mcp-server-fetch"],
                        #
                        command="uv",
                        args=[
                            "run",
                            "--with",
                            "mcp[cli]",
                            "mcp",
                            "run",
                            "/exports/git/baidu-map-mcp/src/baidu-map/python/map.py",
                        ],
                        env={"BAIDU_MAPS_API_KEY": "xxx"},
                    )
                stdio_transport = await self.exit_stack.enter_async_context(
                    stdio_client(server_params)
                )
                self.stdio, self.write = stdio_transport

            # Handshake: open the MCP session over the chosen transport.
            self.session = await self.exit_stack.enter_async_context(
                ClientSession(self.stdio, self.write)
            )
            await self.session.initialize()

            response = await self.session.list_tools()
            tools = response.tools
            print("\n连接到服务器,工具列表:", [tool.name for tool in tools])
            print("服务器初始化完成")
        except Exception as e:
            print(f"连接服务器时出错: {str(e)}")
            raise

    async def process_query(self, query: str) -> str:
        """Process one query with OpenAI, executing any tool calls via MCP."""
        # Build the message list for this single-turn query.
        messages = [{"role": "user", "content": query}]
        # Advertise the server's tools to the model.
        available_tools = await self.get_tools()
        response = self.get_response(messages, available_tools)

        # Collect tool results and the text to return to the user.
        tool_results = []
        final_text = []
        for choice in response.choices:
            message = choice.message
            is_function_call = message.tool_calls
            if not is_function_call:
                # Plain answer: no tool needed.
                final_text.append(message.content)
            else:
                # NOTE(review): only the first tool call is handled; parallel
                # tool calls beyond index 0 are ignored by this demo.
                tool_name = message.tool_calls[0].function.name
                tool_args = json.loads(message.tool_calls[0].function.arguments)
                print(f"准备调用工具: {tool_name}")
                print(f"参数: {json.dumps(tool_args, ensure_ascii=False, indent=2)}")

                # Execute the tool call on the MCP server.
                result = await self.session.call_tool(tool_name, tool_args)
                tool_results.append({"call": tool_name, "result": result})
                final_text.append(
                    f"[Calling tool {tool_name} with args {tool_args}]"
                )

                # Continue the conversation with the tool result.
                if message.content and hasattr(message.content, "text"):
                    messages.append(
                        {"role": "assistant", "content": message.content}
                    )
                messages.append({"role": "user", "content": result.content})

                # Get the model's follow-up answer.
                response = self.get_response(messages, available_tools)
                if response.choices[0].message.content:
                    final_text.append(response.choices[0].message.content)

        return "\n".join(final_text)

    async def chat_loop(self):
        """Run an interactive chat loop (stateless: no cross-turn memory)."""
        print("\nMCP Client 启动!")
        print("输入您的查询或 'quit' 退出.")
        while True:
            try:
                query = input("\nQuery: ").strip()
                if query.lower() == "quit":
                    break
                response = await self.process_query(query)
                print("\n" + response)
            except Exception as e:
                import traceback

                traceback.print_exc()
                print(f"\n错误: {str(e)}")

    async def cleanup(self):
        """Release all resources held by the exit stack (session + transport)."""
        await self.exit_stack.aclose()


async def main():
    """Initialize and run the MCP client.

    Three invocation modes:
        1. python client.py <url>                    # connect over SSE
        2. python client.py <path_to_server_script>  # custom server script
        3. python client.py                          # default server
    """
    client = MCPClient()
    try:
        server_path = sys.argv[1] if len(sys.argv) > 1 else None
        await client.connect_to_server(server_path)
        await client.chat_loop()
    finally:
        await client.cleanup()


if __name__ == "__main__":
    asyncio.run(main())
2.1 在 client 中调用 server
使用 TypeScript 编写的 MCP server 可以通过 npx 命令来运行,使用 Python 编写的 MCP server 可以通过 uvx 命令或 Python 命令来运行。
2.1.1 js 程序运行环境
apt-get install -y nodejs
2.1.2 python 程序运行
## 方法一:uv方式
pip install uv
uvx mcp-server-fetch # 安装和运行现成的fetch服务
## 方法二: python方式
pip install mcp-server-fetch
python -m mcp_server_fetch
## 方法三:运行代码
uv run --with mcp[cli] mcp run baidu_map_mcp_server/map.py