Demonstrating the Full Lifecycle of Model Context Protocol (MCP) with Tool Calls
This article explains how the Model Context Protocol (MCP) enables large language models to retrieve up‑to‑date external information through standardized tool calls, illustrating the complete end‑to‑end workflow with Python code for the MCP server, client, and host, and discussing its advantages for building AI agents.
In the rapid development of LLM applications, a core challenge is enabling models to obtain the latest and most accurate external knowledge and to use tools effectively. The article introduces Model Context Protocol (MCP) as a unified standard that promotes cross‑model compatibility and facilitates tool‑driven interactions.
The overall MCP interaction is visualized in a flow diagram, showing the steps from user query submission, context forwarding to the LLM, tool call generation, server‑side execution, and final answer delivery.
Example Code – MCP Server
The server is implemented in Python using the FastMCP class, registering two simple tools: get_weather (returns hard‑coded weather data) and suggest_activity (recommends activities based on weather conditions).
#mcp_server_demo.py
from mcp.server.fastmcp import FastMCP
import asyncio
mcp = FastMCP(name="weather-demo", host="0.0.0.0", port=1234)
@mcp.tool(name="get_weather", description="获取指定城市的天气信息")
async def get_weather(city: str) -> str:
"""获取指定城市的天气信息"""
weather_data = {
"北京": "北京:晴,25°C",
"上海": "上海:多云,27°C"
}
return weather_data.get(city, f"{city}:天气信息未知")
@mcp.tool(name="suggest_activity", description="根据天气描述推荐适合的活动")
async def suggest_activity(condition: str) -> str:
"""根据天气描述推荐适合的活动"""
if "晴" in condition:
return "天气晴朗,推荐你去户外散步或运动。"
elif "多云" in condition:
return "多云天气适合逛公园或咖啡馆。"
elif "雨" in condition:
return "下雨了,建议你在家阅读或看电影。"
else:
return "建议进行室内活动。"
async def main():
print("✅ 启动 MCP Server: http://127.0.0.1:1234")
await mcp.run_sse_async()
if __name__ == "__main__":
    asyncio.run(main())

Example Code – LLM Router
A lightweight wrapper around the OpenRouter API is provided to generate LLM responses.
# llm_router.py
import json
import requests

# Default model routed through OpenRouter; override via OpenRouterLLM(model=...).
# Fix: the original referenced LLM_MODEL without ever defining it, which raised
# NameError as soon as the class body was evaluated.
LLM_MODEL = "openai/gpt-4o-mini"

OPENROUTER_API_KEY = '这里写入使用的Key'
OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
OPENROUTER_HEADERS = {
    "Authorization": f"Bearer {OPENROUTER_API_KEY}",
    "Content-Type": "application/json",
    "HTTP-Referer": "http://localhost",
    "X-Title": "MCP Demo Server"
}


class OpenRouterLLM:
    """Custom LLM class using OpenRouter API"""

    def __init__(self, model: str = LLM_MODEL):
        self.model = model

    def generate(self, messages):
        """Send a chat-completion request and return the first choice's content.

        Args:
            messages: list of {"role": ..., "content": ...} chat messages.

        Raises:
            Exception: on a non-200 HTTP status or a malformed response body.
        """
        request_body = {"model": self.model, "messages": messages}
        print(f"发送请求到 OpenRouter: {json.dumps(request_body, ensure_ascii=False)}")
        # Timeout added so a stalled connection cannot hang the caller forever.
        response = requests.post(
            OPENROUTER_API_URL,
            headers=OPENROUTER_HEADERS,
            json=request_body,
            timeout=60,
        )
        if response.status_code != 200:
            print(f"OpenRouter API 错误: {response.status_code}")
            print(f"错误详情: {response.text}")
            raise Exception(f"OpenRouter API 返回错误: {response.status_code}")
        response_json = response.json()
        print(f"OpenRouter API 响应: {json.dumps(response_json, ensure_ascii=False)}")
        try:
            # IndexError is also possible if "choices" comes back empty.
            content = response_json['choices'][0]['message']['content']
            return content
        except (KeyError, IndexError):
            raise Exception("无法从 OpenRouter 响应中提取内容")
if __name__ == "__main__":
llm = OpenRouterLLM()
messages = [
{"role": "system", "content": "你是一个智能助手,可以帮助查询天气信息。"},
{"role": "user", "content": "请告诉我北京今天的天气情况。"}
]
try:
result = llm.generate(messages)
print("LLM 返回结果:")
print(result)
except Exception as e:
        print(f"调用 OpenRouter 时发生异常: {e}")

Example Code – MCP Client
The client uses Server‑Sent Events (SSE) to communicate with the MCP server, list available tools/resources, and invoke tools.
# mcp_client_demo.py
import asyncio
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
class WeatherMCPClient:
def __init__(self, server_url="http://127.0.0.1:1234/sse"):
self.server_url = server_url
self._sse_context = None
self._session = None
async def __aenter__(self):
self._sse_context = sse_client(self.server_url)
self.read, self.write = await self._sse_context.__aenter__()
self._session = ClientSession(self.read, self.write)
await self._session.__aenter__()
await self._session.initialize()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self._session:
await self._session.__aexit__(exc_type, exc_val, exc_tb)
if self._sse_context:
await self._sse_context.__aexit__(exc_type, exc_val, exc_tb)
async def list_tools(self):
return await self._session.list_tools()
async def list_resources(self):
return await self._session.list_resources()
async def call_tool(self, name, arguments):
return await self._session.call_tool(name, arguments)
async def main():
async with WeatherMCPClient() as client:
print("✅ 成功连接 MCP Server")
tools = await client.list_tools()
print("\n🛠 可用工具:")
print(tools)
resources = await client.list_resources()
print("\n📚 可用资源:")
print(resources)
result = await client.call_tool("get_weather", {"city": "北京"})
print("\n🎯 工具返回:")
for item in result.content:
print(" -", item.text)
if __name__ == "__main__":
    asyncio.run(main())

Example Code – MCP Host
The host orchestrates the conversation, builds system prompts, sends messages to the LLM, parses JSON tool calls, invokes tools via the client, and feeds tool results back to the LLM until a final textual answer is produced.
# mcp_host_demo.py
import asyncio, json, re
from llm_router import OpenRouterLLM
from mcp_client_demo import WeatherMCPClient
def extract_json_from_reply(reply: str):
    """Extract a JSON object from an LLM reply.

    Handles markdown code fences and replies that were JSON-encoded more than
    once (a JSON string whose payload is itself JSON). Returns a dict when one
    can be decoded; otherwise returns the cleaned reply string so callers can
    treat it as the final plain-text answer.
    """
    if isinstance(reply, dict):
        return reply
    if not isinstance(reply, str):
        # Defensive: unexpected input types are passed through untouched.
        return reply
    # Strip a leading ```/```json fence and a trailing ``` fence.
    reply = re.sub(r"^```(?:json)?|```$", "", reply.strip(), flags=re.IGNORECASE).strip()
    # Unwrap at most three layers of nested JSON encoding.
    for _ in range(3):
        try:
            parsed = json.loads(reply)
        except Exception:
            break
        if isinstance(parsed, dict):
            return parsed
        if isinstance(parsed, str):
            reply = parsed
        else:
            # Fix: the original returned non-dict JSON values (lists, numbers),
            # which crashed callers expecting dict-or-str; keep the string form.
            break
    return reply
llm = OpenRouterLLM()
async def main():
client = WeatherMCPClient()
await client.__aenter__()
tools = await client.list_tools()
resources = await client.list_resources()
tool_names = [t.name for t in tools.tools]
tool_descriptions = "\n".join(f"- {t.name}: {t.description}" for t in tools.tools)
resource_descriptions = "\n".join(f"- {r.uri}" for r in resources.resources)
while True:
user_input = input("\n请输入你的问题(输入 exit 退出):\n> ")
if user_input.lower() in ("exit", "退出"):
break
system_prompt = (
"你是一个智能助手,拥有以下工具和资源可以调用:\n\n"
f"🛠 工具列表:\n{tool_descriptions or '(无)'}\n\n"
f"📚 资源列表:\n{resource_descriptions or '(无)'}\n\n"
"请优先调用可用的Tool或Resource,而不是llm内部生成。仅根据上下文调用工具,不传入不需要的参数进行调用\n"
"如果需要,请以 JSON 返回 tool_calls,格式如下:\n"
"{'tool_calls': [{'name': 'get_weather', 'arguments': {'city': '北京'}}]}\n"
"如无需调用工具,返回:{\"tool_calls\": null}"
)
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_input}
]
final_reply = ""
while True:
reply = llm.generate(messages)
print(f"\n🤖 LLM 回复:\n{reply}")
parsed = extract_json_from_reply(reply)
if isinstance(parsed, str):
final_reply = parsed
break
tool_calls = parsed.get("tool_calls")
if not tool_calls:
final_reply = parsed.get("content", "")
break
for tool_call in tool_calls:
tool_name = tool_call["name"]
arguments = tool_call["arguments"]
if tool_name not in tool_names:
raise ValueError(f"❌ 工具 {tool_name} 未注册")
print(f"🛠 调用工具 {tool_name} 参数: {arguments}")
result = await client.call_tool(tool_name, arguments)
tool_output = result.content[0].text
print(f"📦 工具 {tool_name} 返回:{tool_output}")
messages.append({"role": "tool", "name": tool_name, "content": tool_output})
print(f"\n🎯 最终答复:{final_reply}")
await client.__aexit__(None, None, None)
if __name__ == "__main__":
    asyncio.run(main())

The article walks through a concrete user query—"上海的天气如何"—showing how the LLM decides to invoke the get_weather tool, how the tool result is returned, and how the LLM finally composes a natural‑language answer. It concludes with a summary of MCP’s modularity, standardisation, real‑time capability, fine‑grained security, and debuggability, and hints at future extensions such as multi‑step tool chains and dynamic tool discovery.
Java Captain
Focused on Java technologies: SSM, the Spring ecosystem, microservices, MySQL, MyCat, clustering, distributed systems, middleware, Linux, networking, multithreading; occasionally covers DevOps tools like Jenkins, Nexus, Docker, ELK; shares practical tech insights and is dedicated to full‑stack Java development.
How this landed with the community
Was this worth your time?
0 Comments
Thoughtful readers leave field notes, pushback, and hard-won operational detail here.