lint docs examples (#286)

* lint docs examples

* format
Samuel Colvin, 2025-03-14 11:30:57 +00:00 (committed by GitHub)
parent 97201cce59, commit 3775916c5c
4 changed files with 185 additions and 37 deletions

README.md

@@ -99,12 +99,14 @@ from mcp.server.fastmcp import FastMCP
# Create an MCP server
mcp = FastMCP("Demo")
# Add an addition tool
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers"""
return a + b
# Add a dynamic greeting resource
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
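The hunk above ends inside `get_greeting`. For orientation, a minimal self-contained sketch of the same quickstart server follows; the greeting body, its docstring, and the `mcp.run()` call are assumptions added for illustration, not lines from this diff.

```python
from mcp.server.fastmcp import FastMCP

# Create an MCP server
mcp = FastMCP("Demo")


# Add an addition tool
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two numbers"""
    return a + b


# Add a dynamic greeting resource
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Return a personalized greeting"""  # assumed docstring
    return f"Hello, {name}!"  # assumed body


if __name__ == "__main__":
    mcp.run()  # serves over stdio by default
```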
@@ -139,9 +141,13 @@ The FastMCP server is your core interface to the MCP protocol. It handles connec
```python
# Add lifespan support for startup/shutdown with strong typing
from contextlib import asynccontextmanager
from dataclasses import dataclass
from typing import AsyncIterator
-from mcp.server.fastmcp import FastMCP
+from fake_database import Database  # Replace with your actual DB type
+from mcp.server.fastmcp import Context, FastMCP
# Create a named server
mcp = FastMCP("My App")
@@ -149,24 +155,28 @@ mcp = FastMCP("My App")
# Specify dependencies for deployment and development
mcp = FastMCP("My App", dependencies=["pandas", "numpy"])
@dataclass
class AppContext:
-    db: Database # Replace with your actual DB type
+    db: Database
@asynccontextmanager
async def app_lifespan(server: FastMCP) -> AsyncIterator[AppContext]:
"""Manage application lifecycle with type-safe context"""
+    # Initialize on startup
+    db = await Database.connect()
try:
-        # Initialize on startup
-        await db.connect()
yield AppContext(db=db)
finally:
# Cleanup on shutdown
await db.disconnect()
# Pass lifespan to server
mcp = FastMCP("My App", lifespan=app_lifespan)
# Access type-safe lifespan context in tools
@mcp.tool()
def query_db(ctx: Context) -> str:
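The hunk stops at the `query_db` signature. A minimal sketch of how a tool body can reach the typed lifespan context, assuming the placeholder `Database` from `fake_database` exposes a `query()` method:

```python
@mcp.tool()
def query_db(ctx: Context) -> str:
    """Tool that uses the lifespan-initialized database"""
    # The AppContext yielded by app_lifespan is exposed on the request context
    db = ctx.request_context.lifespan_context.db
    return db.query()  # placeholder call on the fake Database
```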
@@ -180,11 +190,17 @@ def query_db(ctx: Context) -> str:
Resources are how you expose data to LLMs. They're similar to GET endpoints in a REST API - they provide data but shouldn't perform significant computation or have side effects:
```python
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("My App")
@mcp.resource("config://app")
def get_config() -> str:
"""Static configuration data"""
return "App configuration here"
@mcp.resource("users://{user_id}/profile")
def get_user_profile(user_id: str) -> str:
"""Dynamic user data"""
@@ -196,10 +212,17 @@ def get_user_profile(user_id: str) -> str:
Tools let LLMs take actions through your server. Unlike resources, tools are expected to perform computation and have side effects:
```python
import httpx
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("My App")
@mcp.tool()
def calculate_bmi(weight_kg: float, height_m: float) -> float:
"""Calculate BMI given weight in kg and height in meters"""
-    return weight_kg / (height_m ** 2)
+    return weight_kg / (height_m**2)
@mcp.tool()
async def fetch_weather(city: str) -> str:
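The async tool body falls outside the hunk. A sketch of one possible implementation using `httpx`; the weather endpoint URL is a made-up placeholder:

```python
@mcp.tool()
async def fetch_weather(city: str) -> str:
    """Fetch current weather for a city"""
    async with httpx.AsyncClient() as client:
        # Hypothetical endpoint, only to illustrate an async HTTP call from a tool
        response = await client.get(f"https://api.weather.com/{city}")
        return response.text
```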
@@ -214,16 +237,22 @@ async def fetch_weather(city: str) -> str:
Prompts are reusable templates that help LLMs interact with your server effectively:
```python
from mcp.server.fastmcp import FastMCP, types
mcp = FastMCP("My App")
@mcp.prompt()
def review_code(code: str) -> str:
return f"Please review this code:\n\n{code}"
@mcp.prompt()
-def debug_error(error: str) -> list[Message]:
+def debug_error(error: str) -> list[types.Message]:
return [
-        UserMessage("I'm seeing this error:"),
-        UserMessage(error),
-        AssistantMessage("I'll help debug that. What have you tried so far?")
+        types.UserMessage("I'm seeing this error:"),
+        types.UserMessage(error),
+        types.AssistantMessage("I'll help debug that. What have you tried so far?"),
]
```
@@ -235,6 +264,9 @@ FastMCP provides an `Image` class that automatically handles image data:
from mcp.server.fastmcp import FastMCP, Image
from PIL import Image as PILImage
mcp = FastMCP("My App")
@mcp.tool()
def create_thumbnail(image_path: str) -> Image:
"""Create a thumbnail from an image"""
@@ -250,6 +282,9 @@ The Context object gives your tools and resources access to MCP capabilities:
```python
from mcp.server.fastmcp import FastMCP, Context
mcp = FastMCP("My App")
@mcp.tool()
async def long_task(files: list[str], ctx: Context) -> str:
"""Process multiple files with progress tracking"""
@@ -322,16 +357,19 @@ from mcp.server.fastmcp import FastMCP
mcp = FastMCP("Echo")
@mcp.resource("echo://{message}")
def echo_resource(message: str) -> str:
"""Echo a message as a resource"""
return f"Resource echo: {message}"
@mcp.tool()
def echo_tool(message: str) -> str:
"""Echo a message as a tool"""
return f"Tool echo: {message}"
@mcp.prompt()
def echo_prompt(message: str) -> str:
"""Create an echo prompt"""
@@ -343,20 +381,21 @@ def echo_prompt(message: str) -> str:
A more complex example showing database integration:
```python
-from mcp.server.fastmcp import FastMCP
import sqlite3
+from mcp.server.fastmcp import FastMCP
mcp = FastMCP("SQLite Explorer")
@mcp.resource("schema://main")
def get_schema() -> str:
"""Provide the database schema as a resource"""
conn = sqlite3.connect("database.db")
-    schema = conn.execute(
-        "SELECT sql FROM sqlite_master WHERE type='table'"
-    ).fetchall()
+    schema = conn.execute("SELECT sql FROM sqlite_master WHERE type='table'").fetchall()
return "\n".join(sql[0] for sql in schema if sql[0])
@mcp.tool()
def query_data(sql: str) -> str:
"""Execute SQL queries safely"""
@@ -378,20 +417,27 @@ For more control, you can use the low-level server implementation directly. This
from contextlib import asynccontextmanager
from typing import AsyncIterator
+from fake_database import Database  # Replace with your actual DB type
from mcp.server import Server
@asynccontextmanager
async def server_lifespan(server: Server) -> AsyncIterator[dict]:
"""Manage server startup and shutdown lifecycle."""
+    # Initialize resources on startup
+    db = await Database.connect()
try:
-        # Initialize resources on startup
-        await db.connect()
yield {"db": db}
finally:
# Clean up on shutdown
await db.disconnect()
# Pass lifespan to server
server = Server("example-server", lifespan=server_lifespan)
# Access lifespan context in handlers
@server.call_tool()
async def query_db(name: str, arguments: dict) -> list:
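The handler body is cut off by the hunk. A sketch of how a low-level handler can reach the dict yielded by `server_lifespan`; the `db.query()` call is a placeholder on the fake `Database`:

```python
@server.call_tool()
async def query_db(name: str, arguments: dict) -> list:
    """Handle a tool call using the lifespan-managed database"""
    ctx = server.request_context
    db = ctx.lifespan_context["db"]  # the dict yielded by server_lifespan
    return await db.query(arguments["query"])  # placeholder query on the fake Database
```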
@@ -406,14 +452,15 @@ The lifespan API provides:
- Type-safe context passing between lifespan and request handlers
```python
-from mcp.server.lowlevel import Server, NotificationOptions
-from mcp.server.models import InitializationOptions
import mcp.server.stdio
import mcp.types as types
+from mcp.server.lowlevel import NotificationOptions, Server
+from mcp.server.models import InitializationOptions
# Create a server instance
server = Server("example-server")
@server.list_prompts()
async def handle_list_prompts() -> list[types.Prompt]:
return [
@@ -422,18 +469,16 @@ async def handle_list_prompts() -> list[types.Prompt]:
description="An example prompt template",
arguments=[
types.PromptArgument(
-                    name="arg1",
-                    description="Example argument",
-                    required=True
+                    name="arg1", description="Example argument", required=True
)
-            ]
+            ],
)
]
@server.get_prompt()
async def handle_get_prompt(
-    name: str,
-    arguments: dict[str, str] | None
+    name: str, arguments: dict[str, str] | None
) -> types.GetPromptResult:
if name != "example-prompt":
raise ValueError(f"Unknown prompt: {name}")
@@ -443,14 +488,12 @@ async def handle_get_prompt(
messages=[
types.PromptMessage(
role="user",
-                content=types.TextContent(
-                    type="text",
-                    text="Example prompt text"
-                )
+                content=types.TextContent(type="text", text="Example prompt text"),
)
-        ]
+        ],
)
async def run():
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.run(
@@ -462,12 +505,14 @@ async def run():
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
-                )
-            )
+                ),
+            ),
)
if __name__ == "__main__":
import asyncio
asyncio.run(run())
```
@@ -476,18 +521,21 @@ if __name__ == "__main__":
The SDK provides a high-level client interface for connecting to MCP servers:
```python
-from mcp import ClientSession, StdioServerParameters
+from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
# Create server parameters for stdio connection
server_params = StdioServerParameters(
-    command="python", # Executable
-    args=["example_server.py"], # Optional command line arguments
-    env=None # Optional environment variables
+    command="python",  # Executable
+    args=["example_server.py"],  # Optional command line arguments
+    env=None,  # Optional environment variables
)
# Optional: create a sampling callback
-async def handle_sampling_message(message: types.CreateMessageRequestParams) -> types.CreateMessageResult:
+async def handle_sampling_message(
+    message: types.CreateMessageRequestParams,
+) -> types.CreateMessageResult:
return types.CreateMessageResult(
role="assistant",
content=types.TextContent(
@@ -498,9 +546,12 @@ async def handle_sampling_message(message: types.CreateMessageRequestParams) ->
stopReason="endTurn",
)
async def run():
async with stdio_client(server_params) as (read, write):
-        async with ClientSession(read, write, sampling_callback=handle_sampling_message) as session:
+        async with ClientSession(
+            read, write, sampling_callback=handle_sampling_message
+        ) as session:
# Initialize the connection
await session.initialize()
@@ -508,7 +559,9 @@ async def run():
prompts = await session.list_prompts()
# Get a prompt
-            prompt = await session.get_prompt("example-prompt", arguments={"arg1": "value"})
+            prompt = await session.get_prompt(
+                "example-prompt", arguments={"arg1": "value"}
+            )
# List available resources
resources = await session.list_resources()
@@ -522,8 +575,10 @@ async def run():
# Call a tool
result = await session.call_tool("tool-name", arguments={"arg1": "value"})
if __name__ == "__main__":
import asyncio
asyncio.run(run())
```
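To tie the client back to the quickstart server at the top of this README, here is a sketch of a complete client script; the `server.py` file name and the printed output handling are assumptions:

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

# Launch the quickstart server over stdio (assumes it was saved as server.py)
server_params = StdioServerParameters(command="python", args=["server.py"])


async def main():
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()  # should include the `add` tool
            result = await session.call_tool("add", arguments={"a": 2, "b": 3})
            print(tools, result)


if __name__ == "__main__":
    asyncio.run(main())
```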