mirror of https://github.com/aljazceru/mcp-python-sdk.git
synced 2025-12-18 22:44:20 +01:00
StreamableHttp -- resumability support for servers (#587)
@@ -9,6 +9,7 @@ A simple MCP server example demonstrating the StreamableHttp transport, which en

- Task management with anyio task groups
- Ability to send multiple notifications over time to the client
- Proper resource cleanup and lifespan management
- Resumability support via InMemoryEventStore

## Usage

@@ -32,6 +33,23 @@ The server exposes a tool named "start-notification-stream" that accepts three a

- `count`: Number of notifications to send (e.g., 5)
- `caller`: Identifier string for the caller
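
As a hypothetical sketch of invoking this tool, the raw JSON-RPC call below uses `httpx`; the mount path `/mcp`, port 3000, and the placeholder session ID are assumptions, and `interval` is presumably the third argument (it appears in the server code later in this commit):

```python
import httpx

# Hypothetical tools/call request; a real client must first send an
# "initialize" request and reuse the mcp-session-id response header.
payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",
    "params": {
        "name": "start-notification-stream",
        "arguments": {"interval": 1.0, "count": 5, "caller": "demo-client"},
    },
}
headers = {
    "Accept": "application/json, text/event-stream",
    "Content-Type": "application/json",
    "mcp-session-id": "<session-id-from-initialize>",
}

with httpx.Client(timeout=None) as client:
    # The response is an SSE stream; each frame carries an "id:" line
    # that can later be used for resumption via Last-Event-ID.
    with client.stream(
        "POST", "http://localhost:3000/mcp", json=payload, headers=headers
    ) as response:
        for line in response.iter_lines():
            print(line)
```
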
## Resumability Support

This server includes resumability support through the InMemoryEventStore. This enables clients to:

- Reconnect to the server after a disconnection
- Resume event streaming from where they left off using the Last-Event-ID header
The server will:
- Generate unique event IDs for each SSE message
- Store events in memory for later replay
- Replay missed events when a client reconnects with a Last-Event-ID header
Note: The InMemoryEventStore is designed for demonstration purposes only. For production use, consider implementing a persistent storage solution.
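
For instance, a minimal reconnect sketch with `httpx` might look like the following; the endpoint, port, and `mcp-session-id` header value are assumptions, and only the `Last-Event-ID` header is prescribed above:

```python
import httpx

# Hypothetical resume request: a GET that opens a new SSE stream and asks
# the server to replay every stored event after the given Last-Event-ID.
headers = {
    "Accept": "text/event-stream",
    "mcp-session-id": "<session-id-from-initialize>",
    "Last-Event-ID": "<id-of-the-last-event-received>",
}

with httpx.Client(timeout=None) as client:
    with client.stream(
        "GET", "http://localhost:3000/mcp", headers=headers
    ) as response:
        for line in response.iter_lines():
            print(line)  # missed events are replayed first, then live events follow
```
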
## Client

You can connect to this server using an HTTP client. For now, only the TypeScript SDK provides streamable HTTP client examples; alternatively, you can use [Inspector](https://github.com/modelcontextprotocol/inspector).

@@ -0,0 +1,105 @@
"""
In-memory event store for demonstrating resumability functionality.

This is a simple implementation intended for examples and testing,
not for production use where a persistent storage solution would be more appropriate.
"""

import logging
from collections import deque
from dataclasses import dataclass
from uuid import uuid4

from mcp.server.streamable_http import (
    EventCallback,
    EventId,
    EventMessage,
    EventStore,
    StreamId,
)
from mcp.types import JSONRPCMessage

logger = logging.getLogger(__name__)


@dataclass
class EventEntry:
    """
    Represents an event entry in the event store.
    """

    event_id: EventId
    stream_id: StreamId
    message: JSONRPCMessage


class InMemoryEventStore(EventStore):
    """
    Simple in-memory implementation of the EventStore interface for resumability.
    This is primarily intended for examples and testing, not for production use
    where a persistent storage solution would be more appropriate.

    This implementation keeps only the last N events per stream for memory efficiency.
    """

    def __init__(self, max_events_per_stream: int = 100):
        """Initialize the event store.

        Args:
            max_events_per_stream: Maximum number of events to keep per stream
        """
        self.max_events_per_stream = max_events_per_stream
        # for maintaining last N events per stream
        self.streams: dict[StreamId, deque[EventEntry]] = {}
        # event_id -> EventEntry for quick lookup
        self.event_index: dict[EventId, EventEntry] = {}

    async def store_event(
        self, stream_id: StreamId, message: JSONRPCMessage
    ) -> EventId:
        """Stores an event with a generated event ID."""
        event_id = str(uuid4())
        event_entry = EventEntry(
            event_id=event_id, stream_id=stream_id, message=message
        )

        # Get or create deque for this stream
        if stream_id not in self.streams:
            self.streams[stream_id] = deque(maxlen=self.max_events_per_stream)

        # If deque is full, the oldest event will be automatically removed
        # We need to remove it from the event_index as well
        if len(self.streams[stream_id]) == self.max_events_per_stream:
            oldest_event = self.streams[stream_id][0]
            self.event_index.pop(oldest_event.event_id, None)

        # Add new event
        self.streams[stream_id].append(event_entry)
        self.event_index[event_id] = event_entry

        return event_id

    async def replay_events_after(
        self,
        last_event_id: EventId,
        send_callback: EventCallback,
    ) -> StreamId | None:
        """Replays events that occurred after the specified event ID."""
        if last_event_id not in self.event_index:
            logger.warning(f"Event ID {last_event_id} not found in store")
            return None

        # Get the stream and find events after the last one
        last_event = self.event_index[last_event_id]
        stream_id = last_event.stream_id
        stream_events = self.streams.get(last_event.stream_id, deque())

        # Events in deque are already in chronological order
        found_last = False
        for event in stream_events:
            if found_last:
                await send_callback(EventMessage(event.message, event.event_id))
            elif event.event_id == last_event_id:
                found_last = True

        return stream_id
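
The class above can be exercised on its own. A minimal sketch, assuming `mcp` is installed, the file is importable as `event_store`, and a bare `notifications/message` payload validates as a `JSONRPCMessage` (the payload is a hypothetical placeholder):

```python
import anyio
from mcp.types import JSONRPCMessage

from event_store import InMemoryEventStore  # assumed module path


async def print_event(event_message) -> None:
    # EventMessage carries the original message plus its assigned event ID
    print(event_message.event_id, event_message.message)


async def main() -> None:
    store = InMemoryEventStore(max_events_per_stream=2)
    msg = JSONRPCMessage.model_validate(
        {"jsonrpc": "2.0", "method": "notifications/message", "params": {}}
    )

    first = await store.store_event("stream-1", msg)
    second = await store.store_event("stream-1", msg)
    await store.store_event("stream-1", msg)

    # The deque held only 2 events, so `first` was evicted: replay fails.
    assert await store.replay_events_after(first, print_event) is None

    # Replaying after `second` re-sends only the third event.
    assert await store.replay_events_after(second, print_event) == "stream-1"


anyio.run(main)
```
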
@@ -17,12 +17,24 @@ from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Mount

from .event_store import InMemoryEventStore

# Configure logging
logger = logging.getLogger(__name__)

# Global task group that will be initialized in the lifespan
task_group = None

# Event store for resumability
# The InMemoryEventStore enables resumability support for StreamableHTTP transport.
# It stores SSE events with unique IDs, allowing clients to:
# 1. Receive event IDs for each SSE message
# 2. Resume streams by sending Last-Event-ID in GET requests
# 3. Replay missed events after reconnection
# Note: This in-memory implementation is for demonstration ONLY.
# For production, use a persistent storage solution.
event_store = InMemoryEventStore()


@contextlib.asynccontextmanager
async def lifespan(app):

@@ -79,9 +91,14 @@ def main(

        # Send the specified number of notifications with the given interval
        for i in range(count):
            # Include more detailed message for resumability demonstration
            notification_msg = (
                f"[{i+1}/{count}] Event from '{caller}' - "
                f"Use Last-Event-ID to resume if disconnected"
            )
            await ctx.session.send_log_message(
                level="info",
                data=notification_msg,
                logger="notification_stream",
                # Associates this notification with the original request
                # Ensures notifications are sent to the correct response stream
@@ -90,6 +107,7 @@ def main(
                # - nowhere (if GET request isn't supported)
                related_request_id=ctx.request_id,
            )
            logger.debug(f"Sent notification {i+1}/{count} for caller: {caller}")
            if i < count - 1:  # Don't wait after the last notification
                await anyio.sleep(interval)

@@ -163,8 +181,10 @@ def main(

        http_transport = StreamableHTTPServerTransport(
            mcp_session_id=new_session_id,
            is_json_response_enabled=json_response,
            event_store=event_store,  # Enable resumability
        )
        server_instances[http_transport.mcp_session_id] = http_transport
        logger.info(f"Created new transport with session ID: {new_session_id}")
        async with http_transport.connect() as streams:
            read_stream, write_stream = streams