@@ -1037,17 +1037,44 @@ For more information on mounting applications in Starlette, see the [Starlette d

For more control, you can use the low-level server implementation directly. This gives you full access to the protocol and allows you to customize every aspect of your server, including lifecycle management through the lifespan API:

+ <!-- snippet-source examples/snippets/servers/lowlevel/lifespan.py -->
```python
- from contextlib import asynccontextmanager
+ """
+ Run from the repository root:
+ uv run examples/snippets/servers/lowlevel/lifespan.py
+ """
+
from collections.abc import AsyncIterator
+ from contextlib import asynccontextmanager

- from fake_database import Database  # Replace with your actual DB type
+ import mcp.server.stdio
+ import mcp.types as types
+ from mcp.server.lowlevel import NotificationOptions, Server
+ from mcp.server.models import InitializationOptions

- from mcp.server import Server
+
+ # Mock database class for example
+ class Database:
+     """Mock database class for example."""
+
+     @classmethod
+     async def connect(cls) -> "Database":
+         """Connect to database."""
+         print("Database connected")
+         return cls()
+
+     async def disconnect(self) -> None:
+         """Disconnect from database."""
+         print("Database disconnected")
+
+     async def query(self, query_str: str) -> list[dict[str, str]]:
+         """Execute a query."""
+         # Simulate database query
+         return [{"id": "1", "name": "Example", "query": query_str}]


@asynccontextmanager
- async def server_lifespan(server: Server) -> AsyncIterator[dict]:
+ async def server_lifespan(_server: Server) -> AsyncIterator[dict]:
    """Manage server startup and shutdown lifecycle."""
    # Initialize resources on startup
    db = await Database.connect()
@@ -1062,21 +1089,79 @@ async def server_lifespan(server: Server) -> AsyncIterator[dict]:

server = Server("example-server", lifespan=server_lifespan)


- # Access lifespan context in handlers
+ @server.list_tools()
+ async def handle_list_tools() -> list[types.Tool]:
+     """List available tools."""
+     return [
+         types.Tool(
+             name="query_db",
+             description="Query the database",
+             inputSchema={
+                 "type": "object",
+                 "properties": {"query": {"type": "string", "description": "SQL query to execute"}},
+                 "required": ["query"],
+             },
+         )
+     ]
+
+
@server.call_tool()
- async def query_db(name: str, arguments: dict) -> list:
+ async def query_db(name: str, arguments: dict) -> list[types.TextContent]:
+     """Handle database query tool call."""
+     if name != "query_db":
+         raise ValueError(f"Unknown tool: {name}")
+
+     # Access lifespan context
    ctx = server.request_context
    db = ctx.lifespan_context["db"]
-     return await db.query(arguments["query"])
+
+     # Execute query
+     results = await db.query(arguments["query"])
+
+     return [types.TextContent(type="text", text=f"Query results: {results}")]
+
+
+ async def run():
+     """Run the server with lifespan management."""
+     async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
+         await server.run(
+             read_stream,
+             write_stream,
+             InitializationOptions(
+                 server_name="example-server",
+                 server_version="0.1.0",
+                 capabilities=server.get_capabilities(
+                     notification_options=NotificationOptions(),
+                     experimental_capabilities={},
+                 ),
+             ),
+         )
+
+
+ if __name__ == "__main__":
+     import asyncio
+
+     asyncio.run(run())
```

+ _Full example: [examples/snippets/servers/lowlevel/lifespan.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/lifespan.py)_
+ <!-- /snippet-source -->
+

The lifespan API provides:

- A way to initialize resources when the server starts and clean them up when it stops
- Access to initialized resources through the request context in handlers
- Type-safe context passing between lifespan and request handlers (see the sketch after this list)
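
The snippets above pass the lifespan context around as a plain `dict`. To illustrate the type-safety point, here is a minimal sketch (not one of the repository snippets) that yields a dataclass from the lifespan instead, so handlers get attribute access that a static type checker can follow. `AppContext` and `FakeDatabase` are assumed names for this illustration; the stdio wiring would be the same as in the lifespan example above.

```python
"""Illustrative sketch: type-safe lifespan context via a dataclass (assumed names)."""

from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass

from mcp.server.lowlevel import Server


class FakeDatabase:
    """Stand-in resource used only for this sketch."""

    @classmethod
    async def connect(cls) -> "FakeDatabase":
        return cls()

    async def disconnect(self) -> None:
        pass


@dataclass
class AppContext:
    """Typed resources shared between the lifespan and request handlers."""

    db: FakeDatabase


@asynccontextmanager
async def typed_lifespan(_server: Server) -> AsyncIterator[AppContext]:
    """Yield a dataclass instead of a dict so handlers receive a typed context."""
    db = await FakeDatabase.connect()
    try:
        yield AppContext(db=db)
    finally:
        await db.disconnect()


server = Server("typed-example", lifespan=typed_lifespan)

# Inside a handler, the yielded object keeps its type:
#     ctx = server.request_context
#     db = ctx.lifespan_context.db  # attribute access instead of ["db"]
```
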
+ <!-- snippet-source examples/snippets/servers/lowlevel/basic.py -->
```python
+ """
+ Run from the repository root:
+ uv run examples/snippets/servers/lowlevel/basic.py
+ """
+
+ import asyncio
+
import mcp.server.stdio
import mcp.types as types
from mcp.server.lowlevel import NotificationOptions, Server

@@ -1088,38 +1173,37 @@ server = Server("example-server")

@server.list_prompts()
async def handle_list_prompts() -> list[types.Prompt]:
+     """List available prompts."""
    return [
        types.Prompt(
            name="example-prompt",
            description="An example prompt template",
-             arguments=[
-                 types.PromptArgument(
-                     name="arg1", description="Example argument", required=True
-                 )
-             ],
+             arguments=[types.PromptArgument(name="arg1", description="Example argument", required=True)],
        )
    ]


@server.get_prompt()
- async def handle_get_prompt(
-     name: str, arguments: dict[str, str] | None
- ) -> types.GetPromptResult:
+ async def handle_get_prompt(name: str, arguments: dict[str, str] | None) -> types.GetPromptResult:
+     """Get a specific prompt by name."""
    if name != "example-prompt":
        raise ValueError(f"Unknown prompt: {name}")

+     arg1_value = (arguments or {}).get("arg1", "default")
+
    return types.GetPromptResult(
        description="Example prompt",
        messages=[
            types.PromptMessage(
                role="user",
-                 content=types.TextContent(type="text", text="Example prompt text"),
+                 content=types.TextContent(type="text", text=f"Example prompt text with argument: {arg1_value}"),
            )
        ],
    )


async def run():
+     """Run the basic low-level server."""
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
@@ -1136,67 +1220,108 @@ async def run():

if __name__ == "__main__":
-     import asyncio
-
    asyncio.run(run())
```

+ _Full example: [examples/snippets/servers/lowlevel/basic.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/basic.py)_
+ <!-- /snippet-source -->
+

Caution: The `uv run mcp run` and `uv run mcp dev` commands don't support the low-level server.

#### Structured Output Support

The low-level server supports structured output for tools, allowing you to return both human-readable content and machine-readable structured data. Tools can define an `outputSchema` to validate their structured output:

+ <!-- snippet-source examples/snippets/servers/lowlevel/structured_output.py -->
```python
- from types import Any
+ """
+ Run from the repository root:
+ uv run examples/snippets/servers/lowlevel/structured_output.py
+ """

+ import asyncio
+ from typing import Any
+
+ import mcp.server.stdio
import mcp.types as types
- from mcp.server.lowlevel import Server
+ from mcp.server.lowlevel import NotificationOptions, Server
+ from mcp.server.models import InitializationOptions

server = Server("example-server")


@server.list_tools()
async def list_tools() -> list[types.Tool]:
+     """List available tools with structured output schemas."""
    return [
        types.Tool(
-             name="calculate",
-             description="Perform mathematical calculations",
+             name="get_weather",
+             description="Get current weather for a city",
            inputSchema={
                "type": "object",
-                 "properties": {
-                     "expression": {"type": "string", "description": "Math expression"}
-                 },
-                 "required": ["expression"],
+                 "properties": {"city": {"type": "string", "description": "City name"}},
+                 "required": ["city"],
            },
            outputSchema={
                "type": "object",
                "properties": {
-                     "result": {"type": "number"},
-                     "expression": {"type": "string"},
+                     "temperature": {"type": "number", "description": "Temperature in Celsius"},
+                     "condition": {"type": "string", "description": "Weather condition"},
+                     "humidity": {"type": "number", "description": "Humidity percentage"},
+                     "city": {"type": "string", "description": "City name"},
                },
-                 "required": ["result", "expression"],
+                 "required": ["temperature", "condition", "humidity", "city"],
            },
        )
    ]


@server.call_tool()
async def call_tool(name: str, arguments: dict[str, Any]) -> dict[str, Any]:
-     if name == "calculate":
-         expression = arguments["expression"]
-         try:
-             result = eval(expression)  # Use a safe math parser
-             structured = {"result": result, "expression": expression}
-
-             # low-level server will validate structured output against the tool's
-             # output schema, and automatically serialize it into a TextContent block
-             # for backwards compatibility with pre-2025-06-18 clients.
-             return structured
-         except Exception as e:
-             raise ValueError(f"Calculation error: {str(e)}")
+     """Handle tool calls with structured output."""
+     if name == "get_weather":
+         city = arguments["city"]
+
+         # Simulated weather data - in production, call a weather API
+         weather_data = {
+             "temperature": 22.5,
+             "condition": "partly cloudy",
+             "humidity": 65,
+             "city": city,  # Include the requested city
+         }
+
+         # low-level server will validate structured output against the tool's
+         # output schema, and additionally serialize it into a TextContent block
+         # for backwards compatibility with pre-2025-06-18 clients.
+         return weather_data
+     else:
+         raise ValueError(f"Unknown tool: {name}")
+
+
+ async def run():
+     """Run the structured output server."""
+     async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
+         await server.run(
+             read_stream,
+             write_stream,
+             InitializationOptions(
+                 server_name="structured-output-example",
+                 server_version="0.1.0",
+                 capabilities=server.get_capabilities(
+                     notification_options=NotificationOptions(),
+                     experimental_capabilities={},
+                 ),
+             ),
+         )
+
+
+ if __name__ == "__main__":
+     asyncio.run(run())
```

+ _Full example: [examples/snippets/servers/lowlevel/structured_output.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/structured_output.py)_
+ <!-- /snippet-source -->
+

Tools can return data in three ways:

1. **Content only**: Return a list of content blocks (default behavior before spec revision 2025-06-18)