---
title: Instrument AI Agents
sidebar_order: 500
description: "Learn how to manually instrument your code to use Sentry's Agents module."
---

With <Link to="/product/insights/agents/dashboard/">Sentry AI Agent Monitoring</Link>, you can monitor and debug your AI systems with full-stack context. You'll be able to track key insights like token usage, latency, tool usage, and error rates. AI Agent Monitoring data is fully connected to your other Sentry data like logs, errors, and traces.

As a prerequisite to setting up AI Agent Monitoring with JavaScript, you'll need to first <PlatformLink to="/tracing/">set up tracing</PlatformLink>. Once this is done, the JavaScript SDK will automatically instrument AI agents created with supported libraries. If that doesn't fit your use case, you can use the custom instrumentation described below.

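If you haven't enabled tracing yet, the linked tracing page covers it in detail. As a quick reference, a minimal sketch looks roughly like this (the package name and DSN are placeholders; adjust both, and the sample rate, for your platform and traffic):

```javascript
import * as Sentry from "@sentry/node";

Sentry.init({
  dsn: "___PUBLIC_DSN___", // placeholder — use your project's DSN
  // Tracing must be enabled for AI Agent Monitoring;
  // 1.0 captures every transaction, which is usually too high for production
  tracesSampleRate: 1.0,
});
```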
## Automatic Instrumentation

The JavaScript SDK supports automatic instrumentation for some AI libraries. We recommend adding their integrations to your Sentry configuration to automatically capture spans for AI agents, as in the example below.

- <PlatformLink to="/configuration/integrations/vercelai/">
  Vercel AI SDK
  </PlatformLink>

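For example, with the Vercel AI SDK you would add the Vercel AI integration to your `Sentry.init` call. This is a minimal sketch that assumes a recent `@sentry/node` release exporting `vercelAIIntegration`; check the integration page linked above for the exact options and defaults in your SDK version:

```javascript
import * as Sentry from "@sentry/node";

Sentry.init({
  dsn: "___PUBLIC_DSN___", // placeholder — use your project's DSN
  // Tracing must be enabled for AI Agent Monitoring
  tracesSampleRate: 1.0,
  // Assumes a recent SDK version that exports vercelAIIntegration;
  // captures spans for AI agents built with the Vercel AI SDK
  integrations: [Sentry.vercelAIIntegration()],
});
```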
## Manual Instrumentation

If you're using a library that Sentry does not automatically instrument, you can manually instrument your code to capture spans. For your AI agent data to show up in the Sentry [AI Agents Insights](https://sentry.io/orgredirect/organizations/:orgslug/insights/agents/), two spans with well-defined names and data attributes must be created, as described below.

## Spans

### Invoke Agent Span

<Include name="tracing/ai-agents-module/invoke-agent-span" />

#### Example of an Invoke Agent Span

```javascript
// some example agent implementation for demonstration
const myAgent = {
  name: "Weather Agent",
  modelProvider: "openai",
  model: "o3-mini",
  async run() {
    // Agent implementation
    return {
      output: "The weather in Paris is sunny",
      usage: {
        inputTokens: 15,
        outputTokens: 8,
      },
    };
  },
};

Sentry.startSpan(
  {
    op: "gen_ai.invoke_agent",
    name: `invoke_agent ${myAgent.name}`,
    attributes: {
      "gen_ai.operation.name": "invoke_agent",
      "gen_ai.system": myAgent.modelProvider,
      "gen_ai.request.model": myAgent.model,
      "gen_ai.agent.name": myAgent.name,
    },
  },
  async (span) => {
    // run the agent
    const result = await myAgent.run();

    // set agent response
    // we assume result.output is a string
    // type of `gen_ai.response.text` needs to be a string
    span.setAttribute("gen_ai.response.text", JSON.stringify([result.output]));

    // set token usage
    // we assume the result includes the tokens used
    span.setAttribute("gen_ai.usage.input_tokens", result.usage.inputTokens);
    span.setAttribute("gen_ai.usage.output_tokens", result.usage.outputTokens);

    return result;
  }
);
```

### AI Client Span

<Include name="tracing/ai-agents-module/ai-client-span" />

#### Example of an AI Client Span

```javascript
// some example implementation for demonstration
const myAi = {
  modelProvider: "openai",
  model: "o3-mini",
  modelConfig: {
    temperature: 0.1,
    presencePenalty: 0.5,
  },
  async createMessage(messages, maxTokens) {
    // AI implementation
    return {
      output:
        "Here's a joke: Why don't scientists trust atoms? Because they make up everything!",
      usage: {
        inputTokens: 12,
        outputTokens: 24,
      },
    };
  },
};

Sentry.startSpan(
  {
    op: "gen_ai.chat",
    name: `chat ${myAi.model}`,
    attributes: {
      "gen_ai.operation.name": "chat",
      "gen_ai.system": myAi.modelProvider,
      "gen_ai.request.model": myAi.model,
    },
  },
  async (span) => {
    // set up messages for LLM
    const maxTokens = 1024;
    const prompt = "Tell me a joke";
    const messages = [{ role: "user", content: prompt }];

    // set chat request data
    span.setAttribute("gen_ai.request.messages", JSON.stringify(messages));
    span.setAttribute("gen_ai.request.max_tokens", maxTokens);
    span.setAttribute(
      "gen_ai.request.temperature",
      myAi.modelConfig.temperature
    );
    span.setAttribute(
      "gen_ai.request.presence_penalty",
      myAi.modelConfig.presencePenalty
    );

    // ask the LLM
    const result = await myAi.createMessage(messages, maxTokens);

    // set response
    // we assume result.output is a string
    // type of `gen_ai.response.text` needs to be a string
    span.setAttribute("gen_ai.response.text", JSON.stringify([result.output]));

    // set token usage
    // we assume the result includes the tokens used
    span.setAttribute("gen_ai.usage.input_tokens", result.usage.inputTokens);
    span.setAttribute("gen_ai.usage.output_tokens", result.usage.outputTokens);

    return result;
  }
);
```

### Execute Tool Span

<Include name="tracing/ai-agents-module/execute-tool-span" />

#### Example of an Execute Tool Span

```javascript
// some example implementation for demonstration
const myAi = {
  modelProvider: "openai",
  model: "o3-mini",
  async createMessage(messages, maxTokens) {
    // AI implementation that returns tool calls
    return {
      toolCalls: [
        {
          name: "random_number",
          description: "Generate a random number",
          arguments: { max: 10 },
        },
      ],
    };
  },
};

const prompt = "Generate a random number between 0 and 10";
const messages = [{ role: "user", content: prompt }];

// First, make the AI call
const result = await Sentry.startSpan(
  { op: "gen_ai.chat", name: `chat ${myAi.model}` },
  () => myAi.createMessage(messages, 1024)
);

// Check if we should call a tool
if (result.toolCalls && result.toolCalls.length > 0) {
  const tool = result.toolCalls[0];

  await Sentry.startSpan(
    {
      op: "gen_ai.execute_tool",
      name: `execute_tool ${tool.name}`,
      attributes: {
        "gen_ai.system": myAi.modelProvider,
        "gen_ai.request.model": myAi.model,
        "gen_ai.tool.type": "function",
        "gen_ai.tool.name": tool.name,
        "gen_ai.tool.description": tool.description,
        "gen_ai.tool.input": JSON.stringify(tool.arguments),
      },
    },
    async (span) => {
      // run tool (example implementation)
      const toolResult = Math.floor(Math.random() * tool.arguments.max);

      // set tool result
      span.setAttribute("gen_ai.tool.output", String(toolResult));

      return toolResult;
    }
  );
}
```

### Handoff Span

<Include name="tracing/ai-agents-module/handoff-span" />

#### Example of a Handoff Span

```javascript
// some example agent implementation for demonstration
const myAgent = {
  name: "Weather Agent",
  modelProvider: "openai",
  model: "o3-mini",
  async run() {
    // Agent implementation
    return {
      handoffTo: "Travel Agent",
      output:
        "I need to hand off to the travel agent for booking recommendations",
    };
  },
};

const otherAgent = {
  name: "Travel Agent",
  modelProvider: "openai",
  model: "o3-mini",
  async run() {
    // Other agent implementation
    return { output: "Here are some travel recommendations..." };
  },
};

// First agent execution
const result = await Sentry.startSpan(
  { op: "gen_ai.invoke_agent", name: `invoke_agent ${myAgent.name}` },
  () => myAgent.run()
);

// Check if we should hand off to another agent
if (result.handoffTo) {
  // Create handoff span
  await Sentry.startSpan(
    {
      op: "gen_ai.handoff",
      name: `handoff from ${myAgent.name} to ${otherAgent.name}`,
      attributes: {
        "gen_ai.system": myAgent.modelProvider,
        "gen_ai.request.model": myAgent.model,
      },
    },
    () => {
      // the handoff span just marks the handoff
      // no actual work is done here
    }
  );

  // Execute the other agent
  await Sentry.startSpan(
    { op: "gen_ai.invoke_agent", name: `invoke_agent ${otherAgent.name}` },
    () => otherAgent.run()
  );
}
```

## Common Span Attributes

<Include name="tracing/ai-agents-module/common-span-attributes" />
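All of the spans in the examples above attach the same provider, model, and operation attributes. As a quick recap, here's a minimal sketch of how those common attributes are set (the values are illustrative, not required):

```javascript
Sentry.startSpan(
  {
    op: "gen_ai.chat",
    name: "chat o3-mini",
    attributes: {
      // common attributes used throughout the examples in this guide
      // (illustrative values — use your own provider and model)
      "gen_ai.operation.name": "chat",
      "gen_ai.system": "openai",
      "gen_ai.request.model": "o3-mini",
    },
  },
  (span) => {
    // make the LLM or tool call here and set span attributes as shown above
  }
);
```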