
Commit f538ef0

feat(node): Add OpenAI integration (#17022)
This PR adds official support for instrumenting OpenAI SDK calls in Node with Sentry tracing, following OpenTelemetry semantic conventions for Generative AI. We currently instrument the following OpenAI SDK methods:

- `client.chat.completions.create()` - for chat-based completions
- `client.responses.create()` - for the Responses API

Currently supported:

- Node.js - the methods listed above are instrumented in this PR
- ESM and CJS - both module systems are supported

`openAIIntegration()` accepts the following options:

```
// The integration respects your sendDefaultPii client option
interface OpenAiOptions {
  recordInputs?: boolean; // Whether to record prompt messages
  recordOutputs?: boolean; // Whether to record response text
}
```

Example:

```
Sentry.init({
  dsn: '__DSN__',
  sendDefaultPii: false, // Even with PII disabled globally...
  integrations: [
    Sentry.openAIIntegration({
      recordInputs: true, // ...force recording prompts
      recordOutputs: true, // ...force recording responses
    }),
  ],
});
```
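For orientation, a minimal usage sketch against the real `openai` package — assuming the integration patches the SDK automatically once registered (the test scenario in this commit wraps a mock client manually via `instrumentOpenAiClient`, so the exact patching mechanics aren't shown here). The DSN, model, and prompt are placeholders:

```
import * as Sentry from '@sentry/node';
import OpenAI from 'openai';

// Initialize Sentry (and register the integration) before using the client.
Sentry.init({
  dsn: '__DSN__',
  tracesSampleRate: 1.0,
  sendDefaultPii: true, // record prompts/responses by default
  integrations: [Sentry.openAIIntegration()],
});

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Per the tests below, this call should produce a span with op `gen_ai.chat`
// and attributes such as `gen_ai.request.model` and `gen_ai.usage.input_tokens`.
const completion = await client.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'What is the capital of France?' }],
});
console.log(completion.choices[0]?.message.content);
```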
1 parent 5319942 commit f538ef0

File tree

20 files changed · +1253 −1 lines changed

Lines changed: 16 additions & 0 deletions
```
@@ -0,0 +1,16 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [
    Sentry.openAIIntegration({
      recordInputs: true,
      recordOutputs: true,
    }),
  ],
});
```
Lines changed: 11 additions & 0 deletions
```
@@ -0,0 +1,11 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
});
```
Lines changed: 11 additions & 0 deletions
```
@@ -0,0 +1,11 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
});
```

These three setups match the `instrument-with-options.mjs`, `instrument-with-pii.mjs`, and `instrument.mjs` files referenced by the tests below, covering the option matrix: `sendDefaultPii: true` records prompts and responses by default, `sendDefaultPii: false` records neither, and explicit `recordInputs`/`recordOutputs` override the global setting.
Lines changed: 108 additions & 0 deletions
```
@@ -0,0 +1,108 @@
import { instrumentOpenAiClient } from '@sentry/core';
import * as Sentry from '@sentry/node';

class MockOpenAI {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.chat = {
      completions: {
        create: async params => {
          // Simulate processing time
          await new Promise(resolve => setTimeout(resolve, 10));

          if (params.model === 'error-model') {
            const error = new Error('Model not found');
            error.status = 404;
            error.headers = { 'x-request-id': 'mock-request-123' };
            throw error;
          }

          return {
            id: 'chatcmpl-mock123',
            object: 'chat.completion',
            created: 1677652288,
            model: params.model,
            system_fingerprint: 'fp_44709d6fcb',
            choices: [
              {
                index: 0,
                message: {
                  role: 'assistant',
                  content: 'Hello from OpenAI mock!',
                },
                finish_reason: 'stop',
              },
            ],
            usage: {
              prompt_tokens: 10,
              completion_tokens: 15,
              total_tokens: 25,
            },
          };
        },
      },
    };

    this.responses = {
      create: async params => {
        await new Promise(resolve => setTimeout(resolve, 10));

        return {
          id: 'resp_mock456',
          object: 'response',
          created: 1677652290,
          model: params.model,
          input_text: params.input,
          output_text: `Response to: ${params.input}`,
          finish_reason: 'stop',
          usage: {
            input_tokens: 5,
            output_tokens: 8,
            total_tokens: 13,
          },
        };
      },
    };
  }
}

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const mockClient = new MockOpenAI({
      apiKey: 'mock-api-key',
    });

    const client = instrumentOpenAiClient(mockClient);

    // First test: basic chat completion
    await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // Second test: responses API
    await client.responses.create({
      model: 'gpt-3.5-turbo',
      input: 'Translate this to French: Hello',
      instructions: 'You are a translator',
    });

    // Third test: error handling
    try {
      await client.chat.completions.create({
        model: 'error-model',
        messages: [{ role: 'user', content: 'This will fail' }],
      });
    } catch {
      // Error is expected and handled
    }
  });
}

run();
```
Lines changed: 183 additions & 0 deletions
```
@@ -0,0 +1,183 @@
import { afterAll, describe, expect } from 'vitest';
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';

describe('OpenAI integration', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - basic chat completion without PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'chatcmpl-mock123',
          'gen_ai.response.finish_reasons': '["stop"]',
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
          'openai.response.id': 'chatcmpl-mock123',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
          'openai.usage.completion_tokens': 15,
          'openai.usage.prompt_tokens': 10,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Second span - responses API
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'resp_mock456',
          'gen_ai.usage.input_tokens': 5,
          'gen_ai.usage.output_tokens': 8,
          'gen_ai.usage.total_tokens': 13,
          'openai.response.id': 'resp_mock456',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.usage.completion_tokens': 8,
          'openai.usage.prompt_tokens': 5,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Third span - error handling
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'error-model',
        },
        description: 'chat error-model',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'unknown_error',
      }),
    ]),
  };

  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - basic chat completion with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.messages':
            '[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is the capital of France?"}]',
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'chatcmpl-mock123',
          'gen_ai.response.finish_reasons': '["stop"]',
          'gen_ai.response.text': '["Hello from OpenAI mock!"]',
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
          'openai.response.id': 'chatcmpl-mock123',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
          'openai.usage.completion_tokens': 15,
          'openai.usage.prompt_tokens': 10,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Second span - responses API with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.request.messages': '"Translate this to French: Hello"',
          'gen_ai.response.text': 'Response to: Translate this to French: Hello',
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'resp_mock456',
          'gen_ai.usage.input_tokens': 5,
          'gen_ai.usage.output_tokens': 8,
          'gen_ai.usage.total_tokens': 13,
          'openai.response.id': 'resp_mock456',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.usage.completion_tokens': 8,
          'openai.usage.prompt_tokens': 5,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Third span - error handling with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'error-model',
          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
        },
        description: 'chat error-model',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'unknown_error',
      }),
    ]),
  };

  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // Check that custom options are respected
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
        }),
      }),
    ]),
  };

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
    test('creates openai related spans with sendDefaultPii: false', async () => {
      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
    test('creates openai related spans with sendDefaultPii: true', async () => {
      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
    test('creates openai related spans with custom options', async () => {
      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS }).start().completed();
    });
  });
});
```

packages/astro/src/index.server.ts

Lines changed: 1 addition & 0 deletions
```
@@ -84,6 +84,7 @@ export {
   nodeContextIntegration,
   onUncaughtExceptionIntegration,
   onUnhandledRejectionIntegration,
+  openAIIntegration,
   parameterize,
   postgresIntegration,
   postgresJsIntegration,
```

packages/aws-serverless/src/index.ts

Lines changed: 1 addition & 0 deletions
```
@@ -51,6 +51,7 @@ export {
   nativeNodeFetchIntegration,
   onUncaughtExceptionIntegration,
   onUnhandledRejectionIntegration,
+  openAIIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
```

packages/bun/src/index.ts

Lines changed: 1 addition & 0 deletions
```
@@ -71,6 +71,7 @@ export {
   nativeNodeFetchIntegration,
   onUncaughtExceptionIntegration,
   onUnhandledRejectionIntegration,
+  openAIIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
```

packages/core/src/index.ts

Lines changed: 3 additions & 1 deletion
```
@@ -124,7 +124,9 @@ export type { ReportDialogOptions } from './report-dialog';
 export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/exports';
 export { consoleLoggingIntegration } from './logs/console-integration';
 export { addVercelAiProcessors } from './utils/vercel-ai';
-
+export { instrumentOpenAiClient } from './utils/openai';
+export { OPENAI_INTEGRATION_NAME } from './utils/openai/constants';
+export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './utils/openai/types';
 export type { FeatureFlag } from './utils/featureFlags';
 export {
   _INTERNAL_copyFlagsFromScopeToEvent,
```
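Note that `instrumentOpenAiClient` is exported from `@sentry/core` and is what the test scenario above applies to its mock client, so a client can evidently also be wrapped by hand. A minimal sketch, assuming the wrapper preserves the client's shape:

```
import { instrumentOpenAiClient } from '@sentry/core';
import OpenAI from 'openai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// The wrapped client is used exactly like the original; calls to
// chat.completions.create() and responses.create() emit `gen_ai.chat` spans.
const client = instrumentOpenAiClient(openai);

const response = await client.responses.create({
  model: 'gpt-3.5-turbo',
  input: 'Translate this to French: Hello',
});
```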
