From 2c84d4308ac6ae9cbb6d376761602a2b4f1a4706 Mon Sep 17 00:00:00 2001
From: Andrei Borza
Date: Tue, 24 Jun 2025 16:50:44 +0200
Subject: [PATCH 01/13] Expose vercel ai integration

---
 packages/cloudflare/src/index.ts              |   1 +
 .../cloudflare/src/integrations/modules.ts    | 141 ++++
 .../tracing/vercelai/ai_sdk_attributes.ts     | 794 ++++++++++++++++++
 .../tracing/vercelai/constants.ts             |   1 +
 .../integrations/tracing/vercelai/index.ts    | 272 ++++++
 .../integrations/tracing/vercelai/types.ts    |  69 ++
 .../cloudflare/src/utils/addOriginToSpan.ts   |   8 +
 packages/cloudflare/src/utils/commonjs.ts     |   8 +
 8 files changed, 1294 insertions(+)
 create mode 100644 packages/cloudflare/src/integrations/modules.ts
 create mode 100644 packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts
 create mode 100644 packages/cloudflare/src/integrations/tracing/vercelai/constants.ts
 create mode 100644 packages/cloudflare/src/integrations/tracing/vercelai/index.ts
 create mode 100644 packages/cloudflare/src/integrations/tracing/vercelai/types.ts
 create mode 100644 packages/cloudflare/src/utils/addOriginToSpan.ts
 create mode 100644 packages/cloudflare/src/utils/commonjs.ts

diff --git a/packages/cloudflare/src/index.ts b/packages/cloudflare/src/index.ts
index 8efcc8439809..a5bb99d40818 100644
--- a/packages/cloudflare/src/index.ts
+++ b/packages/cloudflare/src/index.ts
@@ -107,6 +107,7 @@ export { CloudflareClient } from './client';
 export { getDefaultIntegrations } from './sdk';
 
 export { fetchIntegration } from './integrations/fetch';
+export { vercelAIIntegration } from './integrations/tracing/vercelai';
 
 export { instrumentD1WithSentry } from './d1';
 
diff --git a/packages/cloudflare/src/integrations/modules.ts b/packages/cloudflare/src/integrations/modules.ts
new file mode 100644
index 000000000000..6adee9e46744
--- /dev/null
+++ b/packages/cloudflare/src/integrations/modules.ts
@@ -0,0 +1,141 @@
+import { existsSync, readFileSync } from 'node:fs';
+import { dirname, join } from 'node:path';
+import type { IntegrationFn } from '@sentry/core';
+import { isCjs } from '../utils/commonjs';
+
+type ModuleInfo = Record<string, string>;
+
+let moduleCache: ModuleInfo | undefined;
+
+const INTEGRATION_NAME = 'Modules';
+
+declare const __SENTRY_SERVER_MODULES__: Record<string, string>;
+
+/**
+ * `__SENTRY_SERVER_MODULES__` can be replaced at build time with the modules loaded by the server.
+ * Right now, we leverage this in Next.js to circumvent the problem that we do not get access to these things at runtime.
+ */
+const SERVER_MODULES = typeof __SENTRY_SERVER_MODULES__ === 'undefined' ? {} : __SENTRY_SERVER_MODULES__;
+
+const _modulesIntegration = (() => {
+  return {
+    name: INTEGRATION_NAME,
+    processEvent(event) {
+      event.modules = {
+        ...event.modules,
+        ..._getModules(),
+      };
+
+      return event;
+    },
+    getModules: _getModules,
+  };
+}) satisfies IntegrationFn;
+
+/**
+ * Add node modules / packages to the event.
+ * For this, multiple sources are used:
+ * - They can be injected at build time into the __SENTRY_SERVER_MODULES__ variable (e.g. in Next.js)
+ * - They are extracted from the dependencies & devDependencies in the package.json file
+ * - They are extracted from the require.cache (CJS only)
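+ *
+ * @example
+ * ```javascript
+ * // Illustrative usage, assuming `modulesIntegration` is wired into your `Sentry.init` call:
+ * Sentry.init({
+ *   integrations: [modulesIntegration()],
+ * });
+ * ```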
+ */
+export const modulesIntegration = _modulesIntegration;
+
+function getRequireCachePaths(): string[] {
+  try {
+    return require.cache ? Object.keys(require.cache as Record<string, unknown>) : [];
+  } catch (e) {
+    return [];
+  }
+}
+
+/** Extract information about package.json modules */
+function collectModules(): ModuleInfo {
+  return {
+    ...SERVER_MODULES,
+    ...getModulesFromPackageJson(),
+    ...(isCjs() ? collectRequireModules() : {}),
+  };
+}
+
+/** Extract information about package.json modules from require.cache */
+function collectRequireModules(): ModuleInfo {
+  const mainPaths = require.main?.paths || [];
+  const paths = getRequireCachePaths();
+
+  // We start with the modules from package.json (if possible)
+  // These may be overwritten by more specific versions from the require.cache
+  const infos: ModuleInfo = {};
+  const seen = new Set<string>();
+
+  paths.forEach(path => {
+    let dir = path;
+
+    /** Traverse directories upward in the search of package.json file */
+    const updir = (): void | (() => void) => {
+      const orig = dir;
+      dir = dirname(orig);
+
+      if (!dir || orig === dir || seen.has(orig)) {
+        return undefined;
+      }
+      if (mainPaths.indexOf(dir) < 0) {
+        return updir();
+      }
+
+      const pkgfile = join(orig, 'package.json');
+      seen.add(orig);
+
+      if (!existsSync(pkgfile)) {
+        return updir();
+      }
+
+      try {
+        const info = JSON.parse(readFileSync(pkgfile, 'utf8')) as {
+          name: string;
+          version: string;
+        };
+        infos[info.name] = info.version;
+      } catch (_oO) {
+        // no-empty
+      }
+    };
+
+    updir();
+  });
+
+  return infos;
+}
+
+/** Fetches the list of modules and the versions loaded by the entry file for your node.js app. */
+function _getModules(): ModuleInfo {
+  if (!moduleCache) {
+    moduleCache = collectModules();
+  }
+  return moduleCache;
+}
+
+interface PackageJson {
+  dependencies?: Record<string, string>;
+  devDependencies?: Record<string, string>;
+}
+
+function getPackageJson(): PackageJson {
+  try {
+    const filePath = join(process.cwd(), 'package.json');
+    const packageJson = JSON.parse(readFileSync(filePath, 'utf8')) as PackageJson;
+
+    return packageJson;
+  } catch (e) {
+    return {};
+  }
+}
+
+function getModulesFromPackageJson(): ModuleInfo {
+  const packageJson = getPackageJson();
+
+  return {
+    ...packageJson.dependencies,
+    ...packageJson.devDependencies,
+  };
+}
diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts b/packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts
new file mode 100644
index 000000000000..8d7b6913a636
--- /dev/null
+++ b/packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts
@@ -0,0 +1,794 @@
+/**
+ * AI SDK Telemetry Attributes
+ * Based on https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data
+ */
+
+// =============================================================================
+// COMMON ATTRIBUTES
+// =============================================================================
+
+/**
+ * Common attribute for operation name across all functions and spans
+ * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data
+ */
+export const OPERATION_NAME_ATTRIBUTE = 'operation.name';
+
+/**
+ * Common attribute for AI operation ID across all functions and spans
+ * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data
+ */
+export const AI_OPERATION_ID_ATTRIBUTE = 'ai.operationId';
+
+// =============================================================================
+// SHARED ATTRIBUTES
+// =============================================================================
+
+/**
+ * `generateText` function - `ai.generateText` span
+ * `streamText` function - `ai.streamText` span
+ *
+ * The prompt that was used when calling the function
+ * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_PROMPT_ATTRIBUTE = 'ai.prompt'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The JSON schema version of the schema that was passed into the function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SCHEMA_ATTRIBUTE = 'ai.schema'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The name of the schema that was passed into the function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SCHEMA_NAME_ATTRIBUTE = 'ai.schema.name'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The description of the schema that was passed into the function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SCHEMA_DESCRIPTION_ATTRIBUTE = 'ai.schema.description'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The object that was generated (stringified JSON) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_RESPONSE_OBJECT_ATTRIBUTE = 'ai.response.object'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The object generation mode, e.g. `json` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SETTINGS_MODE_ATTRIBUTE = 'ai.settings.mode'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The output type that was used, e.g. 
`object` or `no-schema` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SETTINGS_OUTPUT_ATTRIBUTE = 'ai.settings.output'; + +/** + * `embed` function - `ai.embed.doEmbed` span + * `embedMany` function - `ai.embedMany` span + * + * The values that were passed into the function (array) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function + */ +export const AI_VALUES_ATTRIBUTE = 'ai.values'; + +/** + * `embed` function - `ai.embed.doEmbed` span + * `embedMany` function - `ai.embedMany` span + * + * An array of JSON-stringified embeddings + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function + */ +export const AI_EMBEDDINGS_ATTRIBUTE = 'ai.embeddings'; + +// ============================================================================= +// GENERATETEXT FUNCTION - UNIQUE ATTRIBUTES +// ============================================================================= + +/** + * `generateText` function - `ai.generateText` span + * + * The text that was generated + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_RESPONSE_TEXT_ATTRIBUTE = 'ai.response.text'; + +/** + * `generateText` function - `ai.generateText` span + * + * The tool calls that were made as part of the generation (stringified JSON) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_RESPONSE_TOOL_CALLS_ATTRIBUTE = 'ai.response.toolCalls'; + +/** + * `generateText` function - `ai.generateText` span + * + * The reason why the generation finished + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_RESPONSE_FINISH_REASON_ATTRIBUTE = 'ai.response.finishReason'; + +/** + * `generateText` function - `ai.generateText` span + * + * The maximum number of steps that were set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_SETTINGS_MAX_STEPS_ATTRIBUTE = 'ai.settings.maxSteps'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * The format of the prompt + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_FORMAT_ATTRIBUTE = 'ai.prompt.format'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * The messages that were passed into the provider + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_MESSAGES_ATTRIBUTE = 'ai.prompt.messages'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * Array of stringified tool definitions + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_TOOLS_ATTRIBUTE = 'ai.prompt.tools'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * The stringified tool choice setting (JSON) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_TOOL_CHOICE_ATTRIBUTE = 'ai.prompt.toolChoice'; + +// ============================================================================= +// STREAMTEXT FUNCTION - UNIQUE ATTRIBUTES +// ============================================================================= + +/** + * `streamText` 
function - `ai.streamText.doStream` span + * + * The time it took to receive the first chunk in milliseconds + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_RESPONSE_MS_TO_FIRST_CHUNK_ATTRIBUTE = 'ai.response.msToFirstChunk'; + +/** + * `streamText` function - `ai.streamText.doStream` span + * + * The time it took to receive the finish part of the LLM stream in milliseconds + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_RESPONSE_MS_TO_FINISH_ATTRIBUTE = 'ai.response.msToFinish'; + +/** + * `streamText` function - `ai.streamText.doStream` span + * + * The average completion tokens per second + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND_ATTRIBUTE = 'ai.response.avgCompletionTokensPerSecond'; + +// ============================================================================= +// EMBED FUNCTION - UNIQUE ATTRIBUTES +// ============================================================================= + +/** + * `embed` function - `ai.embed` span + * + * The value that was passed into the `embed` function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_VALUE_ATTRIBUTE = 'ai.value'; + +/** + * `embed` function - `ai.embed` span + * + * A JSON-stringified embedding + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_EMBEDDING_ATTRIBUTE = 'ai.embedding'; + +// ============================================================================= +// BASIC LLM SPAN INFORMATION +// ============================================================================= + +/** + * Basic LLM span information + * Multiple spans + * + * The functionId that was set through `telemetry.functionId` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const RESOURCE_NAME_ATTRIBUTE = 'resource.name'; + +/** + * Basic LLM span information + * Multiple spans + * + * The id of the model + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_MODEL_ID_ATTRIBUTE = 'ai.model.id'; + +/** + * Basic LLM span information + * Multiple spans + * + * The provider of the model + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_MODEL_PROVIDER_ATTRIBUTE = 'ai.model.provider'; + +/** + * Basic LLM span information + * Multiple spans + * + * The request headers that were passed in through `headers` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_REQUEST_HEADERS_ATTRIBUTE = 'ai.request.headers'; + +/** + * Basic LLM span information + * Multiple spans + * + * The maximum number of retries that were set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_SETTINGS_MAX_RETRIES_ATTRIBUTE = 'ai.settings.maxRetries'; + +/** + * Basic LLM span information + * Multiple spans + * + * The functionId that was set through `telemetry.functionId` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE = 'ai.telemetry.functionId'; + +/** + * Basic LLM span information + * Multiple spans + * + * The metadata that was passed in through `telemetry.metadata` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const 
AI_TELEMETRY_METADATA_ATTRIBUTE = 'ai.telemetry.metadata'; + +/** + * Basic LLM span information + * Multiple spans + * + * The number of completion tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE = 'ai.usage.completionTokens'; + +/** + * Basic LLM span information + * Multiple spans + * + * The number of prompt tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'ai.usage.promptTokens'; + +// ============================================================================= +// CALL LLM SPAN INFORMATION +// ============================================================================= + +/** + * Call LLM span information + * Individual LLM call spans + * + * The model that was used to generate the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const AI_RESPONSE_MODEL_ATTRIBUTE = 'ai.response.model'; + +/** + * Call LLM span information + * Individual LLM call spans + * + * The id of the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const AI_RESPONSE_ID_ATTRIBUTE = 'ai.response.id'; + +/** + * Call LLM span information + * Individual LLM call spans + * + * The timestamp of the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const AI_RESPONSE_TIMESTAMP_ATTRIBUTE = 'ai.response.timestamp'; + +// ============================================================================= +// SEMANTIC CONVENTIONS FOR GENAI OPERATIONS +// ============================================================================= + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The provider that was used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The model that was requested + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The temperature that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE = 'gen_ai.request.temperature'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The maximum number of tokens that were set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE = 'gen_ai.request.max_tokens'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The frequency penalty that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE = 'gen_ai.request.frequency_penalty'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The presence penalty that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE = 'gen_ai.request.presence_penalty'; + +/** + * 
Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The topK parameter value that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_TOP_K_ATTRIBUTE = 'gen_ai.request.top_k'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The topP parameter value that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_TOP_P_ATTRIBUTE = 'gen_ai.request.top_p'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The stop sequences + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE = 'gen_ai.request.stop_sequences'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The finish reasons that were returned by the provider + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE = 'gen_ai.response.finish_reasons'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The model that was used to generate the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_RESPONSE_MODEL_ATTRIBUTE = 'gen_ai.response.model'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The id of the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_RESPONSE_ID_ATTRIBUTE = 'gen_ai.response.id'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The number of prompt tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.input_tokens'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The number of completion tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.output_tokens'; + +// ============================================================================= +// BASIC EMBEDDING SPAN INFORMATION +// ============================================================================= + +/** + * Basic embedding span information + * Embedding spans + * + * The number of tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-embedding-span-information + */ +export const AI_USAGE_TOKENS_ATTRIBUTE = 'ai.usage.tokens'; + +// ============================================================================= +// TOOL CALL SPANS +// ============================================================================= + +/** + * Tool call spans + * `ai.toolCall` span + * + * The name of the tool + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_NAME_ATTRIBUTE = 'ai.toolCall.name'; + +/** + * Tool call spans + * `ai.toolCall` span + * + * The id of the tool call + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_ID_ATTRIBUTE = 'ai.toolCall.id'; + +/** + * Tool call spans + * `ai.toolCall` span + * + * The parameters of the tool call + * @see 
https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_ARGS_ATTRIBUTE = 'ai.toolCall.args'; + +/** + * Tool call spans + * `ai.toolCall` span + * + * The result of the tool call + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_RESULT_ATTRIBUTE = 'ai.toolCall.result'; + +// ============================================================================= +// SPAN ATTRIBUTE OBJECTS +// ============================================================================= + +/** + * Attributes collected for `ai.generateText` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_GENERATE_TEXT_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_PROMPT: AI_PROMPT_ATTRIBUTE, + AI_RESPONSE_TEXT: AI_RESPONSE_TEXT_ATTRIBUTE, + AI_RESPONSE_TOOL_CALLS: AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + AI_RESPONSE_FINISH_REASON: AI_RESPONSE_FINISH_REASON_ATTRIBUTE, + AI_SETTINGS_MAX_STEPS: AI_SETTINGS_MAX_STEPS_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.generateText.doGenerate` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_GENERATE_TEXT_DO_GENERATE_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_PROMPT_FORMAT: AI_PROMPT_FORMAT_ATTRIBUTE, + AI_PROMPT_MESSAGES: AI_PROMPT_MESSAGES_ATTRIBUTE, + AI_PROMPT_TOOLS: AI_PROMPT_TOOLS_ATTRIBUTE, + AI_PROMPT_TOOL_CHOICE: AI_PROMPT_TOOL_CHOICE_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + // Call LLM span information + AI_RESPONSE_MODEL: AI_RESPONSE_MODEL_ATTRIBUTE, + AI_RESPONSE_ID: AI_RESPONSE_ID_ATTRIBUTE, + AI_RESPONSE_TIMESTAMP: AI_RESPONSE_TIMESTAMP_ATTRIBUTE, + // Semantic Conventions for GenAI operations + GEN_AI_SYSTEM: GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_REQUEST_MODEL: GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE: GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS: GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_FREQUENCY_PENALTY: GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_PRESENCE_PENALTY: GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_TOP_K: GEN_AI_REQUEST_TOP_K_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P: GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_REQUEST_STOP_SEQUENCES: GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS: 
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL: GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_ID: GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS: GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS: GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.streamText` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_STREAM_TEXT_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_PROMPT: AI_PROMPT_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.streamText.doStream` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_STREAM_TEXT_DO_STREAM_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_RESPONSE_MS_TO_FIRST_CHUNK: AI_RESPONSE_MS_TO_FIRST_CHUNK_ATTRIBUTE, + AI_RESPONSE_MS_TO_FINISH: AI_RESPONSE_MS_TO_FINISH_ATTRIBUTE, + AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND: AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + // Call LLM span information + AI_RESPONSE_MODEL: AI_RESPONSE_MODEL_ATTRIBUTE, + AI_RESPONSE_ID: AI_RESPONSE_ID_ATTRIBUTE, + AI_RESPONSE_TIMESTAMP: AI_RESPONSE_TIMESTAMP_ATTRIBUTE, + // Semantic Conventions for GenAI operations + GEN_AI_SYSTEM: GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_REQUEST_MODEL: GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE: GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS: GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_FREQUENCY_PENALTY: GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_PRESENCE_PENALTY: GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_TOP_K: GEN_AI_REQUEST_TOP_K_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P: GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_REQUEST_STOP_SEQUENCES: GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS: GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL: GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_ID: GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS: GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS: GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.generateObject` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + */ +export const AI_GENERATE_OBJECT_SPAN_ATTRIBUTES = { + 
OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_SCHEMA: AI_SCHEMA_ATTRIBUTE, + AI_SCHEMA_NAME: AI_SCHEMA_NAME_ATTRIBUTE, + AI_SCHEMA_DESCRIPTION: AI_SCHEMA_DESCRIPTION_ATTRIBUTE, + AI_RESPONSE_OBJECT: AI_RESPONSE_OBJECT_ATTRIBUTE, + AI_SETTINGS_MODE: AI_SETTINGS_MODE_ATTRIBUTE, + AI_SETTINGS_OUTPUT: AI_SETTINGS_OUTPUT_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.streamObject` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_STREAM_OBJECT_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_SCHEMA: AI_SCHEMA_ATTRIBUTE, + AI_SCHEMA_NAME: AI_SCHEMA_NAME_ATTRIBUTE, + AI_SCHEMA_DESCRIPTION: AI_SCHEMA_DESCRIPTION_ATTRIBUTE, + AI_RESPONSE_OBJECT: AI_RESPONSE_OBJECT_ATTRIBUTE, + AI_SETTINGS_MODE: AI_SETTINGS_MODE_ATTRIBUTE, + AI_SETTINGS_OUTPUT: AI_SETTINGS_OUTPUT_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.embed` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_EMBED_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_VALUE: AI_VALUE_ATTRIBUTE, + AI_EMBEDDING: AI_EMBEDDING_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + // Basic embedding span information + AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.embed.doEmbed` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_EMBED_DO_EMBED_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_VALUES: AI_VALUES_ATTRIBUTE, + AI_EMBEDDINGS: AI_EMBEDDINGS_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + 
AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE,
+  // Basic embedding span information
+  AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE,
+} as const;
+
+/**
+ * Attributes collected for `ai.embedMany` span
+ * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function
+ */
+export const AI_EMBED_MANY_SPAN_ATTRIBUTES = {
+  OPERATION_NAME: OPERATION_NAME_ATTRIBUTE,
+  AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE,
+  AI_VALUES: AI_VALUES_ATTRIBUTE,
+  AI_EMBEDDINGS: AI_EMBEDDINGS_ATTRIBUTE,
+  // Basic LLM span information
+  RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE,
+  AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE,
+  AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE,
+  AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE,
+  AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE,
+  AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
+  AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE,
+  // Basic embedding span information
+  AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE,
+} as const;
+
+/**
+ * Attributes collected for `ai.toolCall` span
+ * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans
+ */
+export const AI_TOOL_CALL_SPAN_ATTRIBUTES = {
+  OPERATION_NAME: OPERATION_NAME_ATTRIBUTE,
+  AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE,
+  AI_TOOL_CALL_NAME: AI_TOOL_CALL_NAME_ATTRIBUTE,
+  AI_TOOL_CALL_ID: AI_TOOL_CALL_ID_ATTRIBUTE,
+  AI_TOOL_CALL_ARGS: AI_TOOL_CALL_ARGS_ATTRIBUTE,
+  AI_TOOL_CALL_RESULT: AI_TOOL_CALL_RESULT_ATTRIBUTE,
+  // Basic LLM span information
+  RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE,
+  AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE,
+  AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE,
+  AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE,
+  AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE,
+  AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
+  AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE,
+} as const;
diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/constants.ts b/packages/cloudflare/src/integrations/tracing/vercelai/constants.ts
new file mode 100644
index 000000000000..fd4473c4c084
--- /dev/null
+++ b/packages/cloudflare/src/integrations/tracing/vercelai/constants.ts
@@ -0,0 +1 @@
+export const INTEGRATION_NAME = 'VercelAI';
diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/index.ts b/packages/cloudflare/src/integrations/tracing/vercelai/index.ts
new file mode 100644
index 000000000000..fc88ea37173e
--- /dev/null
+++ b/packages/cloudflare/src/integrations/tracing/vercelai/index.ts
@@ -0,0 +1,272 @@
+/**
+ * This is a copy of the Vercel AI integration from the node SDK.
+ *
+ * The only difference is that it does not use `@opentelemetry/instrumentation`
+ * because Cloudflare Workers do not support it.
+ *
+ * Therefore, we cannot automatically patch setting `experimental_telemetry: { isEnabled: true }`
+ * and users have to set this manually to get spans.
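+ * For example: `await generateText({ model, prompt, experimental_telemetry: { isEnabled: true } })`,
+ * as shown in the usage docs at the bottom of this file.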
+ */ + +/* eslint-disable @typescript-eslint/no-dynamic-delete */ +/* eslint-disable complexity */ +import type { Client, IntegrationFn } from '@sentry/core'; +import { defineIntegration, SEMANTIC_ATTRIBUTE_SENTRY_OP, spanToJSON } from '@sentry/core'; +import { addOriginToSpan } from '../../../utils/addOriginToSpan'; +import type { modulesIntegration } from '../../modules'; +import { + AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER_ATTRIBUTE, + AI_PROMPT_ATTRIBUTE, + AI_PROMPT_MESSAGES_ATTRIBUTE, + AI_PROMPT_TOOLS_ATTRIBUTE, + AI_RESPONSE_TEXT_ATTRIBUTE, + AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TOOL_CALL_ID_ATTRIBUTE, + AI_TOOL_CALL_NAME_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} from './ai_sdk_attributes'; +import { INTEGRATION_NAME } from './constants'; +import type { VercelAiOptions } from './types'; + +/** + * Determines if the integration should be forced based on environment and package availability. + * Returns true if the 'ai' package is available. + */ +function shouldForceIntegration(client: Client): boolean { + const modules = client.getIntegrationByName>('Modules'); + return !!modules?.getModules?.()?.ai; +} + +const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { + return { + name: INTEGRATION_NAME, + options, + afterAllSetup(client) { + function registerProcessors(): void { + client.on('spanStart', span => { + const { data: attributes, description: name } = spanToJSON(span); + + if (!name) { + return; + } + + // Tool call spans + // https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + if ( + attributes[AI_TOOL_CALL_NAME_ATTRIBUTE] && + attributes[AI_TOOL_CALL_ID_ATTRIBUTE] && + name === 'ai.toolCall' + ) { + addOriginToSpan(span, 'auto.vercelai.otel'); + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool'); + span.setAttribute('gen_ai.tool.call.id', attributes[AI_TOOL_CALL_ID_ATTRIBUTE]); + span.setAttribute('gen_ai.tool.name', attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]); + span.updateName(`execute_tool ${attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]}`); + return; + } + + // The AI and Provider must be defined for generate, stream, and embed spans. 
+ // The id of the model + const aiModelId = attributes[AI_MODEL_ID_ATTRIBUTE]; + // the provider of the model + const aiModelProvider = attributes[AI_MODEL_PROVIDER_ATTRIBUTE]; + if (typeof aiModelId !== 'string' || typeof aiModelProvider !== 'string' || !aiModelId || !aiModelProvider) { + return; + } + + addOriginToSpan(span, 'auto.vercelai.otel'); + + const nameWthoutAi = name.replace('ai.', ''); + span.setAttribute('ai.pipeline.name', nameWthoutAi); + span.updateName(nameWthoutAi); + + // If a Telemetry name is set and it is a pipeline span, use that as the operation name + const functionId = attributes[AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE]; + if (functionId && typeof functionId === 'string' && name.split('.').length - 1 === 1) { + span.updateName(`${nameWthoutAi} ${functionId}`); + span.setAttribute('ai.pipeline.name', functionId); + } + + if (attributes[AI_PROMPT_ATTRIBUTE]) { + span.setAttribute('gen_ai.prompt', attributes[AI_PROMPT_ATTRIBUTE]); + } + if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) { + span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]); + } + span.setAttribute('ai.streaming', name.includes('stream')); + + // Generate Spans + if (name === 'ai.generateText') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.generateText.doGenerate') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_text'); + span.updateName(`generate_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.streamText') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.streamText.doStream') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_text'); + span.updateName(`stream_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.generateObject') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.generateObject.doGenerate') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_object'); + span.updateName(`generate_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.streamObject') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.streamObject.doStream') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_object'); + span.updateName(`stream_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.embed') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.embed.doEmbed') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed'); + span.updateName(`embed ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.embedMany') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.embedMany.doEmbed') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed_many'); + span.updateName(`embed_many ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name.startsWith('ai.stream')) { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run'); + return; + } + }); + + client.addEventProcessor(event => { + if (event.type === 'transaction' && event.spans?.length) { + for (const span of event.spans) { + const { data: attributes, description: name } = span; + + if (!name || span.origin !== 
'auto.vercelai.otel') {
+                continue;
+              }
+
+              renameAttributeKey(
+                attributes,
+                AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
+                GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+              );
+              renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE);
+              if (
+                typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' &&
+                typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number'
+              ) {
+                attributes['gen_ai.usage.total_tokens'] =
+                  attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
+              }
+
+              // Rename AI SDK attributes to standardized gen_ai attributes
+              renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages');
+              renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');
+              renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls');
+              renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools');
+            }
+          }
+
+          return event;
+        });
+      }
+
+      // Auto-detect if we should force the integration when running with 'ai' package available
+      // Note that this can only be detected if the 'Modules' integration is available, and running in CJS mode
+      const shouldForce = options.force ?? shouldForceIntegration(client);
+
+      if (shouldForce) {
+        registerProcessors();
+      }
+    },
+  };
+}) satisfies IntegrationFn;
+
+/**
+ * Adds Sentry tracing instrumentation for the [ai](https://www.npmjs.com/package/ai) library.
+ *
+ * For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry).
+ *
+ * @example
+ * ```javascript
+ * const Sentry = require('@sentry/cloudflare');
+ *
+ * Sentry.init({
+ *   integrations: [Sentry.vercelAIIntegration()],
+ * });
+ * ```
+ *
+ * The integration automatically detects when to force registration in CommonJS environments
+ * when the 'ai' package is available. You can still manually set the `force` option if needed.
+ *
+ * Unlike the Vercel AI integration in the node SDK, this integration does not add tracing support to
+ * `ai` function calls. You need to enable collecting spans for a specific call by setting
+ * `experimental_telemetry.isEnabled` to `true` in the first argument of the function call.
+ *
+ * ```javascript
+ * const result = await generateText({
+ *   model: openai('gpt-4-turbo'),
+ *   experimental_telemetry: { isEnabled: true },
+ * });
+ * ```
+ *
+ * If you want to collect inputs and outputs for a specific call, you must specifically opt-in to each
+ * function call by setting `experimental_telemetry.recordInputs` and `experimental_telemetry.recordOutputs`
+ * to `true`.
+ *
+ * ```javascript
+ * const result = await generateText({
+ *   model: openai('gpt-4-turbo'),
+ *   experimental_telemetry: { isEnabled: true, recordInputs: true, recordOutputs: true },
+ * });
+ * ```
+ */
+export const vercelAIIntegration = defineIntegration(_vercelAIIntegration);
+
+/**
+ * Renames an attribute key in the provided attributes object if the old key exists.
+ * This function safely handles null and undefined values.
+ */
+function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, newKey: string): void {
+  if (attributes[oldKey] != null) {
+    attributes[newKey] = attributes[oldKey];
+    delete attributes[oldKey];
+  }
+}
diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/types.ts b/packages/cloudflare/src/integrations/tracing/vercelai/types.ts
new file mode 100644
index 000000000000..35cfeb33a112
--- /dev/null
+++ b/packages/cloudflare/src/integrations/tracing/vercelai/types.ts
@@ -0,0 +1,69 @@
+import type { Integration } from '@sentry/core';
+
+/**
+ * Telemetry configuration.
+ */
+export type TelemetrySettings = {
+  /**
+   * Enable or disable telemetry. Disabled by default while experimental.
+   */
+  isEnabled?: boolean;
+  /**
+   * Enable or disable input recording. Enabled by default.
+   *
+   * You might want to disable input recording to avoid recording sensitive
+   * information, to reduce data transfers, or to increase performance.
+   */
+  recordInputs?: boolean;
+  /**
+   * Enable or disable output recording. Enabled by default.
+   *
+   * You might want to disable output recording to avoid recording sensitive
+   * information, to reduce data transfers, or to increase performance.
+   */
+  recordOutputs?: boolean;
+  /**
+   * Identifier for this function. Used to group telemetry data by function.
+   */
+  functionId?: string;
+  /**
+   * Additional information to include in the telemetry data.
+   */
+  metadata?: Record<string, AttributeValue>;
+};
+
+/**
+ * Attribute values may be any non-nullish primitive value except an object.
+ *
+ * null or undefined attribute values are invalid and will result in undefined behavior.
+ */
+export declare type AttributeValue =
+  | string
+  | number
+  | boolean
+  | Array<null | undefined | string>
+  | Array<null | undefined | number>
+  | Array<null | undefined | boolean>;
+
+export interface VercelAiOptions {
+  /**
+   * Enable or disable input recording. Enabled if `sendDefaultPii` is `true`
+   * or if you set `isEnabled` to `true` in your ai SDK method telemetry settings
+   */
+  recordInputs?: boolean;
+  /**
+   * Enable or disable output recording. Enabled if `sendDefaultPii` is `true`
+   * or if you set `isEnabled` to `true` in your ai SDK method telemetry settings
+   */
+  recordOutputs?: boolean;
+
+  /**
+   * By default, the instrumentation will register span processors only when the ai package is used.
+   * If you want to register the span processors even when the ai package usage cannot be detected, you can set `force` to `true`.
+   */
+  force?: boolean;
+}
+
+export interface VercelAiIntegration extends Integration {
+  options: VercelAiOptions;
+}
diff --git a/packages/cloudflare/src/utils/addOriginToSpan.ts b/packages/cloudflare/src/utils/addOriginToSpan.ts
new file mode 100644
index 000000000000..2a23710fa7cf
--- /dev/null
+++ b/packages/cloudflare/src/utils/addOriginToSpan.ts
@@ -0,0 +1,8 @@
+import type { Span } from '@opentelemetry/api';
+import type { SpanOrigin } from '@sentry/core';
+import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
+
+/** Adds an origin to an OTEL Span. */
+export function addOriginToSpan(span: Span, origin: SpanOrigin): void {
+  span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin);
+}
diff --git a/packages/cloudflare/src/utils/commonjs.ts b/packages/cloudflare/src/utils/commonjs.ts
new file mode 100644
index 000000000000..23a9b97f9fc1
--- /dev/null
+++ b/packages/cloudflare/src/utils/commonjs.ts
@@ -0,0 +1,8 @@
+/**
+ * Detect CommonJS.
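+ * Heuristic sketch: a CommonJS runtime exposes a `module.exports` object, while bundled ESM code
+ * typically leaves `module` undefined.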
+ */
+export function isCjs(): boolean {
+  try {
+    return typeof module !== 'undefined' && typeof module.exports !== 'undefined';
+  } catch {
+    return false;
+  }
+}

From ecdd759c8bf1debe43462d32ebfba86ebce29487 Mon Sep 17 00:00:00 2001
From: Francesco Novy
Date: Wed, 25 Jun 2025 12:23:09 +0200
Subject: [PATCH 02/13] move stuff to core

---
 .../integrations/tracing/vercelai/index.ts    | 204 +-----------------
 packages/core/src/index.ts                    |   1 +
 .../src/utils/vercel-ai-attributes.ts}        |   0
 packages/core/src/utils/vercel-ai.ts          | 191 ++++++++++++++++
 .../integrations/tracing/vercelai/index.ts    | 192 +----------------
 5 files changed, 200 insertions(+), 388 deletions(-)
 rename packages/{node/src/integrations/tracing/vercelai/ai_sdk_attributes.ts => core/src/utils/vercel-ai-attributes.ts} (100%)
 create mode 100644 packages/core/src/utils/vercel-ai.ts

diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/index.ts b/packages/cloudflare/src/integrations/tracing/vercelai/index.ts
index fc88ea37173e..3a0c48f62135 100644
--- a/packages/cloudflare/src/integrations/tracing/vercelai/index.ts
+++ b/packages/cloudflare/src/integrations/tracing/vercelai/index.ts
@@ -8,29 +8,9 @@ * and users have to set this manually to get spans.
  */
 
-/* eslint-disable @typescript-eslint/no-dynamic-delete */
-/* eslint-disable complexity */
-import type { Client, IntegrationFn } from '@sentry/core';
-import { defineIntegration, SEMANTIC_ATTRIBUTE_SENTRY_OP, spanToJSON } from '@sentry/core';
-import { addOriginToSpan } from '../../../utils/addOriginToSpan';
+import type { Client, IntegrationFn } from '@sentry/core';
+import { defineIntegration, processVercelAiSpan } from '@sentry/core';
 import type { modulesIntegration } from '../../modules';
-import {
-  AI_MODEL_ID_ATTRIBUTE,
-  AI_MODEL_PROVIDER_ATTRIBUTE,
-  AI_PROMPT_ATTRIBUTE,
-  AI_PROMPT_MESSAGES_ATTRIBUTE,
-  AI_PROMPT_TOOLS_ATTRIBUTE,
-  AI_RESPONSE_TEXT_ATTRIBUTE,
-  AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
-  AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
-  AI_TOOL_CALL_ID_ATTRIBUTE,
-  AI_TOOL_CALL_NAME_ATTRIBUTE,
-  AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
-  AI_USAGE_PROMPT_TOKENS_ATTRIBUTE,
-  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
-  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
-} from './ai_sdk_attributes';
 import { INTEGRATION_NAME } from './constants';
 import type { VercelAiOptions } from './types';
 
 /**
  * Determines if the integration should be forced based on environment and package availability.
  * Returns true if the 'ai' package is available.
  */
-function shouldForceIntegration(client: Client): boolean {
+function shouldRunIntegration(client: Client): boolean {
   const modules = client.getIntegrationByName<ReturnType<typeof modulesIntegration>>('Modules');
   return !!modules?.getModules?.()?.ai;
 }
@@ -47,173 +27,12 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
   return {
     name: INTEGRATION_NAME,
     options,
-    afterAllSetup(client) {
+    setup(client) {
       function registerProcessors(): void {
-        client.on('spanStart', span => {
-          const { data: attributes, description: name } = spanToJSON(span);
-
-          if (!name) {
-            return;
-          }
-
-          // Tool call spans
-          // https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans
-          if (
-            attributes[AI_TOOL_CALL_NAME_ATTRIBUTE] &&
-            attributes[AI_TOOL_CALL_ID_ATTRIBUTE] &&
-            name === 'ai.toolCall'
-          ) {
-            addOriginToSpan(span, 'auto.vercelai.otel');
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool');
-            span.setAttribute('gen_ai.tool.call.id', attributes[AI_TOOL_CALL_ID_ATTRIBUTE]);
-            span.setAttribute('gen_ai.tool.name', attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]);
-            span.updateName(`execute_tool ${attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]}`);
-            return;
-          }
-
-          // The AI model ID and provider must be defined for generate, stream, and embed spans.
-          // (spans without both attributes are skipped and left unmodified)
-          // The id of the model
-          const aiModelId = attributes[AI_MODEL_ID_ATTRIBUTE];
-          // the provider of the model
-          const aiModelProvider = attributes[AI_MODEL_PROVIDER_ATTRIBUTE];
-          if (typeof aiModelId !== 'string' || typeof aiModelProvider !== 'string' || !aiModelId || !aiModelProvider) {
-            return;
-          }
-
-          addOriginToSpan(span, 'auto.vercelai.otel');
-
-          const nameWthoutAi = name.replace('ai.', '');
-          span.setAttribute('ai.pipeline.name', nameWthoutAi);
-          span.updateName(nameWthoutAi);
-
-          // If a Telemetry name is set and it is a pipeline span, use that as the operation name
-          const functionId = attributes[AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE];
-          if (functionId && typeof functionId === 'string' && name.split('.').length - 1 === 1) {
-            span.updateName(`${nameWthoutAi} ${functionId}`);
-            span.setAttribute('ai.pipeline.name', functionId);
-          }
-
-          if (attributes[AI_PROMPT_ATTRIBUTE]) {
-            span.setAttribute('gen_ai.prompt', attributes[AI_PROMPT_ATTRIBUTE]);
-          }
-          if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) {
-            span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]);
-          }
-          span.setAttribute('ai.streaming', name.includes('stream'));
-
-          // Generate Spans
-          if (name === 'ai.generateText') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
-            return;
-          }
-
-          if (name === 'ai.generateText.doGenerate') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_text');
-            span.updateName(`generate_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
-            return;
-          }
-
-          if (name === 'ai.streamText') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
-            return;
-          }
-
-          if (name === 'ai.streamText.doStream') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_text');
-            span.updateName(`stream_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
-            return;
-          }
-
-          if (name === 'ai.generateObject') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
-            return;
-          }
-
-          if (name === 'ai.generateObject.doGenerate') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_object');
-            span.updateName(`generate_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
-            return;
-          }
-
-          if (name === 'ai.streamObject') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
-            return;
-          }
-
-          if (name === 'ai.streamObject.doStream') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_object');
-            span.updateName(`stream_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
-            return;
-          }
-
-          if (name === 'ai.embed') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
-            return;
-          }
-
-          if (name === 'ai.embed.doEmbed') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed');
-            span.updateName(`embed ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
-            return;
-          }
-
-          if (name === 'ai.embedMany') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent');
-            return;
-          }
-
-          if (name === 'ai.embedMany.doEmbed') {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed_many');
-            span.updateName(`embed_many ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
-            return;
-          }
-
-          if (name.startsWith('ai.stream')) {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run');
-            return;
-          }
-        });
-
-        client.addEventProcessor(event => {
-          if (event.type === 'transaction' && event.spans?.length) {
-            for (const span of event.spans) {
-              const { data: attributes, description: name } = span;
-
-              if (!name || span.origin !== 'auto.vercelai.otel') {
-                continue;
-              }
-
-              renameAttributeKey(
-                attributes,
-                AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
-                GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
-              );
-              renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE);
-              if (
-                typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' &&
-                typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number'
-              ) {
-                attributes['gen_ai.usage.total_tokens'] =
-                  attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
-              }
-
-              // Rename AI SDK attributes to standardized gen_ai attributes
-              renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages');
-              renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');
-              renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls');
-              renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools');
-            }
-          }
-
-          return event;
-        });
+        client.on('spanEnd', processVercelAiSpan);
       }
 
-      // Auto-detect if we should force the integration when running with 'ai' package available
-      // Note that this can only be detected if the 'Modules' integration is available, and running in CJS mode
-      const shouldForce = options.force ?? shouldForceIntegration(client);
-
-      if (shouldForce) {
+      if (options.force || shouldRunIntegration(client)) {
         registerProcessors();
       }
     },
   };
 }) satisfies IntegrationFn;
 
 /**
  * Adds Sentry tracing instrumentation for the [ai](https://www.npmjs.com/package/ai) library.
  *
  * For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry).
  *
  * @example
  * ```javascript
  * const Sentry = require('@sentry/cloudflare');
  *
  * Sentry.init({
  *   integrations: [Sentry.vercelAIIntegration()],
  * });
  * ```
  *
  * The integration automatically detects when to force registration in CommonJS environments
  * when the 'ai' package is available. You can still manually set the `force` option if needed.
  *
  * Unlike the Vercel AI integration in the node SDK, this integration does not add tracing support to
  * `ai` function calls. You need to enable collecting spans for a specific call by setting
  * `experimental_telemetry.isEnabled` to `true` in the first argument of the function call.
  *
  * ```javascript
  * const result = await generateText({
  *   model: openai('gpt-4-turbo'),
  *   experimental_telemetry: { isEnabled: true },
  * });
  * ```
  *
  * If you want to collect inputs and outputs for a specific call, you must specifically opt-in to each
  * function call by setting `experimental_telemetry.recordInputs` and `experimental_telemetry.recordOutputs`
  * to `true`.
  *
  * ```javascript
  * const result = await generateText({
  *   model: openai('gpt-4-turbo'),
  *   experimental_telemetry: { isEnabled: true, recordInputs: true, recordOutputs: true },
  * });
  * ```
  */
 export const vercelAIIntegration = defineIntegration(_vercelAIIntegration);
-
-/**
- * Renames an attribute key in the provided attributes object if the old key exists.
- * This function safely handles null and undefined values.
- */ -function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, newKey: string): void { - if (attributes[oldKey] != null) { - attributes[newKey] = attributes[oldKey]; - delete attributes[oldKey]; - } -} diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index b4f09d89f381..84e853456c42 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -123,6 +123,7 @@ export { captureFeedback } from './feedback'; export type { ReportDialogOptions } from './report-dialog'; export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/exports'; export { consoleLoggingIntegration } from './logs/console-integration'; +export { processVercelAiSpan } from './utils/vercel-ai'; export type { FeatureFlag } from './utils/featureFlags'; export { diff --git a/packages/node/src/integrations/tracing/vercelai/ai_sdk_attributes.ts b/packages/core/src/utils/vercel-ai-attributes.ts similarity index 100% rename from packages/node/src/integrations/tracing/vercelai/ai_sdk_attributes.ts rename to packages/core/src/utils/vercel-ai-attributes.ts diff --git a/packages/core/src/utils/vercel-ai.ts b/packages/core/src/utils/vercel-ai.ts new file mode 100644 index 000000000000..30a76bd3f823 --- /dev/null +++ b/packages/core/src/utils/vercel-ai.ts @@ -0,0 +1,191 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../semanticAttributes'; +import type { Span, SpanAttributes, SpanOrigin } from '../types-hoist/span'; +import { spanToJSON } from './spanUtils'; +import { + AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER_ATTRIBUTE, + AI_PROMPT_ATTRIBUTE, + AI_PROMPT_MESSAGES_ATTRIBUTE, + AI_PROMPT_TOOLS_ATTRIBUTE, + AI_RESPONSE_TEXT_ATTRIBUTE, + AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TOOL_CALL_ID_ATTRIBUTE, + AI_TOOL_CALL_NAME_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} from './vercel-ai-attributes'; + +function addOriginToSpan(span: Span, origin: SpanOrigin): void { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin); +} + +/** + * Post-process spans emitted by the Vercel AI SDK. + * This is supposed to be used in `client.on('spanEnd', ...)`, to ensure all data is already finished. + */ +export function processVercelAiSpan(span: Span): void { + const { data: attributes, description: name } = spanToJSON(span); + + if (!name) { + return; + } + + // Tool call spans + // https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + if (attributes[AI_TOOL_CALL_NAME_ATTRIBUTE] && attributes[AI_TOOL_CALL_ID_ATTRIBUTE] && name === 'ai.toolCall') { + processToolCallSpan(span, attributes); + sharedProcessSpan(span, attributes); + return; + } + + // The AI and Provider must be defined for generate, stream, and embed spans. + // The id of the model + const aiModelId = attributes[AI_MODEL_ID_ATTRIBUTE]; + // the provider of the model + const aiModelProvider = attributes[AI_MODEL_PROVIDER_ATTRIBUTE]; + if (typeof aiModelId !== 'string' || typeof aiModelProvider !== 'string' || !aiModelId || !aiModelProvider) { + return; + } + + processGenerateSpan(span, name, attributes); + sharedProcessSpan(span, attributes); +} + +/** + * Renames an attribute key in the provided attributes object if the old key exists. + * This function safely handles null and undefined values.
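+ * @example
+ * // Illustrative sketch with made-up values: the rename moves the value in place and drops the old key.
+ * // const attrs = { 'ai.usage.promptTokens': 10 };
+ * // renameAttributeKey(attrs, 'ai.usage.promptTokens', 'gen_ai.usage.input_tokens');
+ * // attrs is now { 'gen_ai.usage.input_tokens': 10 }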
+ */ +function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, newKey: string): void { + if (attributes[oldKey] != null) { + attributes[newKey] = attributes[oldKey]; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[oldKey]; + } +} + +function processToolCallSpan(span: Span, attributes: SpanAttributes): void { + addOriginToSpan(span, 'auto.vercelai.otel'); + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool'); + span.setAttribute('gen_ai.tool.call.id', attributes[AI_TOOL_CALL_ID_ATTRIBUTE]); + span.setAttribute('gen_ai.tool.name', attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]); + span.updateName(`execute_tool ${attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]}`); +} + +function processGenerateSpan(span: Span, name: string, attributes: SpanAttributes): void { + addOriginToSpan(span, 'auto.vercelai.otel'); + + const nameWthoutAi = name.replace('ai.', ''); + span.setAttribute('ai.pipeline.name', nameWthoutAi); + span.updateName(nameWthoutAi); + + // If a Telemetry name is set and it is a pipeline span, use that as the operation name + const functionId = attributes[AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE]; + if (functionId && typeof functionId === 'string' && name.split('.').length - 1 === 1) { + span.updateName(`${nameWthoutAi} ${functionId}`); + span.setAttribute('ai.pipeline.name', functionId); + } + + if (attributes[AI_PROMPT_ATTRIBUTE]) { + span.setAttribute('gen_ai.prompt', attributes[AI_PROMPT_ATTRIBUTE]); + } + if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) { + span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]); + } + span.setAttribute('ai.streaming', name.includes('stream')); + + // Generate Spans + if (name === 'ai.generateText') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.generateText.doGenerate') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_text'); + span.updateName(`generate_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.streamText') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.streamText.doStream') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_text'); + span.updateName(`stream_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.generateObject') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.generateObject.doGenerate') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_object'); + span.updateName(`generate_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.streamObject') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.streamObject.doStream') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_object'); + span.updateName(`stream_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.embed') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 'ai.embed.doEmbed') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed'); + span.updateName(`embed ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name === 'ai.embedMany') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); + return; + } + + if (name === 
'ai.embedMany.doEmbed') { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed_many'); + span.updateName(`embed_many ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); + return; + } + + if (name.startsWith('ai.stream')) { + span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run'); + return; + } +} + +// Processing for both tool call and non-tool call spans +function sharedProcessSpan(span: Span, attributes: SpanAttributes): void { + renameAttributeKey(attributes, AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE); + renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE); + + if ( + typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && + typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' + ) { + attributes['gen_ai.usage.total_tokens'] = + attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; + } + + // Rename AI SDK attributes to standardized gen_ai attributes + renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages'); + renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); + renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); + renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); +} diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts index 8ba6cb5af905..28d93141ab41 100644 --- a/packages/node/src/integrations/tracing/vercelai/index.ts +++ b/packages/node/src/integrations/tracing/vercelai/index.ts @@ -1,27 +1,7 @@ -/* eslint-disable @typescript-eslint/no-dynamic-delete */ -/* eslint-disable complexity */ import type { Client, IntegrationFn } from '@sentry/core'; -import { defineIntegration, SEMANTIC_ATTRIBUTE_SENTRY_OP, spanToJSON } from '@sentry/core'; +import { defineIntegration, processVercelAiSpan } from '@sentry/core'; import { generateInstrumentOnce } from '../../../otel/instrument'; -import { addOriginToSpan } from '../../../utils/addOriginToSpan'; import type { modulesIntegration } from '../../modules'; -import { - AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER_ATTRIBUTE, - AI_PROMPT_ATTRIBUTE, - AI_PROMPT_MESSAGES_ATTRIBUTE, - AI_PROMPT_TOOLS_ATTRIBUTE, - AI_RESPONSE_TEXT_ATTRIBUTE, - AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TOOL_CALL_ID_ATTRIBUTE, - AI_TOOL_CALL_NAME_ATTRIBUTE, - AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, - GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, - GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, -} from './ai_sdk_attributes'; import { INTEGRATION_NAME } from './constants'; import { SentryVercelAiInstrumentation } from './instrumentation'; import type { VercelAiOptions } from './types'; @@ -48,164 +28,7 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { }, afterAllSetup(client) { function registerProcessors(): void { - client.on('spanStart', span => { - const { data: attributes, description: name } = spanToJSON(span); - - if (!name) { - return; - } - - // Tool call spans - // https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans - if ( - attributes[AI_TOOL_CALL_NAME_ATTRIBUTE] && - attributes[AI_TOOL_CALL_ID_ATTRIBUTE] && - name === 'ai.toolCall' - ) { - addOriginToSpan(span, 'auto.vercelai.otel'); - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool'); - 
span.setAttribute('gen_ai.tool.call.id', attributes[AI_TOOL_CALL_ID_ATTRIBUTE]); - span.setAttribute('gen_ai.tool.name', attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]); - span.updateName(`execute_tool ${attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]}`); - return; - } - - // The AI and Provider must be defined for generate, stream, and embed spans. - // The id of the model - const aiModelId = attributes[AI_MODEL_ID_ATTRIBUTE]; - // the provider of the model - const aiModelProvider = attributes[AI_MODEL_PROVIDER_ATTRIBUTE]; - if (typeof aiModelId !== 'string' || typeof aiModelProvider !== 'string' || !aiModelId || !aiModelProvider) { - return; - } - - addOriginToSpan(span, 'auto.vercelai.otel'); - - const nameWthoutAi = name.replace('ai.', ''); - span.setAttribute('ai.pipeline.name', nameWthoutAi); - span.updateName(nameWthoutAi); - - // If a Telemetry name is set and it is a pipeline span, use that as the operation name - const functionId = attributes[AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE]; - if (functionId && typeof functionId === 'string' && name.split('.').length - 1 === 1) { - span.updateName(`${nameWthoutAi} ${functionId}`); - span.setAttribute('ai.pipeline.name', functionId); - } - - if (attributes[AI_PROMPT_ATTRIBUTE]) { - span.setAttribute('gen_ai.prompt', attributes[AI_PROMPT_ATTRIBUTE]); - } - if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) { - span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]); - } - span.setAttribute('ai.streaming', name.includes('stream')); - - // Generate Spans - if (name === 'ai.generateText') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); - return; - } - - if (name === 'ai.generateText.doGenerate') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_text'); - span.updateName(`generate_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); - return; - } - - if (name === 'ai.streamText') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); - return; - } - - if (name === 'ai.streamText.doStream') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_text'); - span.updateName(`stream_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); - return; - } - - if (name === 'ai.generateObject') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); - return; - } - - if (name === 'ai.generateObject.doGenerate') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_object'); - span.updateName(`generate_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); - return; - } - - if (name === 'ai.streamObject') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); - return; - } - - if (name === 'ai.streamObject.doStream') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_object'); - span.updateName(`stream_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); - return; - } - - if (name === 'ai.embed') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); - return; - } - - if (name === 'ai.embed.doEmbed') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed'); - span.updateName(`embed ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); - return; - } - - if (name === 'ai.embedMany') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.invoke_agent'); - return; - } - - if (name === 'ai.embedMany.doEmbed') { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed_many'); - span.updateName(`embed_many ${attributes[AI_MODEL_ID_ATTRIBUTE]}`); - return; - } - - if 
(name.startsWith('ai.stream')) { - span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run'); - return; - } - }); - - client.addEventProcessor(event => { - if (event.type === 'transaction' && event.spans?.length) { - for (const span of event.spans) { - const { data: attributes, description: name } = span; - - if (!name || span.origin !== 'auto.vercelai.otel') { - continue; - } - - renameAttributeKey( - attributes, - AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, - ); - renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE); - if ( - typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && - typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' - ) { - attributes['gen_ai.usage.total_tokens'] = - attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; - } - - // Rename AI SDK attributes to standardized gen_ai attributes - renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages'); - renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); - renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); - renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); - } - } - - return event; - }); + client.on('spanEnd', processVercelAiSpan); } // Auto-detect if we should force the integration when running with 'ai' package available @@ -260,14 +83,3 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { * }); */ export const vercelAIIntegration = defineIntegration(_vercelAIIntegration); - -/** - * Renames an attribute key in the provided attributes object if the old key exists. - * This function safely handles null and undefined values. - */ -function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, newKey: string): void { - if (attributes[oldKey] != null) { - attributes[newKey] = attributes[oldKey]; - delete attributes[oldKey]; - } -} From 2f9bf954150a82aa2a6dd9a5128973c7034a69d9 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Wed, 25 Jun 2025 12:52:22 +0200 Subject: [PATCH 03/13] move stuff around and cleanup --- .../{vercelai/index.ts => vercelai.ts} | 18 +- .../tracing/vercelai/ai_sdk_attributes.ts | 794 ------------------ .../tracing/vercelai/constants.ts | 1 - .../integrations/tracing/vercelai/types.ts | 69 -- packages/cloudflare/src/sdk.ts | 3 + 5 files changed, 16 insertions(+), 869 deletions(-) rename packages/cloudflare/src/integrations/tracing/{vercelai/index.ts => vercelai.ts} (83%) delete mode 100644 packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts delete mode 100644 packages/cloudflare/src/integrations/tracing/vercelai/constants.ts delete mode 100644 packages/cloudflare/src/integrations/tracing/vercelai/types.ts diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/index.ts b/packages/cloudflare/src/integrations/tracing/vercelai.ts similarity index 83% rename from packages/cloudflare/src/integrations/tracing/vercelai/index.ts rename to packages/cloudflare/src/integrations/tracing/vercelai.ts index 3a0c48f62135..1dbc8149aaa1 100644 --- a/packages/cloudflare/src/integrations/tracing/vercelai/index.ts +++ b/packages/cloudflare/src/integrations/tracing/vercelai.ts @@ -8,11 +8,19 @@ * and users have to manually these to get spans.
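 * For example (a sketch; `generateText` and `experimental_telemetry` are the `ai` package's own API):
 * generateText({ model, prompt, experimental_telemetry: { isEnabled: true } })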
*/ -import type { Client, IntegrationFn} from '@sentry/core'; -import { defineIntegration,processVercelAiSpan } from '@sentry/core'; -import type { modulesIntegration } from '../../modules'; -import { INTEGRATION_NAME } from './constants'; -import type { VercelAiOptions } from './types'; +import type { Client, IntegrationFn } from '@sentry/core'; +import { defineIntegration, processVercelAiSpan } from '@sentry/core'; +import type { modulesIntegration } from '../modules'; + +interface VercelAiOptions { + /** + * By default, the instrumentation will register span processors only when the ai package is used. + * If you want to register the span processors even when the ai package usage cannot be detected, you can set `force` to `true`. + */ + force?: boolean; +} + +const INTEGRATION_NAME = 'VercelAI'; /** * Determines if the integration should be forced based on environment and package availability. diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts b/packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts deleted file mode 100644 index 8d7b6913a636..000000000000 --- a/packages/cloudflare/src/integrations/tracing/vercelai/ai_sdk_attributes.ts +++ /dev/null @@ -1,794 +0,0 @@ -/** - * AI SDK Telemetry Attributes - * Based on https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data - */ - -// ============================================================================= -// COMMON ATTRIBUTES -// ============================================================================= - -/** - * Common attribute for operation name across all functions and spans - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data - */ -export const OPERATION_NAME_ATTRIBUTE = 'operation.name'; - -/** - * Common attribute for AI operation ID across all functions and spans - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data - */ -export const AI_OPERATION_ID_ATTRIBUTE = 'ai.operationId'; - -// ============================================================================= -// SHARED ATTRIBUTES -// ============================================================================= - -/** - * `generateText` function - `ai.generateText` span - * `streamText` function - `ai.streamText` span - * - * The prompt that was used when calling the function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function - */ -export const AI_PROMPT_ATTRIBUTE = 'ai.prompt'; - -/** - * `generateObject` function - `ai.generateObject` span - * `streamObject` function - `ai.streamObject` span - * - * The JSON schema version of the schema that was passed into the function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function - */ -export const AI_SCHEMA_ATTRIBUTE = 'ai.schema'; - -/** - * `generateObject` function - `ai.generateObject` span - * `streamObject` function - `ai.streamObject` span - * - * The name of the schema that was passed into the function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function - */ -export const AI_SCHEMA_NAME_ATTRIBUTE = 'ai.schema.name'; - -/** - * `generateObject` function - `ai.generateObject` span - * `streamObject` function - `ai.streamObject` span - * - * The description of the schema that was passed into the function - * @see 
https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function - */ -export const AI_SCHEMA_DESCRIPTION_ATTRIBUTE = 'ai.schema.description'; - -/** - * `generateObject` function - `ai.generateObject` span - * `streamObject` function - `ai.streamObject` span - * - * The object that was generated (stringified JSON) - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function - */ -export const AI_RESPONSE_OBJECT_ATTRIBUTE = 'ai.response.object'; - -/** - * `generateObject` function - `ai.generateObject` span - * `streamObject` function - `ai.streamObject` span - * - * The object generation mode, e.g. `json` - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function - */ -export const AI_SETTINGS_MODE_ATTRIBUTE = 'ai.settings.mode'; - -/** - * `generateObject` function - `ai.generateObject` span - * `streamObject` function - `ai.streamObject` span - * - * The output type that was used, e.g. `object` or `no-schema` - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function - */ -export const AI_SETTINGS_OUTPUT_ATTRIBUTE = 'ai.settings.output'; - -/** - * `embed` function - `ai.embed.doEmbed` span - * `embedMany` function - `ai.embedMany` span - * - * The values that were passed into the function (array) - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function - */ -export const AI_VALUES_ATTRIBUTE = 'ai.values'; - -/** - * `embed` function - `ai.embed.doEmbed` span - * `embedMany` function - `ai.embedMany` span - * - * An array of JSON-stringified embeddings - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function - */ -export const AI_EMBEDDINGS_ATTRIBUTE = 'ai.embeddings'; - -// ============================================================================= -// GENERATETEXT FUNCTION - UNIQUE ATTRIBUTES -// ============================================================================= - -/** - * `generateText` function - `ai.generateText` span - * - * The text that was generated - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_RESPONSE_TEXT_ATTRIBUTE = 'ai.response.text'; - -/** - * `generateText` function - `ai.generateText` span - * - * The tool calls that were made as part of the generation (stringified JSON) - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_RESPONSE_TOOL_CALLS_ATTRIBUTE = 'ai.response.toolCalls'; - -/** - * `generateText` function - `ai.generateText` span - * - * The reason why the generation finished - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_RESPONSE_FINISH_REASON_ATTRIBUTE = 'ai.response.finishReason'; - -/** - * `generateText` function - `ai.generateText` span - * - * The maximum number of steps that were set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_SETTINGS_MAX_STEPS_ATTRIBUTE = 'ai.settings.maxSteps'; - -/** - * `generateText` function - `ai.generateText.doGenerate` span - * - * The format of the prompt - * @see 
https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_PROMPT_FORMAT_ATTRIBUTE = 'ai.prompt.format'; - -/** - * `generateText` function - `ai.generateText.doGenerate` span - * - * The messages that were passed into the provider - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_PROMPT_MESSAGES_ATTRIBUTE = 'ai.prompt.messages'; - -/** - * `generateText` function - `ai.generateText.doGenerate` span - * - * Array of stringified tool definitions - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_PROMPT_TOOLS_ATTRIBUTE = 'ai.prompt.tools'; - -/** - * `generateText` function - `ai.generateText.doGenerate` span - * - * The stringified tool choice setting (JSON) - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_PROMPT_TOOL_CHOICE_ATTRIBUTE = 'ai.prompt.toolChoice'; - -// ============================================================================= -// STREAMTEXT FUNCTION - UNIQUE ATTRIBUTES -// ============================================================================= - -/** - * `streamText` function - `ai.streamText.doStream` span - * - * The time it took to receive the first chunk in milliseconds - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function - */ -export const AI_RESPONSE_MS_TO_FIRST_CHUNK_ATTRIBUTE = 'ai.response.msToFirstChunk'; - -/** - * `streamText` function - `ai.streamText.doStream` span - * - * The time it took to receive the finish part of the LLM stream in milliseconds - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function - */ -export const AI_RESPONSE_MS_TO_FINISH_ATTRIBUTE = 'ai.response.msToFinish'; - -/** - * `streamText` function - `ai.streamText.doStream` span - * - * The average completion tokens per second - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function - */ -export const AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND_ATTRIBUTE = 'ai.response.avgCompletionTokensPerSecond'; - -// ============================================================================= -// EMBED FUNCTION - UNIQUE ATTRIBUTES -// ============================================================================= - -/** - * `embed` function - `ai.embed` span - * - * The value that was passed into the `embed` function - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function - */ -export const AI_VALUE_ATTRIBUTE = 'ai.value'; - -/** - * `embed` function - `ai.embed` span - * - * A JSON-stringified embedding - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function - */ -export const AI_EMBEDDING_ATTRIBUTE = 'ai.embedding'; - -// ============================================================================= -// BASIC LLM SPAN INFORMATION -// ============================================================================= - -/** - * Basic LLM span information - * Multiple spans - * - * The functionId that was set through `telemetry.functionId` - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const RESOURCE_NAME_ATTRIBUTE = 'resource.name'; - -/** - * Basic LLM span information - * Multiple spans - * - * The id of the model - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_MODEL_ID_ATTRIBUTE = 'ai.model.id'; - -/** - * Basic LLM span information - * Multiple spans - * - * The provider of the model - * @see 
https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_MODEL_PROVIDER_ATTRIBUTE = 'ai.model.provider'; - -/** - * Basic LLM span information - * Multiple spans - * - * The request headers that were passed in through `headers` - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_REQUEST_HEADERS_ATTRIBUTE = 'ai.request.headers'; - -/** - * Basic LLM span information - * Multiple spans - * - * The maximum number of retries that were set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_SETTINGS_MAX_RETRIES_ATTRIBUTE = 'ai.settings.maxRetries'; - -/** - * Basic LLM span information - * Multiple spans - * - * The functionId that was set through `telemetry.functionId` - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE = 'ai.telemetry.functionId'; - -/** - * Basic LLM span information - * Multiple spans - * - * The metadata that was passed in through `telemetry.metadata` - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_TELEMETRY_METADATA_ATTRIBUTE = 'ai.telemetry.metadata'; - -/** - * Basic LLM span information - * Multiple spans - * - * The number of completion tokens that were used - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE = 'ai.usage.completionTokens'; - -/** - * Basic LLM span information - * Multiple spans - * - * The number of prompt tokens that were used - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information - */ -export const AI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'ai.usage.promptTokens'; - -// ============================================================================= -// CALL LLM SPAN INFORMATION -// ============================================================================= - -/** - * Call LLM span information - * Individual LLM call spans - * - * The model that was used to generate the response - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const AI_RESPONSE_MODEL_ATTRIBUTE = 'ai.response.model'; - -/** - * Call LLM span information - * Individual LLM call spans - * - * The id of the response - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const AI_RESPONSE_ID_ATTRIBUTE = 'ai.response.id'; - -/** - * Call LLM span information - * Individual LLM call spans - * - * The timestamp of the response - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const AI_RESPONSE_TIMESTAMP_ATTRIBUTE = 'ai.response.timestamp'; - -// ============================================================================= -// SEMANTIC CONVENTIONS FOR GENAI OPERATIONS -// ============================================================================= - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The provider that was used - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The model that was requested - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'; - -/** - 
* Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The temperature that was set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE = 'gen_ai.request.temperature'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The maximum number of tokens that were set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE = 'gen_ai.request.max_tokens'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The frequency penalty that was set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE = 'gen_ai.request.frequency_penalty'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The presence penalty that was set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE = 'gen_ai.request.presence_penalty'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The topK parameter value that was set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_TOP_K_ATTRIBUTE = 'gen_ai.request.top_k'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The topP parameter value that was set - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_TOP_P_ATTRIBUTE = 'gen_ai.request.top_p'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The stop sequences - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE = 'gen_ai.request.stop_sequences'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The finish reasons that were returned by the provider - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE = 'gen_ai.response.finish_reasons'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The model that was used to generate the response - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_RESPONSE_MODEL_ATTRIBUTE = 'gen_ai.response.model'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The id of the response - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_RESPONSE_ID_ATTRIBUTE = 'gen_ai.response.id'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The number of prompt tokens that were used - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.input_tokens'; - -/** - * Semantic Conventions for GenAI operations - * Individual LLM call spans - * - * The number of completion tokens that were used - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information - */ -export const GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.output_tokens'; - -// 
============================================================================= -// BASIC EMBEDDING SPAN INFORMATION -// ============================================================================= - -/** - * Basic embedding span information - * Embedding spans - * - * The number of tokens that were used - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-embedding-span-information - */ -export const AI_USAGE_TOKENS_ATTRIBUTE = 'ai.usage.tokens'; - -// ============================================================================= -// TOOL CALL SPANS -// ============================================================================= - -/** - * Tool call spans - * `ai.toolCall` span - * - * The name of the tool - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans - */ -export const AI_TOOL_CALL_NAME_ATTRIBUTE = 'ai.toolCall.name'; - -/** - * Tool call spans - * `ai.toolCall` span - * - * The id of the tool call - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans - */ -export const AI_TOOL_CALL_ID_ATTRIBUTE = 'ai.toolCall.id'; - -/** - * Tool call spans - * `ai.toolCall` span - * - * The parameters of the tool call - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans - */ -export const AI_TOOL_CALL_ARGS_ATTRIBUTE = 'ai.toolCall.args'; - -/** - * Tool call spans - * `ai.toolCall` span - * - * The result of the tool call - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans - */ -export const AI_TOOL_CALL_RESULT_ATTRIBUTE = 'ai.toolCall.result'; - -// ============================================================================= -// SPAN ATTRIBUTE OBJECTS -// ============================================================================= - -/** - * Attributes collected for `ai.generateText` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_GENERATE_TEXT_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_PROMPT: AI_PROMPT_ATTRIBUTE, - AI_RESPONSE_TEXT: AI_RESPONSE_TEXT_ATTRIBUTE, - AI_RESPONSE_TOOL_CALLS: AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, - AI_RESPONSE_FINISH_REASON: AI_RESPONSE_FINISH_REASON_ATTRIBUTE, - AI_SETTINGS_MAX_STEPS: AI_SETTINGS_MAX_STEPS_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.generateText.doGenerate` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function - */ -export const AI_GENERATE_TEXT_DO_GENERATE_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_PROMPT_FORMAT: AI_PROMPT_FORMAT_ATTRIBUTE, - AI_PROMPT_MESSAGES: AI_PROMPT_MESSAGES_ATTRIBUTE, - AI_PROMPT_TOOLS: AI_PROMPT_TOOLS_ATTRIBUTE, - AI_PROMPT_TOOL_CHOICE: AI_PROMPT_TOOL_CHOICE_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - 
AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, - // Call LLM span information - AI_RESPONSE_MODEL: AI_RESPONSE_MODEL_ATTRIBUTE, - AI_RESPONSE_ID: AI_RESPONSE_ID_ATTRIBUTE, - AI_RESPONSE_TIMESTAMP: AI_RESPONSE_TIMESTAMP_ATTRIBUTE, - // Semantic Conventions for GenAI operations - GEN_AI_SYSTEM: GEN_AI_SYSTEM_ATTRIBUTE, - GEN_AI_REQUEST_MODEL: GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_REQUEST_TEMPERATURE: GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, - GEN_AI_REQUEST_MAX_TOKENS: GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - GEN_AI_REQUEST_FREQUENCY_PENALTY: GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_PRESENCE_PENALTY: GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_TOP_K: GEN_AI_REQUEST_TOP_K_ATTRIBUTE, - GEN_AI_REQUEST_TOP_P: GEN_AI_REQUEST_TOP_P_ATTRIBUTE, - GEN_AI_REQUEST_STOP_SEQUENCES: GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE, - GEN_AI_RESPONSE_FINISH_REASONS: GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, - GEN_AI_RESPONSE_MODEL: GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_ID: GEN_AI_RESPONSE_ID_ATTRIBUTE, - GEN_AI_USAGE_INPUT_TOKENS: GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, - GEN_AI_USAGE_OUTPUT_TOKENS: GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.streamText` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function - */ -export const AI_STREAM_TEXT_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_PROMPT: AI_PROMPT_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.streamText.doStream` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function - */ -export const AI_STREAM_TEXT_DO_STREAM_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_RESPONSE_MS_TO_FIRST_CHUNK: AI_RESPONSE_MS_TO_FIRST_CHUNK_ATTRIBUTE, - AI_RESPONSE_MS_TO_FINISH: AI_RESPONSE_MS_TO_FINISH_ATTRIBUTE, - AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND: AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, - // Call LLM span information - AI_RESPONSE_MODEL: AI_RESPONSE_MODEL_ATTRIBUTE, - AI_RESPONSE_ID: AI_RESPONSE_ID_ATTRIBUTE, - AI_RESPONSE_TIMESTAMP: AI_RESPONSE_TIMESTAMP_ATTRIBUTE, - // 
Semantic Conventions for GenAI operations - GEN_AI_SYSTEM: GEN_AI_SYSTEM_ATTRIBUTE, - GEN_AI_REQUEST_MODEL: GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_REQUEST_TEMPERATURE: GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, - GEN_AI_REQUEST_MAX_TOKENS: GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - GEN_AI_REQUEST_FREQUENCY_PENALTY: GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_PRESENCE_PENALTY: GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_TOP_K: GEN_AI_REQUEST_TOP_K_ATTRIBUTE, - GEN_AI_REQUEST_TOP_P: GEN_AI_REQUEST_TOP_P_ATTRIBUTE, - GEN_AI_REQUEST_STOP_SEQUENCES: GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE, - GEN_AI_RESPONSE_FINISH_REASONS: GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, - GEN_AI_RESPONSE_MODEL: GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_ID: GEN_AI_RESPONSE_ID_ATTRIBUTE, - GEN_AI_USAGE_INPUT_TOKENS: GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, - GEN_AI_USAGE_OUTPUT_TOKENS: GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.generateObject` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function - */ -export const AI_GENERATE_OBJECT_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_SCHEMA: AI_SCHEMA_ATTRIBUTE, - AI_SCHEMA_NAME: AI_SCHEMA_NAME_ATTRIBUTE, - AI_SCHEMA_DESCRIPTION: AI_SCHEMA_DESCRIPTION_ATTRIBUTE, - AI_RESPONSE_OBJECT: AI_RESPONSE_OBJECT_ATTRIBUTE, - AI_SETTINGS_MODE: AI_SETTINGS_MODE_ATTRIBUTE, - AI_SETTINGS_OUTPUT: AI_SETTINGS_OUTPUT_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.streamObject` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function - */ -export const AI_STREAM_OBJECT_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_SCHEMA: AI_SCHEMA_ATTRIBUTE, - AI_SCHEMA_NAME: AI_SCHEMA_NAME_ATTRIBUTE, - AI_SCHEMA_DESCRIPTION: AI_SCHEMA_DESCRIPTION_ATTRIBUTE, - AI_RESPONSE_OBJECT: AI_RESPONSE_OBJECT_ATTRIBUTE, - AI_SETTINGS_MODE: AI_SETTINGS_MODE_ATTRIBUTE, - AI_SETTINGS_OUTPUT: AI_SETTINGS_OUTPUT_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.embed` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function - */ -export const AI_EMBED_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_VALUE: AI_VALUE_ATTRIBUTE, - AI_EMBEDDING: AI_EMBEDDING_ATTRIBUTE, - // Basic LLM span information - 
RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - // Basic embedding span information - AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.embed.doEmbed` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function - */ -export const AI_EMBED_DO_EMBED_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_VALUES: AI_VALUES_ATTRIBUTE, - AI_EMBEDDINGS: AI_EMBEDDINGS_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - // Basic embedding span information - AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.embedMany` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function - */ -export const AI_EMBED_MANY_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_VALUES: AI_VALUES_ATTRIBUTE, - AI_EMBEDDINGS: AI_EMBEDDINGS_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, - // Basic embedding span information - AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE, -} as const; - -/** - * Attributes collected for `ai.toolCall` span - * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans - */ -export const AI_TOOL_CALL_SPAN_ATTRIBUTES = { - OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, - AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, - AI_TOOL_CALL_NAME: AI_TOOL_CALL_NAME_ATTRIBUTE, - AI_TOOL_CALL_ID: AI_TOOL_CALL_ID_ATTRIBUTE, - AI_TOOL_CALL_ARGS: AI_TOOL_CALL_ARGS_ATTRIBUTE, - AI_TOOL_CALL_RESULT: AI_TOOL_CALL_RESULT_ATTRIBUTE, - // Basic LLM span information - RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, - AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, - AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, - AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, - AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, - AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, - AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, -} as const; diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/constants.ts b/packages/cloudflare/src/integrations/tracing/vercelai/constants.ts deleted file mode 100644 index fd4473c4c084..000000000000 --- a/packages/cloudflare/src/integrations/tracing/vercelai/constants.ts +++ /dev/null @@ -1 +0,0 @@ -export const INTEGRATION_NAME = 'VercelAI'; diff --git a/packages/cloudflare/src/integrations/tracing/vercelai/types.ts b/packages/cloudflare/src/integrations/tracing/vercelai/types.ts deleted file mode 100644 
index 35cfeb33a112..000000000000 --- a/packages/cloudflare/src/integrations/tracing/vercelai/types.ts +++ /dev/null @@ -1,69 +0,0 @@ -import type { Integration } from '@sentry/core'; - -/** - * Telemetry configuration. - */ -export type TelemetrySettings = { - /** - * Enable or disable telemetry. Disabled by default while experimental. - */ - isEnabled?: boolean; - /** - * Enable or disable input recording. Enabled by default. - * - * You might want to disable input recording to avoid recording sensitive - * information, to reduce data transfers, or to increase performance. - */ - recordInputs?: boolean; - /** - * Enable or disable output recording. Enabled by default. - * - * You might want to disable output recording to avoid recording sensitive - * information, to reduce data transfers, or to increase performance. - */ - recordOutputs?: boolean; - /** - * Identifier for this function. Used to group telemetry data by function. - */ - functionId?: string; - /** - * Additional information to include in the telemetry data. - */ - metadata?: Record<string, AttributeValue>; -}; - -/** - * Attribute values may be any non-nullish primitive value except an object. - * - * null or undefined attribute values are invalid and will result in undefined behavior. - */ -export declare type AttributeValue = - | string - | number - | boolean - | Array<null | undefined | string> - | Array<null | undefined | number> - | Array<null | undefined | boolean>; - -export interface VercelAiOptions { - /** - * Enable or disable input recording. Enabled if `sendDefaultPii` is `true` - * or if you set `isEnabled` to `true` in your ai SDK method telemetry settings - */ - recordInputs?: boolean; - /** - * Enable or disable output recording. Enabled if `sendDefaultPii` is `true` - * or if you set `isEnabled` to `true` in your ai SDK method telemetry settings - */ - recordOutputs?: boolean; - - /** - * By default, the instrumentation will register span processors only when the ai package is used. - * If you want to register the span processors even when the ai package usage cannot be detected, you can set `force` to `true`. - */ - force?: boolean; -} - -export interface VercelAiIntegration extends Integration { - options: VercelAiOptions; -} diff --git a/packages/cloudflare/src/sdk.ts b/packages/cloudflare/src/sdk.ts index 96b3152e6480..a31a157fc03f 100644 --- a/packages/cloudflare/src/sdk.ts +++ b/packages/cloudflare/src/sdk.ts @@ -13,6 +13,7 @@ import { import type { CloudflareClientOptions, CloudflareOptions } from './client'; import { CloudflareClient } from './client'; import { fetchIntegration } from './integrations/fetch'; +import { vercelAIIntegration } from './integrations/tracing/vercelai'; import { setupOpenTelemetryTracer } from './opentelemetry/tracer'; import { makeCloudflareTransport } from './transport'; import { defaultStackParser } from './vendor/stacktrace'; @@ -30,9 +31,11 @@ export function getDefaultIntegrations(options: CloudflareOptions): Integration[ functionToStringIntegration(), linkedErrorsIntegration(), fetchIntegration(), + modulesIntegration(), // TODO(v10): the `include` object should be defined directly in the integration based on `sendDefaultPii` requestDataIntegration(sendDefaultPii ?
undefined : { include: { cookies: false } }), consoleIntegration(), + vercelAIIntegration(), ]; } From 15802bfd38cf0180edbc7e5aa9a863965190897f Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Wed, 25 Jun 2025 13:20:46 +0200 Subject: [PATCH 04/13] fix build --- packages/cloudflare/src/sdk.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/cloudflare/src/sdk.ts b/packages/cloudflare/src/sdk.ts index a31a157fc03f..79bc6ef5e227 100644 --- a/packages/cloudflare/src/sdk.ts +++ b/packages/cloudflare/src/sdk.ts @@ -13,6 +13,7 @@ import { import type { CloudflareClientOptions, CloudflareOptions } from './client'; import { CloudflareClient } from './client'; import { fetchIntegration } from './integrations/fetch'; +import { modulesIntegration } from './integrations/modules'; import { vercelAIIntegration } from './integrations/tracing/vercelai'; import { setupOpenTelemetryTracer } from './opentelemetry/tracer'; import { makeCloudflareTransport } from './transport'; From 1d1337101661c074d3a27f30cd6338f1bcdb1718 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Wed, 25 Jun 2025 13:21:22 +0200 Subject: [PATCH 05/13] add missing export --- packages/cloudflare/src/index.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/cloudflare/src/index.ts b/packages/cloudflare/src/index.ts index a5bb99d40818..dbe349ca3ef8 100644 --- a/packages/cloudflare/src/index.ts +++ b/packages/cloudflare/src/index.ts @@ -108,6 +108,7 @@ export { getDefaultIntegrations } from './sdk'; export { fetchIntegration } from './integrations/fetch'; export { vercelAIIntegration } from './integrations/tracing/vercelai'; +export { modulesIntegration } from './integrations/modules'; export { instrumentD1WithSentry } from './d1'; From a2945efe7201195e58505ce8dac3e8e4de066c94 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Wed, 25 Jun 2025 14:24:06 +0200 Subject: [PATCH 06/13] refactor to fix stuff --- .../src/integrations/tracing/vercelai.ts | 8 +- packages/core/src/client.ts | 3 +- packages/core/src/index.ts | 2 +- packages/core/src/utils/vercel-ai.ts | 76 +++++++++++++------ .../integrations/tracing/vercelai/index.ts | 10 +-- 5 files changed, 61 insertions(+), 38 deletions(-) diff --git a/packages/cloudflare/src/integrations/tracing/vercelai.ts b/packages/cloudflare/src/integrations/tracing/vercelai.ts index 1dbc8149aaa1..c31ac06ba866 100644 --- a/packages/cloudflare/src/integrations/tracing/vercelai.ts +++ b/packages/cloudflare/src/integrations/tracing/vercelai.ts @@ -9,7 +9,7 @@ */ import type { Client, IntegrationFn } from '@sentry/core'; -import { defineIntegration, processVercelAiSpan } from '@sentry/core'; +import { addVercelAiProcessors, defineIntegration } from '@sentry/core'; import type { modulesIntegration } from '../modules'; interface VercelAiOptions { @@ -36,12 +36,8 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { name: INTEGRATION_NAME, options, setup(client) { - function registerProcessors(): void { - client.on('spanEnd', processVercelAiSpan); - } - if (options.force || shouldRunIntegration(client)) { - registerProcessors(); + addVercelAiProcessors(client); } }, }; diff --git a/packages/core/src/client.ts b/packages/core/src/client.ts index 3dd8bd66d023..7997bd3345a0 100644 --- a/packages/core/src/client.ts +++ b/packages/core/src/client.ts @@ -498,7 +498,8 @@ export abstract class Client { ): void; /** - * Register a callback for whenever a span is ended. + * Register a callback for after a span is ended. + * NOTE: The span cannot be mutated anymore in this callback. 
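+ * If span data still needs to be mutated, use a `spanStart` callback or an event processor instead.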
* Receives the span as argument. * @returns {() => void} A function that, when executed, removes the registered callback. */ diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 84e853456c42..7551478c9c88 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -123,7 +123,7 @@ export { captureFeedback } from './feedback'; export type { ReportDialogOptions } from './report-dialog'; export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/exports'; export { consoleLoggingIntegration } from './logs/console-integration'; -export { processVercelAiSpan } from './utils/vercel-ai'; +export { addVercelAiProcessors } from './utils/vercel-ai'; export type { FeatureFlag } from './utils/featureFlags'; export { diff --git a/packages/core/src/utils/vercel-ai.ts b/packages/core/src/utils/vercel-ai.ts index 30a76bd3f823..2a653addd805 100644 --- a/packages/core/src/utils/vercel-ai.ts +++ b/packages/core/src/utils/vercel-ai.ts @@ -1,5 +1,7 @@ +import type { Client } from '../client'; import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../semanticAttributes'; -import type { Span, SpanAttributes, SpanOrigin } from '../types-hoist/span'; +import type { Event } from '../types-hoist/event'; +import type { Span, SpanAttributes, SpanJSON, SpanOrigin } from '../types-hoist/span'; import { spanToJSON } from './spanUtils'; import { AI_MODEL_ID_ATTRIBUTE, @@ -25,9 +27,9 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void { /** * Post-process spans emitted by the Vercel AI SDK. - * This is supposed to be used in `client.on('spanEnd', ...)`, to ensure all data is already finished. + * This is supposed to be used in `client.on('spanStart', ...) */ -export function processVercelAiSpan(span: Span): void { +function onVercelAiSpanStart(span: Span): void { const { data: attributes, description: name } = spanToJSON(span); if (!name) { @@ -38,7 +40,6 @@ export function processVercelAiSpan(span: Span): void { // https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans if (attributes[AI_TOOL_CALL_NAME_ATTRIBUTE] && attributes[AI_TOOL_CALL_ID_ATTRIBUTE] && name === 'ai.toolCall') { processToolCallSpan(span, attributes); - sharedProcessSpan(span, attributes); return; } @@ -52,7 +53,47 @@ export function processVercelAiSpan(span: Span): void { } processGenerateSpan(span, name, attributes); - sharedProcessSpan(span, attributes); +} + +const vercelAiEventProcessor = Object.assign( + (event: Event): Event => { + if (event.type === 'transaction' && event.spans) { + for (const span of event.spans) { + // this mutates spans in-place + processEndedVercelAiSpan(span); + } + } + return event; + }, + { id: 'VercelAiEventProcessor' }, +); + +/** + * Post-process spans emitted by the Vercel AI SDK. 
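+ * For example (hypothetical numbers): a span carrying ai.usage.promptTokens: 10 and ai.usage.completionTokens: 5
+ * comes out with gen_ai.usage.input_tokens: 10, gen_ai.usage.output_tokens: 5 and gen_ai.usage.total_tokens: 15.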
+ */ +function processEndedVercelAiSpan(span: SpanJSON): void { + const { data: attributes, origin } = span; + + if (origin !== 'auto.vercelai.otel') { + return; + } + + renameAttributeKey(attributes, AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE); + renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE); + + if ( + typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && + typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' + ) { + attributes['gen_ai.usage.total_tokens'] = + attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; + } + + // Rename AI SDK attributes to standardized gen_ai attributes + renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages'); + renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); + renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); + renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); } /** @@ -170,22 +211,11 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute } } -// Processing for both tool call and non-tool call spans -function sharedProcessSpan(span: Span, attributes: SpanAttributes): void { - renameAttributeKey(attributes, AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE); - renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE); - - if ( - typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && - typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' - ) { - attributes['gen_ai.usage.total_tokens'] = - attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; - } - - // Rename AI SDK attributes to standardized gen_ai attributes - renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages'); - renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); - renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); - renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); +/** + * Add event processors to the given client to process Vercel AI spans. 
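+ * This registers a `spanStart` hook (where spans are still mutable) together with an event processor that post-processes the already-ended spans on transaction events.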
+ */ +export function addVercelAiProcessors(client: Client): void { + client.on('spanStart', onVercelAiSpanStart); + // Note: We cannot do this on `spanEnd`, because the span cannot be mutated anymore at this point + client.addEventProcessor(vercelAiEventProcessor); } diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts index 28d93141ab41..d9a396865d6f 100644 --- a/packages/node/src/integrations/tracing/vercelai/index.ts +++ b/packages/node/src/integrations/tracing/vercelai/index.ts @@ -1,5 +1,5 @@ import type { Client, IntegrationFn } from '@sentry/core'; -import { defineIntegration, processVercelAiSpan } from '@sentry/core'; +import { addVercelAiProcessors, defineIntegration } from '@sentry/core'; import { generateInstrumentOnce } from '../../../otel/instrument'; import type { modulesIntegration } from '../../modules'; import { INTEGRATION_NAME } from './constants'; @@ -27,18 +27,14 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { instrumentation = instrumentVercelAi(); }, afterAllSetup(client) { - function registerProcessors(): void { - client.on('spanEnd', processVercelAiSpan); - } - // Auto-detect if we should force the integration when running with 'ai' package available // Note that this can only be detected if the 'Modules' integration is available, and running in CJS mode const shouldForce = options.force ?? shouldForceIntegration(client); if (shouldForce) { - registerProcessors(); + addVercelAiProcessors(client); } else { - instrumentation?.callWhenPatched(registerProcessors); + instrumentation?.callWhenPatched(() => addVercelAiProcessors(client)); } }, }; From f30419fdba935c837e8b4ae1d37213c7b73a2084 Mon Sep 17 00:00:00 2001 From: Francesco Gringl-Novy Date: Wed, 25 Jun 2025 14:25:55 +0200 Subject: [PATCH 07/13] Apply suggestions from code review Co-authored-by: Andrei <168741329+andreiborza@users.noreply.github.com> --- packages/cloudflare/src/integrations/tracing/vercelai.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/cloudflare/src/integrations/tracing/vercelai.ts b/packages/cloudflare/src/integrations/tracing/vercelai.ts index c31ac06ba866..5ddc6317d98f 100644 --- a/packages/cloudflare/src/integrations/tracing/vercelai.ts +++ b/packages/cloudflare/src/integrations/tracing/vercelai.ts @@ -5,7 +5,7 @@ * because Cloudflare Workers do not support it. * * Therefore, we cannot automatically patch setting `experimental_telemetry: { isEnabled: true }` - * and users have to manually these to get spans. + * and users have to manually set this to get spans. */ import type { Client, IntegrationFn } from '@sentry/core'; @@ -57,7 +57,7 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { * }); * ``` * - * The integration automatically detects when to force registration in CommonJS environments + * The integration automatically detects when to force registration. * when the 'ai' package is available. You can still manually set the `force` option if needed. 
* * Unlike the Vercel AI integration in the node SDK, this integration does not add tracing support to From aac34e0d1ba413123c7bf761aff0c0ab5a2381b6 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Wed, 25 Jun 2025 14:26:31 +0200 Subject: [PATCH 08/13] ref comment --- .../cloudflare/src/integrations/tracing/vercelai.ts | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/packages/cloudflare/src/integrations/tracing/vercelai.ts b/packages/cloudflare/src/integrations/tracing/vercelai.ts index 5ddc6317d98f..8dbcc2e9224a 100644 --- a/packages/cloudflare/src/integrations/tracing/vercelai.ts +++ b/packages/cloudflare/src/integrations/tracing/vercelai.ts @@ -48,16 +48,7 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { * * For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry). * - * @example - * ```javascript - * const Sentry = require('@sentry/cloudflare'); - * - * Sentry.init({ - * integrations: [Sentry.vercelAIIntegration()], - * }); - * ``` - * - * The integration automatically detects when to force registration. + * The integration automatically detects when to force registration * when the 'ai' package is available. You can still manually set the `force` option if needed. * * Unlike the Vercel AI integration in the node SDK, this integration does not add tracing support to From 9b8f5f09685fa0dcfdc2d840a8baedfcb27c6ff6 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Wed, 25 Jun 2025 15:09:55 +0200 Subject: [PATCH 09/13] conditional node imports? --- .../cloudflare/src/integrations/modules.ts | 46 ++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/packages/cloudflare/src/integrations/modules.ts b/packages/cloudflare/src/integrations/modules.ts index 6adee9e46744..9b89a2561204 100644 --- a/packages/cloudflare/src/integrations/modules.ts +++ b/packages/cloudflare/src/integrations/modules.ts @@ -1,5 +1,5 @@ -import { existsSync, readFileSync } from 'node:fs'; -import { dirname, join } from 'node:path'; +import type * as NodeFs from 'node:fs'; +import type * as NodePath from 'node:path'; import type { IntegrationFn } from '@sentry/core'; import { isCjs } from '../utils/commonjs'; @@ -11,6 +11,17 @@ const INTEGRATION_NAME = 'Modules'; declare const __SENTRY_SERVER_MODULES__: Record<string, string>; +// Node utils are not available in the worker runtime, so we need to import them dynamically +// So this may or may not be available at runtime +let nodeUtils: + | undefined + | { + dirname: typeof NodePath.dirname; + join: typeof NodePath.join; + existsSync: typeof NodeFs.existsSync; + readFileSync: typeof NodeFs.readFileSync; + }; + /** * `__SENTRY_SERVER_MODULES__` can be replaced at build time with the modules loaded by the server. * Right now, we leverage this in Next.js to circumvent the problem that we do not get access to these things at runtime. @@ -18,6 +29,9 @@ declare const __SENTRY_SERVER_MODULES__: Record<string, string>; const SERVER_MODULES = typeof __SENTRY_SERVER_MODULES__ === 'undefined' ?
{} : __SENTRY_SERVER_MODULES__; const _modulesIntegration = (() => { + // eslint-disable-next-line @typescript-eslint/no-floating-promises + getNodeUtils(); + return { name: INTEGRATION_NAME, processEvent(event) { event.modules = { @@ -58,6 +72,22 @@ function collectModules(): ModuleInfo { }; } +async function getNodeUtils(): Promise<void> { + try { + const { existsSync, readFileSync } = await import('node:fs'); + // eslint-disable-next-line @typescript-eslint/unbound-method + const { dirname, join } = await import('node:path'); + + nodeUtils = { + dirname, + join, + existsSync, + readFileSync, + }; + } catch { + // no-empty + } +} /** Extract information about package.json modules from require.cache */ function collectRequireModules(): ModuleInfo { const mainPaths = require.main?.paths || []; const paths = getRequireCachePaths(); @@ -68,6 +98,12 @@ function collectRequireModules(): ModuleInfo { const infos: ModuleInfo = {}; const seen = new Set<string>(); + if (!nodeUtils) { + return infos; + } + + const { dirname, join, existsSync, readFileSync } = nodeUtils; + paths.forEach(path => { let dir = path; @@ -121,6 +157,12 @@ interface PackageJson { } function getPackageJson(): PackageJson { + if (!nodeUtils) { + return {}; + } + + const { join, readFileSync } = nodeUtils; + try { const filePath = join(process.cwd(), 'package.json'); const packageJson = JSON.parse(readFileSync(filePath, 'utf8')) as PackageJson; From 6c68d0eed386c956b2c4bde2f7641c9f8ef8d4d8 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Thu, 26 Jun 2025 09:45:28 +0200 Subject: [PATCH 10/13] remove modules --- packages/cloudflare/src/index.ts | 1 - .../cloudflare/src/integrations/modules.ts | 183 ------------------ .../src/integrations/tracing/vercelai.ts | 34 +--- packages/cloudflare/src/sdk.ts | 4 - .../integrations/tracing/vercelai/index.ts | 12 +- 5 files changed, 10 insertions(+), 224 deletions(-) delete mode 100644 packages/cloudflare/src/integrations/modules.ts diff --git a/packages/cloudflare/src/index.ts b/packages/cloudflare/src/index.ts index dbe349ca3ef8..a5bb99d40818 100644 --- a/packages/cloudflare/src/index.ts +++ b/packages/cloudflare/src/index.ts @@ -108,7 +108,6 @@ export { getDefaultIntegrations } from './sdk'; export { fetchIntegration } from './integrations/fetch'; export { vercelAIIntegration } from './integrations/tracing/vercelai'; -export { modulesIntegration } from './integrations/modules'; export { instrumentD1WithSentry } from './d1'; diff --git a/packages/cloudflare/src/integrations/modules.ts b/packages/cloudflare/src/integrations/modules.ts deleted file mode 100644 index 9b89a2561204..000000000000 --- a/packages/cloudflare/src/integrations/modules.ts +++ /dev/null @@ -1,183 +0,0 @@ -import type * as NodeFs from 'node:fs'; -import type * as NodePath from 'node:path'; -import type { IntegrationFn } from '@sentry/core'; -import { isCjs } from '../utils/commonjs'; - -type ModuleInfo = Record<string, string>; - -let moduleCache: ModuleInfo | undefined; - -const INTEGRATION_NAME = 'Modules'; - -declare const __SENTRY_SERVER_MODULES__: Record<string, string>; - -// Node utils are not available in the worker runtime, so we need to import them dynamically -// So this may or may not be available at runtime -let nodeUtils: - | undefined - | { - dirname: typeof NodePath.dirname; - join: typeof NodePath.join; - existsSync: typeof NodeFs.existsSync; - readFileSync: typeof NodeFs.readFileSync; - }; - -/** - * `__SENTRY_SERVER_MODULES__` can be replaced at build time with the modules loaded by the server.
- * Right now, we leverage this in Next.js to circumvent the problem that we do not get access to these things at runtime. - */ -const SERVER_MODULES = typeof __SENTRY_SERVER_MODULES__ === 'undefined' ? {} : __SENTRY_SERVER_MODULES__; - -const _modulesIntegration = (() => { - // eslint-disable-next-line @typescript-eslint/no-floating-promises - getNodeUtils(); - - return { - name: INTEGRATION_NAME, - processEvent(event) { - event.modules = { - ...event.modules, - ..._getModules(), - }; - - return event; - }, - getModules: _getModules, - }; -}) satisfies IntegrationFn; - -/** - * Add node modules / packages to the event. - * For this, multiple sources are used: - * - They can be injected at build time into the __SENTRY_SERVER_MODULES__ variable (e.g. in Next.js) - * - They are extracted from the dependencies & devDependencies in the package.json file - * - They are extracted from the require.cache (CJS only) - */ -export const modulesIntegration = _modulesIntegration; - -function getRequireCachePaths(): string[] { - try { - return require.cache ? Object.keys(require.cache as Record<string, unknown>) : []; - } catch (e) { - return []; - } -} - -/** Extract information about package.json modules */ -function collectModules(): ModuleInfo { - return { - ...SERVER_MODULES, - ...getModulesFromPackageJson(), - ...(isCjs() ? collectRequireModules() : {}), - }; -} - -async function getNodeUtils(): Promise<void> { - try { - const { existsSync, readFileSync } = await import('node:fs'); - // eslint-disable-next-line @typescript-eslint/unbound-method - const { dirname, join } = await import('node:path'); - - nodeUtils = { - dirname, - join, - existsSync, - readFileSync, - }; - } catch { - // no-empty - } -} -/** Extract information about package.json modules from require.cache */ -function collectRequireModules(): ModuleInfo { - const mainPaths = require.main?.paths || []; - const paths = getRequireCachePaths(); - - // We start with the modules from package.json (if possible) - // These may be overwritten by more specific versions from the require.cache - const infos: ModuleInfo = {}; - const seen = new Set<string>(); - - if (!nodeUtils) { - return infos; - } - - const { dirname, join, existsSync, readFileSync } = nodeUtils; - - paths.forEach(path => { - let dir = path; - - /** Traverse directories upward in the search of package.json file */ - const updir = (): void | (() => void) => { - const orig = dir; - dir = dirname(orig); - - if (!dir || orig === dir || seen.has(orig)) { - return undefined; - } - if (mainPaths.indexOf(dir) < 0) { - return updir(); - } - - const pkgfile = join(orig, 'package.json'); - seen.add(orig); - - if (!existsSync(pkgfile)) { - return updir(); - } - - try { - const info = JSON.parse(readFileSync(pkgfile, 'utf8')) as { - name: string; - version: string; - }; - infos[info.name] = info.version; - } catch (_oO) { - // no-empty - } - }; - - updir(); - }); - - return infos; -} - -/** Fetches the list of modules and the versions loaded by the entry file for your node.js app.
*/ -function _getModules(): ModuleInfo { - if (!moduleCache) { - moduleCache = collectModules(); - } - return moduleCache; -} - -interface PackageJson { - dependencies?: Record<string, string>; - devDependencies?: Record<string, string>; -} - -function getPackageJson(): PackageJson { - if (!nodeUtils) { - return {}; - } - - const { join, readFileSync } = nodeUtils; - - try { - const filePath = join(process.cwd(), 'package.json'); - const packageJson = JSON.parse(readFileSync(filePath, 'utf8')) as PackageJson; - - return packageJson; - } catch (e) { - return {}; - } -} - -function getModulesFromPackageJson(): ModuleInfo { - const packageJson = getPackageJson(); - - return { - ...packageJson.dependencies, - ...packageJson.devDependencies, - }; -} diff --git a/packages/cloudflare/src/integrations/tracing/vercelai.ts b/packages/cloudflare/src/integrations/tracing/vercelai.ts index 8dbcc2e9224a..c513568997ab 100644 --- a/packages/cloudflare/src/integrations/tracing/vercelai.ts +++ b/packages/cloudflare/src/integrations/tracing/vercelai.ts @@ -8,51 +8,27 @@ * and users have to manually set this to get spans. */ -import type { Client, IntegrationFn } from '@sentry/core'; +import type { IntegrationFn } from '@sentry/core'; import { addVercelAiProcessors, defineIntegration } from '@sentry/core'; -import type { modulesIntegration } from '../modules'; - -interface VercelAiOptions { - /** - * By default, the instrumentation will register span processors only when the ai package is used. - * If you want to register the span processors even when the ai package usage cannot be detected, you can set `force` to `true`. - */ - force?: boolean; -} const INTEGRATION_NAME = 'VercelAI'; -/** - * Determines if the integration should be forced based on environment and package availability. - * Returns true if the 'ai' package is available. - */ -function shouldRunIntegration(client: Client): boolean { - const modules = client.getIntegrationByName<ReturnType<typeof modulesIntegration>>('Modules'); - return !!modules?.getModules?.()?.ai; -} - -const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { +const _vercelAIIntegration = (() => { return { name: INTEGRATION_NAME, - options, setup(client) { - if (options.force || shouldRunIntegration(client)) { - addVercelAiProcessors(client); - } + addVercelAiProcessors(client); }, }; }) satisfies IntegrationFn; /** * Adds Sentry tracing instrumentation for the [ai](https://www.npmjs.com/package/ai) library. + * This integration is not enabled by default; you need to manually add it. * * For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry). * - * The integration automatically detects when to force registration - * when the 'ai' package is available. You can still manually set the `force` option if needed. - * - * Unlike the Vercel AI integration in the node SDK, this integration does not add tracing support to - * `ai` function calls. You need to enable collecting spans for a specific call by setting + * You need to enable collecting spans for a specific call by setting * `experimental_telemetry.isEnabled` to `true` in the first argument of the function call.
* * ```javascript diff --git a/packages/cloudflare/src/sdk.ts b/packages/cloudflare/src/sdk.ts index 79bc6ef5e227..96b3152e6480 100644 --- a/packages/cloudflare/src/sdk.ts +++ b/packages/cloudflare/src/sdk.ts @@ -13,8 +13,6 @@ import { import type { CloudflareClientOptions, CloudflareOptions } from './client'; import { CloudflareClient } from './client'; import { fetchIntegration } from './integrations/fetch'; -import { modulesIntegration } from './integrations/modules'; -import { vercelAIIntegration } from './integrations/tracing/vercelai'; import { setupOpenTelemetryTracer } from './opentelemetry/tracer'; import { makeCloudflareTransport } from './transport'; import { defaultStackParser } from './vendor/stacktrace'; @@ -32,11 +30,9 @@ export function getDefaultIntegrations(options: CloudflareOptions): Integration[] functionToStringIntegration(), linkedErrorsIntegration(), fetchIntegration(), - modulesIntegration(), // TODO(v10): the `include` object should be defined directly in the integration based on `sendDefaultPii` requestDataIntegration(sendDefaultPii ? undefined : { include: { cookies: false } }), consoleIntegration(), - vercelAIIntegration(), ]; } diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts index 28d93141ab41..9ee5fb29f11d 100644 --- a/packages/node/src/integrations/tracing/vercelai/index.ts +++ b/packages/node/src/integrations/tracing/vercelai/index.ts @@ -42,6 +42,7 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { /** * Adds Sentry tracing instrumentation for the [ai](https://www.npmjs.com/package/ai) library. + * This integration is not enabled by default; you need to manually add it. * * For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry). * @@ -54,17 +55,14 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { * }); * ``` * - * The integration automatically detects when to force registration in CommonJS environments - * when the 'ai' package is available. You can still manually set the `force` option if needed. - * - * By default this integration adds tracing support to all `ai` function calls. If you need to disable - * collecting spans for a specific call, you can do so by setting `experimental_telemetry.isEnabled` to - * `false` in the first argument of the function call. + * This integration adds tracing support to all `ai` function calls. + * You need to opt in to collecting spans for a specific call; + * you can do so by setting `experimental_telemetry.isEnabled` to `true` in the first argument of the function call.
* * ```javascript * const result = await generateText({ * model: openai('gpt-4-turbo'), - * experimental_telemetry: { isEnabled: false }, + * experimental_telemetry: { isEnabled: true }, * }); * ``` * From 12e22461c95fd4688269907fbbbe9e1342df7429 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Thu, 26 Jun 2025 09:48:34 +0200 Subject: [PATCH 11/13] export from sveltekit workers --- packages/sveltekit/src/worker/index.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/sveltekit/src/worker/index.ts b/packages/sveltekit/src/worker/index.ts index e49a493fb0b8..8e4645741456 100644 --- a/packages/sveltekit/src/worker/index.ts +++ b/packages/sveltekit/src/worker/index.ts @@ -84,6 +84,7 @@ export { instrumentSupabaseClient, zodErrorsIntegration, featureFlagsIntegration, + vercelAIIntegration, type FeatureFlagsIntegration, } from '@sentry/cloudflare'; From 5e437a91ec0b72dfd3fb58de1721af2209740128 Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Thu, 26 Jun 2025 09:51:13 +0200 Subject: [PATCH 12/13] also export from vercel edge --- packages/vercel-edge/src/index.ts | 1 + .../src/integrations/tracing/vercelai.ts | 51 +++++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 packages/vercel-edge/src/integrations/tracing/vercelai.ts diff --git a/packages/vercel-edge/src/index.ts b/packages/vercel-edge/src/index.ts index 303d40144ec3..5325d1e62391 100644 --- a/packages/vercel-edge/src/index.ts +++ b/packages/vercel-edge/src/index.ts @@ -98,5 +98,6 @@ export { VercelEdgeClient } from './client'; export { getDefaultIntegrations, init } from './sdk'; export { winterCGFetchIntegration } from './integrations/wintercg-fetch'; +export { vercelAIIntegration } from './integrations/tracing/vercelai'; export * as logger from './logs/exports'; diff --git a/packages/vercel-edge/src/integrations/tracing/vercelai.ts b/packages/vercel-edge/src/integrations/tracing/vercelai.ts new file mode 100644 index 000000000000..c513568997ab --- /dev/null +++ b/packages/vercel-edge/src/integrations/tracing/vercelai.ts @@ -0,0 +1,51 @@ +/** + * This is a copy of the Vercel AI integration from the node SDK. + * + * The only difference is that it does not use `@opentelemetry/instrumentation` + * because Cloudflare Workers do not support it. + * + * Therefore, we cannot automatically patch setting `experimental_telemetry: { isEnabled: true }` + * and users have to manually set this to get spans. + */ + +import type { IntegrationFn } from '@sentry/core'; +import { addVercelAiProcessors, defineIntegration } from '@sentry/core'; + +const INTEGRATION_NAME = 'VercelAI'; + +const _vercelAIIntegration = (() => { + return { + name: INTEGRATION_NAME, + setup(client) { + addVercelAiProcessors(client); + }, + }; +}) satisfies IntegrationFn; + +/** + * Adds Sentry tracing instrumentation for the [ai](https://www.npmjs.com/package/ai) library. + * This integration is not enabled by default; you need to manually add it. + * + * For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry). + * + * You need to enable collecting spans for a specific call by setting + * `experimental_telemetry.isEnabled` to `true` in the first argument of the function call.
+ * + * ```javascript + * const result = await generateText({ + * model: openai('gpt-4-turbo'), + * experimental_telemetry: { isEnabled: true }, + * }); + * ``` + * + * If you want to collect inputs and outputs for a specific call, you must specifically opt-in to each + * function call by setting `experimental_telemetry.recordInputs` and `experimental_telemetry.recordOutputs` + * to `true`. + * + * ```javascript + * const result = await generateText({ + * model: openai('gpt-4-turbo'), + * experimental_telemetry: { isEnabled: true, recordInputs: true, recordOutputs: true }, + * }); + */ +export const vercelAIIntegration = defineIntegration(_vercelAIIntegration); From d8983091a73cf32c69e036556704560eb337e4bb Mon Sep 17 00:00:00 2001 From: Francesco Novy Date: Thu, 26 Jun 2025 12:36:50 +0200 Subject: [PATCH 13/13] fix index.types.ts --- packages/nextjs/src/index.types.ts | 3 +++ packages/sveltekit/src/index.types.ts | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/nextjs/src/index.types.ts b/packages/nextjs/src/index.types.ts index c630d545061c..04b73ea4c83e 100644 --- a/packages/nextjs/src/index.types.ts +++ b/packages/nextjs/src/index.types.ts @@ -22,6 +22,9 @@ export declare function init( export declare const linkedErrorsIntegration: typeof clientSdk.linkedErrorsIntegration; export declare const contextLinesIntegration: typeof clientSdk.contextLinesIntegration; +// Different implementation in server and worker +export declare const vercelAIIntegration: typeof serverSdk.vercelAIIntegration; + export declare const getDefaultIntegrations: (options: Options) => Integration[]; export declare const defaultStackParser: StackParser; diff --git a/packages/sveltekit/src/index.types.ts b/packages/sveltekit/src/index.types.ts index 03c63041e726..df788559d2f5 100644 --- a/packages/sveltekit/src/index.types.ts +++ b/packages/sveltekit/src/index.types.ts @@ -46,6 +46,9 @@ export declare function wrapLoadWithSentry any>(orig export declare const linkedErrorsIntegration: typeof clientSdk.linkedErrorsIntegration; export declare const contextLinesIntegration: typeof clientSdk.contextLinesIntegration; +// Different implementation in server and worker +export declare const vercelAIIntegration: typeof serverSdk.vercelAIIntegration; + export declare const getDefaultIntegrations: (options: Options) => Integration[]; export declare const defaultStackParser: StackParser;