Skip to content

feat(instrumentation-aws-sdk): add bedrock extension to apply gen ai conventions #2700

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 21 commits into from
Mar 19, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1,088 changes: 991 additions & 97 deletions package-lock.json

Large diffs are not rendered by default.

48 changes: 11 additions & 37 deletions plugins/node/opentelemetry-instrumentation-aws-sdk/.tav.yml
Original file line number Diff line number Diff line change
@@ -1,13 +1,18 @@
# Note: tests must set `SKIP_TEST_IF_DISABLE=true` to override usage of
# `mocha --require '../../../scripts/skip-test-if.js' ...` if calling `npm test`.

# Versions [3.363.0, 3.377.0] of all @aws-sdk/client-* were bad releases. See:
# - https://github.com/open-telemetry/opentelemetry-js-contrib/pull/2464#issuecomment-2403652552
# - https://github.com/open-telemetry/opentelemetry-js-contrib/issues/1828#issuecomment-1834276719

# node version support in JS SDK v3:
# - 14.x dropped in v3.567.0 https://github.com/aws/aws-sdk-js-v3/pull/6034
# - 16.x dropped in v3.723.0 https://github.com/aws/aws-sdk-js-v3/pull/6775
"@aws-sdk/client-bedrock-runtime":
env:
- SKIP_TEST_IF_DISABLE=true
jobs:
- node: ">=18"
versions:
include: "^3.587.0"
exclude: ">=3.363.0 <=3.377.0"
mode: "max-7"
commands:
- mocha --require '@opentelemetry/contrib-test-utils' test/bedrock-runtime.test.ts

"@aws-sdk/client-s3":
env:
Expand All @@ -22,22 +27,6 @@
commands:
- mocha --require '@opentelemetry/contrib-test-utils' test/aws-sdk-v3-s3.test.ts
- mocha --require '@opentelemetry/contrib-test-utils' test/s3.test.ts
- node: "16"
versions:
include: ">=3.6.1 <3.723.0"
exclude: "3.529.0 || >=3.363.0 <=3.377.0"
mode: "max-7"
commands:
- mocha --require '@opentelemetry/contrib-test-utils' test/aws-sdk-v3-s3.test.ts
- mocha --require '@opentelemetry/contrib-test-utils' test/s3.test.ts
- node: "14"
versions:
include: ">=3.6.1 <3.567.0"
exclude: "3.529.0 || >=3.363.0 <=3.377.0"
mode: "max-7"
commands:
- mocha --require '@opentelemetry/contrib-test-utils' test/aws-sdk-v3-s3.test.ts
- mocha --require '@opentelemetry/contrib-test-utils' test/s3.test.ts

"@aws-sdk/client-sqs":
env:
Expand All @@ -50,18 +39,3 @@
mode: "max-7"
commands:
- mocha --require '@opentelemetry/contrib-test-utils' test/aws-sdk-v3-sqs.test.ts
- node: "16"
versions:
include: ">=3.24.0 <3.723.0"
exclude: ">=3.363.0 <=3.377.0"
mode: "max-7"
commands:
- mocha --require '@opentelemetry/contrib-test-utils' test/aws-sdk-v3-sqs.test.ts
- node: "14"
versions:
include: ">=3.24.0 <3.567.0"
exclude: ">=3.363.0 <=3.377.0"
mode: "max-7"
commands:
- mocha --require '@opentelemetry/contrib-test-utils' test/aws-sdk-v3-sqs.test.ts

Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ Specific service logic currently implemented for:
- [SNS](./doc/sns.md)
- [Lambda](./doc/lambda.md)
- DynamoDb
- Amazon Bedrock Runtime (See the [GenAI semantic conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/).)

## Potential Side Effects

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
"prewatch": "npm run precompile",
"prepublishOnly": "npm run compile",
"tdd": "npm run test -- --watch-extensions ts --watch",
"test": "SKIP_TEST_IF_NODE_OLDER_THAN=18 nyc mocha --require '../../../scripts/skip-test-if.js' --require '@opentelemetry/contrib-test-utils' 'test/**/*.test.ts'",
"test": "nyc mocha --require '@opentelemetry/contrib-test-utils' 'test/**/*.test.ts'",
"test-all-versions": "tav",
"version:update": "node ../../../scripts/version-update.js",
"watch": "tsc -w"
Expand All @@ -50,6 +50,7 @@
"@opentelemetry/semantic-conventions": "^1.27.0"
},
"devDependencies": {
"@aws-sdk/client-bedrock-runtime": "^3.587.0",
"@aws-sdk/client-dynamodb": "^3.85.0",
"@aws-sdk/client-kinesis": "^3.85.0",
"@aws-sdk/client-lambda": "^3.85.0",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -323,7 +323,9 @@
const serviceName =
clientConfig?.serviceId ??
removeSuffixFromStringIfExists(
awsExecutionContext.clientName,
// Use 'AWS' as a fallback serviceName to match type definition.
// In practice, `clientName` should always be set.
awsExecutionContext.clientName || 'AWS',

Check warning on line 328 in plugins/node/opentelemetry-instrumentation-aws-sdk/src/aws-sdk.ts

View check run for this annotation

Codecov / codecov/patch

plugins/node/opentelemetry-instrumentation-aws-sdk/src/aws-sdk.ts#L328

Added line #L328 was not covered by tests
'Client'
);
const commandName =
Expand Down
140 changes: 140 additions & 0 deletions plugins/node/opentelemetry-instrumentation-aws-sdk/src/semconv.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
/*
 * Copyright The OpenTelemetry Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Local copies of unstable (experimental) GenAI semantic-convention
 * attribute names and values used by this package. They are vendored here
 * rather than imported, per the guidance for unstable semconv:
 * @see https://github.com/open-telemetry/opentelemetry-js/tree/main/semantic-conventions#unstable-semconv
 */

/**
 * Name of the GenAI operation being performed (e.g. "chat").
 *
 * @note When a predefined value applies but the specific system uses a
 * different name, it is **RECOMMENDED** to document that name in the
 * system-specific semantic conventions and use it in instrumentation;
 * otherwise instrumentations **SHOULD** use the applicable predefined value.
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_OPERATION_NAME = 'gen_ai.operation.name' as const;

/**
 * Maximum number of tokens the model may generate for a single request.
 *
 * @example 100
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_REQUEST_MAX_TOKENS =
  'gen_ai.request.max_tokens' as const;

/**
 * Name of the GenAI model the request targets.
 *
 * @example "gpt-4"
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_REQUEST_MODEL = 'gen_ai.request.model' as const;

/**
 * Sequences that cause the model to stop generating further tokens.
 *
 * @example ["forest", "lived"]
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_REQUEST_STOP_SEQUENCES =
  'gen_ai.request.stop_sequences' as const;

/**
 * Temperature sampling setting for the GenAI request.
 *
 * @example 0.0
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_REQUEST_TEMPERATURE =
  'gen_ai.request.temperature' as const;

/**
 * Nucleus (top_p) sampling setting for the GenAI request.
 *
 * @example 1.0
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_REQUEST_TOP_P = 'gen_ai.request.top_p' as const;

/**
 * Reasons the model stopped generating tokens, one entry per generation
 * received.
 *
 * @example ["stop"]
 * @example ["stop", "length"]
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_RESPONSE_FINISH_REASONS =
  'gen_ai.response.finish_reasons' as const;

/**
 * The Generative AI product as identified by the client or server
 * instrumentation.
 *
 * @example "openai"
 *
 * @note `gen_ai.system` names a family of GenAI models; the concrete model
 * is carried by `gen_ai.request.model` / `gen_ai.response.model`.
 *
 * The actual product may differ from what the client identifies: several
 * systems (e.g. Azure OpenAI, Gemini) are reachable through OpenAI client
 * libraries, in which case `gen_ai.system` is set to `openai` on the
 * instrumentation's best knowledge; `server.address` may help identify the
 * real backend.
 *
 * For a custom model a custom friendly name **SHOULD** be used; when nothing
 * applies, `gen_ai.system` **SHOULD** be `_OTHER`.
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_SYSTEM = 'gen_ai.system' as const;

/**
 * Number of tokens consumed by the GenAI input (prompt).
 *
 * @example 100
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_USAGE_INPUT_TOKENS =
  'gen_ai.usage.input_tokens' as const;

/**
 * Number of tokens produced in the GenAI response (completion).
 *
 * @example 180
 *
 * @experimental Subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
 */
export const ATTR_GEN_AI_USAGE_OUTPUT_TOKENS =
  'gen_ai.usage.output_tokens' as const;

/**
 * Enum value "chat" for attribute {@link ATTR_GEN_AI_OPERATION_NAME}.
 */
export const GEN_AI_OPERATION_NAME_VALUE_CHAT = 'chat' as const;

/**
 * Enum value "aws.bedrock" for attribute {@link ATTR_GEN_AI_SYSTEM}.
 */
export const GEN_AI_SYSTEM_VALUE_AWS_BEDROCK = 'aws.bedrock' as const;
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import {
NormalizedRequest,
NormalizedResponse,
} from '../types';
import { BedrockRuntimeServiceExtension } from './bedrock-runtime';
import { DynamodbServiceExtension } from './dynamodb';
import { SnsServiceExtension } from './sns';
import { LambdaServiceExtension } from './lambda';
Expand All @@ -37,6 +38,7 @@ export class ServicesExtensions implements ServiceExtension {
this.services.set('Lambda', new LambdaServiceExtension());
this.services.set('S3', new S3ServiceExtension());
this.services.set('Kinesis', new KinesisServiceExtension());
this.services.set('BedrockRuntime', new BedrockRuntimeServiceExtension());
}

requestPreSpanHook(
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
/*
* Copyright The OpenTelemetry Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Attributes, DiagLogger, Span, Tracer } from '@opentelemetry/api';
import { RequestMetadata, ServiceExtension } from './ServiceExtension';
import {
ATTR_GEN_AI_SYSTEM,
ATTR_GEN_AI_OPERATION_NAME,
ATTR_GEN_AI_REQUEST_MODEL,
ATTR_GEN_AI_REQUEST_MAX_TOKENS,
ATTR_GEN_AI_REQUEST_TEMPERATURE,
ATTR_GEN_AI_REQUEST_TOP_P,
ATTR_GEN_AI_REQUEST_STOP_SEQUENCES,
ATTR_GEN_AI_USAGE_INPUT_TOKENS,
ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
ATTR_GEN_AI_RESPONSE_FINISH_REASONS,
GEN_AI_OPERATION_NAME_VALUE_CHAT,
GEN_AI_SYSTEM_VALUE_AWS_BEDROCK,
} from '../semconv';
import {
AwsSdkInstrumentationConfig,
NormalizedRequest,
NormalizedResponse,
} from '../types';

export class BedrockRuntimeServiceExtension implements ServiceExtension {
requestPreSpanHook(
request: NormalizedRequest,
config: AwsSdkInstrumentationConfig,
diag: DiagLogger
): RequestMetadata {
switch (request.commandName) {
case 'Converse':
return this.requestPreSpanHookConverse(request, config, diag);
}

return {
isIncoming: false,
};
}

private requestPreSpanHookConverse(
request: NormalizedRequest,
config: AwsSdkInstrumentationConfig,
diag: DiagLogger
): RequestMetadata {
let spanName = GEN_AI_OPERATION_NAME_VALUE_CHAT;
const spanAttributes: Attributes = {
[ATTR_GEN_AI_SYSTEM]: GEN_AI_SYSTEM_VALUE_AWS_BEDROCK,
[ATTR_GEN_AI_OPERATION_NAME]: GEN_AI_OPERATION_NAME_VALUE_CHAT,
};

const modelId = request.commandInput.modelId;
if (modelId) {
spanAttributes[ATTR_GEN_AI_REQUEST_MODEL] = modelId;
if (spanName) {
spanName += ` ${modelId}`;
}
}

const inferenceConfig = request.commandInput.inferenceConfig;
if (inferenceConfig) {
const { maxTokens, temperature, topP, stopSequences } = inferenceConfig;
if (maxTokens !== undefined) {
spanAttributes[ATTR_GEN_AI_REQUEST_MAX_TOKENS] = maxTokens;
}
if (temperature !== undefined) {
spanAttributes[ATTR_GEN_AI_REQUEST_TEMPERATURE] = temperature;
}
if (topP !== undefined) {
spanAttributes[ATTR_GEN_AI_REQUEST_TOP_P] = topP;
}
if (stopSequences !== undefined) {
spanAttributes[ATTR_GEN_AI_REQUEST_STOP_SEQUENCES] = stopSequences;
}
}

return {
spanName,
isIncoming: false,
spanAttributes,
};
}

responseHook(
response: NormalizedResponse,
span: Span,
tracer: Tracer,
config: AwsSdkInstrumentationConfig
) {
if (!span.isRecording()) {
return;

Check warning on line 104 in plugins/node/opentelemetry-instrumentation-aws-sdk/src/services/bedrock-runtime.ts

View check run for this annotation

Codecov / codecov/patch

plugins/node/opentelemetry-instrumentation-aws-sdk/src/services/bedrock-runtime.ts#L104

Added line #L104 was not covered by tests
}

switch (response.request.commandName) {
case 'Converse':
return this.responseHookConverse(response, span, tracer, config);
}
}

private responseHookConverse(
response: NormalizedResponse,
span: Span,
tracer: Tracer,
config: AwsSdkInstrumentationConfig
) {
const { stopReason, usage } = response.data;
if (usage) {
const { inputTokens, outputTokens } = usage;
if (inputTokens !== undefined) {
span.setAttribute(ATTR_GEN_AI_USAGE_INPUT_TOKENS, inputTokens);
}
if (outputTokens !== undefined) {
span.setAttribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, outputTokens);
}
}

if (stopReason !== undefined) {
span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [stopReason]);
}
}
}
Loading