Skip to content

Commit 1ede87d

Browse files
committed
remove io from vertex integration
1 parent 37deebf commit 1ede87d

File tree

2 files changed

+4
-215
lines changed

2 files changed

+4
-215
lines changed

packages/datadog-plugin-google-cloud-vertexai/src/tracing.js

Lines changed: 0 additions & 126 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@ const makeUtilities = require('../../dd-trace/src/plugins/util/llm')
77

88
const {
99
extractModel,
10-
extractSystemInstructions
1110
} = require('./utils')
1211

1312
class GoogleCloudVertexAITracingPlugin extends TracingPlugin {
@@ -47,14 +46,6 @@ class GoogleCloudVertexAITracingPlugin extends TracingPlugin {
4746
const span = ctx.currentStore?.span
4847
if (!span) return
4948

50-
const { result } = ctx
51-
52-
const response = result?.response
53-
if (response) {
54-
const tags = this.tagResponse(response, span)
55-
span.addTags(tags)
56-
}
57-
5849
span.finish()
5950
}
6051

@@ -73,129 +64,12 @@ class GoogleCloudVertexAITracingPlugin extends TracingPlugin {
7364
'vertexai.request.model': model
7465
}
7566

76-
const history = instance.historyInternal
77-
78-
let contents = typeof request === 'string' || Array.isArray(request) ? request : request.contents
79-
if (history) {
80-
contents = [...history, ...(Array.isArray(contents) ? contents : [contents])]
81-
}
82-
8367
const generationConfig = instance.generationConfig || {}
8468
for (const key of Object.keys(generationConfig)) {
8569
const transformedKey = key.replaceAll(/([a-z0-9])([A-Z])/g, '$1_$2').toLowerCase()
8670
tags[`vertexai.request.generation_config.${transformedKey}`] = JSON.stringify(generationConfig[key])
8771
}
8872

89-
if (stream) {
90-
tags['vertexai.request.stream'] = true
91-
}
92-
93-
if (!this.isPromptCompletionSampled(span)) return tags
94-
95-
const systemInstructions = extractSystemInstructions(instance)
96-
97-
for (const [idx, systemInstruction] of systemInstructions.entries()) {
98-
tags[`vertexai.request.system_instruction.${idx}.text`] = systemInstruction
99-
}
100-
101-
if (typeof contents === 'string') {
102-
tags['vertexai.request.contents.0.text'] = contents
103-
return tags
104-
}
105-
106-
for (const [contentIdx, content] of contents.entries()) {
107-
this.tagRequestContent(tags, content, contentIdx)
108-
}
109-
110-
return tags
111-
}
112-
113-
tagRequestPart (part, tags, partIdx, contentIdx) {
114-
tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.text`] = this.normalize(part.text)
115-
116-
const functionCall = part.functionCall
117-
const functionResponse = part.functionResponse
118-
119-
if (functionCall) {
120-
tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_call.name`] = functionCall.name
121-
tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_call.args`] =
122-
this.normalize(JSON.stringify(functionCall.args))
123-
}
124-
if (functionResponse) {
125-
tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_response.name`] =
126-
functionResponse.name
127-
tags[`vertexai.request.contents.${contentIdx}.parts.${partIdx}.function_response.response`] =
128-
this.normalize(JSON.stringify(functionResponse.response))
129-
}
130-
}
131-
132-
tagRequestContent (tags, content, contentIdx) {
133-
if (typeof content === 'string') {
134-
tags[`vertexai.request.contents.${contentIdx}.text`] = this.normalize(content)
135-
return
136-
}
137-
138-
if (content.text || content.functionCall || content.functionResponse) {
139-
this.tagRequestPart(content, tags, 0, contentIdx)
140-
return
141-
}
142-
143-
const { role, parts } = content
144-
if (role) {
145-
tags[`vertexai.request.contents.${contentIdx}.role`] = role
146-
}
147-
148-
for (const [partIdx, part] of parts.entries()) {
149-
this.tagRequestPart(part, tags, partIdx, contentIdx)
150-
}
151-
}
152-
153-
/**
154-
* Generate the response tags.
155-
*
156-
* @param {Object} response
157-
* @param {Span} span
158-
* @returns {Object}
159-
*/
160-
tagResponse (response, span) {
161-
const tags = {}
162-
const isSampled = this.isPromptCompletionSampled(span)
163-
164-
const candidates = response.candidates
165-
for (const [candidateIdx, candidate] of candidates.entries()) {
166-
const finishReason = candidate.finishReason
167-
if (finishReason) {
168-
tags[`vertexai.response.candidates.${candidateIdx}.finish_reason`] = finishReason
169-
}
170-
const candidateContent = candidate.content
171-
const role = candidateContent.role
172-
tags[`vertexai.response.candidates.${candidateIdx}.content.role`] = role
173-
174-
if (!isSampled) continue
175-
176-
const parts = candidateContent.parts
177-
for (const [partIdx, part] of parts.entries()) {
178-
const text = part.text
179-
tags[`vertexai.response.candidates.${candidateIdx}.content.parts.${partIdx}.text`] =
180-
this.normalize(String(text))
181-
182-
const functionCall = part.functionCall
183-
if (!functionCall) continue
184-
185-
tags[`vertexai.response.candidates.${candidateIdx}.content.parts.${partIdx}.function_call.name`] =
186-
functionCall.name
187-
tags[`vertexai.response.candidates.${candidateIdx}.content.parts.${partIdx}.function_call.args`] =
188-
this.normalize(JSON.stringify(functionCall.args))
189-
}
190-
}
191-
192-
const tokenCounts = response.usageMetadata
193-
if (tokenCounts) {
194-
tags['vertexai.response.usage.prompt_tokens'] = tokenCounts.promptTokenCount
195-
tags['vertexai.response.usage.completion_tokens'] = tokenCounts.candidatesTokenCount
196-
tags['vertexai.response.usage.total_tokens'] = tokenCounts.totalTokenCount
197-
}
198-
19973
return tags
20074
}
20175
}

packages/datadog-plugin-google-cloud-vertexai/test/index.spec.js

Lines changed: 4 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -100,38 +100,19 @@ describe('Plugin', () => {
100100
expect(span.meta).to.have.property('span.kind', 'client')
101101

102102
expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
103-
expect(span.meta).to.have.property('vertexai.request.contents.0.role', 'user')
104-
expect(span.meta).to.have.property('vertexai.request.contents.0.parts.0.text', 'Hello, how are you?')
105-
expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP')
106-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text',
107-
'Hello! How can I assist you today?')
108-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model')
109-
110-
expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 35)
111-
expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 2)
112-
expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 37)
113-
114-
if (model.systemInstruction) {
115-
expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text',
116-
'Please provide an answer')
117-
}
118-
expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50')
119-
expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1')
120103
})
121104

122105
const { response } = await model.generateContent({
123106
contents: [{ role: 'user', parts: [{ text: 'Hello, how are you?' }] }]
124107
})
125-
126108
expect(response).to.have.property('candidates')
127109

128110
await checkTraces
129111
})
130112

131113
it('makes a successful call with a string argument', async () => {
132114
const checkTraces = agent.assertSomeTraces(traces => {
133-
expect(traces[0][0].meta).to.have.property('vertexai.request.contents.0.text',
134-
'Hello, how are you?')
115+
expect(traces[0][0].meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
135116
})
136117

137118
const { response } = await model.generateContent('Hello, how are you?')
@@ -148,11 +129,7 @@ describe('Plugin', () => {
148129
const checkTraces = agent.assertSomeTraces(traces => {
149130
const span = traces[0][0]
150131

151-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text', 'undefined')
152-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.function_call.name',
153-
'add')
154-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.function_call.args',
155-
JSON.stringify({ a: 2, b: 2 }))
132+
expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
156133
})
157134

158135
await model.generateContent('what is 2 + 2?')
@@ -174,24 +151,6 @@ describe('Plugin', () => {
174151
expect(span.meta).to.have.property('span.kind', 'client')
175152

176153
expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
177-
expect(span.meta).to.have.property('vertexai.request.contents.0.text', 'Hello, how are you?')
178-
expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP')
179-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text',
180-
'Hi, how are you doing today my friend?')
181-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model')
182-
183-
expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 5)
184-
expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 10)
185-
expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 15)
186-
187-
if (model.systemInstruction) {
188-
expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text',
189-
'Please provide an answer')
190-
}
191-
expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50')
192-
expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1')
193-
194-
expect(span.metrics).to.have.property('vertexai.request.stream', 1)
195154
})
196155

197156
const { stream, response } = await model.generateContentStream('Hello, how are you?')
@@ -226,28 +185,6 @@ describe('Plugin', () => {
226185
expect(span.meta).to.have.property('span.kind', 'client')
227186

228187
expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
229-
230-
expect(span.meta).to.have.property('vertexai.request.contents.0.role', 'user')
231-
expect(span.meta).to.have.property('vertexai.request.contents.0.parts.0.text', 'Foobar?')
232-
expect(span.meta).to.have.property('vertexai.request.contents.1.role', 'model')
233-
expect(span.meta).to.have.property('vertexai.request.contents.1.parts.0.text', 'Foobar!')
234-
expect(span.meta).to.have.property('vertexai.request.contents.2.parts.0.text', 'Hello, how are you?')
235-
236-
expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP')
237-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text',
238-
'Hello! How can I assist you today?')
239-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model')
240-
241-
expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 35)
242-
expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 2)
243-
expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 37)
244-
245-
if (model.systemInstruction) {
246-
expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text',
247-
'Please provide an answer')
248-
}
249-
expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50')
250-
expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1')
251188
})
252189

253190
const chat = model.startChat({
@@ -265,8 +202,7 @@ describe('Plugin', () => {
265202

266203
it('tags a string input', async () => {
267204
const checkTraces = agent.assertSomeTraces(traces => {
268-
expect(traces[0][0].meta).to.have.property('vertexai.request.contents.0.text',
269-
'Hello, how are you?')
205+
expect(traces[0][0].meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
270206
})
271207

272208
const chat = model.startChat({})
@@ -279,10 +215,7 @@ describe('Plugin', () => {
279215

280216
it('tags an array of string inputs', async () => {
281217
const checkTraces = agent.assertSomeTraces(traces => {
282-
expect(traces[0][0].meta).to.have.property('vertexai.request.contents.0.text',
283-
'Hello, how are you?')
284-
expect(traces[0][0].meta).to.have.property('vertexai.request.contents.1.text',
285-
'What should I do today?')
218+
expect(traces[0][0].meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
286219
})
287220

288221
const chat = model.startChat({})
@@ -306,24 +239,6 @@ describe('Plugin', () => {
306239
expect(span.meta).to.have.property('span.kind', 'client')
307240

308241
expect(span.meta).to.have.property('vertexai.request.model', 'gemini-1.5-flash-002')
309-
expect(span.meta).to.have.property('vertexai.request.contents.0.text', 'Hello, how are you?')
310-
expect(span.meta).to.have.property('vertexai.response.candidates.0.finish_reason', 'STOP')
311-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.parts.0.text',
312-
'Hi, how are you doing today my friend?')
313-
expect(span.meta).to.have.property('vertexai.response.candidates.0.content.role', 'model')
314-
315-
expect(span.metrics).to.have.property('vertexai.response.usage.prompt_tokens', 5)
316-
expect(span.metrics).to.have.property('vertexai.response.usage.completion_tokens', 10)
317-
expect(span.metrics).to.have.property('vertexai.response.usage.total_tokens', 15)
318-
319-
if (model.systemInstruction) {
320-
expect(span.meta).to.have.property('vertexai.request.system_instruction.0.text',
321-
'Please provide an answer')
322-
}
323-
expect(span.meta).to.have.property('vertexai.request.generation_config.max_output_tokens', '50')
324-
expect(span.meta).to.have.property('vertexai.request.generation_config.temperature', '1')
325-
326-
expect(span.metrics).to.have.property('vertexai.request.stream', 1)
327242
})
328243

329244
const chat = model.startChat({})

0 commit comments

Comments (0)