Commit c769317

count tokens tests against both backends

1 parent 8f65c98 · commit c769317

File tree

2 files changed: +48 −28 lines
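The commit reuses the same count-tokens assertions against both the Google AI and Vertex AI backends by branching on `testConfig.ai.backend.backendType`. As a rough, hypothetical sketch of how such a config pair might be built (the repo's actual test harness may differ; the model id and import paths here are assumptions):

```ts
// Hypothetical sketch only: one test config per backend so the same suite can
// run against both. Anything not visible in the diff (model id, import paths)
// is an assumption.
import { initializeApp } from 'firebase/app';
import { getAI, GoogleAIBackend, VertexAIBackend } from 'firebase/ai';

// FIREBASE_CONFIG is referenced by the tests below (storage bucket, etc.).
const app = initializeApp(FIREBASE_CONFIG);

const testConfigs = [
  { ai: getAI(app, { backend: new GoogleAIBackend() }), model: 'gemini-2.0-flash' },
  { ai: getAI(app, { backend: new VertexAIBackend() }), model: 'gemini-2.0-flash' }
];
```

Each `it` block can then read `testConfig.ai.backend.backendType` and pick per-backend expected values, as the hunks below do.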

packages/ai/integration/count-tokens.test.ts

Lines changed: 48 additions & 26 deletions
@@ -116,7 +116,6 @@ describe('Count Tokens', () => {
         }
       };
       const response = await model.countTokens([imagePart]);
-      console.log(JSON.stringify(response));

       if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
         const expectedImageTokens = 259;
@@ -149,19 +148,30 @@ describe('Count Tokens', () => {
       };

       const response = await model.countTokens([audioPart]);
-      console.log(JSON.stringify(response));
-      // This may be different on Google AI
-      expect(response.totalTokens).to.be.undefined;
+
+      const textDetails = response.promptTokensDetails!.find(
+        d => d.modality === Modality.TEXT
+      );
+      const audioDetails = response.promptTokensDetails!.find(
+        d => d.modality === Modality.AUDIO
+      );
+
+      if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
+        expect(response.totalTokens).to.equal(6);
+        expect(
+          response.promptTokensDetails!.length,
+        ).to.equal(2);
+        expect(textDetails).to.deep.equal({ modality: Modality.TEXT, tokenCount: 1 })
+        expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO, tokenCount: 5 })
+      } else if (testConfig.ai.backend.backendType === BackendType.VERTEX_AI) {
+        expect(response.totalTokens).to.be.undefined;
+        expect(response.promptTokensDetails!.length).to.equal(1); // For some reason we don't get text
+        expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO }); // For some reason there are no tokens
+      }
+
       expect(
         response.totalBillableCharacters,
       ).to.be.undefined; // Incorrect behavior
-      expect(
-        response.promptTokensDetails!.length,
-      ).to.equal(1);
-      expect(
-        response.promptTokensDetails![0].modality,
-      ).to.equal(Modality.AUDIO);
-      expect(response.promptTokensDetails![0].tokenCount).to.be.undefined;
     });

     it('text, image, and audio input', async () => {
@@ -180,15 +190,6 @@ describe('Count Tokens', () => {
         contents: [{ role: 'user', parts: [textPart, imagePart, audioPart] }]
       };
       const response = await model.countTokens(request);
-      console.log(JSON.stringify(response));
-
-      expect(response.totalTokens).to.equal(261);
-      expect(
-        response.totalBillableCharacters,
-      ).to.equal('Describe these:'.length - 1); // For some reason it's the length-1
-
-      expect(response.promptTokensDetails!.length).to.equal(3);
-
       const textDetails = response.promptTokensDetails!.find(
         d => d.modality === Modality.TEXT
       );
@@ -199,18 +200,39 @@ describe('Count Tokens', () => {
         d => d.modality === Modality.AUDIO
       );

-      expect(textDetails).to.deep.equal({
-        modality: Modality.TEXT,
-        tokenCount: 3
-      });
+      if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
+        expect(response.totalTokens).to.equal(267);
+        expect(response.totalBillableCharacters).to.be.undefined;
+        expect(textDetails).to.deep.equal({
+          modality: Modality.TEXT,
+          tokenCount: 4
+        });
+        expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO, tokenCount: 5 }); // Incorrect behavior because there's no tokenCount
+      } else if (testConfig.ai.backend.backendType === BackendType.VERTEX_AI) {
+        expect(response.totalTokens).to.equal(261);
+        expect(textDetails).to.deep.equal({
+          modality: Modality.TEXT,
+          tokenCount: 3
+        });
+        expect(
+          response.totalBillableCharacters,
+        ).to.equal('Describe these:'.length - 1); // For some reason it's the length-1
+        expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO }); // Incorrect behavior because there's no tokenCount
+      }
+
+      expect(response.promptTokensDetails!.length).to.equal(3);
+
       expect(visionDetails).to.deep.equal({
         modality: Modality.IMAGE,
         tokenCount: 258
       });
-      expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO }); // Incorrect behavior because there's no tokenCount
     });

     it('public storage reference', async () => {
+      // This test is not expected to pass when using Google AI.
+      if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
+        return;
+      }
       const model = getGenerativeModel(testConfig.ai, {
         model: testConfig.model
       });
@@ -220,8 +242,8 @@ describe('Count Tokens', () => {
           fileUri: `gs://${FIREBASE_CONFIG.storageBucket}/images/tree.png`
         }
       };
+
       const response = await model.countTokens([filePart]);
-      console.log(JSON.stringify(response));

       const expectedFileTokens = 258;
       expect(response.totalTokens).to.equal(expectedFileTokens);
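The updated tests repeatedly pull one modality's entry out of `promptTokensDetails` with `Array.prototype.find`. A standalone sketch of that lookup (the helper name is mine; `CountTokensResponse` and `ModalityTokenCount` appear to be the public types this package exports):

```ts
// Illustrative helper, not part of the commit: look up the token details for
// one modality in a countTokens response.
import { CountTokensResponse, Modality, ModalityTokenCount } from 'firebase/ai';

function detailsFor(
  response: CountTokensResponse,
  modality: Modality
): ModalityTokenCount | undefined {
  return response.promptTokensDetails?.find(d => d.modality === modality);
}

// Mirroring the audio test above: Google AI returns an AUDIO entry with a
// tokenCount, while Vertex AI returns the entry without one.
// const audioDetails = detailsFor(response, Modality.AUDIO);
```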

packages/ai/integration/generate-content.test.ts

Lines changed: 0 additions & 2 deletions
@@ -46,12 +46,10 @@ describe('Generate Content', () => {
       {
         category: HarmCategory.HARM_CATEGORY_HARASSMENT,
         threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
-        method: HarmBlockMethod.PROBABILITY
       },
       {
         category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
         threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
-        method: HarmBlockMethod.SEVERITY
       },
       {
         category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
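Dropping the `method` fields presumably keeps the safety settings valid on both backends, since `HarmBlockMethod` appears to be a Vertex AI-only option (an assumption; the commit does not state the reason). After the change the settings reduce to category/threshold pairs, roughly:

```ts
// Backend-neutral safety settings as they stand after this change; the
// variable name and import path are assumptions.
import { HarmBlockThreshold, HarmCategory, SafetySetting } from 'firebase/ai';

const safetySettings: SafetySetting[] = [
  {
    category: HarmCategory.HARM_CATEGORY_HARASSMENT,
    threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
  },
  {
    category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
    threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
  },
  {
    category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
    threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
  }
];
```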
