Skip to content

Commit ae05e53

Browse files
committed
Integration tests
1 parent 98574c2 commit ae05e53

File tree

4 files changed

+75
-33
lines changed

4 files changed

+75
-33
lines changed

packages/vertexai/integration/chat.test.ts

Whitespace-only changes.
Lines changed: 7 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,4 @@
1-
import { initializeApp } from "@firebase/app";
2-
import { Content, GenerationConfig, HarmBlockMethod, HarmBlockThreshold, HarmCategory, Modality, SafetySetting, getGenerativeModel, getVertexAI } from "../src";
3-
import { expect } from "chai";
1+
import { Content, GenerationConfig, HarmBlockMethod, HarmBlockThreshold, HarmCategory, SafetySetting } from "../src";
42

53
// TODO (dlarocque): Use separate Firebase config specifically for Vertex AI
64
// TODO (dlarocque): Load this from environment variables, so we can set the config as a
@@ -16,17 +14,17 @@ export const config = {
1614
measurementId: "G-1VL38N8YFE"
1715
};
1816

19-
initializeApp(config);
20-
const MODEL_NAME = 'gemini-1.5-pro';
17+
export const MODEL_NAME = 'gemini-1.5-pro';
2118

22-
let generationConfig: GenerationConfig = {
19+
/// TODO (dlarocque): Fix the naming on these.
20+
export const generationConfig: GenerationConfig = {
2321
temperature: 0,
2422
topP: 0,
2523
topK: 1,
2624
responseMimeType: 'text/plain'
2725
}
2826

29-
let safetySettings: SafetySetting[] = [
27+
export const safetySettings: SafetySetting[] = [
3028
{
3129
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
3230
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
@@ -47,35 +45,11 @@ let safetySettings: SafetySetting[] = [
4745
},
4846
];
4947

50-
let systemInstruction: Content = {
48+
export const systemInstruction: Content = {
5149
role: 'system',
5250
parts: [
5351
{
5452
text: 'You are a friendly and helpful assistant.'
5553
}
5654
]
57-
};
58-
59-
describe('VertexAIService', () => {
60-
it('CountTokens text', async () => {
61-
const vertexAI = getVertexAI();
62-
const model = getGenerativeModel(
63-
vertexAI,
64-
{
65-
model: MODEL_NAME,
66-
generationConfig,
67-
systemInstruction,
68-
safetySettings
69-
}
70-
);
71-
72-
let response = await model.countTokens('Why is the sky blue?');
73-
74-
expect(response.totalTokens).to.equal(6);
75-
expect(response.totalBillableCharacters).to.equal(16);
76-
expect(response.promptTokensDetails).to.not.be.null;
77-
expect(response.promptTokensDetails!.length).to.equal(1);
78-
expect(response.promptTokensDetails![0].modality).to.equal(Modality.TEXT);
79-
expect(response.promptTokensDetails![0].tokenCount).to.equal(6);
80-
});
81-
});
55+
};
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
import { expect } from "chai";
2+
import { Modality, getGenerativeModel, getVertexAI } from "../src";
3+
import { MODEL_NAME, generationConfig, systemInstruction, safetySettings, config } from "./constants";
4+
import { initializeApp } from "@firebase/app";
5+
6+
describe('Count Tokens', () => {
7+
8+
before(() => initializeApp(config))
9+
10+
it('CountTokens text', async () => {
11+
const vertexAI = getVertexAI();
12+
const model = getGenerativeModel(
13+
vertexAI,
14+
{
15+
model: MODEL_NAME,
16+
generationConfig,
17+
systemInstruction,
18+
safetySettings
19+
}
20+
);
21+
22+
let response = await model.countTokens('Why is the sky blue?');
23+
24+
expect(response.totalTokens).to.equal(6);
25+
expect(response.totalBillableCharacters).to.equal(16);
26+
expect(response.promptTokensDetails).to.not.be.null;
27+
expect(response.promptTokensDetails!.length).to.equal(1);
28+
expect(response.promptTokensDetails![0].modality).to.equal(Modality.TEXT);
29+
expect(response.promptTokensDetails![0].tokenCount).to.equal(6);
30+
});
31+
});
Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
import { expect } from "chai";
2+
import { getGenerativeModel, getVertexAI } from "../src";
3+
import { MODEL_NAME, generationConfig, systemInstruction, safetySettings, config } from "./constants";
4+
import { initializeApp } from "@firebase/app";
5+
6+
// Token counts are only expected to differ by at most this number of tokens.
7+
// Set to 1 for whitespace that is not always present.
8+
const TOKEN_COUNT_DELTA = 1;
9+
10+
describe('Generate Content', () => {
11+
12+
before(() => initializeApp(config))
13+
14+
it('generateContent', async () => {
15+
const vertexAI = getVertexAI();
16+
const model = getGenerativeModel(
17+
vertexAI,
18+
{
19+
model: MODEL_NAME,
20+
generationConfig,
21+
systemInstruction,
22+
safetySettings
23+
}
24+
);
25+
26+
const result = await model.generateContent("Where is Google headquarters located? Answer with the city name only.");
27+
const response = result.response;
28+
29+
const trimmedText = response.text().trim();
30+
expect(trimmedText).to.equal('Mountain View');
31+
32+
console.log(JSON.stringify(response));
33+
34+
expect(response.usageMetadata).to.not.be.null;
35+
expect(response.usageMetadata!.promptTokenCount).to.be.closeTo(21, TOKEN_COUNT_DELTA);
36+
});
37+
});

0 commit comments

Comments (0)