import { initializeApp } from '@firebase/app';
import {
  Content,
  GenerationConfig,
  HarmBlockMethod,
  HarmBlockThreshold,
  HarmCategory,
  Modality,
  SafetySetting,
  getGenerativeModel,
  getVertexAI
} from '../src';
import { expect } from 'chai';

// TODO (dlarocque): Use separate Firebase config specifically for Vertex AI.
// TODO (dlarocque): Load this from environment variables, so we can set the
// config as a secret in CI.
export const config = {
  apiKey: 'AIzaSyBNHCyZ-bpv-WA-HpXTmigJm2aq3z1kaH8',
  authDomain: 'jscore-sandbox-141b5.firebaseapp.com',
  databaseURL: 'https://jscore-sandbox-141b5.firebaseio.com',
  projectId: 'jscore-sandbox-141b5',
  storageBucket: 'jscore-sandbox-141b5.appspot.com',
  messagingSenderId: '280127633210',
  appId: '1:280127633210:web:1eb2f7e8799c4d5a46c203',
  measurementId: 'G-1VL38N8YFE'
};

initializeApp(config);
const MODEL_NAME = 'gemini-1.5-pro';

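// Pinned, low-randomness generation settings (temperature 0, topK 1) so that
// any generated responses are as reproducible as possible across test runs.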
const generationConfig: GenerationConfig = {
  temperature: 0,
  topP: 0,
  topK: 1,
  responseMimeType: 'text/plain'
};

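// Block content rated LOW or above in each harm category. Where `method` is
// set, it selects whether blocking is based on the probability score or the
// severity score; when omitted, the backend's default method applies.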
const safetySettings: SafetySetting[] = [
  {
    category: HarmCategory.HARM_CATEGORY_HARASSMENT,
    threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    method: HarmBlockMethod.PROBABILITY
  },
  {
    category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
    threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    method: HarmBlockMethod.SEVERITY
  },
  {
    category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
    threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
  },
  {
    category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
    threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
  }
];

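// System instruction attached to the model instance created in the tests below.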
const systemInstruction: Content = {
  role: 'system',
  parts: [
    {
      text: 'You are a friendly and helpful assistant.'
    }
  ]
};

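// Integration tests that call the live Vertex AI backend through the SDK's
// public API, using the Firebase project configured above.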
describe('VertexAIService', () => {
  it('CountTokens text', async () => {
    const vertexAI = getVertexAI();
    const model = getGenerativeModel(vertexAI, {
      model: MODEL_NAME,
      generationConfig,
      systemInstruction,
      safetySettings
    });

    const response = await model.countTokens('Why is the sky blue?');

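    // The expected counts below are specific to gemini-1.5-pro; they may
    // change if the model version or its tokenizer changes.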
    expect(response.totalTokens).to.equal(6);
    expect(response.totalBillableCharacters).to.equal(16);
    expect(response.promptTokensDetails).to.not.be.null;
    expect(response.promptTokensDetails!.length).to.equal(1);
    expect(response.promptTokensDetails![0].modality).to.equal(Modality.TEXT);
    expect(response.promptTokensDetails![0].tokenCount).to.equal(6);
  });
});