@@ -116,7 +116,6 @@ describe('Count Tokens', () => {
       }
     };
     const response = await model.countTokens([imagePart]);
-    console.log(JSON.stringify(response));

     if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
       const expectedImageTokens = 259;
@@ -149,19 +148,30 @@ describe('Count Tokens', () => {
     };

     const response = await model.countTokens([audioPart]);
-    console.log(JSON.stringify(response));
-    // This may be different on Google AI
-    expect(response.totalTokens).to.be.undefined;
+
+    const textDetails = response.promptTokensDetails!.find(
+      d => d.modality === Modality.TEXT
+    );
+    const audioDetails = response.promptTokensDetails!.find(
+      d => d.modality === Modality.AUDIO
+    );
+
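+    // Expected token counts for audio input differ between the two backends.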
+    if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
+      expect(response.totalTokens).to.equal(6);
+      expect(
+        response.promptTokensDetails!.length,
+      ).to.equal(2);
+      expect(textDetails).to.deep.equal({ modality: Modality.TEXT, tokenCount: 1 });
+      expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO, tokenCount: 5 });
+    } else if (testConfig.ai.backend.backendType === BackendType.VERTEX_AI) {
+      expect(response.totalTokens).to.be.undefined;
+      expect(response.promptTokensDetails!.length).to.equal(1); // For some reason we don't get text
+      expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO }); // For some reason there are no tokens
+    }
+
     expect(
       response.totalBillableCharacters,
     ).to.be.undefined; // Incorrect behavior
-    expect(
-      response.promptTokensDetails!.length,
-    ).to.equal(1);
-    expect(
-      response.promptTokensDetails![0].modality,
-    ).to.equal(Modality.AUDIO);
-    expect(response.promptTokensDetails![0].tokenCount).to.be.undefined;
   });

   it('text, image, and audio input', async () => {
@@ -180,15 +190,6 @@ describe('Count Tokens', () => {
       contents: [{ role: 'user', parts: [textPart, imagePart, audioPart] }]
     };
     const response = await model.countTokens(request);
-    console.log(JSON.stringify(response));
-
-    expect(response.totalTokens).to.equal(261);
-    expect(
-      response.totalBillableCharacters,
-    ).to.equal('Describe these:'.length - 1); // For some reason it's the length-1
-
-    expect(response.promptTokensDetails!.length).to.equal(3);
-
     const textDetails = response.promptTokensDetails!.find(
       d => d.modality === Modality.TEXT
     );
@@ -199,18 +200,39 @@ describe('Count Tokens', () => {
       d => d.modality === Modality.AUDIO
     );

-    expect(textDetails).to.deep.equal({
-      modality: Modality.TEXT,
-      tokenCount: 3
-    });
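+    // Expected values differ between the Google AI and Vertex AI backends.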
+    if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
+      expect(response.totalTokens).to.equal(267);
+      expect(response.totalBillableCharacters).to.be.undefined;
+      expect(textDetails).to.deep.equal({
+        modality: Modality.TEXT,
+        tokenCount: 4
+      });
+      expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO, tokenCount: 5 });
+    } else if (testConfig.ai.backend.backendType === BackendType.VERTEX_AI) {
+      expect(response.totalTokens).to.equal(261);
+      expect(textDetails).to.deep.equal({
+        modality: Modality.TEXT,
+        tokenCount: 3
+      });
+      expect(
+        response.totalBillableCharacters,
+      ).to.equal('Describe these:'.length - 1); // For some reason it's the length-1
+      expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO }); // Incorrect behavior because there's no tokenCount
+    }
+
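+    // Both backends should report details for all three modalities.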
+    expect(response.promptTokensDetails!.length).to.equal(3);
+
     expect(visionDetails).to.deep.equal({
       modality: Modality.IMAGE,
       tokenCount: 258
     });
-    expect(audioDetails).to.deep.equal({ modality: Modality.AUDIO }); // Incorrect behavior because there's no tokenCount
   });

   it('public storage reference', async () => {
+    // This test is not expected to pass when using Google AI.
+    if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
+      return;
+    }
     const model = getGenerativeModel(testConfig.ai, {
       model: testConfig.model
     });
@@ -220,8 +242,8 @@
         fileUri: `gs://${FIREBASE_CONFIG.storageBucket}/images/tree.png`
       }
     };
+
     const response = await model.countTokens([filePart]);
-    console.log(JSON.stringify(response));

     const expectedFileTokens = 258;
     expect(response.totalTokens).to.equal(expectedFileTokens);