@@ -53,7 +53,8 @@ async function tokensTextOnly() {
   // (`promptTokenCount` and `candidatesTokenCount`, respectively),
   // as well as the combined token count (`totalTokenCount`).
   console.log(generateResult.response.usageMetadata);
-  // { promptTokenCount: 11, candidatesTokenCount: 131, totalTokenCount: 142 }
+  // candidatesTokenCount and totalTokenCount depend on response, may vary
+  // { promptTokenCount: 11, candidatesTokenCount: 124, totalTokenCount: 135 }
   // [END tokens_text_only]
 }

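Note: because `candidatesTokenCount` depends on what the model generates, pinning exact values in these comments will drift between runs. A minimal sketch of a run-independent check, reusing `generateResult` from the sample (the assertions themselves are illustrative, not part of this change):

const usage = generateResult.response.usageMetadata;
// promptTokenCount is deterministic for a fixed prompt and model version.
console.assert(usage.promptTokenCount === 11);
// totalTokenCount is the sum of prompt and candidate tokens.
console.assert(
  usage.totalTokenCount === usage.promptTokenCount + usage.candidatesTokenCount,
);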
@@ -93,7 +94,8 @@ async function tokensChat() {
   // (`promptTokenCount` and `candidatesTokenCount`, respectively),
   // as well as the combined token count (`totalTokenCount`).
   console.log(chatResult.response.usageMetadata);
-  // { promptTokenCount: 25, candidatesTokenCount: 22, totalTokenCount: 47 }
+  // candidatesTokenCount and totalTokenCount depend on response, may vary
+  // { promptTokenCount: 25, candidatesTokenCount: 25, totalTokenCount: 50 }
   // [END tokens_chat]
 }

@@ -136,6 +138,7 @@ async function tokensMultimodalImageInline() {
   // (`promptTokenCount` and `candidatesTokenCount`, respectively),
   // as well as the combined token count (`totalTokenCount`).
   console.log(generateResult.response.usageMetadata);
+  // candidatesTokenCount and totalTokenCount depend on response, may vary
   // { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 }
   // [END tokens_multimodal_image_inline]
 }
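Note: the 265 prompt tokens in this example stay stable because Gemini 1.5 models count a small image as a fixed 258 tokens, leaving the remaining handful for the short text prompt; only the candidate count varies between runs.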
@@ -181,6 +184,7 @@ async function tokensMultimodalImageFileApi() {
   // (`promptTokenCount` and `candidatesTokenCount`, respectively),
   // as well as the combined token count (`totalTokenCount`).
   console.log(generateResult.response.usageMetadata);
+  // candidatesTokenCount and totalTokenCount depend on response, may vary
   // { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 }
   // [END tokens_multimodal_image_file_api]
   await fileManager.deleteFile(uploadResult.file.name);
@@ -244,6 +248,7 @@ async function tokensMultimodalVideoAudioFileApi() {
   // (`promptTokenCount` and `candidatesTokenCount`, respectively),
   // as well as the combined token count (`totalTokenCount`).
   console.log(generateResult.response.usageMetadata);
+  // candidatesTokenCount and totalTokenCount depend on response, may vary
   // { promptTokenCount: 302, candidatesTokenCount: 46, totalTokenCount: 348 }
   // [END tokens_multimodal_video_audio_file_api]
   await fileManager.deleteFile(uploadVideoResult.file.name);
@@ -306,7 +311,7 @@ async function tokensCachedContent() {
   console.log(generateResult.response.usageMetadata);
   // {
   //   promptTokenCount: 323396,
-  //   candidatesTokenCount: 113,
+  //   candidatesTokenCount: 113, (depends on response, may vary)
   //   totalTokenCount: 323509,
   //   cachedContentTokenCount: 323386
   // }
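Note: `cachedContentTokenCount` is a subset of `promptTokenCount`, so in this example only 323396 - 323386 = 10 prompt tokens are fresh input for the request. A sketch of that breakdown, using the field names shown in the output above:

const usage = generateResult.response.usageMetadata;
// Tokens served from the cache versus tokens sent new with this request.
const uncachedPromptTokens =
  usage.promptTokenCount - usage.cachedContentTokenCount; // 10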
@@ -320,21 +325,26 @@ async function tokensSystemInstruction() {
   // Make sure to include these imports:
   // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
-  const model = genAI.getGenerativeModel({
+  const prompt = "The quick brown fox jumps over the lazy dog.";
+  const modelNoInstructions = genAI.getGenerativeModel({
+    model: "models/gemini-1.5-flash",
+  });
+
+  const resultNoInstructions = await modelNoInstructions.countTokens(prompt);
+
+  console.log(resultNoInstructions);
+  // { totalTokens: 11 }
+
+  const modelWithInstructions = genAI.getGenerativeModel({
     model: "models/gemini-1.5-flash",
     systemInstruction: "You are a cat. Your name is Neko.",
   });

-  const result = await model.countTokens(
-    "The quick brown fox jumps over the lazy dog.",
-  );
+  const resultWithInstructions =
+    await modelWithInstructions.countTokens(prompt);

-  console.log(result);
-  // {
-  //   totalTokens: 23,
-  //   systemInstructionsTokens: { partTokens: [ 11 ], roleTokens: 1 },
-  //   contentTokens: [ { partTokens: [Array], roleTokens: 1 } ]
-  // }
+  console.log(resultWithInstructions);
+  // { totalTokens: 23 }
   // [END tokens_system_instruction]
 }

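Note: counting the same prompt with and without the instruction isolates its cost: 23 - 11 = 12 tokens for the instruction text plus its role metadata. A sketch of that subtraction, reusing the variables from the sample above:

const systemInstructionTokens =
  resultWithInstructions.totalTokens - resultNoInstructions.totalTokens;
console.log(systemInstructionTokens);
// 12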
@@ -343,6 +353,17 @@ async function tokensTools() {
   // Make sure to include these imports:
   // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
+  const prompt =
+    "I have 57 cats, each owns 44 mittens, how many mittens is that in total?";
+
+  const modelNoTools = genAI.getGenerativeModel({
+    model: "models/gemini-1.5-flash",
+  });
+
+  const resultNoTools = await modelNoTools.countTokens(prompt);
+
+  console.log(resultNoTools);
+  // { totalTokens: 23 }

   const functionDeclarations = [
     { name: "add" },
@@ -351,22 +372,15 @@ async function tokensTools() {
     { name: "divide" },
   ];

-  const model = genAI.getGenerativeModel({
+  const modelWithTools = genAI.getGenerativeModel({
     model: "models/gemini-1.5-flash",
     tools: [{ functionDeclarations }],
   });

-  const result = await model.countTokens(
-    "I have 57 cats, each owns 44 mittens, how many mittens is that in total?",
-  );
+  const resultWithTools = await modelWithTools.countTokens(prompt);

-  console.log(result);
-  // {
-  //   totalTokens: 99,
-  //   systemInstructionsTokens: {},
-  //   contentTokens: [ { partTokens: [Array], roleTokens: 1 } ],
-  //   toolTokens: [ { functionDeclarationTokens: [Array] } ]
-  // }
+  console.log(resultWithTools);
+  // { totalTokens: 99 }
   // [END tokens_tools]
 }

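Note: the same subtraction isolates the tool overhead: the four function declarations raise the count for an identical prompt from 23 to 99, i.e. 76 tokens. A sketch, reusing the variables from the sample above:

const toolTokens = resultWithTools.totalTokens - resultNoTools.totalTokens;
console.log(toolTokens);
// 76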