Skip to content

Commit fd3965f

Browse files
authored
Update count_tokens to show before/after on SI and tools (#215)
1 parent d2d42ca commit fd3965f

File tree

1 file changed

+38
-24
lines changed

1 file changed

+38
-24
lines changed

samples/count_tokens.js

Lines changed: 38 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -53,7 +53,8 @@ async function tokensTextOnly() {
5353
// (`promptTokenCount` and `candidatesTokenCount`, respectively),
5454
// as well as the combined token count (`totalTokenCount`).
5555
console.log(generateResult.response.usageMetadata);
56-
// { promptTokenCount: 11, candidatesTokenCount: 131, totalTokenCount: 142 }
56+
// candidatesTokenCount and totalTokenCount depend on response, may vary
57+
// { promptTokenCount: 11, candidatesTokenCount: 124, totalTokenCount: 135 }
5758
// [END tokens_text_only]
5859
}
5960

@@ -93,7 +94,8 @@ async function tokensChat() {
9394
// (`promptTokenCount` and `candidatesTokenCount`, respectively),
9495
// as well as the combined token count (`totalTokenCount`).
9596
console.log(chatResult.response.usageMetadata);
96-
// { promptTokenCount: 25, candidatesTokenCount: 22, totalTokenCount: 47 }
97+
// candidatesTokenCount and totalTokenCount depend on response, may vary
98+
// { promptTokenCount: 25, candidatesTokenCount: 25, totalTokenCount: 50 }
9799
// [END tokens_chat]
98100
}
99101

@@ -136,6 +138,7 @@ async function tokensMultimodalImageInline() {
136138
// (`promptTokenCount` and `candidatesTokenCount`, respectively),
137139
// as well as the combined token count (`totalTokenCount`).
138140
console.log(generateResult.response.usageMetadata);
141+
// candidatesTokenCount and totalTokenCount depend on response, may vary
139142
// { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 }
140143
// [END tokens_multimodal_image_inline]
141144
}
@@ -181,6 +184,7 @@ async function tokensMultimodalImageFileApi() {
181184
// (`promptTokenCount` and `candidatesTokenCount`, respectively),
182185
// as well as the combined token count (`totalTokenCount`).
183186
console.log(generateResult.response.usageMetadata);
187+
// candidatesTokenCount and totalTokenCount depend on response, may vary
184188
// { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 }
185189
// [END tokens_multimodal_image_file_api]
186190
await fileManager.deleteFile(uploadResult.file.name);
@@ -244,6 +248,7 @@ async function tokensMultimodalVideoAudioFileApi() {
244248
// (`promptTokenCount` and `candidatesTokenCount`, respectively),
245249
// as well as the combined token count (`totalTokenCount`).
246250
console.log(generateResult.response.usageMetadata);
251+
// candidatesTokenCount and totalTokenCount depend on response, may vary
247252
// { promptTokenCount: 302, candidatesTokenCount: 46, totalTokenCount: 348 }
248253
// [END tokens_multimodal_video_audio_file_api]
249254
await fileManager.deleteFile(uploadVideoResult.file.name);
@@ -306,7 +311,7 @@ async function tokensCachedContent() {
306311
console.log(generateResult.response.usageMetadata);
307312
// {
308313
// promptTokenCount: 323396,
309-
// candidatesTokenCount: 113,
314+
// candidatesTokenCount: 113, (depends on response, may vary)
310315
// totalTokenCount: 323509,
311316
// cachedContentTokenCount: 323386
312317
// }
@@ -320,21 +325,26 @@ async function tokensSystemInstruction() {
320325
// Make sure to include these imports:
321326
// import { GoogleGenerativeAI } from "@google/generative-ai";
322327
const genAI = new GoogleGenerativeAI(process.env.API_KEY);
323-
const model = genAI.getGenerativeModel({
328+
const prompt = "The quick brown fox jumps over the lazy dog.";
329+
const modelNoInstructions = genAI.getGenerativeModel({
330+
model: "models/gemini-1.5-flash",
331+
});
332+
333+
const resultNoInstructions = await modelNoInstructions.countTokens(prompt);
334+
335+
console.log(resultNoInstructions);
336+
// { totalTokens: 11 }
337+
338+
const modelWithInstructions = genAI.getGenerativeModel({
324339
model: "models/gemini-1.5-flash",
325340
systemInstruction: "You are a cat. Your name is Neko.",
326341
});
327342

328-
const result = await model.countTokens(
329-
"The quick brown fox jumps over the lazy dog.",
330-
);
343+
const resultWithInstructions =
344+
await modelWithInstructions.countTokens(prompt);
331345

332-
console.log(result);
333-
// {
334-
// totalTokens: 23,
335-
// systemInstructionsTokens: { partTokens: [ 11 ], roleTokens: 1 },
336-
// contentTokens: [ { partTokens: [Array], roleTokens: 1 } ]
337-
// }
346+
console.log(resultWithInstructions);
347+
// { totalTokens: 23 }
338348
// [END tokens_system_instruction]
339349
}
340350

@@ -343,6 +353,17 @@ async function tokensTools() {
343353
// Make sure to include these imports:
344354
// import { GoogleGenerativeAI } from "@google/generative-ai";
345355
const genAI = new GoogleGenerativeAI(process.env.API_KEY);
356+
const prompt =
357+
"I have 57 cats, each owns 44 mittens, how many mittens is that in total?";
358+
359+
const modelNoTools = genAI.getGenerativeModel({
360+
model: "models/gemini-1.5-flash",
361+
});
362+
363+
const resultNoTools = await modelNoTools.countTokens(prompt);
364+
365+
console.log(resultNoTools);
366+
// { totalTokens: 23 }
346367

347368
const functionDeclarations = [
348369
{ name: "add" },
@@ -351,22 +372,15 @@ async function tokensTools() {
351372
{ name: "divide" },
352373
];
353374

354-
const model = genAI.getGenerativeModel({
375+
const modelWithTools = genAI.getGenerativeModel({
355376
model: "models/gemini-1.5-flash",
356377
tools: [{ functionDeclarations }],
357378
});
358379

359-
const result = await model.countTokens(
360-
"I have 57 cats, each owns 44 mittens, how many mittens is that in total?",
361-
);
380+
const resultWithTools = await modelWithTools.countTokens(prompt);
362381

363-
console.log(result);
364-
// {
365-
// totalTokens: 99,
366-
// systemInstructionsTokens: {},
367-
// contentTokens: [ { partTokens: [Array], roleTokens: 1 } ],
368-
// toolTokens: [ { functionDeclarationTokens: [Array] } ]
369-
// }
382+
console.log(resultWithTools);
383+
// { totalTokens: 99 }
370384
// [END tokens_tools]
371385
}
372386

0 commit comments

Comments (0)