From 8701a9a38916cf24650597df2cf61102c17de94e Mon Sep 17 00:00:00 2001
From: Eli Bendersky
Date: Fri, 12 Jul 2024 10:34:06 -0700
Subject: [PATCH 1/2] update

---
 genai/internal/snippets/example_test.go | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/genai/internal/snippets/example_test.go b/genai/internal/snippets/example_test.go
index d3dc688..965e709 100644
--- a/genai/internal/snippets/example_test.go
+++ b/genai/internal/snippets/example_test.go
@@ -167,14 +167,13 @@ func ExampleGenerativeModel_CountTokens_contextWindow() {
 	if err != nil {
 		log.Fatal(err)
 	}
-	fmt.Println("input_token_limit:", info.InputTokenLimit)
-	fmt.Println("output_token_limit:", info.OutputTokenLimit)
-	// [END tokens_context_window]
-	// [START tokens_context_window_return]
-	// input_token_limit: 30720
-	// output_token_limit: 2048
-	// [END tokens_context_window_return]
+	// Returns the "context window" for the model,
+	// which is the combined input and output token limits.
+	fmt.Printf("input_token_limit=%v\n", info.InputTokenLimit)
+	fmt.Printf("output_token_limit=%v\n", info.OutputTokenLimit)
+	// ( input_token_limit=30720, output_token_limit=2048 )
+	// [END tokens_context_window]
 }
 
 func ExampleGenerativeModel_CountTokens_textOnly() {

From 49f7ac5f6357f372e175794393e4f7cc27a9ec11 Mon Sep 17 00:00:00 2001
From: Eli Bendersky
Date: Fri, 12 Jul 2024 10:51:12 -0700
Subject: [PATCH 2/2] update samples

---
 genai/example_test.go                   | 51 ++++++++++++++--
 genai/internal/snippets/example_test.go | 77 +++++++++++++------------
 2 files changed, 88 insertions(+), 40 deletions(-)

diff --git a/genai/example_test.go b/genai/example_test.go
index 590c43e..3b9cdf0 100644
--- a/genai/example_test.go
+++ b/genai/example_test.go
@@ -166,8 +166,12 @@ func ExampleGenerativeModel_CountTokens_contextWindow() {
 	if err != nil {
 		log.Fatal(err)
 	}
-	fmt.Println("input_token_limit:", info.InputTokenLimit)
-	fmt.Println("output_token_limit:", info.OutputTokenLimit)
+
+	// Returns the "context window" for the model,
+	// which is the combined input and output token limits.
+	fmt.Printf("input_token_limit=%v\n", info.InputTokenLimit)
+	fmt.Printf("output_token_limit=%v\n", info.OutputTokenLimit)
+	// ( input_token_limit=30720, output_token_limit=2048 )
 }
@@ -188,6 +192,7 @@ func ExampleGenerativeModel_CountTokens_textOnly() {
 	}
 
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
+	// ( total_tokens: 10 )
 
 	resp, err := model.GenerateContent(ctx, genai.Text(prompt))
 	if err != nil {
@@ -197,7 +202,7 @@
 	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
 	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
 	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
-
+	// ( prompt_token_count: 10, candidates_token_count: 38, total_token_count: 48 )
 }
 
 func ExampleGenerativeModel_CountTokens_cachedContent() {
@@ -226,6 +231,7 @@ func ExampleGenerativeModel_CountTokens_cachedContent() {
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
+	// ( total_tokens: 5 )
 
 	resp, err := modelWithCache.GenerateContent(ctx, genai.Text(prompt))
 	if err != nil {
@@ -236,6 +242,7 @@
 	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
 	fmt.Println("cached_content_token_count:", resp.UsageMetadata.CachedContentTokenCount)
 	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 33007, candidates_token_count: 39, cached_content_token_count: 33002, total_token_count: 33046 )
 }
@@ -253,12 +260,26 @@ func ExampleGenerativeModel_CountTokens_imageInline() {
 	if err != nil {
 		log.Fatal(err)
 	}
-
+	// Call `CountTokens` to get the input token count
+	// of the combined text and file (`total_tokens`).
+	// An image's display or file size does not affect its token count.
+	// Optionally, you can call `CountTokens` for the text and file separately.
 	tokResp, err := model.CountTokens(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
 	if err != nil {
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
+	// ( total_tokens: 264 )
+
+	resp, err := model.GenerateContent(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
+	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
+	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 264, candidates_token_count: 100, total_token_count: 364 )
 }
@@ -286,11 +307,26 @@ func ExampleGenerativeModel_CountTokens_imageUploadFile() {
 	fd := genai.FileData{
 		URI: uploadedFile.URI,
 	}
+	// Call `CountTokens` to get the input token count
+	// of the combined text and file (`total_tokens`).
+	// An image's display or file size does not affect its token count.
+	// Optionally, you can call `CountTokens` for the text and file separately.
 	tokResp, err := model.CountTokens(ctx, genai.Text(prompt), fd)
 	if err != nil {
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
+	// ( total_tokens: 264 )
+
+	resp, err := model.GenerateContent(ctx, genai.Text(prompt), fd)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
+	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
+	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 264, candidates_token_count: 100, total_token_count: 364 )
 }
@@ -326,9 +362,14 @@ func ExampleGenerativeModel_CountTokens_chat() {
 		log.Fatal(err)
 	}
 
+	// On the response for SendMessage, use `UsageMetadata` to get
+	// separate input and output token counts
+	// (`prompt_token_count` and `candidates_token_count`, respectively),
+	// as well as the combined token count (`total_token_count`).
 	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
 	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
 	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )
 }
@@ -349,6 +390,7 @@ func ExampleGenerativeModel_CountTokens_systemInstruction() {
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", respNoInstruction.TotalTokens)
+	// ( total_tokens: 10 )
 
 	// Same prompt, this time with system instruction
 	model.SystemInstruction = &genai.Content{
@@ -359,6 +401,7 @@
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", respWithInstruction.TotalTokens)
+	// ( total_tokens: 21 )
 }

diff --git a/genai/internal/snippets/example_test.go b/genai/internal/snippets/example_test.go
index 965e709..0dd2cff 100644
--- a/genai/internal/snippets/example_test.go
+++ b/genai/internal/snippets/example_test.go
@@ -194,6 +194,7 @@ func ExampleGenerativeModel_CountTokens_textOnly() {
 	}
 
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
+	// ( total_tokens: 10 )
 
 	resp, err := model.GenerateContent(ctx, genai.Text(prompt))
 	if err != nil {
@@ -203,14 +204,7 @@
 	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
 	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
 	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
-	// [END tokens_text_only]
-
-	// [START tokens_text_only_return]
-	// total_tokens: 10
-	// prompt_token_count: 10
-	// candidates_token_count: 38
-	// total_token_count: 48
-	// [END tokens_text_only_return]
+	// ( prompt_token_count: 10, candidates_token_count: 38, total_token_count: 48 )
 }
 
 func ExampleGenerativeModel_CountTokens_cachedContent() {
@@ -240,6 +234,7 @@ func ExampleGenerativeModel_CountTokens_cachedContent() {
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
+	// ( total_tokens: 5 )
 
 	resp, err := modelWithCache.GenerateContent(ctx, genai.Text(prompt))
 	if err != nil {
@@ -250,15 +245,8 @@
 	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
 	fmt.Println("cached_content_token_count:", resp.UsageMetadata.CachedContentTokenCount)
 	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 33007, candidates_token_count: 39, cached_content_token_count: 33002, total_token_count: 33046 )
 	// [END tokens_cached_content]
-
-	// [START tokens_cached_content_return]
-	// total_tokens: 5
-	// prompt_token_count: 33007
-	// candidates_token_count: 39
-	// cached_content_token_count: 33002
-	// total_token_count: 33046
-	// [END tokens_cached_content_return]
 }
 
 func ExampleGenerativeModel_CountTokens_imageInline() {
@@ -276,17 +264,27 @@ func ExampleGenerativeModel_CountTokens_imageInline() {
 	if err != nil {
 		log.Fatal(err)
 	}
-
+	// Call `CountTokens` to get the input token count
+	// of the combined text and file (`total_tokens`).
+	// An image's display or file size does not affect its token count.
+	// Optionally, you can call `CountTokens` for the text and file separately.
 	tokResp, err := model.CountTokens(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
 	if err != nil {
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
-	// [END tokens_multimodal_image_inline]
+	// ( total_tokens: 264 )
 
-	// [START tokens_multimodal_image_inline_return]
-	// total_tokens: 264
-	// [END tokens_multimodal_image_inline_return]
+	resp, err := model.GenerateContent(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
+	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
+	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 264, candidates_token_count: 100, total_token_count: 364 )
+	// [END tokens_multimodal_image_inline]
 }
 
 func ExampleGenerativeModel_CountTokens_imageUploadFile() {
@@ -314,16 +312,27 @@
 	fd := genai.FileData{
 		URI: uploadedFile.URI,
 	}
+	// Call `CountTokens` to get the input token count
+	// of the combined text and file (`total_tokens`).
+	// An image's display or file size does not affect its token count.
+	// Optionally, you can call `CountTokens` for the text and file separately.
 	tokResp, err := model.CountTokens(ctx, genai.Text(prompt), fd)
 	if err != nil {
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", tokResp.TotalTokens)
-	// [END tokens_multimodal_image_file_api]
+	// ( total_tokens: 264 )
+
+	resp, err := model.GenerateContent(ctx, genai.Text(prompt), fd)
+	if err != nil {
+		log.Fatal(err)
+	}
 
-	// [START tokens_multimodal_image_file_api_return]
-	// total_tokens: 264
-	// [END tokens_multimodal_image_file_api_return]
+	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
+	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
+	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 264, candidates_token_count: 100, total_token_count: 364 )
+	// [END tokens_multimodal_image_file_api]
 }
 
 func ExampleGenerativeModel_CountTokens_chat() {
@@ -359,16 +368,15 @@
 		log.Fatal(err)
 	}
 
+	// On the response for SendMessage, use `UsageMetadata` to get
+	// separate input and output token counts
+	// (`prompt_token_count` and `candidates_token_count`, respectively),
+	// as well as the combined token count (`total_token_count`).
 	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
 	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
 	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
+	// ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )
 	// [END tokens_chat]
-
-	// [START tokens_chat_return]
-	// prompt_token_count: 21
-	// candidates_token_count: 1
-	// total_token_count: 22
-	// [END tokens_chat_return]
 }
 
 func ExampleGenerativeModel_CountTokens_systemInstruction() {
@@ -389,6 +397,7 @@
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", respNoInstruction.TotalTokens)
+	// ( total_tokens: 10 )
 
 	// Same prompt, this time with system instruction
 	model.SystemInstruction = &genai.Content{
@@ -399,12 +408,8 @@
 		log.Fatal(err)
 	}
 	fmt.Println("total_tokens:", respWithInstruction.TotalTokens)
+	// ( total_tokens: 21 )
 	// [END tokens_system_instruction]
-
-	// [START tokens_system_instruction_return]
-	// totak_tokens: 10
-	// totak_tokens: 21
-	// [END tokens_system_instruction_return]
 }
 
 // This example shows how to get a JSON response that conforms to a schema.
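
Note: the hunks above show only the changed regions, so the client setup that precedes each CountTokens call is out of frame. Below is a minimal standalone sketch of how the token-counting calls exercised by these snippets fit together. It assumes the github.com/google/generative-ai-go/genai module, the google.golang.org/api/option helper, a GEMINI_API_KEY environment variable, and the "gemini-1.5-flash" model name; none of these are confirmed by the diff itself.

// countdemo.go: standalone sketch, not part of the patch.
// Assumptions (not shown in the diff hunks): module path, option package,
// GEMINI_API_KEY env var, and model name "gemini-1.5-flash".
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/generative-ai-go/genai"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, option.WithAPIKey(os.Getenv("GEMINI_API_KEY")))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	model := client.GenerativeModel("gemini-1.5-flash")
	prompt := "The quick brown fox jumps over the lazy dog."

	// Input-side count before sending, as in the textOnly snippet above.
	tokResp, err := model.CountTokens(ctx, genai.Text(prompt))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total_tokens:", tokResp.TotalTokens)

	// After generation, UsageMetadata reports the input, output, and
	// combined counts, as in the chat snippet above.
	resp, err := model.GenerateContent(ctx, genai.Text(prompt))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
	fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
	fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
}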