
Commit 5260899

chore(generative-ai): update gemini model version in count token examples (#12683)
* chore(generative-ai): update count token example to use Gemini Flash's latest version 002
* fix(generative-ai): function names in test cases & update requirements.txt
* chore: updates function name
  (function name `test_gemini_count_token_example` is already in use)
* 🦉 Updates from OwlBot post-processor
  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* feat: clean up test-file
* feat: clean up test-file
* feat: clean up test-file

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 1485c42 · commit 5260899

8 files changed (+41 −34 lines)

generative_ai/embeddings/generate_embeddings_with_lower_dimension.py

Lines changed: 2 additions & 1 deletion
@@ -41,7 +41,8 @@ def generate_embeddings_with_lower_dimension() -> MultiModalEmbeddingResponse:

     model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")
     image = Image.load_from_file(
-        "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png")
+        "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
+    )

     embeddings = model.get_embeddings(
         image=image,
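The hunk above only shows the rewrapped `Image.load_from_file` call; end to end, the sample roughly does the following. This is a sketch, not the file's exact contents: the project ID is a placeholder, and `dimension=128` is inferred from the updated test in test_embeddings.py, which asserts a 128-element image embedding (128, 256, 512, and 1408 are the documented options for this model).

import vertexai
from vertexai.vision_models import Image, MultiModalEmbeddingModel

# Placeholder project; the real sample reads it from the environment.
vertexai.init(project="your-project-id", location="us-central1")

model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")
image = Image.load_from_file(
    "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
)

# dimension=128 is an assumption based on the test's assertion below.
embeddings = model.get_embeddings(
    image=image,
    dimension=128,
)
print(len(embeddings.image_embedding))  # 128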

generative_ai/embeddings/test_embeddings.py

Lines changed: 3 additions & 1 deletion
@@ -69,7 +69,9 @@ def test_multimodal_embedding_image() -> None:

 @backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
 def test_generate_embeddings_with_lower_dimension() -> None:
-    embeddings = generate_embeddings_with_lower_dimension.generate_embeddings_with_lower_dimension()
+    embeddings = (
+        generate_embeddings_with_lower_dimension.generate_embeddings_with_lower_dimension()
+    )
     assert embeddings is not None
     assert embeddings.image_embedding is not None
     assert len(embeddings.image_embedding) == 128

generative_ai/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ pandas==2.0.3; python_version == '3.8'
 pandas==2.1.4; python_version > '3.8'
 pillow==10.3.0; python_version < '3.8'
 pillow==10.3.0; python_version >= '3.8'
-google-cloud-aiplatform[all]==1.64.0
+google-cloud-aiplatform[all]==1.69.0
 sentencepiece==0.2.0
 google-auth==2.29.0
 anthropic[vertex]==0.28.0
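To confirm the bumped pin actually took effect in a local environment, a small sketch using only the standard library:

from importlib.metadata import version

# After installing from requirements.txt, this should print 1.69.0.
print(version("google-cloud-aiplatform"))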

generative_ai/token_count/api_example.py

Lines changed: 9 additions & 10 deletions
@@ -16,36 +16,35 @@
 PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")


-def count_token_service() -> int:
+def count_token_api_example() -> int:
     # [START generativeaionvertexai_token_count_sample_with_genai]
     import vertexai
     from vertexai.generative_models import GenerativeModel

-    # TODO(developer): Update and un-comment below line
-    # PROJECT_ID = "your-project-id"
+    # TODO(developer): Update project & location
     vertexai.init(project=PROJECT_ID, location="us-central1")

     # using Vertex AI Model as tokenzier
-    model = GenerativeModel("gemini-1.5-flash")
+    model = GenerativeModel("gemini-1.5-flash-002")

     prompt = "hello world"
     response = model.count_tokens(prompt)
     print(f"Prompt Token Count: {response.total_tokens}")
     print(f"Prompt Character Count: {response.total_billable_characters}")
+    # Example response:
+    # Prompt Token Count: 2
+    # Prompt Token Count: 10

     prompt = ["hello world", "what's the weather today"]
     response = model.count_tokens(prompt)
     print(f"Prompt Token Count: {response.total_tokens}")
     print(f"Prompt Character Count: {response.total_billable_characters}")
     # Example response:
-    # Prompt Token Count: 2
-    # Prompt Character Count: 10
-    # Prompt Token Count: 8
-    # Prompt Character Count: 31
-
+    # Prompt Token Count: 8
+    # Prompt Token Count: 31
     # [END generativeaionvertexai_token_count_sample_with_genai]
     return response.total_tokens


 if __name__ == "__main__":
-    count_token_service()
+    count_token_api_example()
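To try the renamed entry point directly, a hedged sketch: it assumes the working directory is generative_ai/token_count/ and that GOOGLE_CLOUD_PROJECT names a project with the Vertex AI API enabled. Because the module reads the variable at import time, it must be set before the import.

import os

# Placeholder project ID; the sample reads this at import time.
os.environ.setdefault("GOOGLE_CLOUD_PROJECT", "your-project-id")

import api_example

total = api_example.count_token_api_example()
print(total)  # total_tokens for the sample's last prompt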

generative_ai/token_count/local_sdk_example.py

Lines changed: 5 additions & 4 deletions
@@ -13,27 +13,28 @@
 # limitations under the License.


-def count_token_locally() -> int:
+def local_tokenizer_example() -> int:
     # [START generativeaionvertexai_token_count_sample_with_local_sdk]
     from vertexai.preview.tokenization import get_tokenizer_for_model

     # Using local tokenzier
-    tokenizer = get_tokenizer_for_model("gemini-1.5-flash")
+    tokenizer = get_tokenizer_for_model("gemini-1.5-flash-002")

     prompt = "hello world"
     response = tokenizer.count_tokens(prompt)
     print(f"Prompt Token Count: {response.total_tokens}")
+    # Example response:
+    # Prompt Token Count: 2

     prompt = ["hello world", "what's the weather today"]
     response = tokenizer.count_tokens(prompt)
     print(f"Prompt Token Count: {response.total_tokens}")
     # Example response:
-    # Prompt Token Count: 2
     # Prompt Token Count: 8

     # [END generativeaionvertexai_token_count_sample_with_local_sdk]
     return response.total_tokens


 if __name__ == "__main__":
-    count_token_locally()
+    local_tokenizer_example()
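Unlike api_example.py, this sample never calls `vertexai.init()`: counting happens against a locally packaged tokenizer. A minimal sketch of that distinction, using the same model version the commit pins:

from vertexai.preview.tokenization import get_tokenizer_for_model

# No project, credentials, or network call needed: token counting
# runs entirely locally.
tokenizer = get_tokenizer_for_model("gemini-1.5-flash-002")
result = tokenizer.count_tokens("hello world")
print(result.total_tokens)  # e.g. 2, matching the sample's output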

generative_ai/token_count/multimodal_token_count_example.py

Lines changed: 10 additions & 9 deletions
@@ -18,7 +18,7 @@
 PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")


-def count_tokens_multimodal() -> GenerationResponse:
+def count_tokens_multimodal_example() -> GenerationResponse:
     # [START generativeaionvertexai_gemini_token_count_multimodal]
     import vertexai
     from vertexai.generative_models import GenerativeModel, Part
@@ -37,29 +37,30 @@ def count_tokens_multimodal() -> GenerationResponse:
         "Provide a description of the video.",
     ]

-    # Prompt tokens count
+    # tokens count for user prompt
     response = model.count_tokens(contents)
     print(f"Prompt Token Count: {response.total_tokens}")
     print(f"Prompt Character Count: {response.total_billable_characters}")
+    # Example response:
+    # Prompt Token Count: 16822
+    # Prompt Character Count: 30

     # Send text to Gemini
     response = model.generate_content(contents)
     usage_metadata = response.usage_metadata

-    # Response tokens count
+    # tokens count for model response
     print(f"Prompt Token Count: {usage_metadata.prompt_token_count}")
     print(f"Candidates Token Count: {usage_metadata.candidates_token_count}")
     print(f"Total Token Count: {usage_metadata.total_token_count}")
     # Example response:
-    # Prompt Token Count: 16822
-    # Prompt Character Count: 30
-    # Prompt Token Count: 16822
-    # Candidates Token Count: 71
-    # Total Token Count: 16893
+    # Prompt Token Count: 16822
+    # Candidates Token Count: 71
+    # Total Token Count: 16893

     # [END generativeaionvertexai_gemini_token_count_multimodal]
     return response


 if __name__ == "__main__":
-    count_tokens_multimodal()
+    count_tokens_multimodal_example()
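The hunks above omit how `contents` is assembled. A hedged sketch of multimodal token counting follows; the video URI and MIME type are illustrative assumptions, not taken from this commit.

import vertexai
from vertexai.generative_models import GenerativeModel, Part

# Placeholder project; the real sample reads GOOGLE_CLOUD_PROJECT.
vertexai.init(project="your-project-id", location="us-central1")
model = GenerativeModel("gemini-1.5-flash-002")

contents = [
    # Assumed sample asset; any GCS-hosted video works.
    Part.from_uri(
        "gs://cloud-samples-data/generative-ai/video/pixel8.mp4",
        mime_type="video/mp4",
    ),
    "Provide a description of the video.",
]

# Count prompt tokens before paying for a generation call.
response = model.count_tokens(contents)
print(response.total_tokens, response.total_billable_characters)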

generative_ai/token_count/simple_example.py

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@
 PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")


-def count_tokens() -> GenerationResponse:
+def count_token_example() -> GenerationResponse:
     # [START generativeaionvertexai_gemini_token_count]
     import vertexai
     from vertexai.generative_models import GenerativeModel
@@ -55,4 +55,4 @@ def count_tokens() -> GenerationResponse:


 if __name__ == "__main__":
-    count_tokens()
+    count_token_example()

generative_ai/token_count/token_count_test.py renamed to generative_ai/token_count/test_token_count_examples.py

Lines changed: 9 additions & 6 deletions
@@ -18,16 +18,19 @@
 import simple_example


-def test_count_token() -> None:
-    assert local_sdk_example.count_token_locally()
-    assert api_example.count_token_service()
+def test_local_sdk_example() -> None:
+    assert local_sdk_example.local_tokenizer_example()
+    assert api_example.count_token_api_example()


-def test_gemini_count_token_example() -> None:
-    response = simple_example.count_tokens()
+def test_simple_example() -> None:
+    response = simple_example.count_token_example()
     assert response
     assert response.usage_metadata

-    response = multimodal_token_count_example.count_tokens_multimodal()
+
+def test_multimodal_example() -> None:
+    print(dir(multimodal_token_count_example))
+    response = multimodal_token_count_example.count_tokens_multimodal_example()
     assert response
     assert response.usage_metadata
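With the rename, pytest's default test_*.py discovery picks the module up automatically. A sketch of driving it from Python rather than the shell, assuming the sample dependencies and GOOGLE_CLOUD_PROJECT are configured:

import sys

import pytest

# Equivalent to `pytest -q test_token_count_examples.py`.
sys.exit(pytest.main(["-q", "test_token_count_examples.py"]))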
