@@ -221,7 +221,7 @@ def test_convert_google_chunk_to_streaming_chunk_real_example(self, monkeypatch)
221221 chunk1 = types .GenerateContentResponse (
222222 candidates = [chunk1_candidate ],
223223 usage_metadata = chunk1_usage ,
224- model_version = "gemini-2.0-flash" ,
224+ model_version = "gemini-2.5-flash" ,
225225 response_id = None ,
226226 create_time = None ,
227227 prompt_feedback = None ,
@@ -237,7 +237,7 @@ def test_convert_google_chunk_to_streaming_chunk_real_example(self, monkeypatch)
237237 assert streaming_chunk1 .finish_reason is None
238238 assert streaming_chunk1 .index == 0
239239 assert "received_at" in streaming_chunk1 .meta
240- assert streaming_chunk1 .meta ["model" ] == "gemini-2.0-flash"
240+ assert streaming_chunk1 .meta ["model" ] == "gemini-2.5-flash"
241241 assert "usage" in streaming_chunk1 .meta
242242 assert streaming_chunk1 .meta ["usage" ]["prompt_tokens" ] == 217
243243 assert streaming_chunk1 .meta ["usage" ]["completion_tokens" ] is None
@@ -268,7 +268,7 @@ def test_convert_google_chunk_to_streaming_chunk_real_example(self, monkeypatch)
268268 chunk2 = types .GenerateContentResponse (
269269 candidates = [chunk2_candidate ],
270270 usage_metadata = chunk2_usage ,
271- model_version = "gemini-2.0-flash" ,
271+ model_version = "gemini-2.5-flash" ,
272272 response_id = None ,
273273 create_time = None ,
274274 prompt_feedback = None ,
@@ -284,7 +284,7 @@ def test_convert_google_chunk_to_streaming_chunk_real_example(self, monkeypatch)
284284 assert streaming_chunk2 .finish_reason is None
285285 assert streaming_chunk2 .index == 1
286286 assert "received_at" in streaming_chunk2 .meta
287- assert streaming_chunk2 .meta ["model" ] == "gemini-2.0-flash"
287+ assert streaming_chunk2 .meta ["model" ] == "gemini-2.5-flash"
288288 assert "usage" in streaming_chunk2 .meta
289289 assert streaming_chunk2 .meta ["usage" ]["prompt_tokens" ] == 217
290290 assert streaming_chunk2 .meta ["usage" ]["completion_tokens" ] is None
@@ -329,7 +329,7 @@ def test_convert_google_chunk_to_streaming_chunk_real_example(self, monkeypatch)
329329 chunk = types .GenerateContentResponse (
330330 candidates = [candidate ],
331331 usage_metadata = usage_metadata ,
332- model_version = "gemini-2.0-flash" ,
332+ model_version = "gemini-2.5-flash" ,
333333 response_id = None ,
334334 create_time = None ,
335335 prompt_feedback = None ,
@@ -346,7 +346,7 @@ def test_convert_google_chunk_to_streaming_chunk_real_example(self, monkeypatch)
346346 assert streaming_chunk .finish_reason == "stop"
347347 assert streaming_chunk .index == 2
348348 assert "received_at" in streaming_chunk .meta
349- assert streaming_chunk .meta ["model" ] == "gemini-2.0-flash"
349+ assert streaming_chunk .meta ["model" ] == "gemini-2.5-flash"
350350 assert streaming_chunk .component_info == component_info
351351 assert "usage" in streaming_chunk .meta
352352 assert streaming_chunk .meta ["usage" ]["prompt_tokens" ] == 144
@@ -388,7 +388,7 @@ class TestGoogleGenAIChatGenerator:
388388 def test_init_default (self , monkeypatch ):
389389 monkeypatch .setenv ("GOOGLE_API_KEY" , "test-api-key" )
390390 component = GoogleGenAIChatGenerator ()
391- assert component ._model == "gemini-2.0-flash"
391+ assert component ._model == "gemini-2.5-flash"
392392 assert component ._generation_kwargs == {}
393393 assert component ._safety_settings == []
394394 assert component ._streaming_callback is None
@@ -412,13 +412,13 @@ def test_init_with_parameters(self, monkeypatch):
412412 monkeypatch .setenv ("GOOGLE_API_KEY" , "test-api-key-from-env" )
413413 component = GoogleGenAIChatGenerator (
414414 api_key = Secret .from_token ("test-api-key-from-env" ),
415- model = "gemini-2.0-flash" ,
415+ model = "gemini-2.5-flash" ,
416416 streaming_callback = print_streaming_chunk ,
417417 generation_kwargs = {"temperature" : 0.5 , "max_output_tokens" : 100 },
418418 safety_settings = [{"category" : "HARM_CATEGORY_HARASSMENT" , "threshold" : "BLOCK_MEDIUM_AND_ABOVE" }],
419419 tools = [tool ],
420420 )
421- assert component ._model == "gemini-2.0-flash"
421+ assert component ._model == "gemini-2.5-flash"
422422 assert component ._streaming_callback is print_streaming_chunk
423423 assert component ._generation_kwargs == {"temperature" : 0.5 , "max_output_tokens" : 100 }
424424 assert component ._safety_settings == [
@@ -429,13 +429,13 @@ def test_init_with_parameters(self, monkeypatch):
429429 def test_init_with_toolset (self , tools , monkeypatch ):
430430 monkeypatch .setenv ("GOOGLE_API_KEY" , "test-api-key" )
431431 toolset = Toolset (tools )
432- generator = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" , tools = toolset )
432+ generator = GoogleGenAIChatGenerator (tools = toolset )
433433 assert generator ._tools == toolset
434434
435435 def test_to_dict_with_toolset (self , tools , monkeypatch ):
436436 monkeypatch .setenv ("GOOGLE_API_KEY" , "test-api-key" )
437437 toolset = Toolset (tools )
438- generator = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" , tools = toolset )
438+ generator = GoogleGenAIChatGenerator (tools = toolset )
439439 data = generator .to_dict ()
440440
441441 assert data ["init_parameters" ]["tools" ]["type" ] == "haystack.tools.toolset.Toolset"
@@ -445,7 +445,7 @@ def test_to_dict_with_toolset(self, tools, monkeypatch):
445445 def test_from_dict_with_toolset (self , tools , monkeypatch ):
446446 monkeypatch .setenv ("GOOGLE_API_KEY" , "test-api-key" )
447447 toolset = Toolset (tools )
448- component = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" , tools = toolset )
448+ component = GoogleGenAIChatGenerator (tools = toolset )
449449 data = component .to_dict ()
450450
451451 deserialized_component = GoogleGenAIChatGenerator .from_dict (data )
@@ -480,7 +480,7 @@ def test_init_with_mixed_tools_and_toolsets(self, monkeypatch):
480480 toolset1 = Toolset ([tool2 ])
481481
482482 # Initialize with mixed list: Tool, Toolset, Tool
483- generator = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" , tools = [tool1 , toolset1 , tool3 ])
483+ generator = GoogleGenAIChatGenerator (tools = [tool1 , toolset1 , tool3 ])
484484
485485 assert generator ._tools == [tool1 , toolset1 , tool3 ]
486486 assert isinstance (generator ._tools , list )
@@ -508,7 +508,7 @@ def test_serde_with_mixed_tools_and_toolsets(self, monkeypatch):
508508
509509 toolset1 = Toolset ([tool2 ])
510510
511- generator = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" , tools = [tool1 , toolset1 ])
511+ generator = GoogleGenAIChatGenerator (tools = [tool1 , toolset1 ])
512512 data = generator .to_dict ()
513513
514514 # Verify serialization preserves structure
@@ -737,12 +737,12 @@ def test_convert_message_to_google_genai_format_with_reasoning_content(self):
737737 @pytest .mark .integration
738738 def test_live_run (self ) -> None :
739739 chat_messages = [ChatMessage .from_user ("What's the capital of France" )]
740- component = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" )
740+ component = GoogleGenAIChatGenerator ()
741741 results = component .run (chat_messages )
742742 assert len (results ["replies" ]) == 1
743743 message : ChatMessage = results ["replies" ][0 ]
744744 assert message .text and "paris" in message .text .lower (), "Response does not contain Paris"
745- assert "gemini-2.0-flash" in message .meta ["model" ]
745+ assert "gemini-2.5-flash" in message .meta ["model" ]
746746 assert message .meta ["finish_reason" ] == "stop"
747747
748748 @pytest .mark .skipif (
@@ -846,7 +846,7 @@ def test_live_run_with_tools_streaming(self, tools):
846846 assert tool_message is not None , "No message with tool call found"
847847 assert tool_message .tool_calls is not None , "Tool message has no tool calls"
848848 assert len (tool_message .tool_calls ) == 1 , "Tool message has multiple tool calls"
849- # Google Gen AI (gemini-2.0-flash and gemini-2.5-pro-preview-05-06) does not provide ids for tool calls although
849+ # Google Gen AI (gemini-2.5-flash and gemini-2.5-pro-preview-05-06) does not provide ids for tool calls although
850850 # it is in the response schema, revisit in future to see if there are changes and id is provided
851851 # assert tool_message.tool_calls[0].id is not None, "Tool call has no id"
852852 assert tool_message .tool_calls [0 ].tool_name == "weather"
@@ -880,7 +880,7 @@ def test_live_run_with_toolset(self, tools):
880880 assert message .tool_calls is not None , "Message has no tool calls"
881881 assert len (message .tool_calls ) == 1 , "Message has multiple tool calls and it should only have one"
882882 tool_call = message .tool_calls [0 ]
883- # Google Gen AI (gemini-2.0-flash and gemini-2.5-pro-preview-05-06) does not provide ids for tool calls although
883+ # Google Gen AI (gemini-2.5-flash and gemini-2.5-pro-preview-05-06) does not provide ids for tool calls although
884884 # it is in the response schema, revisit in future to see if there are changes and id is provided
885885 # assert tool_call.id is not None, "Tool call has no id"
886886 assert message .meta ["finish_reason" ] == "stop"
@@ -1144,7 +1144,7 @@ def test_live_run_with_thinking_unsupported_model_fails_fast(self):
11441144 """
11451145 Integration test to verify that thinking configuration fails fast with unsupported models.
11461146 """
1147- # gemini-2.0-flash is known to not support thinking
1147+ # gemini-2.0-flash does not support thinking
11481148 chat_messages = [ChatMessage .from_user ("Why is the sky blue?" )]
11491149 component = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" , generation_kwargs = {"thinking_budget" : 1024 })
11501150
@@ -1177,7 +1177,7 @@ async def test_live_run_async(self) -> None:
11771177 assert len (results ["replies" ]) == 1
11781178 message : ChatMessage = results ["replies" ][0 ]
11791179 assert message .text and "paris" in message .text .lower (), "Response does not contain Paris"
1180- assert "gemini-2.0-flash" in message .meta ["model" ]
1180+ assert "gemini-2.5-flash" in message .meta ["model" ]
11811181 assert message .meta ["finish_reason" ] == "stop"
11821182
11831183 async def test_live_run_async_streaming (self ):
@@ -1250,7 +1250,7 @@ async def test_live_run_async_with_thinking_unsupported_model_fails_fast(self):
12501250 Async integration test to verify that thinking configuration fails fast with unsupported models.
12511251 This tests the fail-fast principle - no silent fallbacks.
12521252 """
1253- # Use a model that does NOT support thinking features
1253+ # Use a model that does NOT support thinking features (gemini-2.0-flash)
12541254 chat_messages = [ChatMessage .from_user ("Why is the sky blue?" )]
12551255 component = GoogleGenAIChatGenerator (model = "gemini-2.0-flash" , generation_kwargs = {"thinking_budget" : 1024 })
12561256
0 commit comments