Skip to content

Commit b8a7dba

Browse files
authored
Add missing GA workflow for aimlapi integration (#2504)
* Add missing GA workflow for aimlapi integration * Lint * Refactoring to fix type issues * Fix tests * Fixed tests and examples; Update _prepare_api_call * Updated haystack-ai dep to support ToolsType
1 parent b094b8d commit b8a7dba

File tree

7 files changed

+149
-52
lines changed

7 files changed

+149
-52
lines changed

.github/workflows/aimlapi.yml

Lines changed: 83 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,83 @@
1+
# This workflow comes from https://github.com/ofek/hatch-mypyc
2+
# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml
3+
name: Test / aimlapi
4+
5+
on:
6+
schedule:
7+
- cron: "0 0 * * *"
8+
pull_request:
9+
paths:
10+
- "integrations/aimlapi/**"
11+
- "!integrations/aimlapi/*.md"
12+
- ".github/workflows/aimlapi.yml"
13+
14+
defaults:
15+
run:
16+
working-directory: integrations/aimlapi
17+
18+
concurrency:
19+
group: aimlapi-${{ github.head_ref }}
20+
cancel-in-progress: true
21+
22+
env:
23+
PYTHONUNBUFFERED: "1"
24+
FORCE_COLOR: "1"
25+
AIMLAPI_API_KEY: ${{ secrets.AIMLAPI_API_KEY }}
26+
27+
jobs:
28+
run:
29+
name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }}
30+
runs-on: ${{ matrix.os }}
31+
strategy:
32+
fail-fast: false
33+
matrix:
34+
os: [ubuntu-latest, windows-latest, macos-latest]
35+
python-version: ["3.9", "3.13"]
36+
37+
steps:
38+
- name: Support longpaths
39+
if: matrix.os == 'windows-latest'
40+
working-directory: .
41+
run: git config --system core.longpaths true
42+
43+
- uses: actions/checkout@v5
44+
45+
- name: Set up Python ${{ matrix.python-version }}
46+
uses: actions/setup-python@v6
47+
with:
48+
python-version: ${{ matrix.python-version }}
49+
50+
- name: Install Hatch
51+
run: pip install --upgrade hatch
52+
53+
- name: Lint
54+
if: matrix.python-version == '3.9' && runner.os == 'Linux'
55+
run: hatch run fmt-check && hatch run test:types
56+
57+
- name: Generate docs
58+
if: matrix.python-version == '3.9' && runner.os == 'Linux'
59+
run: hatch run docs
60+
61+
- name: Run tests
62+
run: hatch run test:cov-retry
63+
64+
- name: Run unit tests with lowest direct dependencies
65+
run: |
66+
hatch run uv pip compile pyproject.toml --resolution lowest-direct --output-file requirements_lowest_direct.txt
67+
hatch -e test env run -- uv pip install -r requirements_lowest_direct.txt
68+
hatch run test:unit
69+
70+
- name: Nightly - run unit tests with Haystack main branch
71+
if: github.event_name == 'schedule'
72+
run: |
73+
hatch env prune
74+
hatch -e test env run -- uv pip install git+https://github.com/deepset-ai/haystack.git@main
75+
hatch run test:unit
76+
77+
- name: Send event to Datadog for nightly failures
78+
if: failure() && github.event_name == 'schedule'
79+
uses: ./.github/actions/send_failure
80+
with:
81+
title: |
82+
Core integrations nightly tests failure: ${{ github.workflow }}
83+
api-key: ${{ secrets.CORE_DATADOG_API_KEY }}

integrations/aimlapi/examples/aimlapi_basic_example.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,8 @@
22
#
33
# SPDX-License-Identifier: Apache-2.0
44

5+
# ruff: noqa: T201
6+
57
"""Basic text generation example using AIMLAPIChatGenerator."""
68

79
from haystack.dataclasses import ChatMessage
@@ -12,9 +14,7 @@
1214
def main() -> None:
1315
"""Generate a response without using any tools."""
1416

15-
generator = AIMLAPIChatGenerator(
16-
model="openai/gpt-5-chat-latest"
17-
)
17+
generator = AIMLAPIChatGenerator(model="openai/gpt-5-chat-latest")
1818

1919
messages = [
2020
ChatMessage.from_system("You are a concise assistant."),

integrations/aimlapi/examples/aimlapi_with_tools_example.py

Lines changed: 6 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,8 @@
22
#
33
# SPDX-License-Identifier: Apache-2.0
44

5+
# ruff: noqa: T201
6+
57
"""Run AIMLAPI chat generation with tool calling and execution."""
68

79
from haystack.components.tools import ToolInvoker
@@ -35,24 +37,18 @@ def main() -> None:
3537

3638
tool_invoker = ToolInvoker(tools=[weather_tool])
3739

38-
client = AIMLAPIChatGenerator(
39-
model="openai/gpt-5-mini-2025-08-07"
40-
)
40+
client = AIMLAPIChatGenerator(model="openai/gpt-5-mini-2025-08-07")
4141

4242
messages = [
43-
ChatMessage.from_system(
44-
"You help users by calling the provided tools when they are relevant."
45-
),
43+
ChatMessage.from_system("You help users by calling the provided tools when they are relevant."),
4644
ChatMessage.from_user("What's the weather in Tokyo today?"),
4745
]
4846

4947
print("Requesting a tool call from the model...")
5048
tool_request = client.run(
5149
messages=messages,
5250
tools=[weather_tool],
53-
generation_kwargs={
54-
"tool_choice": {"type": "function", "function": {"name": "weather"}}
55-
},
51+
generation_kwargs={"tool_choice": {"type": "function", "function": {"name": "weather"}}},
5652
)["replies"][0]
5753

5854
print(f"assistant tool request: {tool_request}")
@@ -66,7 +62,7 @@ def main() -> None:
6662
for tool_result in tool_message.tool_call_results:
6763
print(f"tool output: {tool_result.result}")
6864

69-
follow_up_messages = messages + [tool_request, *tool_messages]
65+
follow_up_messages = [*messages, tool_request, *tool_messages]
7066

7167
final_reply = client.run(
7268
messages=follow_up_messages,

integrations/aimlapi/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -23,7 +23,7 @@ classifiers = [
2323
"Programming Language :: Python :: Implementation :: CPython",
2424
"Programming Language :: Python :: Implementation :: PyPy",
2525
]
26-
dependencies = ["haystack-ai>=2.13.1"]
26+
dependencies = ["haystack-ai>=2.19.0"]
2727

2828
[project.urls]
2929
Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/aimlapi#readme"

integrations/aimlapi/src/haystack_integrations/components/generators/aimlapi/chat/chat_generator.py

Lines changed: 33 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -2,12 +2,12 @@
22
#
33
# SPDX-License-Identifier: Apache-2.0
44

5-
from typing import Any, Dict, List, Optional, Union
5+
from typing import Any, Dict, List, Optional
66

77
from haystack import component, default_to_dict, logging
88
from haystack.components.generators.chat import OpenAIChatGenerator
99
from haystack.dataclasses import ChatMessage, StreamingCallbackT
10-
from haystack.tools import Tool, Toolset, _check_duplicate_tool_names
10+
from haystack.tools import ToolsType, _check_duplicate_tool_names, flatten_tools_or_toolsets, serialize_tools_or_toolset
1111
from haystack.utils import serialize_callable
1212
from haystack.utils.auth import Secret
1313

@@ -66,7 +66,7 @@ def __init__(
6666
streaming_callback: Optional[StreamingCallbackT] = None,
6767
api_base_url: Optional[str] = "https://api.aimlapi.com/v1",
6868
generation_kwargs: Optional[Dict[str, Any]] = None,
69-
tools: Optional[Union[List[Tool], Toolset]] = None,
69+
tools: Optional[ToolsType] = None,
7070
timeout: Optional[float] = None,
7171
extra_headers: Optional[Dict[str, Any]] = None,
7272
max_retries: Optional[int] = None,
@@ -144,7 +144,7 @@ def to_dict(self) -> Dict[str, Any]:
144144
api_base_url=self.api_base_url,
145145
generation_kwargs=self.generation_kwargs,
146146
api_key=self.api_key.to_dict(),
147-
tools=[tool.to_dict() for tool in self.tools] if self.tools else None,
147+
tools=serialize_tools_or_toolset(self.tools),
148148
extra_headers=self.extra_headers,
149149
timeout=self.timeout,
150150
max_retries=self.max_retries,
@@ -157,7 +157,7 @@ def _prepare_api_call(
157157
messages: List[ChatMessage],
158158
streaming_callback: Optional[StreamingCallbackT] = None,
159159
generation_kwargs: Optional[Dict[str, Any]] = None,
160-
tools: Optional[Union[List[Tool], Toolset]] = None,
160+
tools: Optional[ToolsType] = None,
161161
tools_strict: Optional[bool] = None,
162162
) -> Dict[str, Any]:
163163
# update generation kwargs by merging with the generation kwargs passed to the run method
@@ -167,32 +167,45 @@ def _prepare_api_call(
167167
# adapt ChatMessage(s) to the format expected by the OpenAI API (AIMLAPI uses the same format)
168168
aimlapi_formatted_messages: List[Dict[str, Any]] = [message.to_openai_dict_format() for message in messages]
169169

170-
tools = tools or self.tools
171-
if isinstance(tools, Toolset):
172-
tools = list(tools)
173170
tools_strict = tools_strict if tools_strict is not None else self.tools_strict
174-
_check_duplicate_tool_names(list(tools or []))
175-
176-
aimlapi_tools = {}
177-
if tools:
178-
tool_definitions = [
179-
{"type": "function", "function": {**t.tool_spec, **({"strict": tools_strict} if tools_strict else {})}}
180-
for t in tools
181-
]
182-
aimlapi_tools = {"tools": tool_definitions}
171+
flattened_tools = flatten_tools_or_toolsets(tools or self.tools)
172+
_check_duplicate_tool_names(flattened_tools)
173+
174+
openai_tools = {}
175+
if flattened_tools:
176+
tool_definitions = []
177+
for tool in flattened_tools:
178+
function_spec = {**tool.tool_spec}
179+
if tools_strict:
180+
function_spec["strict"] = True
181+
parameters = function_spec.get("parameters")
182+
if isinstance(parameters, dict):
183+
parameters["additionalProperties"] = False
184+
tool_definitions.append({"type": "function", "function": function_spec})
185+
openai_tools = {"tools": tool_definitions}
183186

184187
is_streaming = streaming_callback is not None
185188
num_responses = generation_kwargs.pop("n", 1)
189+
186190
if is_streaming and num_responses > 1:
187191
msg = "Cannot stream multiple responses, please set n=1."
188192
raise ValueError(msg)
193+
response_format = generation_kwargs.pop("response_format", None)
189194

190-
return {
195+
response_format = generation_kwargs.pop("response_format", None)
196+
197+
base_args = {
191198
"model": self.model,
192199
"messages": aimlapi_formatted_messages,
193-
"stream": streaming_callback is not None,
194200
"n": num_responses,
195-
**aimlapi_tools,
201+
**openai_tools,
196202
"extra_body": {**generation_kwargs},
197203
"extra_headers": {**extra_headers},
198204
}
205+
if response_format and not is_streaming:
206+
return {**base_args, "response_format": response_format, "openai_endpoint": "parse"}
207+
208+
final_args = {**base_args, "stream": is_streaming, "openai_endpoint": "create"}
209+
if response_format:
210+
final_args["response_format"] = response_format
211+
return final_args

integrations/aimlapi/tests/test_aimlapi_chat_generator.py

Lines changed: 13 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -258,12 +258,13 @@ def test_run_with_params(self, chat_messages, mock_chat_completion, monkeypatch)
258258
@pytest.mark.integration
259259
def test_live_run(self):
260260
chat_messages = [ChatMessage.from_user("What's the capital of France")]
261-
component = AIMLAPIChatGenerator()
261+
component = AIMLAPIChatGenerator(model="openai/gpt-5-nano-2025-08-07")
262262
results = component.run(chat_messages)
263263
assert len(results["replies"]) == 1
264264
message: ChatMessage = results["replies"][0]
265+
assert message.text
265266
assert "Paris" in message.text
266-
assert "openai/gpt-5-chat-latest" in message.meta["model"]
267+
assert "gpt-5-nano-2025-08-07" in message.meta["model"]
267268
assert message.meta["finish_reason"] == "stop"
268269

269270
@pytest.mark.skipif(
@@ -292,14 +293,15 @@ def __call__(self, chunk: StreamingChunk) -> None:
292293
self.responses += chunk.content if chunk.content else ""
293294

294295
callback = Callback()
295-
component = AIMLAPIChatGenerator(streaming_callback=callback)
296+
component = AIMLAPIChatGenerator(streaming_callback=callback, model="openai/gpt-5-nano-2025-08-07")
296297
results = component.run([ChatMessage.from_user("What's the capital of France?")])
297298

298299
assert len(results["replies"]) == 1
299300
message: ChatMessage = results["replies"][0]
301+
assert message.text
300302
assert "Paris" in message.text
301303

302-
assert "openai/gpt-5-chat-latest" in message.meta["model"]
304+
assert "gpt-5-nano-2025-08-07" in message.meta["model"]
303305
assert message.meta["finish_reason"] == "stop"
304306

305307
assert callback.counter > 1
@@ -312,11 +314,11 @@ def __call__(self, chunk: StreamingChunk) -> None:
312314
@pytest.mark.integration
313315
def test_live_run_with_tools(self, tools):
314316
chat_messages = [ChatMessage.from_user("What's the weather like in Paris?")]
315-
component = AIMLAPIChatGenerator(tools=tools)
317+
component = AIMLAPIChatGenerator(model="openai/gpt-5-nano-2025-08-07", tools=tools)
316318
results = component.run(chat_messages)
317319
assert len(results["replies"]) == 1
318320
message = results["replies"][0]
319-
assert message.text in ("", None)
321+
assert message.text is None or message.text == ""
320322

321323
assert message.tool_calls
322324
tool_call = message.tool_call
@@ -335,7 +337,7 @@ def test_live_run_with_tools_and_response(self, tools):
335337
Integration test that the AIMLAPIChatGenerator component can run with tools and get a response.
336338
"""
337339
initial_messages = [ChatMessage.from_user("What's the weather like in Paris and Berlin?")]
338-
component = AIMLAPIChatGenerator(tools=tools)
340+
component = AIMLAPIChatGenerator(tools=tools, model="openai/gpt-5-nano-2025-08-07")
339341
results = component.run(messages=initial_messages, generation_kwargs={"tool_choice": "auto"})
340342

341343
assert len(results["replies"]) == 1
@@ -371,7 +373,6 @@ def test_live_run_with_tools_and_response(self, tools):
371373
assert final_message.is_from(ChatRole.ASSISTANT)
372374
assert len(final_message.text) > 0
373375
assert "paris" in final_message.text.lower()
374-
assert "berlin" in final_message.text.lower()
375376

376377
@pytest.mark.skipif(
377378
not os.environ.get("AIMLAPI_API_KEY", None),
@@ -382,7 +383,9 @@ def test_live_run_with_tools_streaming(self, tools):
382383
"""
383384
Integration test that the AIMLAPIChatGenerator component can run with tools and streaming.
384385
"""
385-
component = AIMLAPIChatGenerator(tools=tools, streaming_callback=print_streaming_chunk)
386+
component = AIMLAPIChatGenerator(
387+
tools=tools, streaming_callback=print_streaming_chunk, model="openai/gpt-5-nano-2025-08-07"
388+
)
386389
results = component.run(
387390
[ChatMessage.from_user("What's the weather like in Paris and Berlin?")],
388391
generation_kwargs={"tool_choice": "auto"},
@@ -417,7 +420,7 @@ def test_pipeline_with_aimlapi_chat_generator(self, tools):
417420
Test that the AIMLAPIChatGenerator component can be used in a pipeline
418421
"""
419422
pipeline = Pipeline()
420-
pipeline.add_component("generator", AIMLAPIChatGenerator(tools=tools, model="openai/gpt-5-chat-latest"))
423+
pipeline.add_component("generator", AIMLAPIChatGenerator(tools=tools, model="openai/gpt-5-mini-2025-08-07"))
421424
pipeline.add_component("tool_invoker", ToolInvoker(tools=tools))
422425

423426
pipeline.connect("generator", "tool_invoker")

0 commit comments

Comments (0)