Skip to content

Commit 9bb186e

Browse files
authored
Merge pull request #810 from Pipelex/release/v0.23.5
Release v0.23.5
2 parents a141c76 + a3ef6c6 commit 9bb186e

46 files changed

Lines changed: 1750 additions & 1246 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.badges/tests.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
{
22
"schemaVersion": 1,
33
"label": "tests",
4-
"message": "4046",
4+
"message": "4053",
55
"color": "blue",
66
"cacheSeconds": 300
77
}

.pipelex-dev/test_profiles.toml

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ fal = [
220220
openai = ["gpt-image-1", "gpt-image-1-mini", "gpt-image-1.5"]
221221

222222
# --- Google Models ---
223-
google = ["nano-banana", "nano-banana-pro"]
223+
google = ["nano-banana", "nano-banana-pro", "nano-banana-2"]
224224

225225
# --- Qwen Models ---
226226
qwen = ["qwen-image"]
@@ -337,3 +337,30 @@ search_models = ["@linkup"]
337337
[profiles.full]
338338
description = "All available models"
339339
include_all = true
340+
341+
################################################################################
342+
# All Configs GW Profile - One model per Portkey config and model type
343+
################################################################################
344+
[profiles.all_configs_gw]
345+
description = "One model per Portkey config (auto-generated)"
346+
backends = ["pipelex_gateway"]
347+
llm_models = [
348+
"gpt-4o-mini",
349+
"gpt-oss-20b",
350+
"claude-4-sonnet",
351+
"deepseek-v3.2",
352+
"gemini-2.5-pro",
353+
"grok-3",
354+
]
355+
img_gen_models = [
356+
"nano-banana-2",
357+
"gpt-image-1",
358+
"nano-banana",
359+
]
360+
extract_models = [
361+
"azure-document-intelligence",
362+
"deepseek-ocr",
363+
]
364+
search_models = [
365+
"linkup-standard",
366+
]

.pipelex/inference/backends/bedrock.toml

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -56,17 +56,15 @@ outputs = ["text"]
5656
# TODO: find out the actual cost per million tokens for nova on bedrock
5757
costs = { input = 3.0, output = 15.0 }
5858

59-
# --- Claude LLMs --------------------------------------------------------------
60-
["claude-3.7-sonnet"]
61-
sdk = "bedrock_anthropic"
62-
model_id = "us.anthropic.claude-3-7-sonnet-20250219-v1:0"
59+
# --- DeepSeek Models ----------------------------------------------------------
60+
["deepseek-v3.1"]
61+
model_id = "deepseek.v3-v1:0"
62+
inputs = ["text"]
63+
outputs = ["text"]
64+
costs = { input = 0, output = 0 }
6365
max_tokens = 8192
64-
inputs = ["text", "images", "pdf"]
65-
outputs = ["text", "structured"]
66-
max_prompt_images = 100
67-
costs = { input = 3.0, output = 15.0 }
68-
thinking_mode = "manual"
6966

67+
# --- Claude LLMs --------------------------------------------------------------
7068
[claude-4-sonnet]
7169
sdk = "bedrock_anthropic"
7270
model_id = "us.anthropic.claude-sonnet-4-20250514-v1:0"

.pipelex/inference/backends/google.toml

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,12 +80,21 @@ model_id = "gemini-2.5-flash-image"
8080
inputs = ["text"]
8181
outputs = ["image"]
8282
thinking_mode = "none"
83-
costs = { input = 0.0, output = 0.039 }
83+
costs = { input = 0.30, output = 30 }
8484

8585
[nano-banana-pro]
8686
model_type = "img_gen"
8787
model_id = "gemini-3-pro-image-preview"
8888
inputs = ["text"]
8989
outputs = ["image"]
9090
thinking_mode = "none"
91-
costs = { input = 0.0, output = 0.039 }
91+
costs = { input = 2.0, output = 120 }
92+
93+
94+
[nano-banana-2]
95+
model_type = "img_gen"
96+
model_id = "gemini-3.1-flash-image-preview"
97+
inputs = ["text"]
98+
outputs = ["image"]
99+
thinking_mode = "none"
100+
costs = { input = 0.50, output = 60 }

.pipelex/inference/backends/pipelex_gateway_models.md

Lines changed: 1 addition & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -21,14 +21,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
2121
</thead>
2222
<tbody>
2323
<tr>
24-
<td>claude-3.7-sonnet</td>
25-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
26-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
27-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
28-
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
29-
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
30-
</tr>
31-
<tr>
3224
<td>claude-4-opus</td>
3325
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
3426
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
@@ -85,14 +77,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
8577
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
8678
</tr>
8779
<tr>
88-
<td>deepseek-v3.1</td>
89-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
90-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">❌</td>
91-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">❌</td>
92-
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
93-
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
94-
</tr>
95-
<tr>
9680
<td>deepseek-v3.2</td>
9781
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
9882
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">❌</td>
@@ -101,14 +85,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
10185
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
10286
</tr>
10387
<tr>
104-
<td>deepseek-v3.2-speciale</td>
105-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
106-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">❌</td>
107-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">❌</td>
108-
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
109-
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
110-
</tr>
111-
<tr>
11288
<td>gemini-2.5-flash</td>
11389
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
11490
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
@@ -512,12 +488,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
512488
</thead>
513489
<tbody>
514490
<tr>
515-
<td>flux-2-pro</td>
516-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
517-
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
518-
<td style="text-align:center;background-color:rgba(76,175,80,0.15)">✅</td>
519-
</tr>
520-
<tr>
521491
<td>gpt-image-1</td>
522492
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
523493
<td style="text-align:center;background-color:rgba(33,150,243,0.15)">✅</td>
@@ -558,6 +528,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
558528

559529

560530
> **AUTO-GENERATED FILE** - Do not edit manually.
561-
> Last updated: 2026-03-21T13:07:30Z
531+
> Last updated: 2026-04-04T17:05:38Z
562532
>
563533
> Run `pipelex-dev update-gateway-models` or `make ugm` to regenerate.

.pipelex/inference/backends/pipelex_gateway_models_plain.md

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
77

88
## Language Models (LLM)
99

10-
- **claude-3.7-sonnet**
11-
- inputs: text, images, pdf
12-
- outputs: text, structured
1310
- **claude-4-opus**
1411
- inputs: text, images, pdf
1512
- outputs: text, structured
@@ -31,15 +28,9 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
3128
- **claude-4.6-opus**
3229
- inputs: text, images, pdf
3330
- outputs: text, structured
34-
- **deepseek-v3.1**
35-
- inputs: text
36-
- outputs: text, structured
3731
- **deepseek-v3.2**
3832
- inputs: text
3933
- outputs: text, structured
40-
- **deepseek-v3.2-speciale**
41-
- inputs: text
42-
- outputs: text, structured
4334
- **gemini-2.5-flash**
4435
- inputs: text, images, pdf
4536
- outputs: text, structured
@@ -184,9 +175,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
184175

185176
## Image Generation Models
186177

187-
- **flux-2-pro**
188-
- inputs: text, images
189-
- outputs: image
190178
- **gpt-image-1**
191179
- inputs: text, images
192180
- outputs: image
@@ -208,6 +196,6 @@ For configuration details, see the [documentation](https://docs.pipelex.com/late
208196

209197

210198
> **AUTO-GENERATED FILE** - Do not edit manually.
211-
> Last updated: 2026-03-21T13:07:30Z
199+
> Last updated: 2026-04-04T17:05:38Z
212200
>
213201
> Run `pipelex-dev update-gateway-models` or `make ugm` to regenerate.

.pipelex/inference/deck/2_img_gen_deck.toml

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,11 +27,10 @@ choice_default = "$gen-image"
2727

2828
[img_gen.aliases]
2929
best-gpt = "gpt-image-1.5"
30-
best-gemini = "nano-banana-pro"
31-
best-blackforestlabs = "flux-2-pro"
30+
best-gemini = "nano-banana-2"
3231

33-
default-general = "flux-2-pro"
34-
default-premium = "nano-banana-pro"
32+
default-general = "nano-banana"
33+
default-premium = "nano-banana-2"
3534
default-small = "gpt-image-1-mini"
3635

3736
####################################################################################################

CHANGELOG.md

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,24 @@
11
# Changelog
22

3+
## [v0.23.5] - 2026-04-04
4+
5+
### Added
6+
7+
- **Gateway config**: Introduced `GatewayConfig` to bundle gateway model specs with AWS region, propagating it through the backend library so bedrock backends use the correct region.
8+
- **Config coverage tests**: Integration tests that validate one model per Portkey config for each model type (LLM, image gen, extract, search), with `all_configs_gw` test profile and `make ticc` target.
9+
- **nano-banana-2 model**: Added `gemini-3.1-flash-image-preview` as `nano-banana-2` with updated Google image gen costs.
10+
- **DeepSeek models on bedrock**: Added DeepSeek models to the bedrock backend configuration.
11+
12+
### Changed
13+
14+
- **Image gen deck aliases**: Updated aliases to nano-banana model variants and removed `flux-2-pro`.
15+
- **Remote config**: Bumped to v08.
16+
- **Gateway model docs**: Regenerated, removing retired models (claude-3.7-sonnet, deepseek-v3.1, deepseek-v3.2-speciale, flux-2-pro).
17+
18+
### Fixed
19+
20+
- **deepseek-v3.1 structured output**: Removed unsupported `structured` output capability from the bedrock deepseek-v3.1 model spec — the `bedrock_aioboto3` worker does not implement object generation, so structured calls would fail at runtime.
21+
322
## [v0.23.4] - 2026-04-02
423

524
### Changed

Makefile

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ make tp - Shorthand -> test-with-prints
111111
make tb - Shorthand -> `make test-with-prints TEST=test_boot`
112112
make test-inference - Run unit tests only for inference (with prints)
113113
make ti - Shorthand -> test-inference
114+
make ticc - Shorthand -> test config coverage (all Portkey configs)
114115
make tip - Shorthand -> test-inference-with-prints (parallelized inference tests)
115116
make test-llm - Run unit tests only for llm (with prints)
116117
make tl - Shorthand -> test-llm
@@ -163,7 +164,7 @@ export HELP
163164
format lint ruff-format ruff-lint pyright mypy pylint plxt plxt-format plxt-lint \
164165
rules up-kit-configs ukc check-config-sync ccs check-rules check-urls cu insert-skeleton \
165166
cleanderived cleanenv cleanall \
166-
test test-xdist t test-quiet tq test-with-prints tp test-inference ti \
167+
test test-xdist t test-quiet tq test-with-prints tp test-inference ti ticc \
167168
test-llm tl test-img-gen tg test-extract te codex-tests gha-tests \
168169
run-all-tests run-manual-trigger-gha-tests run-gha_disabled-tests \
169170
validate v check c cc agent-check agent-test \
@@ -546,6 +547,12 @@ tip: test-inference-with-prints
546547
ti: test-inference-fast
547548
@echo "> done: ti-fast = test-inference-fast"
548549

550+
ticc: env
551+
$(call PRINT_TITLE,"Config coverage inference testing")
552+
@$(VENV_PIPELEX_DEV) preprocess-test-models --generate-fixtures --profile all_configs_gw --quiet
553+
$(VENV_PYTEST) -n auto --pipe-run-mode live -m "inference" -s -rfE -k "TestConfigCoverage" $(if $(filter 1,$(VERBOSE)),-v,$(if $(filter 2,$(VERBOSE)),-vv,$(if $(filter 3,$(VERBOSE)),-vvv,)))
554+
@echo "> done: ticc = test-inference config coverage (all Portkey configs)"
555+
549556
ti-dry: env
550557
$(call PRINT_TITLE,"Unit testing")
551558
@if [ -n "$(TEST)" ]; then \

pipelex/builder/operations/pipe_ops.py

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,8 @@
2828
# Aliases that agents may use instead of "pipe_code". First found is promoted when canonical key is absent; extras are dropped.
2929
_PIPE_CODE_ALIASES = ("pipe", "the_pipe_code", "code", "name", "pipe_name", "pipe_ref")
3030

31-
# Alias that agents may use instead of "output".
32-
_OUTPUT_ALIAS = "output_concept"
31+
# Aliases that agents may use instead of "output". First found is promoted when canonical key is absent; extras are dropped.
32+
_OUTPUT_ALIASES = ("output_concept", "output_type")
3333

3434

3535
def _normalize_sub_pipe_dict(data: dict[str, Any]) -> None:
@@ -110,24 +110,32 @@ def parse_pipe_spec(pipe_type: str, spec_data: dict[str, Any]) -> PipeSpec:
110110
else:
111111
spec_data.pop("expression")
112112

113-
# Accept "output_concept" as an alias for "output".
114-
# When both "output" and the alias coexist, try the alias value first (agents often put
113+
# Accept output aliases (e.g. "output_concept", "output_type") for "output".
114+
# When both "output" and an alias coexist, try the alias value first (agents often put
115115
# the correct concept name in the alias), falling back to the original "output" value.
116116
output_fallback: Any | None = None
117-
if _OUTPUT_ALIAS in spec_data:
118-
alias_value = spec_data.pop(_OUTPUT_ALIAS)
117+
for output_alias in _OUTPUT_ALIASES:
118+
if output_alias not in spec_data:
119+
continue
120+
alias_value = spec_data.pop(output_alias)
119121
if "output" not in spec_data:
120122
spec_data["output"] = alias_value
121123
else:
122124
output_fallback = spec_data["output"]
123125
spec_data["output"] = alias_value
126+
# First alias wins — drop any remaining aliases without using them.
127+
for remaining_alias in _OUTPUT_ALIASES:
128+
spec_data.pop(remaining_alias, None)
129+
break
124130

125131
# Accept output as dict → extract the concept string
126132
# Agents sometimes structure the output like inputs (as a dict).
127133
# Handle {"type": "ConceptName"} and single-item dicts like {"result": "Text"}.
128134
if "output" in spec_data and isinstance(spec_data["output"], dict):
129135
output_dict: dict[str, Any] = spec_data["output"]
130-
if "type" in output_dict:
136+
if "concept_ref" in output_dict:
137+
spec_data["output"] = output_dict["concept_ref"]
138+
elif "type" in output_dict:
131139
spec_data["output"] = output_dict["type"]
132140
elif len(output_dict) == 1:
133141
spec_data["output"] = next(iter(output_dict.values()))

0 commit comments

Comments
 (0)