Skip to content

Commit aeb5197

Browse files
committed
fix: model normalization
1 parent 6b39290 commit aeb5197

File tree

2 files changed

+48
-15
lines changed

2 files changed

+48
-15
lines changed

lib/request/request-transformer.ts

Lines changed: 18 additions & 13 deletions
Original file line number · Diff line number · Diff line change
@@ -121,23 +121,23 @@ export function getModelConfig(
121121
* @returns Reasoning configuration
122122
*/
123123
export function getReasoningConfig(
124-
originalModel: string | undefined,
124+
modelName: string | undefined,
125125
userConfig: ConfigOptions = {},
126126
): ReasoningConfig {
127-
const normalizedOriginal = originalModel?.toLowerCase() ?? "";
127+
const normalizedName = modelName?.toLowerCase() ?? "";
128128
const isCodexMax =
129-
normalizedOriginal.includes("codex-max") ||
130-
normalizedOriginal.includes("codex max");
129+
normalizedName.includes("codex-max") ||
130+
normalizedName.includes("codex max");
131131
const isCodexMini =
132-
normalizedOriginal.includes("codex-mini") ||
133-
normalizedOriginal.includes("codex mini") ||
134-
normalizedOriginal.includes("codex_mini") ||
135-
normalizedOriginal.includes("codex-mini-latest");
136-
const isCodex = normalizedOriginal.includes("codex") && !isCodexMini;
132+
normalizedName.includes("codex-mini") ||
133+
normalizedName.includes("codex mini") ||
134+
normalizedName.includes("codex_mini") ||
135+
normalizedName.includes("codex-mini-latest");
136+
const isCodex = normalizedName.includes("codex") && !isCodexMini;
137137
const isLightweight =
138138
!isCodexMini &&
139-
(normalizedOriginal.includes("nano") ||
140-
normalizedOriginal.includes("mini"));
139+
(normalizedName.includes("nano") ||
140+
normalizedName.includes("mini"));
141141

142142
// Default based on model type (Codex CLI defaults)
143143
const defaultEffort: ReasoningConfig["effort"] = isCodexMini
@@ -163,6 +163,11 @@ export function getReasoningConfig(
163163
}
164164
}
165165

166+
// For all non-Codex-Max models, downgrade unsupported xhigh to high
167+
if (!isCodexMax && effort === "xhigh") {
168+
effort = "high";
169+
}
170+
166171
// Normalize "minimal" to "low" for Codex families
167172
// Codex CLI presets are low/medium/high (or xhigh for Codex Max)
168173
if (isCodex && effort === "minimal") {
@@ -437,8 +442,8 @@ export async function transformRequestBody(
437442
}
438443
}
439444

440-
// Configure reasoning (use model-specific config)
441-
const reasoningConfig = getReasoningConfig(originalModel, modelConfig);
445+
// Configure reasoning (use normalized model family + model-specific config)
446+
const reasoningConfig = getReasoningConfig(normalizedModel, modelConfig);
442447
body.reasoning = {
443448
...body.reasoning,
444449
...reasoningConfig,

test/request-transformer.test.ts

Lines changed: 30 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -796,6 +796,34 @@ describe('Request Transformer Module', () => {
796796
expect(result.reasoning?.summary).toBe('detailed');
797797
});
798798

799+
it('should downgrade xhigh to high for non-max codex', async () => {
800+
const body: RequestBody = {
801+
model: 'gpt-5.1-codex-high',
802+
input: [],
803+
};
804+
const userConfig: UserConfig = {
805+
global: { reasoningEffort: 'xhigh' },
806+
models: {},
807+
};
808+
const result = await transformRequestBody(body, codexInstructions, userConfig);
809+
expect(result.model).toBe('gpt-5.1-codex');
810+
expect(result.reasoning?.effort).toBe('high');
811+
});
812+
813+
it('should downgrade xhigh to high for non-max general models', async () => {
814+
const body: RequestBody = {
815+
model: 'gpt-5.1-high',
816+
input: [],
817+
};
818+
const userConfig: UserConfig = {
819+
global: { reasoningEffort: 'xhigh' },
820+
models: {},
821+
};
822+
const result = await transformRequestBody(body, codexInstructions, userConfig);
823+
expect(result.model).toBe('gpt-5.1');
824+
expect(result.reasoning?.effort).toBe('high');
825+
});
826+
799827
it('should preserve minimal for non-codex models', async () => {
800828
const body: RequestBody = {
801829
model: 'gpt-5',
@@ -815,7 +843,7 @@ describe('Request Transformer Module', () => {
815843
input: [],
816844
};
817845
const result = await transformRequestBody(body, codexInstructions);
818-
expect(result.reasoning?.effort).toBe('minimal');
846+
expect(result.reasoning?.effort).toBe('medium');
819847
});
820848

821849
describe('CODEX_MODE parameter', () => {
@@ -945,7 +973,7 @@ describe('Request Transformer Module', () => {
945973
const result = await transformRequestBody(body, codexInstructions);
946974

947975
expect(result.model).toBe('gpt-5'); // Normalized
948-
expect(result.reasoning?.effort).toBe('minimal'); // Lightweight default
976+
expect(result.reasoning?.effort).toBe('medium'); // Default for normalized gpt-5
949977
});
950978
});
951979

0 commit comments

Comments
 (0)