diff --git a/.changeset/smooth-radios-breathe.md b/.changeset/smooth-radios-breathe.md new file mode 100644 index 000000000000..9f496f3562fa --- /dev/null +++ b/.changeset/smooth-radios-breathe.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai-compatible': patch +--- + +support OpenRouter's reasoning field diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts index 9dfce5e77911..0abb81774005 100644 --- a/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts +++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.test.ts @@ -71,7 +71,8 @@ describe('config', () => { describe('doGenerate', () => { function prepareJsonResponse({ content = '', - reasoning_content = '', + reasoning_content, + reasoning, tool_calls, function_call, usage = { @@ -87,6 +88,7 @@ describe('doGenerate', () => { }: { content?: string; reasoning_content?: string; + reasoning?: string; tool_calls?: Array<{ id: string; type: 'function'; @@ -133,6 +135,7 @@ describe('doGenerate', () => { role: 'assistant', content, reasoning_content, + reasoning, tool_calls, function_call, }, @@ -172,7 +175,7 @@ describe('doGenerate', () => { expect(text).toStrictEqual('Hello, World!'); }); - it('should extract reasoning content', async () => { + it('should extract reasoning content when incoming reasoning_content field', async () => { prepareJsonResponse({ content: 'Hello, World!', reasoning_content: 'This is the reasoning behind the response', @@ -190,6 +193,41 @@ describe('doGenerate', () => { ); }); + it('should extract reasoning content when incoming reasoning field', async () => { + prepareJsonResponse({ + content: 'Hello, World!', + reasoning: 'This is the reasoning behind the response', + }); + + const { text, reasoning } = await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + 
expect(text).toStrictEqual('Hello, World!'); + expect(reasoning).toStrictEqual( + 'This is the reasoning behind the response', + ); + }); + + it('should extract reasoning content when incoming reasoning_content and reasoning fields', async () => { + prepareJsonResponse({ + content: 'Hello, World!', + reasoning_content: '123', + reasoning: '456', + }); + + const { text, reasoning } = await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(text).toStrictEqual('Hello, World!'); + expect(reasoning).toStrictEqual('123'); + }); + it('should extract usage', async () => { prepareJsonResponse({ content: '', @@ -289,7 +327,7 @@ describe('doGenerate', () => { expect(rawResponse?.headers).toStrictEqual({ // default headers: - 'content-length': '335', + 'content-length': '312', 'content-type': 'application/json', // custom header diff --git a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts index 20e53eccea9e..740df8dddb39 100644 --- a/packages/openai-compatible/src/openai-compatible-chat-language-model.ts +++ b/packages/openai-compatible/src/openai-compatible-chat-language-model.ts @@ -300,7 +300,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 { return { text: choice.message.content ?? undefined, - reasoning: + choice.message.reasoning_content ?? + choice.message.reasoning ?? + undefined, toolCalls: choice.message.tool_calls?.map(toolCall => ({ toolCallType: 'function', toolCallId: toolCall.id ?? 
generateId(), @@ -529,10 +532,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV1 { const delta = choice.delta; // enqueue reasoning before text deltas: - if (delta.reasoning_content != null) { + if (delta.reasoning_content != null || delta.reasoning != null) { controller.enqueue({ type: 'reasoning', - textDelta: delta.reasoning_content, + textDelta: delta.reasoning_content || delta.reasoning, }); } @@ -730,6 +733,7 @@ const OpenAICompatibleChatResponseSchema = z.object({ role: z.literal('assistant').nullish(), content: z.string().nullish(), reasoning_content: z.string().nullish(), + reasoning: z.string().nullish(), tool_calls: z .array( z.object({ @@ -766,6 +770,7 @@ const createOpenAICompatibleChatChunkSchema = ( role: z.enum(['assistant']).nullish(), content: z.string().nullish(), reasoning_content: z.string().nullish(), + reasoning: z.string().nullish(), tool_calls: z .array( z.object({