From 0c51feb9c293b9dec8bcee9f2ca79ed9fd741e2c Mon Sep 17 00:00:00 2001
From: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
Date: Thu, 13 Nov 2025 14:39:09 -0800
Subject: [PATCH] fix: max tokens when using models like opus with providers
 other than anthropic (#4307)

---
 packages/opencode/src/provider/transform.ts   |  4 +-
 packages/opencode/src/session/prompt.ts       |  2 +-
 .../opencode/test/provider/transform.test.ts  | 98 +++++++++++++++++++
 3 files changed, 101 insertions(+), 3 deletions(-)
 create mode 100644 packages/opencode/test/provider/transform.test.ts

diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index e578d806..668f3041 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -176,7 +176,7 @@ export namespace ProviderTransform {
   }
 
   export function maxOutputTokens(
-    providerID: string,
+    npm: string,
     options: Record<string, any>,
     modelLimit: number,
     globalLimit: number,
@@ -184,7 +184,7 @@
     const modelCap = modelLimit || globalLimit
     const standardLimit = Math.min(modelCap, globalLimit)
 
-    if (providerID === "anthropic") {
+    if (npm === "@ai-sdk/anthropic") {
       const thinking = options?.["thinking"]
       const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
       const enabled = thinking?.["type"] === "enabled"
diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts
index f4dc84be..ae6aff22 100644
--- a/packages/opencode/src/session/prompt.ts
+++ b/packages/opencode/src/session/prompt.ts
@@ -345,7 +345,7 @@
       maxRetries: 0,
       activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
       maxOutputTokens: ProviderTransform.maxOutputTokens(
-        model.providerID,
+        model.npm ?? "",
         params.options,
         model.info.limit.output,
         OUTPUT_TOKEN_MAX,
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts
new file mode 100644
index 00000000..e6080d54
--- /dev/null
+++ b/packages/opencode/test/provider/transform.test.ts
@@ -0,0 +1,98 @@
+import { describe, expect, test } from "bun:test"
+import { ProviderTransform } from "../../src/provider/transform"
+
+const OUTPUT_TOKEN_MAX = 32000
+
+describe("ProviderTransform.maxOutputTokens", () => {
+  test("returns 32k when modelLimit > 32k", () => {
+    const modelLimit = 100000
+    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
+    expect(result).toBe(OUTPUT_TOKEN_MAX)
+  })
+
+  test("returns modelLimit when modelLimit < 32k", () => {
+    const modelLimit = 16000
+    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
+    expect(result).toBe(16000)
+  })
+
+  describe("azure", () => {
+    test("returns 32k when modelLimit > 32k", () => {
+      const modelLimit = 100000
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+
+    test("returns modelLimit when modelLimit < 32k", () => {
+      const modelLimit = 16000
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(16000)
+    })
+  })
+
+  describe("bedrock", () => {
+    test("returns 32k when modelLimit > 32k", () => {
+      const modelLimit = 100000
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+
+    test("returns modelLimit when modelLimit < 32k", () => {
+      const modelLimit = 16000
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(16000)
+    })
+  })
+
+  describe("anthropic without thinking options", () => {
+    test("returns 32k when modelLimit > 32k", () => {
+      const modelLimit = 100000
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+
+    test("returns modelLimit when modelLimit < 32k", () => {
+      const modelLimit = 16000
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(16000)
+    })
+  })
+
+  describe("anthropic with thinking options", () => {
+    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
+      const modelLimit = 100000
+      const options = {
+        thinking: {
+          type: "enabled",
+          budgetTokens: 10000,
+        },
+      }
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+
+    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
+      const modelLimit = 50000
+      const options = {
+        thinking: {
+          type: "enabled",
+          budgetTokens: 30000,
+        },
+      }
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(20000)
+    })
+
+    test("returns 32k when thinking type is not enabled", () => {
+      const modelLimit = 100000
+      const options = {
+        thinking: {
+          type: "disabled",
+          budgetTokens: 10000,
+        },
+      }
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+  })
+})
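
The hunks above show only the head of `maxOutputTokens`, so for reference here is a minimal sketch of the whole function as it can be reconstructed from the visible context lines and the test expectations. The branch taken after the `enabled` check is not part of the diff; the `Math.min(modelCap - budgetTokens, globalLimit)` form is an assumption inferred from the `50000` cap / `30000` budget => `20000` test case, not quoted source.

```ts
// Sketch of ProviderTransform.maxOutputTokens, reconstructed from the patch
// context and the tests above; the post-`enabled` logic is inferred.
export function maxOutputTokens(
  npm: string,
  options: Record<string, any>,
  modelLimit: number,
  globalLimit: number,
): number {
  // Fall back to the global cap when the model reports no output limit.
  const modelCap = modelLimit || globalLimit
  const standardLimit = Math.min(modelCap, globalLimit)

  // Key on the SDK package rather than the provider ID, so any provider
  // configured with the Anthropic SDK gets the thinking-budget handling.
  if (npm === "@ai-sdk/anthropic") {
    const thinking = options?.["thinking"]
    const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
    const enabled = thinking?.["type"] === "enabled"
    if (enabled && budgetTokens > 0) {
      // Assumed: leave room for the thinking budget inside the model cap
      // (matches the 50000 cap / 30000 budget => 20000 expectation).
      return Math.min(modelCap - budgetTokens, globalLimit)
    }
  }
  return standardLimit
}
```

The point of the rename is visible in the prompt.ts hunk: `model.providerID` is whatever the user named the provider entry (which, per the PR title, need not be "anthropic" even when the model is Opus), while `model.npm` names the SDK package actually in use, so the Anthropic-specific token handling now applies wherever `@ai-sdk/anthropic` is used and nowhere else.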