mirror of https://github.com/aljazceru/opencode.git (synced 2025-12-22 18:24:21 +01:00)
fix: max tokens when using models like opus with providers other than anthropic (#4307)
packages/opencode/src/provider/transform.ts
@@ -176,7 +176,7 @@ export namespace ProviderTransform {
   }
 
   export function maxOutputTokens(
-    providerID: string,
+    npm: string,
     options: Record<string, any>,
     modelLimit: number,
     globalLimit: number,
@@ -184,7 +184,7 @@ export namespace ProviderTransform {
     const modelCap = modelLimit || globalLimit
     const standardLimit = Math.min(modelCap, globalLimit)
 
-    if (providerID === "anthropic") {
+    if (npm === "@ai-sdk/anthropic") {
       const thinking = options?.["thinking"]
       const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
       const enabled = thinking?.["type"] === "enabled"
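For reference, here is the whole function as it reads after the change, reassembled from the two hunks above. Everything past the `enabled` line is not visible in this diff and is inferred from the test expectations further down, so read it as a sketch of the behavior rather than the verbatim source:

export function maxOutputTokens(
  npm: string,
  options: Record<string, any>,
  modelLimit: number,
  globalLimit: number,
): number {
  // A missing model limit falls back to the global cap.
  const modelCap = modelLimit || globalLimit
  const standardLimit = Math.min(modelCap, globalLimit)

  if (npm === "@ai-sdk/anthropic") {
    const thinking = options?.["thinking"]
    const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
    const enabled = thinking?.["type"] === "enabled"
    // Inferred from the tests: when extended thinking is enabled and the
    // budget plus the standard limit would overflow the model's cap, shrink
    // the output allowance so the thinking budget still fits.
    if (enabled && budgetTokens + standardLimit > modelCap) {
      return modelCap - budgetTokens
    }
  }
  return standardLimit
}

Keying the branch on the npm package rather than the provider ID is the actual fix here: the Anthropic thinking-budget handling now applies whenever the @ai-sdk/anthropic SDK is in use, even if the model (e.g. Opus) is served through a different provider.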
@@ -345,7 +345,7 @@ export namespace SessionPrompt {
         maxRetries: 0,
         activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
         maxOutputTokens: ProviderTransform.maxOutputTokens(
-          model.providerID,
+          model.npm ?? "",
           params.options,
           model.info.limit.output,
           OUTPUT_TOKEN_MAX,
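Worked through with the numbers from the tests below: for a non-Anthropic SDK with modelLimit = 100000 the result is min(100000, 32000) = 32000, and with modelLimit = 16000 it is 16000. For @ai-sdk/anthropic with thinking enabled, modelLimit = 50000, and budgetTokens = 30000, the standard limit would be 32000, but 30000 + 32000 = 62000 overflows the 50000 cap, so the call returns 50000 - 30000 = 20000 and the thinking budget still fits inside the model's output window.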
packages/opencode/test/provider/transform.test.ts (new file, 98 lines)
@@ -0,0 +1,98 @@
import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000

describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(OUTPUT_TOKEN_MAX)
  })

  test("returns modelLimit when modelLimit < 32k", () => {
    const modelLimit = 16000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(16000)
  })

  describe("azure", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("bedrock", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("anthropic without thinking options", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("anthropic with thinking options", () => {
    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
      const modelLimit = 50000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 30000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(20000)
    })

    test("returns 32k when thinking type is not enabled", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "disabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
  })
})
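Assuming an opencode checkout with dependencies installed, the new file should run under Bun's test runner with something like:

bun test packages/opencode/test/provider/transform.test.ts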