diff --git a/bun.lock b/bun.lock index ee8746c42f2..695cf2932a7 100644 --- a/bun.lock +++ b/bun.lock @@ -364,6 +364,7 @@ "hono": "catalog:", "hono-openapi": "catalog:", "ignore": "7.0.5", + "js-tiktoken": "1.0.21", "jsonc-parser": "3.3.1", "mime-types": "3.0.2", "minimatch": "10.0.3", @@ -3334,6 +3335,8 @@ "js-md4": ["js-md4@0.3.2", "", {}, "sha512-/GDnfQYsltsjRswQhN9fhv3EMw2sCpUdrdxyWDOUK7eyD++r3gRhzgiQgc/x4MAv2i1iuQ4lxO5mvqM3vj4bwA=="], + "js-tiktoken": ["js-tiktoken@1.0.21", "", { "dependencies": { "base64-js": "^1.5.1" } }, "sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g=="], + "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], "js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], diff --git a/packages/opencode/package.json b/packages/opencode/package.json index 3e73a7021da..3afa7408b93 100644 --- a/packages/opencode/package.json +++ b/packages/opencode/package.json @@ -129,6 +129,7 @@ "hono": "catalog:", "hono-openapi": "catalog:", "ignore": "7.0.5", + "js-tiktoken": "1.0.21", "jsonc-parser": "3.3.1", "mime-types": "3.0.2", "minimatch": "10.0.3", diff --git a/packages/opencode/src/acp/agent.ts b/packages/opencode/src/acp/agent.ts index 2a6bbbb1e44..19eb3705345 100644 --- a/packages/opencode/src/acp/agent.ts +++ b/packages/opencode/src/acp/agent.ts @@ -1738,6 +1738,20 @@ export namespace ACP { } } + // Fallback: try extracting variant from hyphen-joined modelID (e.g., "claude-opus-4-6-high" -> model: "claude-opus-4-6", variant: "high") + const lastHyphen = parsed.modelID.lastIndexOf("-") + if (lastHyphen > 0) { + const candidateVariant = parsed.modelID.slice(lastHyphen + 1) + const baseModelId = parsed.modelID.slice(0, lastHyphen) + const baseModelInfo = 
provider.models[baseModelId] + if (baseModelInfo?.variants && candidateVariant in baseModelInfo.variants) { + return { + model: { providerID: parsed.providerID, modelID: ModelID.make(baseModelId) }, + variant: candidateVariant, + } + } + } + return { model: parsed, variant: undefined } } } diff --git a/packages/opencode/src/plugin/index.ts b/packages/opencode/src/plugin/index.ts index 755ce2c2117..24be874e1b0 100644 --- a/packages/opencode/src/plugin/index.ts +++ b/packages/opencode/src/plugin/index.ts @@ -12,12 +12,13 @@ import { Session } from "../session" import { NamedError } from "@opencode-ai/util/error" import { CopilotAuthPlugin } from "./copilot" import { gitlabAuthPlugin as GitlabAuthPlugin } from "@gitlab/opencode-gitlab-auth" +import { KiroAuthPlugin } from "./kiro" export namespace Plugin { const log = Log.create({ service: "plugin" }) // Built-in plugins that are directly imported (not installed from npm) - const INTERNAL_PLUGINS: PluginInstance[] = [CodexAuthPlugin, CopilotAuthPlugin, GitlabAuthPlugin] + const INTERNAL_PLUGINS: PluginInstance[] = [CodexAuthPlugin, CopilotAuthPlugin, GitlabAuthPlugin, KiroAuthPlugin] const state = Instance.state(async () => { const client = createOpencodeClient({ diff --git a/packages/opencode/src/plugin/kiro.ts b/packages/opencode/src/plugin/kiro.ts new file mode 100644 index 00000000000..6abbaa923bb --- /dev/null +++ b/packages/opencode/src/plugin/kiro.ts @@ -0,0 +1,337 @@ +import type { Hooks, PluginInput } from "@opencode-ai/plugin" +import * as path from "path" +import * as os from "os" +import * as crypto from "crypto" + +interface KiroToken { + access_token: string + expires_at: string + refresh_token: string + region: string + start_url: string + oauth_flow: string + scopes: string[] +} + +interface KiroDeviceRegistration { + client_id: string + client_secret: string + client_secret_expires_at: string + region: string + oauth_flow: string + scopes: string[] +} + +interface RefreshTokenResponse { + accessToken: 
string + expiresIn: number + refreshToken?: string +} + +export function getKiroDbPath(): string { + switch (process.platform) { + case "darwin": + return path.join(os.homedir(), "Library/Application Support/kiro-cli/data.sqlite3") + case "win32": + return path.join(process.env.APPDATA || "", "kiro-cli/data.sqlite3") + default: + return path.join(os.homedir(), ".local/share/kiro-cli/data.sqlite3") + } +} + +async function getKiroToken(): Promise { + const dbPath = getKiroDbPath() + const file = Bun.file(dbPath) + if (!(await file.exists())) return null + + try { + const { Database } = await import("bun:sqlite") + const db = new Database(dbPath, { readonly: true }) + const row = db + .query<{ value: string }, [string]>("SELECT value FROM auth_kv WHERE key = ?") + .get("kirocli:odic:token") + db.close() + + if (!row) return null + return JSON.parse(row.value) as KiroToken + } catch { + return null + } +} + +async function getKiroDeviceRegistration(): Promise { + const dbPath = getKiroDbPath() + const file = Bun.file(dbPath) + if (!(await file.exists())) return null + + try { + const { Database } = await import("bun:sqlite") + const db = new Database(dbPath, { readonly: true }) + const row = db + .query<{ value: string }, [string]>("SELECT value FROM auth_kv WHERE key = ?") + .get("kirocli:odic:device-registration") + db.close() + + if (!row) return null + return JSON.parse(row.value) as KiroDeviceRegistration + } catch { + return null + } +} + +async function getKiroProfileArn(): Promise { + const dbPath = getKiroDbPath() + const file = Bun.file(dbPath) + if (!(await file.exists())) return null + + try { + const { Database } = await import("bun:sqlite") + const db = new Database(dbPath, { readonly: true }) + const row = db + .query<{ value: string }, [string]>("SELECT value FROM state WHERE key = ?") + .get("api.codewhisperer.profile") + db.close() + + if (!row) return null + const parsed = JSON.parse(row.value) + return parsed.arn || null + } catch { + return null + 
} +} + +async function saveKiroToken(token: KiroToken): Promise { + const dbPath = getKiroDbPath() + + try { + const { Database } = await import("bun:sqlite") + const db = new Database(dbPath) + db.query("UPDATE auth_kv SET value = ? WHERE key = ?").run(JSON.stringify(token), "kirocli:odic:token") + db.close() + return true + } catch { + return false + } +} + +async function isTokenValid(token: KiroToken): Promise { + try { + const expiresAt = new Date(token.expires_at).getTime() + // Add 5 minute buffer + return expiresAt > Date.now() + 5 * 60 * 1000 + } catch { + return false + } +} + +async function refreshKiroToken(token: KiroToken, registration: KiroDeviceRegistration): Promise { + const region = token.region || "us-east-1" + const ssoOidcEndpoint = `https://oidc.${region}.amazonaws.com/token` + + try { + const response = await fetch(ssoOidcEndpoint, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + grantType: "refresh_token", + clientId: registration.client_id, + clientSecret: registration.client_secret, + refreshToken: token.refresh_token, + }), + }) + + if (!response.ok) { + return null + } + + const data = (await response.json()) as RefreshTokenResponse + + // Create updated token + const newToken: KiroToken = { + ...token, + access_token: data.accessToken, + expires_at: new Date(Date.now() + data.expiresIn * 1000).toISOString(), + refresh_token: data.refreshToken || token.refresh_token, + } + + // Save to database + await saveKiroToken(newToken) + + return newToken + } catch { + return null + } +} + +async function getValidToken(): Promise { + let token = await getKiroToken() + if (!token) return null + + // If token is still valid, return it + if (await isTokenValid(token)) { + return token + } + + // Try to refresh the token + const registration = await getKiroDeviceRegistration() + if (!registration) { + return null + } + + // Attempt refresh + const refreshedToken = await refreshKiroToken(token, 
registration) + if (refreshedToken) { + return refreshedToken + } + + return null +} + +async function runKiroLogin(): Promise { + try { + const proc = Bun.spawn({ + cmd: ["kiro-cli", "login"], + stdio: ["inherit", "inherit", "inherit"], + }) + const exitCode = await proc.exited + return exitCode === 0 + } catch { + return false + } +} + +function regionFromArn(arn: string): string | null { + const parts = arn.split(":") + if (parts.length < 6 || parts[0] !== "arn") return null + return parts[3] || null +} + +export async function KiroAuthPlugin(_input: PluginInput): Promise { + return { + auth: { + provider: "kiro", + async loader(getAuth, provider) { + const info = await getAuth() + if (!info || info.type !== "oauth") return {} + + // Get token to determine region for baseURL + const token = await getKiroToken() + if (!token) return {} + + // Determine API endpoint: SSO users use q.{region} with region from profileArn + const arn = await getKiroProfileArn() + const region = (arn && regionFromArn(arn)) || "us-east-1" + const baseURL = `https://q.${region}.amazonaws.com` + + // Set cost to 0 for subscription models + if (provider?.models) { + for (const model of Object.values(provider.models)) { + model.cost = { + input: 0, + output: 0, + cache: { + read: 0, + write: 0, + }, + } + } + } + + return { + baseURL, + async fetch(request: RequestInfo | URL, init?: RequestInit) { + // Get valid token (auto-refresh if needed) + const currentToken = await getValidToken() + if (!currentToken) { + throw new Error("Kiro CLI token not found or refresh failed. 
Please run 'kiro-cli login'.") + } + + const headers = new Headers(init?.headers) + headers.set("Authorization", `Bearer ${currentToken.access_token}`) + headers.set("x-amzn-codewhisperer-optout", "false") + headers.set("x-amzn-kiro-agent-mode", "vibe") + headers.set("amz-sdk-invocation-id", crypto.randomUUID()) + headers.set("amz-sdk-request", "attempt=1; max=1") + + // Remove any existing API key headers + headers.delete("x-api-key") + + // Inject profileArn for IAM Identity Center (SSO) users + let body = init?.body + if (arn && body) { + try { + const raw = typeof body === "string" ? body : new TextDecoder().decode(body as ArrayBuffer) + const parsed = JSON.parse(raw) + parsed.profileArn = arn + body = JSON.stringify(parsed) + } catch { + // If body isn't JSON, leave it unchanged + } + } + + return fetch(request, { + ...init, + headers, + body, + }) + }, + } + }, + methods: [ + { + type: "oauth", + label: "Use existing Kiro CLI login", + async authorize() { + // Try to get a valid token (with auto-refresh) + let token = await getValidToken() + + // If no valid token, run kiro-cli login + if (!token) { + const loginSuccess = await runKiroLogin() + if (!loginSuccess) { + return { + url: "https://kiro.dev/docs/cli/installation/", + instructions: "Kiro CLI login failed. 
Please ensure Kiro CLI is installed and try again.", + method: "auto" as const, + async callback() { + return { type: "failed" as const } + }, + } + } + // Re-fetch token after login + token = await getKiroToken() + if (!token || !(await isTokenValid(token))) { + return { + url: "", + instructions: "Failed to get valid token after login.", + method: "auto" as const, + async callback() { + return { type: "failed" as const } + }, + } + } + } + + // Token exists and is valid + const expiresAt = new Date(token.expires_at).getTime() + return { + url: "", + instructions: "Using Kiro CLI credentials", + method: "auto" as const, + async callback() { + return { + type: "success" as const, + refresh: token.refresh_token, + access: token.access_token, + expires: expiresAt, + } + }, + } + }, + }, + ], + }, + } +} diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 9c9c8e83438..15b88956a13 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -30,6 +30,7 @@ import { createOpenAI } from "@ai-sdk/openai" import { createOpenAICompatible } from "@ai-sdk/openai-compatible" import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider" import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/copilot" +import { createKiro } from "./sdk/kiro/src" import { createXai } from "@ai-sdk/xai" import { createMistral } from "@ai-sdk/mistral" import { createGroq } from "@ai-sdk/groq" @@ -46,6 +47,7 @@ import { GoogleAuth } from "google-auth-library" import { ProviderTransform } from "./transform" import { Installation } from "../installation" import { ModelID, ProviderID } from "./schema" +import { getKiroDbPath } from "../plugin/kiro" export namespace Provider { const log = Log.create({ service: "provider" }) @@ -127,6 +129,8 @@ export namespace Provider { "@gitlab/gitlab-ai-provider": createGitLab, // @ts-ignore (TODO: kill this code so 
we dont have to maintain it) "@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible, + // @ts-ignore + "@ai-sdk/kiro": createKiro, } type CustomModelLoader = (sdk: any, modelID: string, options?: Record) => Promise @@ -664,6 +668,27 @@ export namespace Provider { }, } }, + kiro: async (input) => { + // Check if Kiro CLI authentication exists + const dbPath = getKiroDbPath() + const hasAuth = await Bun.file(dbPath).exists() + + if (!hasAuth) { + // No auth, hide all models + for (const key of Object.keys(input.models)) { + delete input.models[key] + } + } + + return { + autoload: hasAuth, + options: { + headers: { + "x-kiro-client": "opencode", + }, + }, + } + }, } export const Model = z @@ -859,6 +884,260 @@ export namespace Provider { const configProviders = Object.entries(config.provider ?? {}) + // Add Kiro provider with Claude models + const kiroModels: Record = { + "claude-sonnet-4-5": { + id: ModelID.make("claude-sonnet-4-5"), + providerID: ProviderID.kiro, + name: "Claude Sonnet 4.5", + family: "claude-sonnet", + api: { + id: "claude-sonnet-4-5", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + status: "active", + headers: {}, + options: {}, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 64000 }, + capabilities: { + temperature: true, + reasoning: true, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: true, + }, + release_date: "2025-09-29", + variants: { + high: { + thinking: { + type: "enabled", + budgetTokens: 16000, + }, + }, + max: { + thinking: { + type: "enabled", + budgetTokens: 31999, + }, + }, + }, + }, + "claude-opus-4-5": { + id: ModelID.make("claude-opus-4-5"), + providerID: ProviderID.kiro, + name: "Claude Opus 4.5", + family: "claude-opus", + api: { + id: "claude-opus-4-5", + url: 
"https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + status: "active", + headers: {}, + options: {}, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 32000 }, + capabilities: { + temperature: true, + reasoning: true, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: true, + }, + release_date: "2025-11-01", + variants: { + high: { + thinking: { + type: "enabled", + budgetTokens: 16000, + }, + }, + max: { + thinking: { + type: "enabled", + budgetTokens: 31999, + }, + }, + }, + }, + "claude-opus-4-6": { + id: ModelID.make("claude-opus-4-6"), + providerID: ProviderID.kiro, + name: "Claude Opus 4.6", + family: "claude-opus", + api: { + id: "claude-opus-4-6", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + status: "active", + headers: {}, + options: {}, + cost: { input: 15, output: 75, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 32000 }, + capabilities: { + temperature: true, + reasoning: true, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: true, + }, + release_date: "2025-06-01", + variants: { + high: { + thinking: { + type: "enabled", + budgetTokens: 16000, + }, + }, + max: { + thinking: { + type: "enabled", + budgetTokens: 31999, + }, + }, + }, + }, + "claude-sonnet-4-6": { + id: ModelID.make("claude-sonnet-4-6"), + providerID: ProviderID.kiro, + name: "Claude Sonnet 4.6", + family: "claude-sonnet", + api: { + id: "claude-sonnet-4-6", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + status: "active", + headers: {}, + options: {}, + cost: { input: 3, output: 15, cache: { read: 0, 
write: 0 } }, + limit: { context: 200000, output: 64000 }, + capabilities: { + temperature: true, + reasoning: true, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: true, + }, + release_date: "2025-06-01", + variants: { + high: { + thinking: { + type: "enabled", + budgetTokens: 16000, + }, + }, + max: { + thinking: { + type: "enabled", + budgetTokens: 63999, + }, + }, + }, + }, + "claude-haiku-4-5": { + id: ModelID.make("claude-haiku-4-5"), + providerID: ProviderID.kiro, + name: "Claude Haiku 4.5", + family: "claude-haiku", + api: { + id: "claude-haiku-4-5", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + status: "active", + headers: {}, + options: {}, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 8192 }, + capabilities: { + temperature: true, + reasoning: false, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: false, + }, + release_date: "2025-10-01", + variants: {}, + }, + "claude-sonnet-4": { + id: ModelID.make("claude-sonnet-4"), + providerID: ProviderID.kiro, + name: "Claude Sonnet 4", + family: "claude-sonnet", + api: { + id: "claude-sonnet-4", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + status: "active", + headers: {}, + options: {}, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 64000 }, + capabilities: { + temperature: true, + reasoning: false, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: false, 
+ }, + release_date: "2025-05-14", + variants: {}, + }, + "claude-3-7-sonnet": { + id: ModelID.make("claude-3-7-sonnet"), + providerID: ProviderID.kiro, + name: "Claude 3.7 Sonnet", + family: "claude-sonnet", + api: { + id: "claude-3-7-sonnet", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + status: "active", + headers: {}, + options: {}, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 64000 }, + capabilities: { + temperature: true, + reasoning: true, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: { field: "reasoning_content" }, + }, + release_date: "2025-02-19", + variants: {}, + }, + } + + database["kiro"] = { + id: ProviderID.kiro, + name: "Kiro (AWS)", + source: "custom", + env: [], + options: {}, + models: kiroModels, + } + function mergeProvider(providerID: ProviderID, provider: Partial) { const existing = providers[providerID] if (existing) { @@ -1231,13 +1510,21 @@ export namespace Provider { } const info = provider.models[modelID] - if (!info) { - const availableModels = Object.keys(provider.models) - const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 }) - const suggestions = matches.map((m) => m.target) - throw new ModelNotFoundError({ providerID, modelID, suggestions }) + if (info) return info + + // Try extracting variant suffix joined by hyphen (e.g., "claude-opus-4-6-high" -> "claude-opus-4-6") + const lastHyphen = modelID.lastIndexOf("-") + if (lastHyphen > 0) { + const baseModelId = modelID.slice(0, lastHyphen) + const candidateVariant = modelID.slice(lastHyphen + 1) + const baseInfo = provider.models[baseModelId] + if (baseInfo?.variants && candidateVariant in baseInfo.variants) return baseInfo } - return info + + const availableModels = Object.keys(provider.models) + 
const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 }) + const suggestions = matches.map((m) => m.target) + throw new ModelNotFoundError({ providerID, modelID, suggestions }) } export async function getLanguage(model: Model): Promise { diff --git a/packages/opencode/src/provider/schema.ts b/packages/opencode/src/provider/schema.ts index 71c8a1029cd..70a71f10d0e 100644 --- a/packages/opencode/src/provider/schema.ts +++ b/packages/opencode/src/provider/schema.ts @@ -23,6 +23,7 @@ export const ProviderID = providerIdSchema.pipe( openrouter: schema.makeUnsafe("openrouter"), mistral: schema.makeUnsafe("mistral"), gitlab: schema.makeUnsafe("gitlab"), + kiro: schema.makeUnsafe("kiro"), })), ) diff --git a/packages/opencode/src/provider/sdk/kiro/src/converters.ts b/packages/opencode/src/provider/sdk/kiro/src/converters.ts new file mode 100644 index 00000000000..1cc8926261c --- /dev/null +++ b/packages/opencode/src/provider/sdk/kiro/src/converters.ts @@ -0,0 +1,741 @@ +import type { + LanguageModelV2FunctionTool, + LanguageModelV2Prompt, + LanguageModelV2ToolCallPart, + LanguageModelV2ToolResultPart, +} from "@ai-sdk/provider" +import { estimatePayloadTokens, countTokens } from "./tokenizer" +export interface KiroTool { + toolSpecification: { + name: string + description: string + inputSchema: { json: object } + } +} + +export interface KiroToolResult { + content: Array<{ text: string }> + status: "success" | "error" + toolUseId: string +} + +export interface KiroEnvState { + operatingSystem: string + currentWorkingDirectory: string +} + +export interface KiroHistoryItem { + userInputMessage?: { + content: string + modelId: string + origin: string + userInputMessageContext?: { + tools?: KiroTool[] + toolResults?: KiroToolResult[] + envState?: KiroEnvState + } + } + assistantResponseMessage?: { + content: string + messageId?: string + modelId?: string + toolUses?: Array<{ + name: string + toolUseId: string + input: unknown + }> + reasoning?: 
{ + thinking?: string + } + } +} + +export interface KiroPayload { + conversationState: { + chatTriggerType: "MANUAL" + conversationId: string + currentMessage: { + userInputMessage: { + content: string + modelId: string + origin: string + userInputMessageContext?: { + tools?: KiroTool[] + toolResults?: KiroToolResult[] + envState?: KiroEnvState + } + } + } + history: KiroHistoryItem[] + } + profileArn?: string +} + +function extractTextContent( + content: + | string + | Array< + | { type: "text"; text: string } + | { type: "image"; image: unknown; mimeType?: string } + | { type: "file"; data: unknown; mimeType?: string } + >, +): string { + if (typeof content === "string") return content + return content + .filter((part): part is { type: "text"; text: string } => part.type === "text") + .map((part) => part.text) + .join("") +} + +/** + * Sanitizes JSON Schema from fields that Kiro API doesn't accept. + * + * Kiro API returns 400 "Improperly formed request" error if: + * - required is an empty array [] + * - additionalProperties is present in schema + */ +function sanitizeJsonSchema(schema: Record | undefined): Record { + if (!schema || Object.keys(schema).length === 0) return { type: "object", properties: {} } + + const result: Record = {} + + for (const [key, value] of Object.entries(schema)) { + // Skip empty required arrays + if (key === "required" && Array.isArray(value) && value.length === 0) { + continue + } + + // Skip additionalProperties - Kiro API doesn't support it + if (key === "additionalProperties") { + continue + } + + // Skip JSON Schema meta fields the Kiro API doesn't support + if ( + key === "$schema" || + key === "$defs" || + key === "$ref" || + key === "ref" || + key === "definitions" || + key === "examples" || + key === "default" + ) { + continue + } + + // Recursively process nested objects + if (key === "properties" && typeof value === "object" && value !== null) { + const properties: Record = {} + for (const [propName, propValue] of 
Object.entries(value as Record)) { + properties[propName] = + typeof propValue === "object" && propValue !== null + ? sanitizeJsonSchema(propValue as Record) + : propValue + } + result[key] = properties + } else if (typeof value === "object" && value !== null && !Array.isArray(value)) { + result[key] = sanitizeJsonSchema(value as Record) + } else if (Array.isArray(value)) { + // Process arrays (e.g., anyOf, oneOf) + result[key] = value.map((item) => + typeof item === "object" && item !== null ? sanitizeJsonSchema(item as Record) : item, + ) + } else { + result[key] = value + } + } + + return result +} + +function convertTools(tools?: LanguageModelV2FunctionTool[]): KiroTool[] | undefined { + if (!tools || tools.length === 0) return undefined + + return tools.map((tool) => ({ + toolSpecification: { + name: tool.name, + description: (tool.description || `Tool: ${tool.name}`).slice(0, 10000), + inputSchema: { json: sanitizeJsonSchema(tool.inputSchema as Record) }, + }, + })) +} + +function convertToolResults(parts: LanguageModelV2ToolResultPart[]): KiroToolResult[] { + return parts.map((part) => { + let outputText: string + + // Handle LanguageModelV2ToolResultOutput format + const output = part.output as unknown + if (output && typeof output === "object" && "type" in output && "value" in output) { + // Standard LanguageModelV2ToolResultOutput format: { type: 'text'|'json'|'error-text', value: ... 
} + const typed = output as { type: string; value: unknown } + if (typed.type === "text" || typed.type === "error-text") { + outputText = String(typed.value) + } else if (typed.type === "json") { + outputText = JSON.stringify(typed.value) + } else { + outputText = JSON.stringify(typed.value) + } + } else if (Array.isArray(output)) { + // Array of content parts (legacy format) + outputText = output + .map((item) => { + if (typeof item === "string") return item + if (item && typeof item === "object" && "text" in item) return String(item.text) + if (item && typeof item === "object" && "value" in item) return String(item.value) + return JSON.stringify(item) + }) + .join("") + } else if (typeof output === "string") { + // Direct string (legacy format) + outputText = output + } else { + // Fallback + outputText = JSON.stringify(output) + } + + // Determine status based on output type + const isError = + output && typeof output === "object" && "type" in output && (output as { type: string }).type === "error-text" + const status = isError ? ("error" as const) : ("success" as const) + + return { + content: [{ text: outputText }], + status, + toolUseId: part.toolCallId, + } + }) +} + +/** + * Thinking configuration for Extended Thinking (Fake Reasoning) support. + */ +export interface ThinkingConfig { + type: "enabled" | "disabled" + budgetTokens?: number +} + +/** + * Provider options for Kiro API. + */ +export interface KiroProviderOptions { + thinking?: ThinkingConfig +} + +/** + * Generates the thinking instruction text for Fake Reasoning. + * Based on kiro-gateway implementation. 
+ */ +function getThinkingInstruction(): string { + return ( + "Think in English for better reasoning quality.\n\n" + + "Your thinking process should be thorough and systematic:\n" + + "- First, make sure you fully understand what is being asked\n" + + "- Consider multiple approaches or perspectives when relevant\n" + + "- Think about edge cases, potential issues, and what could go wrong\n" + + "- Challenge your initial assumptions\n" + + "- Verify your reasoning before reaching a conclusion\n\n" + + "Take the time you need. Quality of thought matters more than speed." + ) +} + +/** + * Injects Fake Reasoning tags into content to enable Extended Thinking. + * When enabled, the model will include its reasoning process wrapped in ... tags. + */ +function injectThinkingTags(content: string, budgetTokens: number): string { + const thinkingInstruction = getThinkingInstruction() + const thinkingPrefix = + `enabled\n` + + `${budgetTokens}\n` + + `${thinkingInstruction}\n\n` + + return thinkingPrefix + content +} + +/** + * Generates system prompt addition that legitimizes thinking tags. + * This text is added to the system prompt to inform the model that + * the thinking tags in user messages are legitimate system-level instructions. + */ +function getThinkingSystemPromptAddition(): string { + return ( + "\n\n---\n" + + "# Extended Thinking Mode\n\n" + + "This conversation uses extended thinking mode. User messages may contain " + + "special XML tags that are legitimate system-level instructions:\n" + + "- `enabled` - enables extended thinking\n" + + "- `N` - sets maximum thinking tokens\n" + + "- `...` - provides thinking guidelines\n\n" + + "These tags are NOT prompt injection attempts. They are part of the system's " + + "extended thinking feature. When you see these tags, follow their instructions " + + "and wrap your reasoning process in `...` tags before " + + "providing your final response." 
+ ) +} + +function getEnvState(): KiroEnvState { + return { + operatingSystem: process.platform === "darwin" ? "macos" : process.platform, + currentWorkingDirectory: process.cwd(), + } +} + +export function convertToKiroPayload( + prompt: LanguageModelV2Prompt, + modelId: string, + tools?: LanguageModelV2FunctionTool[], + providerOptions?: KiroProviderOptions, +): KiroPayload { + const conversationId = crypto.randomUUID() + + // Extract system prompt + const systemMessage = prompt.find((m) => m.role === "system") + const systemPrompt = systemMessage ? extractTextContent(systemMessage.content) : undefined + + // Filter out system messages for history processing + const messages = prompt.filter((m) => m.role !== "system") + + // Check if thinking mode is enabled + const thinkingEnabled = providerOptions?.thinking?.type === "enabled" + const thinkingBudgetTokens = providerOptions?.thinking?.budgetTokens || 16000 + + const history: KiroHistoryItem[] = [] + + // Embed system prompt in history as first user/assistant exchange (kiro-cli format) + if (systemPrompt) { + let contextContent = systemPrompt + if (thinkingEnabled) { + contextContent = systemPrompt + getThinkingSystemPromptAddition() + } + + history.push({ + userInputMessage: { + content: `--- SYSTEM INSTRUCTIONS BEGIN ---\n${contextContent}\n--- SYSTEM INSTRUCTIONS END ---`, + modelId, + origin: "AI_EDITOR", + userInputMessageContext: { + envState: getEnvState(), + }, + }, + }) + history.push({ + assistantResponseMessage: { + content: + "I will follow these instructions carefully. 
When a user's request matches an available skill's description, I will use the skill tool to load and follow the skill's instructions.", + }, + }) + } + + let currentUserContent = "" + let currentToolResults: KiroToolResult[] = [] + let hasAnyToolResults = false // Track if any tool results exist in the conversation + + for (let i = 0; i < messages.length - 1; i++) { + const message = messages[i] + + if (message.role === "user" || message.role === "tool") { + // Collect tool results from user or tool message + // Note: Type assertion needed as LanguageModelV2UserContent type doesn't include tool-result + // but the AI SDK actually sends tool results in user messages + // Also, AI SDK sends tool role messages containing tool results + const toolResultParts: LanguageModelV2ToolResultPart[] = [] + const contentArray = Array.isArray(message.content) ? message.content : [message.content] + for (const part of contentArray as unknown as Array<{ type: string } & Record>) { + if (part.type === "tool-result") { + toolResultParts.push(part as unknown as LanguageModelV2ToolResultPart) + } + } + if (toolResultParts.length > 0) { + currentToolResults.push(...convertToolResults(toolResultParts)) + hasAnyToolResults = true + } + + // Collect text content (only for user role, tool role doesn't have text) + if (message.role === "user") { + const textContent = message.content + .filter((part): part is { type: "text"; text: string } => part.type === "text") + .map((part) => part.text) + .join("") + if (textContent) { + currentUserContent = textContent + } + } + } else if (message.role === "assistant") { + // Flush pending user message before processing assistant response + // Skip if content is empty/whitespace-only AND no tool results + if ((currentUserContent && currentUserContent.trim()) || currentToolResults.length > 0) { + // Check if the previous history item is also a user message + // Kiro API requires alternating user/assistant messages, so we merge consecutive users + const 
lastItem = history[history.length - 1] + if (lastItem?.userInputMessage && !lastItem.assistantResponseMessage) { + // Merge with previous user message + const lastUser = lastItem.userInputMessage + if (currentUserContent && currentUserContent.trim()) { + lastUser.content = (lastUser.content ? lastUser.content + "\n\n" : "") + currentUserContent + } + // Merge toolResults if present + if (currentToolResults.length > 0) { + if (!lastUser.userInputMessageContext) lastUser.userInputMessageContext = {} + if (!lastUser.userInputMessageContext.toolResults) lastUser.userInputMessageContext.toolResults = [] + lastUser.userInputMessageContext.toolResults.push(...currentToolResults) + } + } else { + // Normal case: add new history item + const historyItem: KiroHistoryItem = { + userInputMessage: { + content: currentUserContent, + modelId, + origin: "AI_EDITOR", + userInputMessageContext: { + ...(currentToolResults.length > 0 && { toolResults: currentToolResults }), + ...(tools && { tools: convertTools(tools) }), + envState: getEnvState(), + }, + }, + } + history.push(historyItem) + } + currentUserContent = "" + currentToolResults = [] + } + + // Process assistant message + const textContent = message.content + .filter((part): part is { type: "text"; text: string } => part.type === "text") + .map((part) => part.text) + .join("") + + const toolCalls: LanguageModelV2ToolCallPart[] = [] + for (const part of message.content) { + if (part.type === "tool-call") { + toolCalls.push(part as LanguageModelV2ToolCallPart) + } + } + + const reasoningParts = message.content.filter( + (part): part is { type: "reasoning"; text: string } => part.type === "reasoning", + ) + + const assistantItem: KiroHistoryItem = { + assistantResponseMessage: { + content: textContent || "(empty)", + messageId: crypto.randomUUID(), + modelId, + ...(toolCalls.length > 0 && { + toolUses: toolCalls.map((tc) => { + // input can be a JSON string or object - ensure it's an object + let inputObj: unknown + if (typeof 
tc.input === "string") { + try { + inputObj = JSON.parse(tc.input) + } catch { + inputObj = {} + } + } else { + inputObj = tc.input ?? {} + } + return { + name: tc.toolName, + toolUseId: tc.toolCallId, + input: inputObj, + } + }), + }), + ...(reasoningParts.length > 0 && { + reasoning: { + thinking: reasoningParts.map((r) => r.text).join("\n"), + }, + }), + }, + } + + // Check if the previous history item is also an assistant message + // Kiro API requires alternating user/assistant messages, so we merge consecutive assistants + const lastItem = history[history.length - 1] + if (lastItem?.assistantResponseMessage) { + // Merge with previous assistant message + const lastAssistant = lastItem.assistantResponseMessage + lastAssistant.content += "\n\n" + (textContent || "(empty)") + + // Merge toolUses if present + if (toolCalls.length > 0) { + if (!lastAssistant.toolUses) lastAssistant.toolUses = [] + lastAssistant.toolUses.push( + ...toolCalls.map((tc) => { + let inputObj: unknown + if (typeof tc.input === "string") { + try { + inputObj = JSON.parse(tc.input) + } catch { + inputObj = {} + } + } else { + inputObj = tc.input ?? {} + } + return { + name: tc.toolName, + toolUseId: tc.toolCallId, + input: inputObj, + } + }), + ) + } + + // Merge reasoning if present + if (reasoningParts.length > 0) { + if (!lastAssistant.reasoning) lastAssistant.reasoning = { thinking: "" } + lastAssistant.reasoning.thinking = + (lastAssistant.reasoning.thinking ? 
lastAssistant.reasoning.thinking + "\n" : "") + + reasoningParts.map((r) => r.text).join("\n") + } + } else { + // Normal case: add new history item + history.push(assistantItem) + } + } + } + + // Process the last message as current message + const lastMessage = messages[messages.length - 1] + let lastUserContent = "" + let lastToolResults: KiroToolResult[] = [] + + if (lastMessage?.role === "user" || lastMessage?.role === "tool") { + const toolResultParts: LanguageModelV2ToolResultPart[] = [] + const contentArray = Array.isArray(lastMessage.content) ? lastMessage.content : [lastMessage.content] + for (const part of contentArray as unknown as Array<{ type: string } & Record>) { + if (part.type === "tool-result") { + toolResultParts.push(part as unknown as LanguageModelV2ToolResultPart) + } + } + if (toolResultParts.length > 0) { + lastToolResults = convertToolResults(toolResultParts) + hasAnyToolResults = true + } + + // Collect text content (only for user role, tool role doesn't have text) + if (lastMessage.role === "user") { + const textContent = lastMessage.content + .filter((part): part is { type: "text"; text: string } => part.type === "text") + .map((part) => part.text) + .join("") + lastUserContent = textContent + } + } + + // Build userInputMessageContext - only include if has content + const userInputMessageContext: { + tools?: KiroTool[] + toolResults?: KiroToolResult[] + envState?: KiroEnvState + } = {} + + const kiroTools = convertTools(tools) + if (kiroTools) { + userInputMessageContext.tools = kiroTools + } + + if (lastToolResults.length > 0) { + userInputMessageContext.toolResults = lastToolResults + } + + userInputMessageContext.envState = getEnvState() + + // Inject thinking tags into user content if thinking mode is enabled + let finalUserContent = lastUserContent || "." 
+ if (thinkingEnabled && lastUserContent) { + finalUserContent = injectThinkingTags(lastUserContent, thinkingBudgetTokens) + } + + // Build userInputMessage + const userInputMessage: { + content: string + modelId: string + origin: string + userInputMessageContext?: typeof userInputMessageContext + } = { + content: finalUserContent, // Use minimal content to avoid triggering AI to "continue" + modelId, + origin: "AI_EDITOR", + } + + // Only add userInputMessageContext if it has content + if (Object.keys(userInputMessageContext).length > 0) { + userInputMessage.userInputMessageContext = userInputMessageContext + } + + // If no tools are defined in this request, strip all toolUses/toolResults from history. + // Kiro API rejects requests with toolUses in history but no tools definition (e.g. compaction). + if (!kiroTools) { + for (const item of history) { + if (item.assistantResponseMessage?.toolUses) delete item.assistantResponseMessage.toolUses + if (item.userInputMessage?.userInputMessageContext?.toolResults) + delete item.userInputMessage.userInputMessageContext.toolResults + } + const filtered = history.filter((item) => { + if ( + item.assistantResponseMessage && + item.assistantResponseMessage.content === "(empty)" && + !item.assistantResponseMessage.reasoning?.thinking?.trim() + ) { + delete item.assistantResponseMessage + } + if ( + item.userInputMessage && + (!item.userInputMessage.content || item.userInputMessage.content === ".") && + !item.userInputMessage.userInputMessageContext?.toolResults?.length + ) { + delete item.userInputMessage + } + return Boolean(item.assistantResponseMessage || item.userInputMessage) + }) + history.length = 0 + history.push(...filtered) + + // After filtering, consecutive same-role messages can appear (e.g. two assistants + // when a "." user between them was removed). Kiro API requires alternating roles, + // so merge consecutive items of the same role. 
+ for (let i = history.length - 1; i > 0; i--) { + const prev = history[i - 1] + const curr = history[i] + const prevIsUser = !!prev.userInputMessage && !prev.assistantResponseMessage + const currIsUser = !!curr.userInputMessage && !curr.assistantResponseMessage + const prevIsAssistant = !!prev.assistantResponseMessage && !prev.userInputMessage + const currIsAssistant = !!curr.assistantResponseMessage && !curr.userInputMessage + + if (prevIsUser && currIsUser) { + const p = prev.userInputMessage! + const c = curr.userInputMessage! + p.content = [p.content, c.content].filter((t) => t && t !== ".").join("\n\n") || "." + history.splice(i, 1) + } else if (prevIsAssistant && currIsAssistant) { + const p = prev.assistantResponseMessage! + const c = curr.assistantResponseMessage! + p.content = [p.content, c.content].filter((t) => t && t !== "(empty)").join("\n\n") || "(empty)" + if (c.reasoning?.thinking) { + p.reasoning = p.reasoning ?? { thinking: "" } + p.reasoning.thinking = [p.reasoning.thinking, c.reasoning.thinking].filter(Boolean).join("\n") + } + history.splice(i, 1) + } + } + lastToolResults = [] + delete userInputMessageContext.toolResults + } + + // Validate entire history: toolUses/toolResults must be paired. + // After compaction/pruning, these pairs can become mismatched + // causing "Improperly formed request" error from Kiro API. + // Strategy: delete orphan toolUses/toolResults to avoid model retrying completed tools. + for (let i = 0; i < history.length - 1; i++) { + const item = history[i] + const next = history[i + 1] + const uses = item.assistantResponseMessage?.toolUses ?? [] + const results = next?.userInputMessage?.userInputMessageContext?.toolResults ?? 
[] + + if (uses.length > 0 && results.length === 0) { + delete item.assistantResponseMessage!.toolUses + } else if (uses.length === 0 && results.length > 0) { + delete next.userInputMessage!.userInputMessageContext!.toolResults + } else if (uses.length > 0 && results.length > 0) { + const resultIds = new Set(results.map((r) => r.toolUseId)) + const useIds = new Set(uses.map((u) => u.toolUseId)) + item.assistantResponseMessage!.toolUses = uses.filter((u) => resultIds.has(u.toolUseId)) + next.userInputMessage!.userInputMessageContext!.toolResults = results.filter((r) => useIds.has(r.toolUseId)) + if (item.assistantResponseMessage!.toolUses.length === 0) delete item.assistantResponseMessage!.toolUses + if (next.userInputMessage!.userInputMessageContext!.toolResults!.length === 0) + delete next.userInputMessage!.userInputMessageContext!.toolResults + } + } + + // Validate last history item against current message's toolResults + const lastHistoryItem = history[history.length - 1] + const lastUses = lastHistoryItem?.assistantResponseMessage?.toolUses ?? 
[] + if (lastUses.length > 0 && lastToolResults.length === 0) { + delete lastHistoryItem.assistantResponseMessage!.toolUses + } else if (lastUses.length === 0 && lastToolResults.length > 0) { + lastToolResults = [] + delete userInputMessageContext.toolResults + } else if (lastUses.length > 0 && lastToolResults.length > 0) { + const resultIds = new Set(lastToolResults.map((r) => r.toolUseId)) + const useIds = new Set(lastUses.map((u) => u.toolUseId)) + lastHistoryItem.assistantResponseMessage!.toolUses = lastUses.filter((u) => resultIds.has(u.toolUseId)) + lastToolResults = lastToolResults.filter((r) => useIds.has(r.toolUseId)) + if (lastHistoryItem.assistantResponseMessage!.toolUses.length === 0) + delete lastHistoryItem.assistantResponseMessage!.toolUses + if (lastToolResults.length === 0) delete userInputMessageContext.toolResults + else userInputMessageContext.toolResults = lastToolResults + } + + // First history user item should not have orphan toolResults + if (history.length > 0 && history[0].userInputMessage?.userInputMessageContext?.toolResults?.length) { + delete history[0].userInputMessage.userInputMessageContext.toolResults + } + + // Build conversationState + const conversationState: KiroPayload["conversationState"] = { + chatTriggerType: "MANUAL", + conversationId, + currentMessage: { userInputMessage }, + history, + } + + const payload: KiroPayload = { conversationState } + return cleanKiroPayload(payload) +} + +function cleanKiroPayload(payload: KiroPayload): KiroPayload { + // Clean history items: remove fields only needed in currentMessage + for (const item of payload.conversationState.history) { + if (item.userInputMessage) { + delete (item.userInputMessage as any).modelId + delete (item.userInputMessage as any).origin + const ctx = item.userInputMessage.userInputMessageContext + if (ctx) delete (ctx as any).tools + } + if (item.assistantResponseMessage) { + delete (item.assistantResponseMessage as any).messageId + delete 
(item.assistantResponseMessage as any).modelId + } + } + // Sanitize empty user messages — Kiro API rejects content: "" + for (const item of payload.conversationState.history) { + if (item.userInputMessage && !item.userInputMessage.content) { + item.userInputMessage.content = "." + } + } + fixOrphanedToolResults(payload) + ensureAlternation(payload) + return payload +} +function fixOrphanedToolResults(payload: KiroPayload) { + const history = payload.conversationState.history + const ids = new Set() + for (const item of history) { + for (const tu of item.assistantResponseMessage?.toolUses ?? []) ids.add(tu.toolUseId) + } + for (const item of history) { + const ctx = item.userInputMessage?.userInputMessageContext + if (ctx?.toolResults) ctx.toolResults = ctx.toolResults.filter((tr) => ids.has(tr.toolUseId)) + } + // Also fix currentMessage toolResults + const cmCtx = payload.conversationState.currentMessage.userInputMessage.userInputMessageContext + if (cmCtx?.toolResults) cmCtx.toolResults = cmCtx.toolResults.filter((tr) => ids.has(tr.toolUseId)) +} +function ensureAlternation(payload: KiroPayload) { + const history = payload.conversationState.history + let i = 1 + while (i < history.length) { + const prevIsUser = "userInputMessage" in history[i - 1] + const currIsUser = "userInputMessage" in history[i] + if (prevIsUser === currIsUser) { + if (currIsUser) { + history.splice(i, 0, { assistantResponseMessage: { content: "" } } as any) + } else { + history.splice(i, 0, { userInputMessage: { content: ".", userInputMessageContext: {} } } as any) + } + } + i++ + } +} diff --git a/packages/opencode/src/provider/sdk/kiro/src/index.ts b/packages/opencode/src/provider/sdk/kiro/src/index.ts new file mode 100644 index 00000000000..a70d3fdee9b --- /dev/null +++ b/packages/opencode/src/provider/sdk/kiro/src/index.ts @@ -0,0 +1,2 @@ +export { createKiro } from "./kiro-provider" +export type { KiroProvider, KiroProviderSettings } from "./kiro-provider" diff --git 
a/packages/opencode/src/provider/sdk/kiro/src/kiro-language-model.ts b/packages/opencode/src/provider/sdk/kiro/src/kiro-language-model.ts new file mode 100644 index 00000000000..ed6786a7b91 --- /dev/null +++ b/packages/opencode/src/provider/sdk/kiro/src/kiro-language-model.ts @@ -0,0 +1,469 @@ +import type { + LanguageModelV2, + LanguageModelV2CallWarning, + LanguageModelV2Content, + LanguageModelV2FinishReason, + LanguageModelV2FunctionTool, + LanguageModelV2StreamPart, + LanguageModelV2Usage, +} from "@ai-sdk/provider" +import type { FetchFunction } from "@ai-sdk/provider-utils" +import { convertToKiroPayload, type KiroProviderOptions } from "./converters" +import { normalizeModelName } from "./model-resolver" +import { parseAwsEventStream, type KiroEvent } from "./streaming" +import { estimatePayloadTokens, countTokens } from "./tokenizer" + +export interface KiroLanguageModelConfig { + provider: string + apiKey?: string + baseURL: string + headers?: Record + fetch?: FetchFunction +} + +function headersToRecord(headers: Headers): Record { + const result: Record = {} + headers.forEach((value, key) => { + result[key] = value + }) + return result +} + +export class KiroLanguageModel implements LanguageModelV2 { + readonly specificationVersion = "v2" + readonly modelId: string + private readonly config: KiroLanguageModelConfig + + readonly supportedUrls: Record = { + "image/*": [/^https?:\/\/.*$/], + "application/pdf": [/^https?:\/\/.*$/], + } + + constructor(modelId: string, config: KiroLanguageModelConfig) { + this.modelId = modelId + this.config = config + } + + get provider(): string { + return this.config.provider + } + + async doGenerate( + options: Parameters[0], + ): Promise>> { + const result = await this.doStream(options) + const reader = result.stream.getReader() + + const content: LanguageModelV2Content[] = [] + let finishReason: LanguageModelV2FinishReason = "unknown" + const usage: LanguageModelV2Usage = { + inputTokens: undefined, + outputTokens: 
undefined, + totalTokens: undefined, + } + const warnings: LanguageModelV2CallWarning[] = [] + + let currentText = "" + let currentTextId: string | null = null + const toolCalls: Map = new Map() + let currentReasoning = "" + let currentReasoningId: string | null = null + + while (true) { + const { done, value } = await reader.read() + if (done) break + + switch (value.type) { + case "stream-start": + warnings.push(...(value.warnings || [])) + break + + case "text-start": + currentTextId = value.id + currentText = "" + break + + case "text-delta": + currentText += value.delta + break + + case "text-end": + if (currentText) { + content.push({ + type: "text", + text: currentText, + }) + } + currentTextId = null + break + + case "reasoning-start": + currentReasoningId = value.id + currentReasoning = "" + break + + case "reasoning-delta": + currentReasoning += value.delta + break + + case "reasoning-end": + if (currentReasoning) { + content.push({ + type: "reasoning", + text: currentReasoning, + }) + } + currentReasoningId = null + break + + case "tool-input-start": + toolCalls.set(value.id, { toolName: value.toolName, input: "" }) + break + + case "tool-input-delta": + const toolCall = toolCalls.get(value.id) + if (toolCall) { + toolCall.input += value.delta + } + break + + case "tool-call": + content.push({ + type: "tool-call", + toolCallId: value.toolCallId, + toolName: value.toolName, + input: value.input, + }) + break + + case "finish": + finishReason = value.finishReason + if (value.usage) { + usage.inputTokens = value.usage.inputTokens + usage.outputTokens = value.usage.outputTokens + usage.totalTokens = value.usage.totalTokens + } + break + } + } + + // Handle any remaining text + if (currentTextId && currentText) { + content.push({ + type: "text", + text: currentText, + }) + } + + // Handle any remaining reasoning + if (currentReasoningId && currentReasoning) { + content.push({ + type: "reasoning", + text: currentReasoning, + }) + } + + return { + content, + 
finishReason, + usage, + warnings, + request: result.request, + response: result.response, + } + } + + async doStream( + options: Parameters[0], + ): Promise>> { + const kiroModelId = normalizeModelName(this.modelId) + const functionTools = options.tools?.filter((tool): tool is LanguageModelV2FunctionTool => tool.type === "function") + + // Extract Kiro-specific provider options for thinking mode + const kiroProviderOptions: KiroProviderOptions | undefined = options.providerOptions?.kiro as + | KiroProviderOptions + | undefined + + const payload = convertToKiroPayload(options.prompt, kiroModelId, functionTools, kiroProviderOptions) + + // Pre-flight context overflow check — let the compaction system handle it + const KIRO_CONTEXT_LIMIT = 210_000 + const KIRO_PAYLOAD_BYTE_LIMIT = 450_000 + const estimated = estimatePayloadTokens(payload) + const payloadBytes = Buffer.byteLength(JSON.stringify(payload), "utf-8") + const historyLen = payload.conversationState.history?.length ?? 0 + if (estimated > KIRO_CONTEXT_LIMIT || payloadBytes > KIRO_PAYLOAD_BYTE_LIMIT) { + const { APICallError } = await import("ai") + throw new APICallError({ + message: `Input token count ${estimated} exceeds the maximum ${KIRO_CONTEXT_LIMIT} for this model`, + url: `${this.config.baseURL}/generateAssistantResponse`, + requestBodyValues: {}, + statusCode: 400, + responseBody: `estimated ${estimated} tokens / ${payloadBytes} bytes exceeds context window`, + isRetryable: false, + }) + } + + // 意味のあるコンテンツがない場合は早期リターン(無限ループ防止) + const currentMessage = payload.conversationState.currentMessage.userInputMessage + const hasUserContent = currentMessage.content && currentMessage.content !== "." + const hasToolResults = (currentMessage.userInputMessageContext?.toolResults?.length ?? 
0) > 0 + + if (!hasUserContent && !hasToolResults) { + // 空のストリームを返して終了 + return { + stream: new ReadableStream({ + start(controller) { + controller.enqueue({ type: "stream-start", warnings: [] }) + controller.enqueue({ + type: "finish", + finishReason: "stop", + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }) + controller.close() + }, + }), + request: { body: payload }, + response: { headers: {} }, + } + } + + const headers: Record = { + "Content-Type": "application/json", + Accept: "application/vnd.amazon.eventstream", + ...this.config.headers, + } + + if (this.config.apiKey) { + headers["Authorization"] = `Bearer ${this.config.apiKey}` + } + + // Merge with request headers + const requestHeaders: Record = { ...headers } + if (options.headers) { + for (const [key, value] of Object.entries(options.headers)) { + if (value !== undefined) { + requestHeaders[key] = value + } + } + } + + const fetchFn = this.config.fetch ?? fetch + const url = `${this.config.baseURL}/generateAssistantResponse` + + const response = await fetchFn(url, { + method: "POST", + headers: requestHeaders, + body: JSON.stringify(payload), + signal: options.abortSignal, + }) + + if (!response.ok) { + const errorText = await response.text() + const fs = await import("fs") + const debugPayloadBytes = Buffer.byteLength(JSON.stringify(payload), "utf-8") + fs.writeFileSync("/tmp/kiro-payload-error.json", JSON.stringify({ status: response.status, statusText: response.statusText, errorText, payloadBytes: debugPayloadBytes, payload }, null, 2)) + const { APICallError } = await import("ai") + throw new APICallError({ + message: `${response.status} ${response.statusText}`, + url, + requestBodyValues: payload as unknown as Record, + statusCode: response.status, + responseBody: errorText, + isRetryable: response.status >= 500, + }) + } + + if (!response.body) { + throw new Error("Response body is empty") + } + + const warnings: LanguageModelV2CallWarning[] = [] + + // Handle unsupported 
settings + if (options.topK != null) { + warnings.push({ type: "unsupported-setting", setting: "topK" }) + } + if (options.presencePenalty != null) { + warnings.push({ type: "unsupported-setting", setting: "presencePenalty" }) + } + if (options.frequencyPenalty != null) { + warnings.push({ type: "unsupported-setting", setting: "frequencyPenalty" }) + } + if (options.seed != null) { + warnings.push({ type: "unsupported-setting", setting: "seed" }) + } + if (options.stopSequences != null) { + warnings.push({ type: "unsupported-setting", setting: "stopSequences" }) + } + + const kiroStream = parseAwsEventStream(response.body) + + let finishReason: LanguageModelV2FinishReason = "unknown" + const usage: LanguageModelV2Usage = { + inputTokens: undefined, + outputTokens: undefined, + totalTokens: undefined, + } + let textId = crypto.randomUUID() + let reasoningId: string | null = null + let textStarted = false + let reasoningStarted = false + const toolCallIds: Map = new Map() // toolUseId -> toolName + let outputText = "" + + const responseHeaders = headersToRecord(response.headers) + + return { + stream: kiroStream.pipeThrough( + new TransformStream({ + start(controller) { + controller.enqueue({ type: "stream-start", warnings }) + }, + + transform(event, controller) { + switch (event.type) { + case "content": + if (!textStarted) { + textStarted = true + controller.enqueue({ + type: "text-start", + id: textId, + }) + } + controller.enqueue({ + type: "text-delta", + id: textId, + delta: event.content, + }) + outputText += event.content + break + + case "thinking_start": + reasoningId = crypto.randomUUID() + reasoningStarted = true + controller.enqueue({ + type: "reasoning-start", + id: reasoningId, + }) + break + + case "thinking": + if (reasoningId) { + controller.enqueue({ + type: "reasoning-delta", + id: reasoningId, + delta: event.thinking, + }) + outputText += event.thinking + } + break + + case "thinking_stop": + if (reasoningId) { + controller.enqueue({ + type: 
"reasoning-end", + id: reasoningId, + }) + reasoningId = null + reasoningStarted = false + } + break + + case "tool_start": + toolCallIds.set(event.toolUseId, event.name) + controller.enqueue({ + type: "tool-input-start", + id: event.toolUseId, + toolName: event.name, + }) + break + + case "tool_input": + controller.enqueue({ + type: "tool-input-delta", + id: event.toolUseId, + delta: event.input, + }) + break + + case "tool_stop": + controller.enqueue({ + type: "tool-input-end", + id: event.toolUseId, + }) + const toolName = toolCallIds.get(event.toolUseId) + if (toolName) { + controller.enqueue({ + type: "tool-call", + toolCallId: event.toolUseId, + toolName, + input: typeof event.input === "string" ? event.input : JSON.stringify(event.input), + }) + finishReason = "tool-calls" + } + break + + case "usage": + usage.outputTokens = event.outputTokens + if (event.inputTokens > 0) { + usage.inputTokens = event.inputTokens + usage.totalTokens = event.inputTokens + event.outputTokens + } + break + + case "done": + if (finishReason === "unknown") { + finishReason = "stop" + } + break + + case "error": + controller.enqueue({ + type: "error", + error: new Error(event.error), + }) + finishReason = "error" + break + } + }, + + flush(controller) { + // Estimate tokens from payload using tiktoken (kiro-gateway style) + if (!usage.inputTokens) { + usage.inputTokens = estimatePayloadTokens(payload) + } + if (!usage.outputTokens && outputText) { + usage.outputTokens = countTokens(outputText) + } + usage.totalTokens = (usage.inputTokens ?? 0) + (usage.outputTokens ?? 
0) + + // Close any open text part + if (textStarted) { + controller.enqueue({ + type: "text-end", + id: textId, + }) + } + + // Close any open reasoning part + if (reasoningStarted && reasoningId) { + controller.enqueue({ + type: "reasoning-end", + id: reasoningId, + }) + } + + controller.enqueue({ + type: "finish", + finishReason, + usage, + }) + }, + }), + ), + request: { body: payload }, + response: { headers: responseHeaders }, + } + } +} diff --git a/packages/opencode/src/provider/sdk/kiro/src/kiro-provider.ts b/packages/opencode/src/provider/sdk/kiro/src/kiro-provider.ts new file mode 100644 index 00000000000..546f68072f2 --- /dev/null +++ b/packages/opencode/src/provider/sdk/kiro/src/kiro-provider.ts @@ -0,0 +1,36 @@ +import type { LanguageModelV2 } from "@ai-sdk/provider" +import type { FetchFunction } from "@ai-sdk/provider-utils" +import { KiroLanguageModel } from "./kiro-language-model" + +export interface KiroProviderSettings { + apiKey?: string + baseURL?: string + region?: string + headers?: Record + fetch?: FetchFunction +} + +export interface KiroProvider { + (modelId: string): LanguageModelV2 + languageModel(modelId: string): LanguageModelV2 +} + +export function createKiro(options: KiroProviderSettings = {}): KiroProvider { + const region = options.region ?? "us-east-1" + const baseURL = options.baseURL ?? 
`https://q.${region}.amazonaws.com` + + const createLanguageModel = (modelId: string): LanguageModelV2 => { + return new KiroLanguageModel(modelId, { + provider: "kiro", + apiKey: options.apiKey, + baseURL, + headers: options.headers, + fetch: options.fetch, + }) + } + + const provider = (modelId: string): LanguageModelV2 => createLanguageModel(modelId) + provider.languageModel = createLanguageModel + + return provider as KiroProvider +} diff --git a/packages/opencode/src/provider/sdk/kiro/src/model-resolver.ts b/packages/opencode/src/provider/sdk/kiro/src/model-resolver.ts new file mode 100644 index 00000000000..8f3573d00ee --- /dev/null +++ b/packages/opencode/src/provider/sdk/kiro/src/model-resolver.ts @@ -0,0 +1,15 @@ +const HIDDEN_MODELS: Record = { + "claude-3.7-sonnet": "CLAUDE_3_7_SONNET_20250219_V1_0", + "claude-3-7-sonnet": "CLAUDE_3_7_SONNET_20250219_V1_0", +} + +export function normalizeModelName(name: string): string { + // Convert model names like claude-sonnet-4-5 → claude-sonnet-4.5 + // or claude-haiku-4-5-20251001 → claude-haiku-4.5 + const normalized = name + .toLowerCase() + .replace(/-(\d+)-(\d{1,2})(?:-(?:\d{8}|latest))?$/, "-$1.$2") // 4-5 → 4.5 + .replace(/-(\d+)(?:-\d{8})?$/, "-$1") // 4-20250514 → 4 + + return HIDDEN_MODELS[normalized] ?? 
normalized +} diff --git a/packages/opencode/src/provider/sdk/kiro/src/streaming.ts b/packages/opencode/src/provider/sdk/kiro/src/streaming.ts new file mode 100644 index 00000000000..4669189bf18 --- /dev/null +++ b/packages/opencode/src/provider/sdk/kiro/src/streaming.ts @@ -0,0 +1,631 @@ +export type KiroEventType = + | "content" + | "tool_start" + | "tool_input" + | "tool_stop" + | "thinking_start" + | "thinking" + | "thinking_stop" + | "usage" + | "context_usage" + | "done" + | "error" + +export interface KiroContentEvent { + type: "content" + content: string +} + +export interface KiroToolStartEvent { + type: "tool_start" + name: string + toolUseId: string +} + +export interface KiroToolInputEvent { + type: "tool_input" + toolUseId: string + input: string +} + +export interface KiroToolStopEvent { + type: "tool_stop" + toolUseId: string + input: unknown +} + +export interface KiroThinkingStartEvent { + type: "thinking_start" +} + +export interface KiroThinkingEvent { + type: "thinking" + thinking: string +} + +export interface KiroThinkingStopEvent { + type: "thinking_stop" +} + +export interface KiroUsageEvent { + type: "usage" + inputTokens: number + outputTokens: number +} + +export interface KiroContextUsageEvent { + type: "context_usage" + percentage: number +} + +export interface KiroDoneEvent { + type: "done" +} + +export interface KiroErrorEvent { + type: "error" + error: string +} + +export type KiroEvent = + | KiroContentEvent + | KiroToolStartEvent + | KiroToolInputEvent + | KiroToolStopEvent + | KiroThinkingStartEvent + | KiroThinkingEvent + | KiroThinkingStopEvent + | KiroUsageEvent + | KiroContextUsageEvent + | KiroDoneEvent + | KiroErrorEvent + +// AWS Event Stream message header types +interface AwsEventStreamHeader { + name: string + type: number + value: string | number | ArrayBuffer +} + +function readUint32(view: DataView, offset: number): number { + return view.getUint32(offset, false) // big-endian +} + +function 
/**
 * Decodes one AWS event-stream message from a binary frame.
 *
 * Frame layout (all multi-byte integers big-endian):
 *   bytes 0-3   total message length
 *   bytes 4-7   headers block length
 *   bytes 8-11  prelude CRC (read position reserved; value not validated here)
 *   bytes 12..  headers block, then payload, then 4-byte message CRC
 *
 * Returns the decoded headers and a view over the payload bytes, or null when
 * the buffer is too short to contain a complete message. Neither CRC is
 * checked — corrupt frames are not detected here.
 */
function decodeAwsEventStreamMessage(buffer: ArrayBuffer): {
  headers: Record<string, string | number | ArrayBuffer>
  payload: Uint8Array
} | null {
  // 12-byte prelude + 4-byte message CRC is the minimum possible frame.
  if (buffer.byteLength < 16) return null

  const view = new DataView(buffer)

  // Read prelude
  const totalLength = readUint32(view, 0)
  const headersLength = readUint32(view, 4)
  // const preludeCrc = readUint32(view, 8)

  // Caller handed us a partial frame; signal "need more bytes".
  if (buffer.byteLength < totalLength) return null

  // Read headers: each header is [name-len u8][name][type u8][value].
  const headers: Record<string, string | number | ArrayBuffer> = {}
  let offset = 12
  const headersEnd = 12 + headersLength

  while (offset < headersEnd) {
    const nameLength = view.getUint8(offset)
    offset += 1
    const name = new TextDecoder().decode(new Uint8Array(buffer, offset, nameLength))
    offset += nameLength
    const headerType = view.getUint8(offset)
    offset += 1

    let value: string | number | ArrayBuffer
    switch (headerType) {
      case 0: // bool true (represented as number 1, not boolean)
        value = 1
        break
      case 1: // bool false (represented as number 0)
        value = 0
        break
      case 2: // byte
        value = view.getInt8(offset)
        offset += 1
        break
      case 3: // short
        value = view.getInt16(offset, false)
        offset += 2
        break
      case 4: // int
        value = view.getInt32(offset, false)
        offset += 4
        break
      case 5: // long
        // JavaScript doesn't handle 64-bit ints well, read as two 32-bit values
        // NOTE(review): loses precision above 2^53 — acceptable for timestamps/sizes here.
        const high = view.getInt32(offset, false)
        const low = view.getUint32(offset + 4, false)
        value = high * 0x100000000 + low
        offset += 8
        break
      case 6: // bytes — length-prefixed (u16) opaque blob, returned as ArrayBuffer
        const bytesLength = view.getUint16(offset, false)
        offset += 2
        value = buffer.slice(offset, offset + bytesLength)
        offset += bytesLength
        break
      case 7: // string — length-prefixed (u16) UTF-8
        const stringLength = view.getUint16(offset, false)
        offset += 2
        value = new TextDecoder().decode(new Uint8Array(buffer, offset, stringLength))
        offset += stringLength
        break
      case 8: // timestamp — 64-bit, same two-halves decoding as "long"
        const timestampHigh = view.getInt32(offset, false)
        const timestampLow = view.getUint32(offset + 4, false)
        value = timestampHigh * 0x100000000 + timestampLow
        offset += 8
        break
      case 9: // uuid — 16 raw bytes, returned as ArrayBuffer
        value = buffer.slice(offset, offset + 16)
        offset += 16
        break
      default:
        throw new Error(`Unknown header type: ${headerType}`)
    }

    headers[name] = value
  }

  // Read payload
  const payloadLength = totalLength - headersLength - 16 // 12 bytes prelude + 4 bytes message CRC
  // View (not copy) into the original buffer, starting right after the headers.
  const payload = new Uint8Array(buffer, headersEnd, payloadLength)

  return { headers, payload }
}
while (true) { + if (!inFakeThinking) { + // Look for tag + const thinkingStart = contentBuffer.indexOf("") + if (thinkingStart === -1) { + // No thinking tag found, output all content except last 10 chars (in case tag is split) + if (contentBuffer.length > 10) { + const safeContent = contentBuffer.slice(0, -10) + contentBuffer = contentBuffer.slice(-10) + if (safeContent) { + controller.enqueue({ type: "content", content: safeContent }) + } + } + break + } + + // Output content before + if (thinkingStart > 0) { + controller.enqueue({ type: "content", content: contentBuffer.slice(0, thinkingStart) }) + } + + // Enter thinking mode + inFakeThinking = true + controller.enqueue({ type: "thinking_start" }) + contentBuffer = contentBuffer.slice(thinkingStart + "".length) + } else { + // Look for tag + const thinkingEnd = contentBuffer.indexOf("") + if (thinkingEnd === -1) { + // No end tag found, output thinking content except last 11 chars + if (contentBuffer.length > 11) { + const safeThinking = contentBuffer.slice(0, -11) + contentBuffer = contentBuffer.slice(-11) + if (safeThinking) { + controller.enqueue({ type: "thinking", thinking: safeThinking }) + } + } + break + } + + // Output thinking content before + if (thinkingEnd > 0) { + controller.enqueue({ type: "thinking", thinking: contentBuffer.slice(0, thinkingEnd) }) + } + + // Exit thinking mode + inFakeThinking = false + controller.enqueue({ type: "thinking_stop" }) + contentBuffer = contentBuffer.slice(thinkingEnd + "".length) + } + } + } + + // Flush remaining content buffer + const flushContentBuffer = (controller: TransformStreamDefaultController) => { + if (contentBuffer.length > 0) { + if (inFakeThinking) { + controller.enqueue({ type: "thinking", thinking: contentBuffer }) + controller.enqueue({ type: "thinking_stop" }) + inFakeThinking = false + } else { + controller.enqueue({ type: "content", content: contentBuffer }) + } + contentBuffer = "" + } + } + + return stream.pipeThrough( + new 
TransformStream({ + async transform(chunk, controller) { + // Append chunk to buffer + const newBuffer = new Uint8Array(buffer.length + chunk.length) + newBuffer.set(buffer) + newBuffer.set(chunk, buffer.length) + buffer = newBuffer + + // Try to parse complete messages + while (buffer.length >= 12) { + const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength) + const totalLength = readUint32(view, 0) + + if (buffer.length < totalLength) break + + const messageBuffer = buffer.slice(0, totalLength).buffer + buffer = buffer.slice(totalLength) + + const message = decodeAwsEventStreamMessage(messageBuffer) + if (!message) { + continue + } + + // Check for exception + const exceptionType = message.headers[":exception-type"] + if (exceptionType) { + const payload = new TextDecoder().decode(message.payload) + try { + const errorJson = JSON.parse(payload) + controller.enqueue({ + type: "error", + error: errorJson.message || errorJson.Message || payload, + }) + } catch { + controller.enqueue({ + type: "error", + error: payload, + }) + } + continue + } + + // Parse JSON payload + if (message.payload.length === 0) continue + + try { + const payloadText = new TextDecoder().decode(message.payload) + const data = JSON.parse(payloadText) as KiroRawEvent + + // Handle simple format: {"content": "..."}, {"name": "...", "toolUseId": "..."}, etc. 
+ const simple = data as KiroSimpleEvent + if (simple.content !== undefined) { + // Process content with Fake Reasoning tags + processContentWithThinkingTags(simple.content, controller) + continue + } + + if (simple.thinking !== undefined) { + if (!inThinking) { + inThinking = true + controller.enqueue({ type: "thinking_start" }) + } + controller.enqueue({ + type: "thinking", + thinking: simple.thinking, + }) + continue + } + + if (simple.name !== undefined && simple.toolUseId !== undefined) { + // Check if this is a new tool call or continuation of existing one + const isNewToolCall = + !currentToolCall || currentToolCall.toolUseId !== simple.toolUseId + + if (isNewToolCall && simple.input === undefined && simple.stop !== true) { + // New tool call start (no input yet) + currentToolCall = { + toolUseId: simple.toolUseId, + input: "", + } + controller.enqueue({ + type: "tool_start", + name: simple.name, + toolUseId: simple.toolUseId, + }) + continue + } + + // Ensure currentToolCall exists for input/stop processing + if (!currentToolCall || currentToolCall.toolUseId !== simple.toolUseId) { + currentToolCall = { + toolUseId: simple.toolUseId, + input: "", + } + controller.enqueue({ + type: "tool_start", + name: simple.name, + toolUseId: simple.toolUseId, + }) + } + + // Handle input if present + if (simple.input !== undefined) { + const inputDelta = + typeof simple.input === "object" + ? 
JSON.stringify(simple.input) + : String(simple.input) + currentToolCall.input += inputDelta + controller.enqueue({ + type: "tool_input", + toolUseId: currentToolCall.toolUseId, + input: inputDelta, + }) + } + + // Handle stop if present + if (simple.stop === true) { + let parsedInput: unknown = currentToolCall.input + try { + parsedInput = JSON.parse(currentToolCall.input) + } catch { + // Keep as string + } + controller.enqueue({ + type: "tool_stop", + toolUseId: currentToolCall.toolUseId, + input: parsedInput, + }) + currentToolCall = null + } + continue + } + + if (simple.input !== undefined && currentToolCall) { + // Tool input delta (without name/toolUseId) - input can be string or object + const inputDelta = + typeof simple.input === "object" + ? JSON.stringify(simple.input) + : String(simple.input) + currentToolCall.input += inputDelta + controller.enqueue({ + type: "tool_input", + toolUseId: currentToolCall.toolUseId, + input: inputDelta, + }) + continue + } + + if (simple.stop === true && currentToolCall) { + // Tool stop (without name/toolUseId) + let parsedInput: unknown = currentToolCall.input + try { + parsedInput = JSON.parse(currentToolCall.input) + } catch { + // Keep as string + } + controller.enqueue({ + type: "tool_stop", + toolUseId: currentToolCall.toolUseId, + input: parsedInput, + }) + currentToolCall = null + continue + } + + if (simple.usage !== undefined) { + controller.enqueue({ + type: "usage", + inputTokens: 0, + outputTokens: simple.usage, + }) + continue + } + + if (simple.contextUsagePercentage !== undefined) { + controller.enqueue({ + type: "context_usage", + percentage: simple.contextUsagePercentage, + }) + continue + } + + if (simple.stopReason !== undefined) { + if (inThinking) { + inThinking = false + controller.enqueue({ type: "thinking_stop" }) + } + controller.enqueue({ type: "done" }) + continue + } + + // Handle nested format: {"assistantResponseEvent": {...}} + const nested = data as KiroNestedEvent + if 
(nested.assistantResponseEvent) { + const event = nested.assistantResponseEvent + + // Content block start + if (event.contentBlockStartEvent?.start) { + const start = event.contentBlockStartEvent.start + + if (start.reasoningContent !== undefined) { + inThinking = true + controller.enqueue({ type: "thinking_start" }) + } else if (start.toolUse) { + currentToolCall = { + toolUseId: start.toolUse.toolUseId, + input: "", + } + controller.enqueue({ + type: "tool_start", + name: start.toolUse.name, + toolUseId: start.toolUse.toolUseId, + }) + } + } + + // Content block delta + if (event.contentBlockDeltaEvent?.delta) { + const delta = event.contentBlockDeltaEvent.delta + + if (delta.reasoningContentBlockDelta?.thinking) { + controller.enqueue({ + type: "thinking", + thinking: delta.reasoningContentBlockDelta.thinking, + }) + } else if (delta.text !== undefined) { + controller.enqueue({ + type: "content", + content: delta.text, + }) + } else if (delta.toolUse?.input !== undefined) { + if (currentToolCall) { + currentToolCall.input += delta.toolUse.input + controller.enqueue({ + type: "tool_input", + toolUseId: currentToolCall.toolUseId, + input: delta.toolUse.input, + }) + } + } + } + + // Content block stop + if (event.contentBlockStopEvent !== undefined) { + if (inThinking) { + inThinking = false + controller.enqueue({ type: "thinking_stop" }) + } else if (currentToolCall) { + let parsedInput: unknown = currentToolCall.input + try { + parsedInput = JSON.parse(currentToolCall.input) + } catch { + // Keep as string if not valid JSON + } + controller.enqueue({ + type: "tool_stop", + toolUseId: currentToolCall.toolUseId, + input: parsedInput, + }) + currentToolCall = null + } + } + + // Usage metrics + if (event.usageMetricsEvent) { + controller.enqueue({ + type: "usage", + inputTokens: event.usageMetricsEvent.inputTokens ?? 0, + outputTokens: event.usageMetricsEvent.outputTokens ?? 
0, + }) + } + + // Message stop + if (event.messageStopEvent) { + controller.enqueue({ type: "done" }) + } + } + } catch (e) { + // Skip unparseable payloads + } + } + }, + + flush(controller) { + // Flush any remaining content buffer (Fake Reasoning) + flushContentBuffer(controller) + + // Handle any remaining incomplete state + if (inThinking) { + controller.enqueue({ type: "thinking_stop" }) + } + if (currentToolCall) { + let parsedInput: unknown = currentToolCall.input + try { + parsedInput = JSON.parse(currentToolCall.input) + } catch { + // Keep as string + } + controller.enqueue({ + type: "tool_stop", + toolUseId: currentToolCall.toolUseId, + input: parsedInput, + }) + } + controller.enqueue({ type: "done" }) + }, + }), + ) +} diff --git a/packages/opencode/src/provider/sdk/kiro/src/tokenizer.ts b/packages/opencode/src/provider/sdk/kiro/src/tokenizer.ts new file mode 100644 index 00000000000..65a89b89f2d --- /dev/null +++ b/packages/opencode/src/provider/sdk/kiro/src/tokenizer.ts @@ -0,0 +1,107 @@ +import { encodingForModel } from "js-tiktoken" +import type { KiroPayload } from "./converters" + +let encoding: ReturnType | undefined + +function getEncoding() { + if (!encoding) encoding = encodingForModel("gpt-4o") + return encoding +} + +// Raw GPT-4o token count — no correction factor applied. +// Correction is applied at the payload level in estimatePayloadTokens. +export function countTokens(text: string) { + if (!text) return 0 + return getEncoding().encode(text).length +} + +// Empirical constants derived from binary-search boundary tests against the Kiro API. +// The server rejected payloads at these exact byte thresholds: +// 0 tools → 812,219 bytes 10 tools → 808,395 bytes +// Server limit ≈ 200K tokens (server-side tokenizer, not publicly available). 
+// +// Tool overhead (server-side) is non-linear: +// 1 tool: 531 server tokens for 54 raw tokens (9.8x) +// 10 tools: 2,443 server tokens for 540 raw tokens (4.5x) +// Model: TOOL_FIXED_OVERHEAD + TOOL_PER_OVERHEAD * rawToolTokens +const TOOL_FIXED_OVERHEAD = 350 +const TOOL_PER_MULTIPLIER = 3.5 + +// The server tokenizer differs from gpt-4o by content type: +// prose/JSON: ~1.0x code: ~1.2-1.3x mixed: ~1.1x +// We use 1.10 as a conservative baseline (most payloads are mixed code+prose) +// plus a 5% safety margin to avoid hitting the server limit. +const TEXT_CORRECTION = 1.10 +const SAFETY_MARGIN = 1.05 + +// Per-message framing overhead (role markers, separators, JSON structure). +// Anthropic adds ~4 tokens per message for role/turn markers. +const MSG_OVERHEAD = 4 + +export function estimatePayloadTokens(payload: KiroPayload) { + const state = payload.conversationState + let tokens = 0 + let toolDefTokens = 0 + + // Current message + const msg = state.currentMessage.userInputMessage + tokens += countTokens(msg.content) + + // Tool definitions — counted separately with higher multiplier + if (msg.userInputMessageContext?.tools) { + for (const tool of msg.userInputMessageContext.tools) { + toolDefTokens += countTokens(tool.toolSpecification.name) + toolDefTokens += countTokens(tool.toolSpecification.description) + toolDefTokens += countTokens(JSON.stringify(tool.toolSpecification.inputSchema)) + } + } + + // Tool results + if (msg.userInputMessageContext?.toolResults) { + for (const result of msg.userInputMessageContext.toolResults) { + tokens += MSG_OVERHEAD + for (const c of result.content) tokens += countTokens(c.text) + } + } + + // History + if (state.history) { + for (const item of state.history) { + tokens += MSG_OVERHEAD + if (item.userInputMessage) { + tokens += countTokens(item.userInputMessage.content) + if (item.userInputMessage.userInputMessageContext?.toolResults) { + for (const r of item.userInputMessage.userInputMessageContext.toolResults) { + 
tokens += MSG_OVERHEAD + for (const c of r.content) tokens += countTokens(c.text) + } + } + } + if (item.assistantResponseMessage) { + tokens += countTokens(item.assistantResponseMessage.content) + if (item.assistantResponseMessage.toolUses) { + for (const tu of item.assistantResponseMessage.toolUses) { + tokens += MSG_OVERHEAD + tokens += countTokens(tu.name) + tokens += countTokens(typeof tu.input === "string" ? tu.input : JSON.stringify(tu.input)) + } + } + if (item.assistantResponseMessage.reasoning?.thinking) { + tokens += countTokens(item.assistantResponseMessage.reasoning.thinking) + } + } + } + } + + // Apply text correction (gpt-4o → server tokenizer approximation) + tokens = Math.round(tokens * TEXT_CORRECTION) + + // Add tool definition overhead (empirical: fixed + multiplier * raw tokens) + if (toolDefTokens > 0) { + tokens += TOOL_FIXED_OVERHEAD + Math.round(toolDefTokens * TOOL_PER_MULTIPLIER) + } + + // Service/framing tokens + safety margin + tokens += 3 + return Math.round(tokens * SAFETY_MARGIN) +} diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 05b9f031fe6..e43288f78ab 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -707,6 +707,22 @@ export namespace ProviderTransform { return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }])) } return {} + + case "@ai-sdk/kiro": + return { + high: { + thinking: { + type: "enabled", + budgetTokens: 16000, + }, + }, + max: { + thinking: { + type: "enabled", + budgetTokens: 31999, + }, + }, + } } return {} } diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index f1335f6f21a..d66fe8338cb 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -295,9 +295,9 @@ export namespace MessageV2 { .object({ status: z.literal("completed"), input: 
z.record(z.string(), z.any()), - output: z.string(), - title: z.string(), - metadata: z.record(z.string(), z.any()), + output: z.string().default(""), + title: z.string().default(""), + metadata: z.record(z.string(), z.any()).default({}), time: z.object({ start: z.number(), end: z.number(), @@ -697,8 +697,10 @@ export namespace MessageV2 { if (part.type === "tool") { toolNames.add(part.tool) if (part.state.status === "completed") { - const outputText = part.state.time.compacted ? "[Old tool result content cleared]" : part.state.output - const attachments = part.state.time.compacted || options?.stripMedia ? [] : (part.state.attachments ?? []) + const outputText = part.state.time.compacted + ? "[Tool output removed during compaction. Re-run this tool if you need the full result.]" + : part.state.output + const attachments = part.state.time.compacted ? [] : (part.state.attachments ?? []) // For providers that don't support media in tool results, extract media files // (images, PDFs) to be sent as a separate user message diff --git a/packages/opencode/test/plugin/kiro.test.ts b/packages/opencode/test/plugin/kiro.test.ts new file mode 100644 index 00000000000..16504d4fd0f --- /dev/null +++ b/packages/opencode/test/plugin/kiro.test.ts @@ -0,0 +1,40 @@ +import { describe, expect, test } from "bun:test" +import { getKiroDbPath } from "../../src/plugin/kiro" + +describe("plugin.kiro", () => { + describe("getKiroDbPath", () => { + test("returns correct path for macOS", () => { + // Note: This test will only pass on macOS + if (process.platform === "darwin") { + const path = getKiroDbPath() + expect(path).toContain("Library/Application Support/kiro-cli/data.sqlite3") + expect(path).toMatch(/^\/Users\//) + } + }) + + test("returns correct path for Windows", () => { + // Note: This test will only pass on Windows + if (process.platform === "win32") { + const path = getKiroDbPath() + expect(path).toContain("kiro-cli/data.sqlite3") + expect(path).toContain("AppData") + } + }) + + 
test("returns correct path for Linux", () => { + // Note: This test will only pass on Linux + if (process.platform === "linux") { + const path = getKiroDbPath() + expect(path).toContain(".local/share/kiro-cli/data.sqlite3") + } + }) + + test("returns a non-empty string", () => { + const path = getKiroDbPath() + expect(typeof path).toBe("string") + expect(path.length).toBeGreaterThan(0) + expect(path).toContain("kiro-cli") + expect(path).toContain("data.sqlite3") + }) + }) +}) diff --git a/packages/opencode/test/provider/kiro-compaction.test.ts b/packages/opencode/test/provider/kiro-compaction.test.ts new file mode 100644 index 00000000000..26a000d0050 --- /dev/null +++ b/packages/opencode/test/provider/kiro-compaction.test.ts @@ -0,0 +1,237 @@ +import { describe, expect, test } from "bun:test" +import { convertToKiroPayload } from "../../src/provider/sdk/kiro/src/converters" + +describe("kiro compaction: tool calls in history + no tools", () => { + const modelId = "claude-opus-4.6" + + function buildCompactionPrompt() { + return [ + { role: "system" as const, content: "You are a helpful assistant" }, + { role: "user" as const, content: [{ type: "text" as const, text: "List files in current directory" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "I'll list the files for you." 
}, + { type: "tool-call" as const, toolCallId: "call_001", toolName: "bash", input: { command: "ls" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_001", + toolName: "bash", + output: { type: "text" as const, value: "file1.txt\nfile2.txt\nREADME.md" }, + }, + ], + }, + { + role: "assistant" as const, + content: [{ type: "text" as const, text: "Found 3 files: file1.txt, file2.txt, README.md" }], + }, + { role: "user" as const, content: [{ type: "text" as const, text: "Now read README.md" }] }, + { + role: "assistant" as const, + content: [ + { type: "tool-call" as const, toolCallId: "call_002", toolName: "read", input: { path: "README.md" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_002", + toolName: "read", + output: { type: "text" as const, value: "# My Project\nThis is a readme." }, + }, + ], + }, + { + role: "assistant" as const, + content: [{ type: "text" as const, text: "The README contains project documentation." }], + }, + { role: "user" as const, content: [{ type: "text" as const, text: "What did we do so far?" 
}] }, + { + role: "user" as const, + content: [ + { + type: "text" as const, + text: "Provide a detailed prompt for continuing our conversation above.", + }, + ], + }, + ] + } + + function assertValidHistory(history: any[]) { + for (const item of history) { + expect(!!item.userInputMessage || !!item.assistantResponseMessage).toBe(true) + expect(item.assistantResponseMessage?.toolUses).toBeUndefined() + expect(item.userInputMessage?.userInputMessageContext?.toolResults).toBeUndefined() + } + for (let i = 1; i < history.length; i++) { + const prev = history[i - 1] + const curr = history[i] + const prevIsUserOnly = !!prev.userInputMessage && !prev.assistantResponseMessage + const currIsUserOnly = !!curr.userInputMessage && !curr.assistantResponseMessage + const prevIsAssistantOnly = !!prev.assistantResponseMessage && !prev.userInputMessage + const currIsAssistantOnly = !!curr.assistantResponseMessage && !curr.userInputMessage + if (prevIsUserOnly && currIsUserOnly) throw new Error(`Consecutive user-only at ${i - 1},${i}`) + if (prevIsAssistantOnly && currIsAssistantOnly) throw new Error(`Consecutive assistant-only at ${i - 1},${i}`) + } + } + + test("payload has non-empty currentMessage content", () => { + const result = convertToKiroPayload(buildCompactionPrompt() as any, modelId) + const content = result.conversationState.currentMessage.userInputMessage.content + expect(content).not.toBe(".") + expect(content.length).toBeGreaterThan(10) + }) + + test("history is valid after stripping tools", () => { + const result = convertToKiroPayload(buildCompactionPrompt() as any, modelId) + assertValidHistory(result.conversationState.history) + }) + + test("no (empty) assistant content remains after stripping", () => { + const result = convertToKiroPayload(buildCompactionPrompt() as any, modelId) + for (const item of result.conversationState.history) { + if (item.assistantResponseMessage) { + expect(item.assistantResponseMessage.content).not.toBe("(empty)") + } + } + }) + + 
test("currentMessage has no toolResults when no tools defined", () => { + const result = convertToKiroPayload(buildCompactionPrompt() as any, modelId) + expect( + result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.toolResults, + ).toBeUndefined() + }) + + test("tool-call-only assistant turn is properly handled", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Do something" }] }, + { + role: "assistant" as const, + content: [ + { type: "tool-call" as const, toolCallId: "call_x", toolName: "bash", input: { command: "echo hi" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_x", + toolName: "bash", + output: { type: "text" as const, value: "hi" }, + }, + ], + }, + { + role: "assistant" as const, + content: [{ type: "text" as const, text: "Done!" }], + }, + { role: "user" as const, content: [{ type: "text" as const, text: "Summarize" }] }, + ] + + const result = convertToKiroPayload(prompt as any, modelId) + assertValidHistory(result.conversationState.history) + }) + + test("multiple consecutive tool calls are handled", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Set up the project" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "I'll set up the project." 
}, + { type: "tool-call" as const, toolCallId: "call_a", toolName: "bash", input: { command: "mkdir src" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_a", + toolName: "bash", + output: { type: "text" as const, value: "" }, + }, + ], + }, + { + role: "assistant" as const, + content: [ + { + type: "tool-call" as const, + toolCallId: "call_b", + toolName: "bash", + input: { command: "touch src/index.ts" }, + }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_b", + toolName: "bash", + output: { type: "text" as const, value: "" }, + }, + ], + }, + { + role: "assistant" as const, + content: [{ type: "text" as const, text: "Project structure created." }], + }, + { role: "user" as const, content: [{ type: "text" as const, text: "Compact this conversation" }] }, + ] + + const result = convertToKiroPayload(prompt as any, modelId) + assertValidHistory(result.conversationState.history) + expect(result.conversationState.currentMessage.userInputMessage.content).not.toBe(".") + }) + + test("compaction with minimal user content before tool-heavy history", () => { + const prompt = [ + { role: "system" as const, content: "System prompt" }, + { role: "user" as const, content: [{ type: "text" as const, text: "." }] }, + { + role: "assistant" as const, + content: [ + { type: "tool-call" as const, toolCallId: "call_z", toolName: "bash", input: { command: "ls" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_z", + toolName: "bash", + output: { type: "text" as const, value: "output" }, + }, + ], + }, + { + role: "assistant" as const, + content: [{ type: "text" as const, text: "Here are the results." }], + }, + { + role: "user" as const, + content: [ + { type: "text" as const, text: "Provide a detailed prompt for continuing our conversation above." 
}, + ], + }, + ] + + const result = convertToKiroPayload(prompt as any, modelId) + expect(result.conversationState.currentMessage.userInputMessage.content).toContain("Provide a detailed prompt") + assertValidHistory(result.conversationState.history) + }) +}) diff --git a/packages/opencode/test/provider/kiro-provider.test.ts b/packages/opencode/test/provider/kiro-provider.test.ts new file mode 100644 index 00000000000..68a319d1d4e --- /dev/null +++ b/packages/opencode/test/provider/kiro-provider.test.ts @@ -0,0 +1,369 @@ +import { test, expect, mock } from "bun:test" +import path from "path" + +// === Mocks === +mock.module("../../src/bun/index", () => ({ + BunProc: { + install: async (pkg: string, _version?: string) => { + const lastAtIndex = pkg.lastIndexOf("@") + return lastAtIndex > 0 ? pkg.substring(0, lastAtIndex) : pkg + }, + run: async () => { + throw new Error("BunProc.run should not be called in tests") + }, + which: () => process.execPath, + InstallFailedError: class extends Error {}, + }, +})) + +const mockPlugin = async () => ({}) +mock.module("opencode-copilot-auth", () => ({ default: mockPlugin })) +mock.module("opencode-anthropic-auth", () => ({ default: mockPlugin })) +mock.module("@gitlab/opencode-gitlab-auth", () => ({ default: mockPlugin, gitlabAuthPlugin: mockPlugin })) + +const { tmpdir } = await import("../fixture/fixture") +const { Instance } = await import("../../src/project/instance") +const { Provider } = await import("../../src/provider/provider") +const { ProviderID, ModelID } = await import("../../src/provider/schema") + +test("Kiro: provider is registered in database with correct models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = 
providers[ProviderID.kiro] + if (kiro) { + expect(kiro.name).toBe("Kiro (AWS)") + expect(kiro.id).toBe(ProviderID.kiro) + } + }, + }) +}) + +test("Kiro: models have correct capabilities", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = providers[ProviderID.kiro] + + if (kiro && Object.keys(kiro.models).length > 0) { + const sonnet = kiro.models[ModelID.make("claude-sonnet-4-5")] + if (sonnet) { + expect(sonnet.capabilities.toolcall).toBe(true) + expect(sonnet.capabilities.reasoning).toBe(true) + expect(sonnet.capabilities.attachment).toBe(true) + expect(sonnet.capabilities.input.text).toBe(true) + expect(sonnet.capabilities.input.image).toBe(true) + expect(sonnet.capabilities.input.pdf).toBe(true) + expect(sonnet.limit.context).toBe(200000) + expect(sonnet.cost.input).toBe(0) + expect(sonnet.cost.output).toBe(0) + } + } + }, + }) +}) + +test("Kiro: models have correct variants for thinking mode", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = providers[ProviderID.kiro] + + if (kiro && Object.keys(kiro.models).length > 0) { + const sonnet = kiro.models[ModelID.make("claude-sonnet-4-5")] + if (sonnet && sonnet.variants) { + expect(sonnet.variants.high).toBeDefined() + expect(sonnet.variants.max).toBeDefined() + expect(sonnet.variants.high.thinking?.type).toBe("enabled") + expect(sonnet.variants.high.thinking?.budgetTokens).toBe(16000) + 
expect(sonnet.variants.max.thinking?.budgetTokens).toBe(31999) + } + } + }, + }) +}) + +test("Kiro: provider uses correct npm package", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = providers[ProviderID.kiro] + + if (kiro && Object.keys(kiro.models).length > 0) { + const model = Object.values(kiro.models)[0] + expect(model.api.npm).toBe("@ai-sdk/kiro") + expect(model.api.url).toContain("codewhisperer") + expect(model.api.url).toContain("amazonaws.com") + } + }, + }) +}) + +test("Kiro: provider behavior depends on auth state", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = providers[ProviderID.kiro] + + if (kiro) { + expect(kiro.id).toBe(ProviderID.kiro) + expect(kiro.name).toBe("Kiro (AWS)") + expect(typeof Object.keys(kiro.models).length).toBe("number") + } + }, + }) +}) + +test("Kiro: provider can be configured via opencode.json", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + kiro: { + options: { + headers: { + "X-Custom-Header": "test-value", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = providers[ProviderID.kiro] + + if (kiro) { + expect(kiro.options?.headers?.["X-Custom-Header"]).toBe("test-value") + } + 
}, + }) +}) + +test("Kiro: parseModel splits variant from model ID correctly", async () => { + const parsed = Provider.parseModel("kiro/claude-opus-4-6/high") + expect(parsed.providerID).toBe(ProviderID.kiro) + expect(parsed.modelID).toBe(ModelID.make("claude-opus-4-6/high")) + + const parsedMax = Provider.parseModel("kiro/claude-opus-4-6/max") + expect(parsedMax.providerID).toBe(ProviderID.kiro) + expect(parsedMax.modelID).toBe(ModelID.make("claude-opus-4-6/max")) + + const parsedBase = Provider.parseModel("kiro/claude-opus-4-6") + expect(parsedBase.providerID).toBe(ProviderID.kiro) + expect(parsedBase.modelID).toBe(ModelID.make("claude-opus-4-6")) +}) + +test("Kiro: all reasoning models have high and max variants", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = providers[ProviderID.kiro] + if (!kiro || Object.keys(kiro.models).length === 0) return + + for (const [, model] of Object.entries(kiro.models)) { + if (!model.capabilities.reasoning) { + expect(Object.keys(model.variants ?? 
{})).toEqual([]) + continue + } + expect(model.variants).toBeDefined() + expect(model.variants!["high"]).toBeDefined() + expect(model.variants!["max"]).toBeDefined() + expect(model.variants!["high"].thinking).toBeDefined() + expect(model.variants!["max"].thinking).toBeDefined() + } + }, + }) +}) + +test("Kiro: ProviderTransform.variants returns correct thinking config", async () => { + const { ProviderTransform } = await import("../../src/provider/transform") + + const kiroModel = { + id: "claude-sonnet-4-5", + providerID: "kiro", + api: { + id: "claude-sonnet-4-5", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + name: "Claude Sonnet 4.5", + capabilities: { + temperature: true, + reasoning: true, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: true, + }, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 64000 }, + status: "active", + options: {}, + headers: {}, + release_date: "2025-09-29", + } as any + + const variants = ProviderTransform.variants(kiroModel) + + expect(Object.keys(variants)).toEqual(["high", "max"]) + expect(variants.high).toEqual({ + thinking: { + type: "enabled", + budgetTokens: 16000, + }, + }) + expect(variants.max).toEqual({ + thinking: { + type: "enabled", + budgetTokens: 31999, + }, + }) +}) + +test("Kiro: non-reasoning models return empty variants", async () => { + const { ProviderTransform } = await import("../../src/provider/transform") + + const kiroModel = { + id: "claude-haiku-4-5", + providerID: "kiro", + api: { + id: "claude-haiku-4-5", + url: "https://codewhisperer.us-east-1.amazonaws.com", + npm: "@ai-sdk/kiro", + }, + name: "Claude Haiku 4.5", + capabilities: { + temperature: true, + reasoning: false, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: 
true, video: false, pdf: true }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: false, + }, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: 200000, output: 8192 }, + status: "active", + options: {}, + headers: {}, + release_date: "2025-10-01", + } as any + + const variants = ProviderTransform.variants(kiroModel) + + expect(variants).toEqual({}) +}) + +test("Kiro: parseModel with variant produces modelID that includes variant suffix", () => { + const parsed = Provider.parseModel("kiro/claude-opus-4-6/high") + expect(parsed.modelID).toBe(ModelID.make("claude-opus-4-6/high")) + expect(parsed.modelID).not.toBe(ModelID.make("claude-opus-4-6")) +}) + +test("Kiro: getModel resolves hyphen-joined variant modelID to base model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const kiro = providers[ProviderID.kiro] + if (!kiro || Object.keys(kiro.models).length === 0) return + + const base = await Provider.getModel(ProviderID.kiro, ModelID.make("claude-opus-4-6")) + expect(base.id).toBe(ModelID.make("claude-opus-4-6")) + + const withHigh = await Provider.getModel(ProviderID.kiro, ModelID.make("claude-opus-4-6-high")) + expect(withHigh.id).toBe(ModelID.make("claude-opus-4-6")) + + const withMax = await Provider.getModel(ProviderID.kiro, ModelID.make("claude-opus-4-6-max")) + expect(withMax.id).toBe(ModelID.make("claude-opus-4-6")) + + const sonnetHigh = await Provider.getModel(ProviderID.kiro, ModelID.make("claude-sonnet-4-5-high")) + expect(sonnetHigh.id).toBe(ModelID.make("claude-sonnet-4-5")) + }, + }) +}) diff --git a/packages/opencode/test/provider/kiro-tool-pairing.test.ts 
b/packages/opencode/test/provider/kiro-tool-pairing.test.ts new file mode 100644 index 00000000000..65b09178aab --- /dev/null +++ b/packages/opencode/test/provider/kiro-tool-pairing.test.ts @@ -0,0 +1,468 @@ +import { describe, expect, test } from "bun:test" +import { convertToKiroPayload } from "../../src/provider/sdk/kiro/src/converters" + +/** + * Validates that every toolUse in history has a matching toolResult and vice versa. + * This is what Kiro API enforces — any mismatch causes 400 "Improperly formed request". + */ +function assertToolPairing(payload: ReturnType) { + const history = payload.conversationState.history + const current = payload.conversationState.currentMessage + + for (let i = 0; i < history.length; i++) { + const item = history[i] + const uses = item.assistantResponseMessage?.toolUses ?? [] + if (uses.length === 0) continue + + // Find the next user message (should be i+1 in alternating structure) + const next = history[i + 1] + const results = next?.userInputMessage?.userInputMessageContext?.toolResults ?? [] + + // Every toolUse must have a matching toolResult + for (const use of uses) { + const match = results.find((r) => r.toolUseId === use.toolUseId) + if (!match) { + // Check if it's the last history item and results are in currentMessage + if (i === history.length - 1) { + const currentResults = current.userInputMessage.userInputMessageContext?.toolResults ?? 
[] + const currentMatch = currentResults.find((r) => r.toolUseId === use.toolUseId) + if (!currentMatch) throw new Error(`toolUse ${use.toolUseId} at history[${i}] has no matching toolResult`) + } else { + throw new Error(`toolUse ${use.toolUseId} at history[${i}] has no matching toolResult in history[${i + 1}]`) + } + } + } + + // Every toolResult must have a matching toolUse + for (const result of results) { + const match = uses.find((u) => u.toolUseId === result.toolUseId) + if (!match) throw new Error(`toolResult ${result.toolUseId} at history[${i + 1}] has no matching toolUse`) + } + } + + // Validate currentMessage toolResults against last history assistant + const currentResults = current.userInputMessage.userInputMessageContext?.toolResults ?? [] + if (currentResults.length > 0) { + const last = history[history.length - 1] + const lastUses = last?.assistantResponseMessage?.toolUses ?? [] + for (const result of currentResults) { + const match = lastUses.find((u) => u.toolUseId === result.toolUseId) + if (!match) + throw new Error(`currentMessage toolResult ${result.toolUseId} has no matching toolUse in last history item`) + } + } +} + +const modelId = "claude-opus-4.6" +const tools = [ + { + type: "function" as const, + name: "bash", + description: "Run bash", + inputSchema: { + type: "object" as const, + properties: { command: { type: "string" as const } }, + required: ["command"], + }, + }, +] + +describe("kiro tool pairing validation", () => { + test("normal tool call round-trip is valid", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "Running ls" }, + { type: "tool-call" as const, toolCallId: "call_1", toolName: "bash", input: { command: "ls" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_1", + toolName: "bash", + output: { type: "text" as const, 
value: "file.txt" }, + }, + ], + }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Found file.txt" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "thanks" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("multiple tool calls in one assistant turn are paired", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Setup" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "Setting up" }, + { type: "tool-call" as const, toolCallId: "call_a", toolName: "bash", input: { command: "mkdir src" } }, + { type: "tool-call" as const, toolCallId: "call_b", toolName: "bash", input: { command: "mkdir test" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_a", + toolName: "bash", + output: { type: "text" as const, value: "" }, + }, + { + type: "tool-result" as const, + toolCallId: "call_b", + toolName: "bash", + output: { type: "text" as const, value: "" }, + }, + ], + }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Done" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "ok" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("tool call as last history item with result in currentMessage", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [{ type: "tool-call" as const, toolCallId: "call_1", toolName: "bash", input: { command: "ls" } }], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_1", + toolName: "bash", + output: { type: "text" as const, value: "file.txt" }, + }, + ], + }, + ] + const result = convertToKiroPayload(prompt as 
any, modelId, tools as any) + assertToolPairing(result) + }) + + test("partial toolResult mismatch — 2 uses but only 1 result", () => { + // Simulates compaction/pruning dropping one tool result + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Do two things" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "I'll do both" }, + { type: "tool-call" as const, toolCallId: "call_x", toolName: "bash", input: { command: "echo 1" } }, + { type: "tool-call" as const, toolCallId: "call_y", toolName: "bash", input: { command: "echo 2" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_x", + toolName: "bash", + output: { type: "text" as const, value: "1" }, + }, + { + type: "tool-result" as const, + toolCallId: "call_y", + toolName: "bash", + output: { type: "text" as const, value: "2" }, + }, + ], + }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Both done" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "next" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("consecutive assistant tool calls (merged) with interleaved results", () => { + // assistant(tool_call_1) -> tool(result_1) -> assistant(tool_call_2) -> tool(result_2) -> assistant(text) -> user + // The converter merges consecutive assistants, so tool_call_1 and tool_call_2 end up in one assistant. + // But result_1 flushes as a user message before tool_call_2's assistant. 
+ const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Do stuff" }] }, + { + role: "assistant" as const, + content: [{ type: "tool-call" as const, toolCallId: "call_1", toolName: "bash", input: { command: "ls" } }], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_1", + toolName: "bash", + output: { type: "text" as const, value: "a.txt" }, + }, + ], + }, + { + role: "assistant" as const, + content: [ + { type: "tool-call" as const, toolCallId: "call_2", toolName: "bash", input: { command: "cat a.txt" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_2", + toolName: "bash", + output: { type: "text" as const, value: "content" }, + }, + ], + }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Done reading" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "ok" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("long chain: 5 sequential tool calls", () => { + const prompt: any[] = [{ role: "user" as const, content: [{ type: "text" as const, text: "Do 5 things" }] }] + for (let i = 1; i <= 5; i++) { + prompt.push({ + role: "assistant" as const, + content: [ + { type: "tool-call" as const, toolCallId: `call_${i}`, toolName: "bash", input: { command: `echo ${i}` } }, + ], + }) + prompt.push({ + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: `call_${i}`, + toolName: "bash", + output: { type: "text" as const, value: `${i}` }, + }, + ], + }) + } + prompt.push({ role: "assistant" as const, content: [{ type: "text" as const, text: "All 5 done" }] }) + prompt.push({ role: "user" as const, content: [{ type: "text" as const, text: "great" }] }) + + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("tool 
call followed by user text (no tool result) — orphan toolUse", () => { + // This simulates a scenario where tool execution was interrupted/cancelled + // and the next message is a plain user message without tool results + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run something" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "Running" }, + { type: "tool-call" as const, toolCallId: "call_orphan", toolName: "bash", input: { command: "sleep 100" } }, + ], + }, + // No tool result — user sent a new message directly + { role: "user" as const, content: [{ type: "text" as const, text: "Cancel that, do something else" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "OK, cancelled" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "thanks" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("compaction summary replaces tool-heavy history — no tools passed", () => { + // After compaction, the prompt has tool calls in history but no tools definition. + // The converter should strip all toolUses/toolResults. 
+ const prompt = [ + { role: "system" as const, content: "You are helpful" }, + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "Running" }, + { type: "tool-call" as const, toolCallId: "call_c1", toolName: "bash", input: { command: "ls" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_c1", + toolName: "bash", + output: { type: "text" as const, value: "files" }, + }, + ], + }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Here are the files" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "Summarize" }] }, + ] + // No tools = compaction mode + const result = convertToKiroPayload(prompt as any, modelId) + // In compaction mode, all toolUses/toolResults should be stripped + for (const item of result.conversationState.history) { + expect(item.assistantResponseMessage?.toolUses).toBeUndefined() + expect(item.userInputMessage?.userInputMessageContext?.toolResults).toBeUndefined() + } + expect( + result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.toolResults, + ).toBeUndefined() + }) + + test("assistant with toolUse but empty user content before next assistant — edge case merge", () => { + // user("") -> assistant(tool_call) -> tool(result) -> assistant(text) -> user + // The empty user content might not flush, causing merge issues + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "" }] }, + { + role: "assistant" as const, + content: [{ type: "tool-call" as const, toolCallId: "call_e1", toolName: "bash", input: { command: "ls" } }], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_e1", + toolName: "bash", + output: { type: "text" as const, value: "out" }, + }, + ], + }, + { role: "assistant" as const, content: [{ type: "text" 
as const, text: "Result" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "ok" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("tool result in user message (AI SDK format) with tool-result in content array", () => { + // AI SDK sometimes puts tool-result parts directly in user messages + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [{ type: "tool-call" as const, toolCallId: "call_u1", toolName: "bash", input: { command: "ls" } }], + }, + { + role: "user" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_u1", + toolName: "bash", + output: { type: "text" as const, value: "files" }, + }, + { type: "text" as const, text: "What did you find?" }, + ], + }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("3 tool calls, middle result missing — partial mismatch in history", () => { + // Simulates pruning that drops one tool result from the middle + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Do 3 things" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "Doing 3 things" }, + { type: "tool-call" as const, toolCallId: "call_p1", toolName: "bash", input: { command: "echo 1" } }, + { type: "tool-call" as const, toolCallId: "call_p2", toolName: "bash", input: { command: "echo 2" } }, + { type: "tool-call" as const, toolCallId: "call_p3", toolName: "bash", input: { command: "echo 3" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_p1", + toolName: "bash", + output: { type: "text" as const, value: "1" }, + }, + // call_p2 result missing! 
+ { + type: "tool-result" as const, + toolCallId: "call_p3", + toolName: "bash", + output: { type: "text" as const, value: "3" }, + }, + ], + }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Done" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "ok" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + assertToolPairing(result) + }) + + test("empty content user messages are filtered in no-tools mode (re-compact scenario)", () => { + const prompt = [ + { role: "system" as const, content: "You are a helpful assistant" }, + { role: "user" as const, content: [{ type: "text" as const, text: "Do something" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "I'll help." }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Done with that." }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "Next question" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Sure." }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Finished." 
}] }, + { role: "user" as const, content: [{ type: "text" as const, text: "Summarize" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId) + const history = result.conversationState.history + for (const item of history) { + if (item.userInputMessage) { + expect(item.userInputMessage.content).not.toBe("") + } + } + for (let i = 1; i < history.length; i++) { + const prev = history[i - 1] + const curr = history[i] + const prevIsUser = !!prev.userInputMessage && !prev.assistantResponseMessage + const currIsUser = !!curr.userInputMessage && !curr.assistantResponseMessage + const prevIsAssistant = !!prev.assistantResponseMessage && !prev.userInputMessage + const currIsAssistant = !!curr.assistantResponseMessage && !curr.userInputMessage + expect(prevIsUser && currIsUser).toBe(false) + expect(prevIsAssistant && currIsAssistant).toBe(false) + } + }) + + test("multiple consecutive empty user messages in no-tools mode", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Start" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "OK" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Continued" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "More" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Final" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "Summarize" }] }, + ] + const result = convertToKiroPayload(prompt as any, modelId) + const history = result.conversationState.history + for (const item of history) { + if (item.userInputMessage) { + expect(item.userInputMessage.content).not.toBe("") + } + } + }) +}) diff --git 
a/packages/opencode/test/provider/kiro.test.ts b/packages/opencode/test/provider/kiro.test.ts new file mode 100644 index 00000000000..84fd8903073 --- /dev/null +++ b/packages/opencode/test/provider/kiro.test.ts @@ -0,0 +1,572 @@ +import { describe, expect, test } from "bun:test" +import { convertToKiroPayload } from "../../src/provider/sdk/kiro/src/converters" +import { normalizeModelName } from "../../src/provider/sdk/kiro/src/model-resolver" +import { parseAwsEventStream } from "../../src/provider/sdk/kiro/src/streaming" + +describe("normalizeModelName", () => { + test("converts claude-sonnet-4-5 to claude-sonnet-4.5", () => { + expect(normalizeModelName("claude-sonnet-4-5")).toBe("claude-sonnet-4.5") + }) + + test("converts claude-haiku-4-5 to claude-haiku-4.5", () => { + expect(normalizeModelName("claude-haiku-4-5")).toBe("claude-haiku-4.5") + }) + + test("converts claude-opus-4-5 to claude-opus-4.5", () => { + expect(normalizeModelName("claude-opus-4-5")).toBe("claude-opus-4.5") + }) + + test("converts claude-sonnet-4 to claude-sonnet-4", () => { + expect(normalizeModelName("claude-sonnet-4")).toBe("claude-sonnet-4") + }) + + test("handles model with date suffix", () => { + expect(normalizeModelName("claude-sonnet-4-5-20251001")).toBe("claude-sonnet-4.5") + }) + + test("maps claude-3-7-sonnet to hidden model ID", () => { + expect(normalizeModelName("claude-3-7-sonnet")).toBe("CLAUDE_3_7_SONNET_20250219_V1_0") + }) + + test("maps claude-3.7-sonnet to hidden model ID", () => { + expect(normalizeModelName("claude-3.7-sonnet")).toBe("CLAUDE_3_7_SONNET_20250219_V1_0") + }) + + test("converts claude-opus-4-6 to claude-opus-4.6", () => { + expect(normalizeModelName("claude-opus-4-6")).toBe("claude-opus-4.6") + }) + + test("converts claude-sonnet-4-6 to claude-sonnet-4.6", () => { + expect(normalizeModelName("claude-sonnet-4-6")).toBe("claude-sonnet-4.6") + }) + + test("preserves unknown model names", () => { + 
expect(normalizeModelName("unknown-model")).toBe("unknown-model") + }) + + test("handles uppercase input", () => { + expect(normalizeModelName("CLAUDE-SONNET-4-5")).toBe("claude-sonnet-4.5") + }) +}) + +describe("convertToKiroPayload", () => { + const modelId = "claude-sonnet-4.5" + + test("converts simple user message", () => { + const prompt = [{ role: "user" as const, content: [{ type: "text" as const, text: "Hello" }] }] + + const result = convertToKiroPayload(prompt, modelId) + + expect(result.conversationState.chatTriggerType).toBe("MANUAL") + expect(result.conversationState.conversationId).toBeDefined() + expect(result.conversationState.currentMessage.userInputMessage.content).toBe("Hello") + expect(result.conversationState.currentMessage.userInputMessage.modelId).toBe(modelId) + expect(result.conversationState.currentMessage.userInputMessage.origin).toBe("AI_EDITOR") + expect(result.conversationState.history).toHaveLength(0) + }) + + test("extracts system prompt into history", () => { + const prompt = [ + { role: "system" as const, content: "You are a helpful assistant" }, + { role: "user" as const, content: [{ type: "text" as const, text: "Hello" }] }, + ] + + const result = convertToKiroPayload(prompt, modelId) + + // System prompt should be embedded in history as first user/assistant exchange + const firstHistoryItem = result.conversationState.history[0] + expect(firstHistoryItem?.userInputMessage?.content).toContain("--- SYSTEM INSTRUCTIONS BEGIN ---") + expect(firstHistoryItem?.userInputMessage?.content).toContain("You are a helpful assistant") + expect(firstHistoryItem?.userInputMessage?.content).toContain("--- SYSTEM INSTRUCTIONS END ---") + }) + + test("converts tools to Kiro format", () => { + const prompt = [{ role: "user" as const, content: [{ type: "text" as const, text: "Run a command" }] }] + const tools = [ + { + type: "function" as const, + name: "bash", + description: "Execute a bash command", + inputSchema: { + type: "object" as const, + 
properties: { + command: { type: "string" as const, description: "The command to run" }, + }, + required: ["command"], + }, + }, + ] + + const result = convertToKiroPayload(prompt, modelId, tools as any) + + const kiroTools = result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.tools + expect(kiroTools).toBeDefined() + expect(kiroTools).toHaveLength(1) + expect(kiroTools![0].toolSpecification.name).toBe("bash") + expect(kiroTools![0].toolSpecification.description).toBe("Execute a bash command") + }) + + test("sanitizes JSON schema - removes empty required array", () => { + const prompt = [{ role: "user" as const, content: [{ type: "text" as const, text: "Test" }] }] + const tools = [ + { + type: "function" as const, + name: "test", + description: "Test tool", + inputSchema: { + type: "object" as const, + properties: {}, + required: [], // Empty array should be removed + }, + }, + ] + + const result = convertToKiroPayload(prompt, modelId, tools as any) + + const schema = result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.tools![0] + .toolSpecification.inputSchema.json as Record + expect(schema.required).toBeUndefined() + }) + + test("sanitizes JSON schema - removes additionalProperties", () => { + const prompt = [{ role: "user" as const, content: [{ type: "text" as const, text: "Test" }] }] + const tools = [ + { + type: "function" as const, + name: "test", + description: "Test tool", + inputSchema: { + type: "object" as const, + properties: { + name: { type: "string" as const }, + }, + additionalProperties: false, // Should be removed + }, + }, + ] + + const result = convertToKiroPayload(prompt, modelId, tools) + + const schema = result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.tools![0] + .toolSpecification.inputSchema.json as Record + expect(schema.additionalProperties).toBeUndefined() + }) + + test("builds history from multi-turn conversation", () => { + const prompt = [ 
+ { role: "user" as const, content: [{ type: "text" as const, text: "Hello" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Hi there!" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "How are you?" }] }, + ] + + const result = convertToKiroPayload(prompt, modelId) + + expect(result.conversationState.history).toHaveLength(2) + expect(result.conversationState.history[0].userInputMessage?.content).toBe("Hello") + expect(result.conversationState.history[1].assistantResponseMessage?.content).toBe("Hi there!") + expect(result.conversationState.currentMessage.userInputMessage.content).toBe("How are you?") + }) + + test("handles tool calls in assistant messages", () => { + const tools = [ + { + type: "function" as const, + name: "bash", + description: "Run bash", + inputSchema: { + type: "object" as const, + properties: { command: { type: "string" as const } }, + required: ["command"], + }, + }, + ] + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [ + { + type: "tool-call" as const, + toolCallId: "call_123", + toolName: "bash", + input: { command: "ls" }, + }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_123", + toolName: "bash", + output: { type: "text" as const, value: "file1.txt file2.txt" }, + }, + ], + }, + ] + + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + + // Check that tool calls are in history + const assistantMsg = result.conversationState.history.find((h) => h.assistantResponseMessage?.toolUses) + expect(assistantMsg).toBeDefined() + expect(assistantMsg?.assistantResponseMessage?.toolUses).toHaveLength(1) + expect(assistantMsg?.assistantResponseMessage?.toolUses![0].name).toBe("bash") + }) + + test("handles tool results in current message", () => { + const tools = [ + { + type: "function" as const, + name: "bash", + 
description: "Run bash", + inputSchema: { + type: "object" as const, + properties: { command: { type: "string" as const } }, + required: ["command"], + }, + }, + ] + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [ + { + type: "tool-call" as const, + toolCallId: "call_123", + toolName: "bash", + input: { command: "ls" }, + }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_123", + toolName: "bash", + output: { type: "text" as const, value: "file1.txt" }, + }, + ], + }, + ] + + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + + const toolResults = result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.toolResults + expect(toolResults).toBeDefined() + expect(toolResults).toHaveLength(1) + expect(toolResults![0].toolUseId).toBe("call_123") + expect(toolResults![0].content[0].text).toBe("file1.txt") + expect(toolResults![0].status).toBe("success") + }) + + test("handles error tool results", () => { + const tools = [ + { + type: "function" as const, + name: "bash", + description: "Run bash", + inputSchema: { + type: "object" as const, + properties: { command: { type: "string" as const } }, + required: ["command"], + }, + }, + ] + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run bad command" }] }, + { + role: "assistant" as const, + content: [ + { + type: "tool-call" as const, + toolCallId: "call_123", + toolName: "bash", + input: { command: "bad-command" }, + }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_123", + toolName: "bash", + output: { type: "error-text" as const, value: "Command failed" }, + }, + ], + }, + ] + + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + + const toolResults = 
result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.toolResults + expect(toolResults![0].status).toBe("error") + }) + + describe("thinking mode (Fake Reasoning)", () => { + test("injects thinking tags when enabled", () => { + const prompt = [{ role: "user" as const, content: [{ type: "text" as const, text: "Solve this problem" }] }] + const providerOptions = { + thinking: { + type: "enabled" as const, + budgetTokens: 16000, + }, + } + + const result = convertToKiroPayload(prompt, modelId, undefined, providerOptions) + + const content = result.conversationState.currentMessage.userInputMessage.content + expect(content).toContain("enabled") + expect(content).toContain("16000") + expect(content).toContain("") + expect(content).toContain("Solve this problem") + }) + + test("adds thinking system prompt addition when enabled", () => { + const prompt = [ + { role: "system" as const, content: "You are helpful" }, + { role: "user" as const, content: [{ type: "text" as const, text: "Hello" }] }, + ] + const providerOptions = { + thinking: { + type: "enabled" as const, + budgetTokens: 16000, + }, + } + + const result = convertToKiroPayload(prompt, modelId, undefined, providerOptions) + + // System prompt with thinking addition should be in history + const firstHistoryItem = result.conversationState.history[0] + const contextContent = firstHistoryItem?.userInputMessage?.content + expect(contextContent).toContain("You are helpful") + expect(contextContent).toContain("Extended Thinking Mode") + expect(contextContent).toContain("...") + }) + + test("does not inject thinking tags when disabled", () => { + const prompt = [{ role: "user" as const, content: [{ type: "text" as const, text: "Hello" }] }] + const providerOptions = { + thinking: { + type: "disabled" as const, + }, + } + + const result = convertToKiroPayload(prompt, modelId, undefined, providerOptions) + + const content = result.conversationState.currentMessage.userInputMessage.content + 
expect(content).not.toContain("") + expect(content).toBe("Hello") + }) + + test("uses default budget tokens when not specified", () => { + const prompt = [{ role: "user" as const, content: [{ type: "text" as const, text: "Test" }] }] + const providerOptions = { + thinking: { + type: "enabled" as const, + }, + } + + const result = convertToKiroPayload(prompt, modelId, undefined, providerOptions) + + const content = result.conversationState.currentMessage.userInputMessage.content + expect(content).toContain("16000") + }) + }) + + test("handles empty user content with minimal placeholder", () => { + const prompt = [{ role: "user" as const, content: [] }] + + const result = convertToKiroPayload(prompt, modelId) + + expect(result.conversationState.currentMessage.userInputMessage.content).toBe(".") + }) + + test("merges consecutive assistant messages", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Hello" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Part 1" }] }, + { role: "assistant" as const, content: [{ type: "text" as const, text: "Part 2" }] }, + { role: "user" as const, content: [{ type: "text" as const, text: "Continue" }] }, + ] + + const result = convertToKiroPayload(prompt, modelId) + + // Should merge consecutive assistant messages + const assistantMsgs = result.conversationState.history.filter((h) => h.assistantResponseMessage) + expect(assistantMsgs).toHaveLength(1) + expect(assistantMsgs[0].assistantResponseMessage?.content).toContain("Part 1") + expect(assistantMsgs[0].assistantResponseMessage?.content).toContain("Part 2") + }) + + test("strips toolUses/toolResults from history when no tools defined (compaction)", () => { + const prompt = [ + { role: "system" as const, content: "You are helpful" }, + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: "I'll run ls" 
}, + { type: "tool-call" as const, toolCallId: "call_1", toolName: "bash", input: { command: "ls" } }, + ], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_1", + toolName: "bash", + output: { type: "text" as const, value: "file1.txt" }, + }, + ], + }, + { + role: "assistant" as const, + content: [{ type: "text" as const, text: "Found file1.txt" }], + }, + { role: "user" as const, content: [{ type: "text" as const, text: "Summarize the conversation" }] }, + ] + + // No tools passed — simulates compaction + const result = convertToKiroPayload(prompt as any, modelId) + + for (const item of result.conversationState.history) { + expect(item.assistantResponseMessage?.toolUses).toBeUndefined() + expect(item.userInputMessage?.userInputMessageContext?.toolResults).toBeUndefined() + } + expect( + result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.toolResults, + ).toBeUndefined() + }) + + test("removes synthetic empty assistant items after stripping tools", () => { + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [{ type: "tool-call" as const, toolCallId: "call_1", toolName: "bash", input: { command: "ls" } }], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_1", + toolName: "bash", + output: { type: "text" as const, value: "ok" }, + }, + ], + }, + { role: "user" as const, content: [{ type: "text" as const, text: "Summarize" }] }, + ] + + const result = convertToKiroPayload(prompt as any, modelId) + const hasEmptyAssistant = result.conversationState.history.some( + (x) => x.assistantResponseMessage?.content === "(empty)", + ) + + expect(hasEmptyAssistant).toBe(false) + }) + + test("preserves toolUses/toolResults when tools are defined", () => { + const tools = [ + { + type: "function" as const, + name: "bash", + description: "Run bash", + 
inputSchema: { + type: "object" as const, + properties: { command: { type: "string" as const } }, + required: ["command"], + }, + }, + ] + const prompt = [ + { role: "user" as const, content: [{ type: "text" as const, text: "Run ls" }] }, + { + role: "assistant" as const, + content: [{ type: "tool-call" as const, toolCallId: "call_1", toolName: "bash", input: { command: "ls" } }], + }, + { + role: "tool" as const, + content: [ + { + type: "tool-result" as const, + toolCallId: "call_1", + toolName: "bash", + output: { type: "text" as const, value: "file1.txt" }, + }, + ], + }, + ] + + const result = convertToKiroPayload(prompt as any, modelId, tools as any) + + const assistant = result.conversationState.history.find((h) => h.assistantResponseMessage?.toolUses) + expect(assistant).toBeDefined() + expect(assistant!.assistantResponseMessage!.toolUses).toHaveLength(1) + const toolResults = result.conversationState.currentMessage.userInputMessage.userInputMessageContext?.toolResults + expect(toolResults).toHaveLength(1) + }) +}) + +describe("variant extraction (mirrors parseModelSelection from acp/agent.ts)", () => { + const kiroModels: Record<string, { variants?: Record<string, unknown> }> = { + "claude-sonnet-4-5": { variants: { high: {}, max: {} } }, + "claude-opus-4-5": { variants: { high: {}, max: {} } }, + "claude-opus-4-6": { variants: { high: {}, max: {} } }, + "claude-sonnet-4-6": { variants: { high: {}, max: {} } }, + "claude-haiku-4-5": { variants: {} }, + "claude-sonnet-4": { variants: {} }, + "claude-3-7-sonnet": { variants: {} }, + } + + function extractVariant(modelID: string) { + if (kiroModels[modelID]) return { baseModel: modelID, variant: undefined } + const segments = modelID.split("/") + if (segments.length > 1) { + const candidate = segments[segments.length - 1] + const base = segments.slice(0, -1).join("/") + const info = kiroModels[base] + if (info?.variants && candidate in info.variants) { + return { baseModel: base, variant: candidate } + } + } + return { baseModel: modelID, variant: 
undefined } + } + + test("extracts high/max variants from reasoning models", () => { + expect(extractVariant("claude-opus-4-6/high")).toEqual({ baseModel: "claude-opus-4-6", variant: "high" }) + expect(extractVariant("claude-opus-4-6/max")).toEqual({ baseModel: "claude-opus-4-6", variant: "max" }) + expect(extractVariant("claude-sonnet-4-5/high")).toEqual({ baseModel: "claude-sonnet-4-5", variant: "high" }) + expect(extractVariant("claude-sonnet-4-6/max")).toEqual({ baseModel: "claude-sonnet-4-6", variant: "max" }) + }) + + test("passes through base models without variant", () => { + expect(extractVariant("claude-opus-4-6")).toEqual({ baseModel: "claude-opus-4-6", variant: undefined }) + expect(extractVariant("claude-haiku-4-5")).toEqual({ baseModel: "claude-haiku-4-5", variant: undefined }) + }) + + test("does not extract variant from non-reasoning models", () => { + expect(extractVariant("claude-haiku-4-5/high")).toEqual({ baseModel: "claude-haiku-4-5/high", variant: undefined }) + }) + + test("does not extract variant from unknown models", () => { + expect(extractVariant("nonexistent-model/high")).toEqual({ + baseModel: "nonexistent-model/high", + variant: undefined, + }) + }) +}) + +// Note: parseAwsEventStream tests are skipped because they require +// proper AWS Event Stream binary format which is complex to construct in tests. +// The streaming functionality is tested through integration tests. +// The converter and model-resolver tests above provide good coverage of the core logic. 
diff --git a/packages/opencode/test/session/compaction.test.ts b/packages/opencode/test/session/compaction.test.ts index 452926d12e1..4a69a7e2624 100644 --- a/packages/opencode/test/session/compaction.test.ts +++ b/packages/opencode/test/session/compaction.test.ts @@ -6,6 +6,7 @@ import { Instance } from "../../src/project/instance" import { Log } from "../../src/util/log" import { tmpdir } from "../fixture/fixture" import { Session } from "../../src/session" +import { Identifier } from "../../src/id/id" import type { Provider } from "../../src/provider/provider" Log.init({ print: false }) diff --git a/packages/opencode/test/session/message-v2.test.ts b/packages/opencode/test/session/message-v2.test.ts index 0d5b89730a9..b78b969e236 100644 --- a/packages/opencode/test/session/message-v2.test.ts +++ b/packages/opencode/test/session/message-v2.test.ts @@ -494,7 +494,10 @@ describe("session.message-v2.toModelMessage", () => { type: "tool-result", toolCallId: "call-1", toolName: "bash", - output: { type: "text", value: "[Old tool result content cleared]" }, + output: { + type: "text", + value: "[Tool output removed during compaction. Re-run this tool if you need the full result.]", + }, }, ], },