Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
ebdd88a
Remove prompt mode toggle (#17216)
iamdavidhill Mar 13, 2026
4c5e37b
Remove prompt mode toggle (#17216)
iamdavidhill Mar 13, 2026
c077a43
feat(opencode): add LiteLLM discovery module with /model/info and /mo…
balcsida Feb 20, 2026
3265fa1
feat(opencode): add litellm provider seeding and custom loader
balcsida Feb 20, 2026
3e2ba1f
feat(opencode): add litellm-specific reasoning transform variants
balcsida Feb 20, 2026
d6a0317
fix(opencode): add explicit litellm providerID check for proxy detection
balcsida Feb 20, 2026
7358f9f
feat(opencode): allow interactive litellm provider configuration
balcsida Feb 20, 2026
c377d4a
feat(opencode): add litellm multi-step connect flow in TUI
balcsida Feb 20, 2026
0b30713
feat(app): add litellm connect dialog with base URL and API key fields
balcsida Feb 20, 2026
b1ec648
refactor: clean up litellm code to follow style guide
balcsida Feb 20, 2026
22acdf5
fix(opencode): use global config update for litellm base URL persistence
balcsida Feb 20, 2026
d37b460
feat(opencode): store underlying model in litellm model options
balcsida Feb 20, 2026
4c9341f
fix(opencode): detect litellm Claude models via underlying model info
balcsida Feb 20, 2026
5e7f1f3
fix(opencode): use snake_case budget_tokens for litellm thinking vari…
balcsida Feb 20, 2026
831c2b3
fix(opencode): select correct system prompt for litellm Claude models
balcsida Feb 20, 2026
fe87676
fix(opencode): filter underlyingModel from litellm provider options
balcsida Feb 20, 2026
1e5b092
feat(opencode): enable prompt caching for litellm Claude models
balcsida Feb 20, 2026
ccd656f
refactor(opencode): replace try/catch with .catch() in litellm code
balcsida Feb 21, 2026
ae45c5e
fix(opencode): use branded types for litellm ModelID and ProviderID
balcsida Mar 13, 2026
c5e5bd2
fix(opencode): add githubCopilotEnterprise to ProviderID schema
balcsida Mar 24, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 66 additions & 1 deletion packages/app/src/components/dialog-connect-provider.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ import { ProviderIcon } from "@opencode-ai/ui/provider-icon"
import { Spinner } from "@opencode-ai/ui/spinner"
import { TextField } from "@opencode-ai/ui/text-field"
import { showToast } from "@opencode-ai/ui/toast"
import { createEffect, createMemo, createResource, Match, onCleanup, onMount, Switch } from "solid-js"
import { createEffect, createMemo, createResource, Match, onCleanup, onMount, Show, Switch } from "solid-js"
import { createStore, produce } from "solid-js/store"
import { Link } from "@/components/link"
import { useGlobalSDK } from "@/context/global-sdk"
Expand Down Expand Up @@ -461,6 +461,68 @@ export function DialogConnectProvider(props: { provider: string }) {
)
}

// Form-based auth flow for connecting to a LiteLLM proxy: collects a base
// URL (defaulting to http://localhost:4000) and an optional API key, stores
// both through the global SDK, then completes the connect dialog.
function LiteLLMAuthView() {
  const [fields, setFields] = createStore({
    baseURL: "",
    apiKey: "",
  })

  const submit = async (event: SubmitEvent) => {
    event.preventDefault()

    // Read values off the submitted form rather than the store so the
    // handler works regardless of how the inputs were populated.
    const form = new FormData(event.currentTarget as HTMLFormElement)
    const trimmedURL = (form.get("baseURL") as string)?.trim()
    const trimmedKey = (form.get("apiKey") as string)?.trim()
    const endpoint = trimmedURL || "http://localhost:4000"

    // Persist the proxy base URL in the global provider configuration.
    await globalSDK.client.global.config.update({
      config: {
        provider: {
          litellm: {
            options: { baseURL: endpoint },
          },
        },
      },
    })
    // The key is optional — only store credentials when one was entered.
    if (trimmedKey) {
      await globalSDK.client.auth.set({
        providerID: props.provider,
        auth: { type: "api", key: trimmedKey },
      })
    }
    await complete()
  }

  return (
    <div class="flex flex-col gap-6">
      <div class="text-14-regular text-text-base">
        Connect to a LiteLLM proxy server. Models will be discovered automatically.
      </div>
      <form class="flex flex-col items-start gap-4" onSubmit={submit}>
        <TextField
          name="baseURL"
          type="text"
          label="Base URL"
          placeholder="http://localhost:4000"
          autofocus
          value={fields.baseURL}
          onChange={(next) => setFields("baseURL", next)}
        />
        <TextField
          name="apiKey"
          type="text"
          label="API Key (optional)"
          placeholder="sk-..."
          value={fields.apiKey}
          onChange={(next) => setFields("apiKey", next)}
        />
        <Button type="submit" class="w-auto" size="large" variant="primary">
          {language.t("common.submit")}
        </Button>
      </form>
    </div>
  )
}

function OAuthCodeView() {
const [formStore, setFormStore] = createStore({
value: "",
Expand Down Expand Up @@ -632,6 +694,9 @@ export function DialogConnectProvider(props: { provider: string }) {
</div>
</div>
</Match>
<Match when={method()?.type === "api" && props.provider === "litellm"}>
<LiteLLMAuthView />
</Match>
<Match when={method()?.type === "api"}>
<ApiAuthView />
</Match>
Expand Down
52 changes: 52 additions & 0 deletions packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,9 @@ export function createDialogProviderOptions() {
}
}
if (method.type === "api") {
if (provider.id === "litellm") {
return dialog.replace(() => <LiteLLMMethod />)
}
let metadata: Record<string, string> | undefined
if (method.prompts?.length) {
const value = await PromptsMethod({ dialog, prompts: method.prompts })
Expand Down Expand Up @@ -358,3 +361,52 @@ async function PromptsMethod(props: PromptsMethodProps) {
}
return inputs
}

// Two-step TUI prompt flow for the LiteLLM provider: ask for the proxy base
// URL, then for an optional API key, persist both, refresh instance state,
// and finally open the model picker scoped to the litellm provider.
function LiteLLMMethod() {
  const dialog = useDialog()
  const sdk = useSDK()
  const sync = useSync()
  const { theme } = useTheme()

  // Final step shared by the prompt chain: write config + credentials,
  // re-bootstrap so the new provider is discovered, then pick a model.
  const finish = async (baseURL: string, apiKey: string | undefined) => {
    await sdk.client.global.config.update({
      config: {
        provider: {
          litellm: {
            options: { baseURL },
          },
        },
      },
    })
    const key = apiKey?.trim()
    if (key) {
      await sdk.client.auth.set({
        providerID: "litellm",
        auth: { type: "api", key },
      })
    }
    await sdk.client.instance.dispose()
    await sync.bootstrap()
    dialog.replace(() => <DialogModel providerID="litellm" />)
  }

  // Second prompt: the API key is optional, so an empty answer is valid.
  const promptForKey = (url: string) => {
    dialog.replace(() => (
      <DialogPrompt
        title="LiteLLM API Key"
        placeholder="API key (optional)"
        description={() => (
          <text fg={theme.textMuted}>Enter the API key for your LiteLLM proxy, or leave empty if not required.</text>
        )}
        onConfirm={async (apiKey) => {
          await finish(url, apiKey)
        }}
      />
    ))
  }

  // First prompt: base URL, defaulting to the standard local proxy port.
  return (
    <DialogPrompt
      title="LiteLLM Base URL"
      placeholder="http://localhost:4000"
      description={() => (
        <text fg={theme.textMuted}>Enter the base URL of your LiteLLM proxy server.</text>
      )}
      onConfirm={async (baseURL) => {
        promptForKey(baseURL?.trim() || "http://localhost:4000")
      }}
    />
  )
}
247 changes: 247 additions & 0 deletions packages/opencode/src/provider/litellm.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,247 @@
import { Log } from "../util/log"
import { Env } from "../env"
import type { Provider } from "./provider"
import { ModelID, ProviderID } from "./schema"

export namespace LiteLLM {
  const log = Log.create({ service: "litellm" })

  // Fallback request timeout (ms) when neither the caller nor a valid
  // LITELLM_TIMEOUT environment value supplies one.
  const DEFAULT_TIMEOUT_MS = 5_000

  /** Shape of one entry returned by LiteLLM's `/model/info` endpoint. */
  interface ModelInfoEntry {
    model_name: string
    litellm_params?: {
      /** Upstream model the proxy routes this name to (e.g. "anthropic/claude-..."). */
      model?: string
      [key: string]: unknown
    }
    model_info?: {
      id?: string
      input_cost_per_token?: number | null
      output_cost_per_token?: number | null
      cache_read_input_token_cost?: number | null
      cache_creation_input_token_cost?: number | null
      input_cost_per_token_above_200k_tokens?: number | null
      output_cost_per_token_above_200k_tokens?: number | null
      max_tokens?: number | null
      max_input_tokens?: number | null
      max_output_tokens?: number | null
      supports_function_calling?: boolean | null
      supports_vision?: boolean | null
      supports_pdf_input?: boolean | null
      supports_audio_input?: boolean | null
      supports_audio_output?: boolean | null
      supports_video_input?: boolean | null
      supports_prompt_caching?: boolean | null
      supports_reasoning?: boolean | null
      supported_openai_params?: string[] | null
      [key: string]: unknown
    }
  }

  // Substrings of an underlying model name that mark model families treated
  // as supporting interleaved output.
  const INTERLEAVED_MODELS = ["claude", "anthropic"]

  // Wildcard routes (e.g. "openai/*") are catch-all proxy rules rather than
  // concrete models, so they are excluded from discovery. A plain "*" check
  // suffices — any "/*" route necessarily contains "*".
  function isWildcard(name: string): boolean {
    return name.includes("*")
  }

  // True when the underlying (routed-to) model name matches a family known
  // to support interleaved output; false when unknown or absent.
  function inferInterleaved(
    underlyingModel: string | undefined,
  ): Provider.Model["capabilities"]["interleaved"] {
    if (!underlyingModel) return false
    const lower = underlyingModel.toLowerCase()
    return INTERLEAVED_MODELS.some((m) => lower.includes(m))
  }

  // LiteLLM reports cost per token; provider models are priced per million
  // tokens. Missing/null/zero costs normalize to 0.
  function costPerMillion(costPerToken: number | null | undefined): number {
    if (!costPerToken) return 0
    return costPerToken * 1_000_000
  }

  // Convert one /model/info entry into a provider model, or undefined for
  // wildcard routes that do not name a concrete model.
  function toModel(entry: ModelInfoEntry): Provider.Model | undefined {
    if (isWildcard(entry.model_name)) return undefined

    // Annotating the declaration keeps property access well-typed without
    // `as` assertions further down.
    const info: NonNullable<ModelInfoEntry["model_info"]> = entry.model_info ?? {}
    const underlyingModel = entry.litellm_params?.model

    const inputCost = costPerMillion(info.input_cost_per_token)
    const outputCost = costPerMillion(info.output_cost_per_token)
    const cacheReadCost = costPerMillion(info.cache_read_input_token_cost)
    const cacheWriteCost = costPerMillion(info.cache_creation_input_token_cost)

    // Some models price >200K-token contexts differently; only emit the
    // tiered cost when LiteLLM reports either tiered rate.
    const hasOver200K =
      info.input_cost_per_token_above_200k_tokens != null ||
      info.output_cost_per_token_above_200k_tokens != null

    const supportsVision = info.supports_vision === true
    const supportsPdf = info.supports_pdf_input === true
    // Assume temperature support when LiteLLM does not list supported params.
    const supportsTemperature = info.supported_openai_params?.includes("temperature") ?? true

    return {
      id: ModelID.make(entry.model_name),
      providerID: ProviderID.make("litellm"),
      name: entry.model_name,
      api: {
        id: entry.model_name,
        url: "",
        npm: "@ai-sdk/openai-compatible",
      },
      status: "active",
      headers: {},
      // Preserve the routed-to model so downstream code can detect the real
      // model family behind the proxy alias.
      options: underlyingModel ? { underlyingModel } : {},
      cost: {
        input: inputCost,
        output: outputCost,
        cache: {
          read: cacheReadCost,
          write: cacheWriteCost,
        },
        experimentalOver200K: hasOver200K
          ? {
              input: costPerMillion(info.input_cost_per_token_above_200k_tokens),
              output: costPerMillion(info.output_cost_per_token_above_200k_tokens),
              cache: { read: 0, write: 0 },
            }
          : undefined,
      },
      limit: {
        // Fall back to conservative defaults when the proxy reports no limits.
        context: info.max_input_tokens ?? info.max_tokens ?? 128_000,
        output: info.max_output_tokens ?? 8_192,
      },
      capabilities: {
        temperature: supportsTemperature,
        reasoning: info.supports_reasoning === true,
        attachment: supportsVision || supportsPdf,
        // Only an explicit `false` disables tool calling; unknown means yes.
        toolcall: info.supports_function_calling !== false,
        input: {
          text: true,
          audio: info.supports_audio_input === true,
          image: supportsVision,
          video: info.supports_video_input === true,
          pdf: supportsPdf,
        },
        output: {
          text: true,
          audio: info.supports_audio_output === true,
          image: false,
          video: false,
          pdf: false,
        },
        interleaved: inferInterleaved(underlyingModel),
      },
      release_date: "",
      variants: {},
    }
  }

  // Minimal model built from a bare /models id (fallback path) with zero
  // cost, default limits, and conservative capabilities.
  function toBasicModel(id: string): Provider.Model {
    return {
      id: ModelID.make(id),
      providerID: ProviderID.make("litellm"),
      name: id,
      api: { id, url: "", npm: "@ai-sdk/openai-compatible" },
      status: "active",
      headers: {},
      options: {},
      cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
      limit: { context: 128_000, output: 8_192 },
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      release_date: "",
      variants: {},
    }
  }

  // Query /model/info for rich metadata. Returns undefined on any network
  // error, non-OK status, malformed payload, or when no concrete (non-
  // wildcard) models are present — callers then fall back to /models.
  async function fetchModelInfo(
    host: string,
    headers: Record<string, string>,
    timeout: number,
  ): Promise<Record<string, Provider.Model> | undefined> {
    const url = `${host}/model/info`
    const response = await fetch(url, {
      headers,
      signal: AbortSignal.timeout(timeout),
    }).catch(() => undefined)

    if (!response?.ok) return undefined

    const data = (await response.json()) as { data?: ModelInfoEntry[] }
    const entries = data?.data
    if (!Array.isArray(entries)) return undefined

    const models: Record<string, Provider.Model> = {}
    for (const entry of entries) {
      const model = toModel(entry)
      if (model) models[model.id] = model
    }
    return Object.keys(models).length > 0 ? models : undefined
  }

  // Query the OpenAI-compatible /models list. Best-effort: returns an empty
  // record on any failure so discovery degrades gracefully.
  async function fetchModelList(
    host: string,
    headers: Record<string, string>,
    timeout: number,
  ): Promise<Record<string, Provider.Model>> {
    const url = `${host}/models`
    const response = await fetch(url, {
      headers,
      signal: AbortSignal.timeout(timeout),
    }).catch(() => undefined)

    if (!response?.ok) return {}

    const data = (await response.json()) as { data?: { id: string }[] }
    const models: Record<string, Provider.Model> = {}
    for (const item of data?.data ?? []) {
      if (!item.id) continue
      models[item.id] = toBasicModel(item.id)
    }
    return models
  }

  /**
   * Discover available models from a LiteLLM proxy.
   *
   * Tries `/model/info` first for rich metadata (cost, limits, capabilities)
   * and falls back to the OpenAI-compatible `/models` list.
   *
   * @param host Base URL of the proxy; trailing slashes are stripped.
   * @param options Optional API key (sent as a Bearer token), extra request
   *   headers, and a per-request timeout in milliseconds. When no timeout is
   *   given, a valid numeric LITELLM_TIMEOUT env value is used, else 5000.
   * @returns A map of model id → model, or undefined when nothing was found.
   */
  export async function discover(
    host: string,
    options?: {
      apiKey?: string
      headers?: Record<string, string>
      timeout?: number
    },
  ): Promise<Record<string, Provider.Model> | undefined> {
    // Guard against a non-numeric LITELLM_TIMEOUT: AbortSignal.timeout(NaN)
    // throws, which would reject discovery instead of degrading gracefully.
    const envTimeout = Number(Env.get("LITELLM_TIMEOUT") ?? "")
    const timeout =
      options?.timeout ??
      (Number.isFinite(envTimeout) && envTimeout > 0 ? envTimeout : DEFAULT_TIMEOUT_MS)
    const base = host.replace(/\/+$/, "")

    const headers: Record<string, string> = {
      "Content-Type": "application/json",
      ...options?.headers,
    }
    if (options?.apiKey) {
      headers["Authorization"] = `Bearer ${options.apiKey}`
    }

    // Try /model/info first for rich metadata, fall back to /models
    const rich = await fetchModelInfo(base, headers, timeout)
    if (rich) {
      log.info("discovered models from LiteLLM /model/info", {
        count: Object.keys(rich).length,
        host,
      })
      return rich
    }

    const basic = await fetchModelList(base, headers, timeout)
    if (Object.keys(basic).length > 0) {
      log.info("discovered models from LiteLLM /models (fallback)", {
        count: Object.keys(basic).length,
        host,
      })
      return basic
    }

    return undefined
  }
}
Loading
Loading