diff --git a/packages/opencode/bunfig.toml b/packages/opencode/bunfig.toml
index c3b727076..33b39f719 100644
--- a/packages/opencode/bunfig.toml
+++ b/packages/opencode/bunfig.toml
@@ -1,7 +1,7 @@
preload = ["@opentui/solid/preload"]
[test]
-preload = ["./test/preload.ts"]
+preload = ["@opentui/solid/preload", "./test/preload.ts"]
# timeout is not actually parsed from bunfig.toml (see src/bunfig.zig in oven-sh/bun)
# using --timeout in package.json scripts instead
# https://github.com/oven-sh/bun/issues/7789
diff --git a/packages/opencode/src/cli/cmd/tui/app.tsx b/packages/opencode/src/cli/cmd/tui/app.tsx
index 8bb17ff13..66c067255 100644
--- a/packages/opencode/src/cli/cmd/tui/app.tsx
+++ b/packages/opencode/src/cli/cmd/tui/app.tsx
@@ -15,6 +15,7 @@ import { LocalProvider, useLocal } from "@tui/context/local"
import { DialogModel, useConnected } from "@tui/component/dialog-model"
import { DialogMcp } from "@tui/component/dialog-mcp"
import { DialogStatus } from "@tui/component/dialog-status"
+import { DialogCost } from "@tui/component/dialog-cost"
import { DialogThemeList } from "@tui/component/dialog-theme-list"
import { DialogHelp } from "./ui/dialog-help"
import { CommandProvider, useCommandDialog } from "@tui/component/dialog-command"
@@ -542,6 +543,19 @@ function App() {
},
category: "System",
},
+ {
+ title: "View usage & cost",
+ value: "opencode.cost",
+ slash: {
+ name: "cost",
+ aliases: ["usage"],
+ },
+ onSelect: () => {
+ const sessionID = route.data.type === "session" ? route.data.sessionID : undefined
+ dialog.replace(() => <DialogCost sessionID={sessionID} />)
+ },
+ category: "System",
+ },
{
title: "Switch theme",
value: "theme.switch",
diff --git a/packages/opencode/src/cli/cmd/tui/component/cost-metrics.ts b/packages/opencode/src/cli/cmd/tui/component/cost-metrics.ts
new file mode 100644
index 000000000..f9c271e9c
--- /dev/null
+++ b/packages/opencode/src/cli/cmd/tui/component/cost-metrics.ts
@@ -0,0 +1,79 @@
+import type { Message, Provider, Model } from "@opencode-ai/sdk/v2"
+
+export interface Metrics {
+ actual: number
+ noCache: number
+ cacheHitPct: number
+}
+
+export interface StatsInput {
+ totalCost: number
+ totalTokens: { input: number; cache: { read: number } }
+ modelUsage: Record<string, { cost: number; tokens: { input: number; cache: { read: number } } }>
+}
+
+export function computeSessionMetrics(messages: Message[], providers: Provider[]): Metrics {
+ let actual = 0
+ let cacheReadTokens = 0
+ let inputTokens = 0
+ let noCacheDelta = 0
+
+ for (const msg of messages) {
+ if (msg.role !== "assistant") continue
+ actual += msg.cost || 0
+ const tokens = msg.tokens
+ if (tokens) {
+ inputTokens += tokens.input || 0
+ cacheReadTokens += tokens.cache?.read || 0
+ const model = findModel(providers, msg.providerID, msg.modelID)
+ const inputPrice = model?.cost?.input ?? 0
+ const cacheReadPrice = model?.cost?.cache?.read ?? inputPrice
+ noCacheDelta += ((tokens.cache?.read || 0) * (inputPrice - cacheReadPrice)) / 1_000_000
+ }
+ }
+
+ const noCache = actual + noCacheDelta
+ const totalInput = inputTokens + cacheReadTokens
+ const cacheHitPct = totalInput > 0 ? (cacheReadTokens / totalInput) * 100 : 0
+
+ return { actual, noCache, cacheHitPct }
+}
+
+export function computeStatsMetrics(stats: StatsInput, providers: Provider[]): Metrics {
+ let noCacheDelta = 0
+ for (const [modelKey, usage] of Object.entries(stats.modelUsage)) {
+ const slash = modelKey.indexOf("/")
+ const providerID = modelKey.substring(0, slash)
+ const modelID = modelKey.substring(slash + 1)
+ const model = findModel(providers, providerID, modelID)
+ const inputPrice = model?.cost?.input ?? 0
+ const cacheReadPrice = model?.cost?.cache?.read ?? inputPrice
+ noCacheDelta += (usage.tokens.cache.read * (inputPrice - cacheReadPrice)) / 1_000_000
+ }
+
+ const totalInput = stats.totalTokens.input + stats.totalTokens.cache.read
+ const cacheHitPct = totalInput > 0 ? (stats.totalTokens.cache.read / totalInput) * 100 : 0
+
+ return {
+ actual: stats.totalCost,
+ noCache: stats.totalCost + noCacheDelta,
+ cacheHitPct,
+ }
+}
+
+export function findModel(providers: Provider[], providerID: string, modelID: string): Model | undefined {
+ const provider = providers.find((p) => p.id === providerID)
+ return provider?.models[modelID]
+}
+
+export function formatCost(n: number): string {
+ if (n >= 1000) return `$${(n / 1000).toFixed(1)}K`
+ return `$${n.toFixed(1)}`
+}
+
+export function formatRow(label: string, metrics: Metrics, maxCostWidth: number): string {
+ const costPair = `${formatCost(metrics.actual)}╱${formatCost(metrics.noCache)}`
+ const padded = costPair.padEnd(maxCostWidth)
+ const hitRate = `⟐${Math.round(metrics.cacheHitPct)}%`
+ return `${label.padEnd(4)} ${padded} ${hitRate}`
+}
diff --git a/packages/opencode/src/cli/cmd/tui/component/dialog-cost.tsx b/packages/opencode/src/cli/cmd/tui/component/dialog-cost.tsx
new file mode 100644
index 000000000..98885ee68
--- /dev/null
+++ b/packages/opencode/src/cli/cmd/tui/component/dialog-cost.tsx
@@ -0,0 +1,74 @@
+import { TextAttributes } from "@opentui/core"
+import { useTheme } from "../context/theme"
+import { useDialog } from "@tui/ui/dialog"
+import { useSync } from "@tui/context/sync"
+import { useSDK } from "@tui/context/sdk"
+import { createMemo, createResource } from "solid-js"
+import { computeSessionMetrics, computeStatsMetrics, formatCost, formatRow } from "./cost-metrics"
+
+export function DialogCost(props: { sessionID: string }) {
+ const sync = useSync()
+ const { theme } = useTheme()
+ const dialog = useDialog()
+ const sdk = useSDK()
+
+ const sessionMetrics = createMemo(() => {
+ const msgs = sync.data.message[props.sessionID] ?? []
+ return computeSessionMetrics(msgs, sync.data.provider)
+ })
+
+ const [dailyStats] = createResource(async () => {
+ const res = await sdk.fetch(sdk.url + "/session/stats?days=1", {})
+ return res.json()
+ })
+
+ const [monthlyStats] = createResource(async () => {
+ const res = await sdk.fetch(sdk.url + "/session/stats?days=30", {})
+ return res.json()
+ })
+
+ const dailyMetrics = createMemo(() => {
+ const data = dailyStats()
+ if (!data) return { actual: 0, noCache: 0, cacheHitPct: 0 }
+ return computeStatsMetrics(data, sync.data.provider)
+ })
+
+ const monthlyMetrics = createMemo(() => {
+ const data = monthlyStats()
+ if (!data) return { actual: 0, noCache: 0, cacheHitPct: 0 }
+ return computeStatsMetrics(data, sync.data.provider)
+ })
+
+ const rows = createMemo(() => {
+ const sess = sessionMetrics()
+ const daily = dailyMetrics()
+ const monthly = monthlyMetrics()
+
+ const allMetrics = [sess, daily, monthly]
+ const maxCostWidth = Math.max(
+ ...allMetrics.map((m) => `${formatCost(m.actual)}╱${formatCost(m.noCache)}`.length),
+ )
+
+ return [
+ formatRow("Sess", sess, maxCostWidth),
+ formatRow("☼-ly", daily, maxCostWidth),
+ formatRow("☽-ly", monthly, maxCostWidth),
+ ]
+ })
+
+  return (
+    <box>
+      <box flexDirection="row" justifyContent="space-between">
+        <text fg={theme.text} attributes={TextAttributes.BOLD}>
+          Usage
+        </text>
+        <text fg={theme.textMuted} onMouseDown={() => dialog.clear()}>
+          esc
+        </text>
+      </box>
+      <text fg={theme.text}>{rows()[0]}</text>
+      <text fg={theme.text}>{rows()[1]}</text>
+      <text fg={theme.text}>{rows()[2]}</text>
+    </box>
+  )
+}
diff --git a/packages/opencode/src/server/routes/session.ts b/packages/opencode/src/server/routes/session.ts
index b8fafd336..ad2a8b547 100644
--- a/packages/opencode/src/server/routes/session.ts
+++ b/packages/opencode/src/server/routes/session.ts
@@ -19,6 +19,7 @@ import { PermissionID } from "@/permission/schema"
import { ModelID, ProviderID } from "@/provider/schema"
import { errors } from "../error"
import { lazy } from "../../util/lazy"
+import { aggregateSessionStats } from "../../cli/cmd/stats"
const log = Log.create({ service: "server" })
@@ -92,6 +93,36 @@ export const SessionRoutes = lazy(() =>
return c.json(result)
},
)
+ .get(
+ "/stats",
+ describeRoute({
+ summary: "Get usage stats",
+ description: "Get aggregated usage and cost statistics across sessions.",
+ operationId: "session.stats",
+ responses: {
+ 200: {
+ description: "Usage statistics",
+ content: {
+ "application/json": {
+ schema: resolver(z.any()),
+ },
+ },
+ },
+ },
+ }),
+ validator(
+ "query",
+ z.object({
+ days: z.coerce.number().optional(),
+ project: z.string().optional(),
+ }),
+ ),
+ async (c) => {
+ const query = c.req.valid("query")
+ const stats = await aggregateSessionStats(query.days, query.project)
+ return c.json(stats)
+ },
+ )
.get(
"/:sessionID",
describeRoute({
diff --git a/packages/opencode/test/cli/tui/dialog-cost-ui.test.tsx b/packages/opencode/test/cli/tui/dialog-cost-ui.test.tsx
new file mode 100644
index 000000000..38b074bf3
--- /dev/null
+++ b/packages/opencode/test/cli/tui/dialog-cost-ui.test.tsx
@@ -0,0 +1,223 @@
+import { describe, expect, test, mock, beforeEach } from "bun:test"
+import type { Message, Provider } from "@opencode-ai/sdk/v2"
+
+// --- Stubs ---
+
+const mockProviders: Provider[] = [
+ {
+ id: "anthropic",
+ name: "Anthropic",
+ source: "env",
+ env: [],
+ options: {},
+ models: {
+ "claude-sonnet": {
+ id: "claude-sonnet",
+ providerID: "anthropic",
+ api: { id: "anthropic", url: "", npm: "" },
+ name: "Claude Sonnet",
+ capabilities: {
+ temperature: true,
+ reasoning: false,
+ attachment: false,
+ toolcall: true,
+ input: { text: true, audio: false, image: false, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: { input: 3, output: 15, cache: { read: 0.3, write: 3.75 } },
+ limit: { context: 200000, output: 8192 },
+ status: "active",
+ options: {},
+ headers: {},
+ release_date: "2025-01-01",
+ },
+ },
+ },
+]
+
+const mockMessages: Message[] = [
+ {
+ id: "msg_0",
+ sessionID: "ses_1",
+ role: "user",
+ agent: "build",
+ model: { providerID: "anthropic", modelID: "claude-sonnet" },
+ time: { created: 1000000 },
+ },
+ {
+ id: "msg_1",
+ sessionID: "ses_1",
+ role: "assistant",
+ agent: "build",
+ modelID: "claude-sonnet",
+ providerID: "anthropic",
+ mode: "",
+ parentID: "msg_0",
+ path: { cwd: "/test", root: "/test" },
+ cost: 0.42,
+ tokens: { input: 100_000, output: 2000, reasoning: 0, cache: { read: 400_000, write: 10_000 } },
+ time: { created: 1000100, completed: 1005000 },
+ },
+]
+
+const mockDailyStats = {
+ totalCost: 12.3,
+ totalTokens: { input: 1_000_000, output: 50_000, reasoning: 0, cache: { read: 2_600_000, write: 100_000 } },
+ modelUsage: {
+ "anthropic/claude-sonnet": {
+ cost: 12.3,
+ tokens: { input: 1_000_000, output: 50_000, cache: { read: 2_600_000, write: 100_000 } },
+ },
+ },
+}
+
+const mockMonthlyStats = {
+ totalCost: 148,
+ totalTokens: { input: 10_000_000, output: 500_000, reasoning: 0, cache: { read: 21_250_000, write: 1_000_000 } },
+ modelUsage: {
+ "anthropic/claude-sonnet": {
+ cost: 148,
+ tokens: { input: 10_000_000, output: 500_000, cache: { read: 21_250_000, write: 1_000_000 } },
+ },
+ },
+}
+
+let dialogCleared = false
+
+mock.module("@tui/context/theme", () => ({
+ useTheme: () => ({
+ theme: {
+ text: "#ffffff",
+ textMuted: "#808080",
+ background: "#000000",
+ backgroundPanel: "#111111",
+ success: "#00ff00",
+ error: "#ff0000",
+ warning: "#ffaa00",
+ },
+ mode: () => "dark",
+ setMode: () => {},
+ }),
+}))
+
+mock.module("@tui/ui/dialog", () => ({
+ useDialog: () => ({
+ clear() {
+ dialogCleared = true
+ },
+ replace() {},
+ stack: [],
+ size: "medium",
+ setSize() {},
+ }),
+}))
+
+mock.module("@tui/context/sync", () => ({
+ useSync: () => ({
+ data: {
+ message: {
+ ses_1: mockMessages,
+ },
+ provider: mockProviders,
+ },
+ }),
+}))
+
+mock.module("@tui/context/sdk", () => ({
+ useSDK: () => ({
+ url: "http://localhost:4096",
+ fetch: async (url: string) => {
+ if (url.includes("days=1")) {
+ return { json: () => mockDailyStats }
+ }
+ if (url.includes("days=30")) {
+ return { json: () => mockMonthlyStats }
+ }
+ return { json: () => ({}) }
+ },
+ }),
+}))
+
+const { testRender } = await import("@opentui/solid")
+const { DialogCost } = await import("../../../src/cli/cmd/tui/component/dialog-cost")
+
+describe("DialogCost UI", () => {
+ beforeEach(() => {
+ dialogCleared = false
+ })
+
+ test("renders Usage title and three metric rows", async () => {
+ const { renderOnce, captureCharFrame } = await testRender(() => <DialogCost sessionID="ses_1" />, {
+ width: 50,
+ height: 10,
+ })
+
+ // Allow async createResource fetchers to resolve
+ await new Promise((r) => setTimeout(r, 100))
+ await renderOnce()
+
+ const frame = captureCharFrame()
+
+ // Title
+ expect(frame).toContain("Usage")
+
+ // Session row
+ expect(frame).toContain("Sess")
+ expect(frame).toContain("$0.4")
+ expect(frame).toContain("⟐")
+
+ // Daily row
+ expect(frame).toContain("☼-ly")
+ expect(frame).toContain("$12.3")
+
+ // Monthly row
+ expect(frame).toContain("☽-ly")
+ expect(frame).toContain("$148")
+ })
+
+ test("renders esc dismiss label", async () => {
+ const { renderOnce, captureCharFrame } = await testRender(() => <DialogCost sessionID="ses_1" />, {
+ width: 50,
+ height: 10,
+ })
+
+ await new Promise((r) => setTimeout(r, 100))
+ await renderOnce()
+
+ const frame = captureCharFrame()
+ expect(frame).toContain("esc")
+ })
+
+ test("renders zeros for unknown session", async () => {
+ const { renderOnce, captureCharFrame } = await testRender(() => <DialogCost sessionID="ses_nonexistent" />, {
+ width: 50,
+ height: 10,
+ })
+
+ await new Promise((r) => setTimeout(r, 100))
+ await renderOnce()
+
+ const frame = captureCharFrame()
+
+ // Session row should show $0.0 for both actual and no-cache
+ expect(frame).toContain("Sess")
+ expect(frame).toContain("$0.0╱$0.0")
+ expect(frame).toContain("⟐0%")
+ })
+
+ test("shows cache hit percentage for session", async () => {
+ const { renderOnce, captureCharFrame } = await testRender(() => <DialogCost sessionID="ses_1" />, {
+ width: 50,
+ height: 10,
+ })
+
+ await new Promise((r) => setTimeout(r, 100))
+ await renderOnce()
+
+ const frame = captureCharFrame()
+
+ // Session: cache_read=400k, input=100k → 400k/(100k+400k) = 80%
+ expect(frame).toContain("⟐80%")
+ })
+})
diff --git a/packages/opencode/test/cli/tui/dialog-cost.test.ts b/packages/opencode/test/cli/tui/dialog-cost.test.ts
new file mode 100644
index 000000000..efabd5f29
--- /dev/null
+++ b/packages/opencode/test/cli/tui/dialog-cost.test.ts
@@ -0,0 +1,380 @@
+import { describe, expect, test } from "bun:test"
+import {
+ computeSessionMetrics,
+ computeStatsMetrics,
+ findModel,
+ formatCost,
+ formatRow,
+ type Metrics,
+} from "../../../src/cli/cmd/tui/component/cost-metrics"
+import type { AssistantMessage, Message, Provider, UserMessage } from "@opencode-ai/sdk/v2"
+
+function makeProvider(overrides?: Partial<Provider> & { models?: Provider["models"] }): Provider {
+ return {
+ id: "anthropic",
+ name: "Anthropic",
+ source: "env",
+ env: [],
+ options: {},
+ models: {},
+ ...overrides,
+ }
+}
+
+function makeModel(input: number, output: number, cacheRead: number, cacheWrite = 0) {
+ return {
+ id: "test-model",
+ providerID: "anthropic",
+ api: { id: "test", url: "", npm: "" },
+ name: "Test Model",
+ capabilities: {
+ temperature: true,
+ reasoning: false,
+ attachment: false,
+ toolcall: true,
+ input: { text: true, audio: false, image: false, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input,
+ output,
+ cache: { read: cacheRead, write: cacheWrite },
+ },
+ limit: { context: 200000, output: 4096 },
+ status: "active" as const,
+ options: {},
+ headers: {},
+ release_date: "2025-01-01",
+ }
+}
+
+function makeAssistantMsg(overrides: Partial<AssistantMessage> = {}): AssistantMessage {
+ return {
+ id: "msg_1",
+ sessionID: "ses_1",
+ role: "assistant",
+ agent: "build",
+ modelID: "test-model",
+ providerID: "anthropic",
+ mode: "",
+ parentID: "msg_0",
+ path: { cwd: "/test", root: "/test" },
+ cost: 0,
+ tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
+ time: { created: 1000000 },
+ ...overrides,
+ }
+}
+
+function makeUserMsg(): UserMessage {
+ return {
+ id: "msg_0",
+ sessionID: "ses_1",
+ role: "user",
+ agent: "build",
+ model: { providerID: "anthropic", modelID: "test-model" },
+ time: { created: 999999 },
+ }
+}
+
+describe("dialog-cost", () => {
+ describe("formatCost", () => {
+ test("formats small values with one decimal", () => {
+ expect(formatCost(0)).toBe("$0.0")
+ expect(formatCost(0.4)).toBe("$0.4")
+ expect(formatCost(12.34)).toBe("$12.3")
+ expect(formatCost(999.9)).toBe("$999.9")
+ })
+
+ test("uses K suffix at >= 1000", () => {
+ expect(formatCost(1000)).toBe("$1.0K")
+ expect(formatCost(1234)).toBe("$1.2K")
+ expect(formatCost(15000)).toBe("$15.0K")
+ })
+ })
+
+ describe("findModel", () => {
+ const model = makeModel(3, 15, 0.3)
+ const providers = [
+ makeProvider({
+ models: { "test-model": model },
+ }),
+ ]
+
+ test("finds model by provider and model ID", () => {
+ expect(findModel(providers, "anthropic", "test-model")).toBe(model)
+ })
+
+ test("returns undefined for unknown provider", () => {
+ expect(findModel(providers, "openai", "test-model")).toBeUndefined()
+ })
+
+ test("returns undefined for unknown model", () => {
+ expect(findModel(providers, "anthropic", "nonexistent")).toBeUndefined()
+ })
+
+ test("returns undefined for empty providers", () => {
+ expect(findModel([], "anthropic", "test-model")).toBeUndefined()
+ })
+ })
+
+ describe("computeSessionMetrics", () => {
+ // input=$3/M, cache_read=$0.30/M → delta = $2.70/M for cache reads
+ const providers = [
+ makeProvider({
+ models: { "test-model": makeModel(3, 15, 0.3) },
+ }),
+ ]
+
+ test("returns zeros for empty messages", () => {
+ const result = computeSessionMetrics([], providers)
+ expect(result.actual).toBe(0)
+ expect(result.noCache).toBe(0)
+ expect(result.cacheHitPct).toBe(0)
+ })
+
+ test("skips user messages", () => {
+ const messages: Message[] = [makeUserMsg()]
+ const result = computeSessionMetrics(messages, providers)
+ expect(result.actual).toBe(0)
+ expect(result.noCache).toBe(0)
+ expect(result.cacheHitPct).toBe(0)
+ })
+
+ test("computes actual cost from assistant messages", () => {
+ const messages: Message[] = [
+ makeAssistantMsg({ cost: 0.5 }),
+ makeAssistantMsg({ id: "msg_2", cost: 0.3 }),
+ ]
+ const result = computeSessionMetrics(messages, providers)
+ expect(result.actual).toBeCloseTo(0.8, 10)
+ })
+
+ test("computes no-cache cost higher than actual when cache is used", () => {
+ const messages: Message[] = [
+ makeAssistantMsg({
+ cost: 0.5,
+ tokens: { input: 100_000, output: 1000, reasoning: 0, cache: { read: 500_000, write: 0 } },
+ }),
+ ]
+ const result = computeSessionMetrics(messages, providers)
+ expect(result.actual).toBe(0.5)
+ // no-cache delta = 500_000 * (3 - 0.3) / 1_000_000 = 500_000 * 2.7 / 1_000_000 = 1.35
+ expect(result.noCache).toBeCloseTo(0.5 + 1.35, 10)
+ })
+
+ test("actual equals no-cache when no cache reads", () => {
+ const messages: Message[] = [
+ makeAssistantMsg({
+ cost: 1.0,
+ tokens: { input: 200_000, output: 5000, reasoning: 0, cache: { read: 0, write: 0 } },
+ }),
+ ]
+ const result = computeSessionMetrics(messages, providers)
+ expect(result.actual).toBe(1.0)
+ expect(result.noCache).toBe(1.0)
+ })
+
+ test("computes cache hit percentage", () => {
+ const messages: Message[] = [
+ makeAssistantMsg({
+ cost: 0.1,
+ tokens: { input: 200_000, output: 1000, reasoning: 0, cache: { read: 800_000, write: 0 } },
+ }),
+ ]
+ const result = computeSessionMetrics(messages, providers)
+ // 800k / (200k + 800k) = 80%
+ expect(result.cacheHitPct).toBeCloseTo(80, 10)
+ })
+
+ test("cache hit is 0 when no input tokens", () => {
+ const messages: Message[] = [
+ makeAssistantMsg({
+ cost: 0.1,
+ tokens: { input: 0, output: 1000, reasoning: 0, cache: { read: 0, write: 0 } },
+ }),
+ ]
+ const result = computeSessionMetrics(messages, providers)
+ expect(result.cacheHitPct).toBe(0)
+ })
+
+ test("handles unknown model gracefully (falls back to 0 delta)", () => {
+ const messages: Message[] = [
+ makeAssistantMsg({
+ providerID: "unknown",
+ modelID: "unknown",
+ cost: 0.5,
+ tokens: { input: 100_000, output: 1000, reasoning: 0, cache: { read: 500_000, write: 0 } },
+ }),
+ ]
+ const result = computeSessionMetrics(messages, providers)
+ // Unknown model → inputPrice=0, cacheReadPrice=0 → delta=0
+ expect(result.actual).toBe(0.5)
+ expect(result.noCache).toBe(0.5)
+ })
+
+ test("aggregates across multiple assistant messages", () => {
+ const messages: Message[] = [
+ makeUserMsg(),
+ makeAssistantMsg({
+ cost: 0.3,
+ tokens: { input: 50_000, output: 500, reasoning: 0, cache: { read: 200_000, write: 0 } },
+ }),
+ makeUserMsg(),
+ makeAssistantMsg({
+ id: "msg_3",
+ cost: 0.2,
+ tokens: { input: 30_000, output: 300, reasoning: 0, cache: { read: 100_000, write: 0 } },
+ }),
+ ]
+ const result = computeSessionMetrics(messages, providers)
+ expect(result.actual).toBeCloseTo(0.5, 10)
+ // delta1 = 200_000 * 2.7 / 1_000_000 = 0.54
+ // delta2 = 100_000 * 2.7 / 1_000_000 = 0.27
+ expect(result.noCache).toBeCloseTo(0.5 + 0.54 + 0.27, 10)
+ // cache hit: (200k + 100k) / (50k + 30k + 200k + 100k) = 300k / 380k ≈ 78.9%
+ expect(result.cacheHitPct).toBeCloseTo(78.947, 1)
+ })
+ })
+
+ describe("computeStatsMetrics", () => {
+ const providers = [
+ makeProvider({
+ models: { "test-model": makeModel(3, 15, 0.3) },
+ }),
+ ]
+
+ test("returns zeros for empty stats", () => {
+ const stats = {
+ totalCost: 0,
+ totalTokens: { input: 0, cache: { read: 0 } },
+ modelUsage: {},
+ }
+ const result = computeStatsMetrics(stats, providers)
+ expect(result.actual).toBe(0)
+ expect(result.noCache).toBe(0)
+ expect(result.cacheHitPct).toBe(0)
+ })
+
+ test("computes no-cache delta from model usage breakdown", () => {
+ const stats = {
+ totalCost: 2.0,
+ totalTokens: { input: 100_000, cache: { read: 500_000 } },
+ modelUsage: {
+ "anthropic/test-model": {
+ cost: 2.0,
+ tokens: { input: 100_000, cache: { read: 500_000 } },
+ },
+ },
+ }
+ const result = computeStatsMetrics(stats, providers)
+ expect(result.actual).toBe(2.0)
+ // delta = 500_000 * (3 - 0.3) / 1_000_000 = 1.35
+ expect(result.noCache).toBeCloseTo(3.35, 10)
+ })
+
+ test("computes cache hit from total tokens", () => {
+ const stats = {
+ totalCost: 1.0,
+ totalTokens: { input: 200_000, cache: { read: 300_000 } },
+ modelUsage: {
+ "anthropic/test-model": {
+ cost: 1.0,
+ tokens: { input: 200_000, cache: { read: 300_000 } },
+ },
+ },
+ }
+ const result = computeStatsMetrics(stats, providers)
+ // 300k / 500k = 60%
+ expect(result.cacheHitPct).toBeCloseTo(60, 10)
+ })
+
+ test("handles multiple models", () => {
+ const providers2 = [
+ makeProvider({
+ models: {
+ "model-a": makeModel(3, 15, 0.3),
+ "model-b": makeModel(10, 30, 1.0),
+ },
+ }),
+ ]
+ const stats = {
+ totalCost: 5.0,
+ totalTokens: { input: 200_000, cache: { read: 600_000 } },
+ modelUsage: {
+ "anthropic/model-a": {
+ cost: 2.0,
+ tokens: { input: 100_000, cache: { read: 400_000 } },
+ },
+ "anthropic/model-b": {
+ cost: 3.0,
+ tokens: { input: 100_000, cache: { read: 200_000 } },
+ },
+ },
+ }
+ const result = computeStatsMetrics(stats, providers2)
+ expect(result.actual).toBe(5.0)
+ // delta-a = 400_000 * (3 - 0.3) / 1_000_000 = 1.08
+ // delta-b = 200_000 * (10 - 1.0) / 1_000_000 = 1.8
+ expect(result.noCache).toBeCloseTo(5.0 + 1.08 + 1.8, 10)
+ })
+
+ test("handles unknown model in stats (zero delta)", () => {
+ const stats = {
+ totalCost: 1.0,
+ totalTokens: { input: 50_000, cache: { read: 100_000 } },
+ modelUsage: {
+ "unknown/mystery": {
+ cost: 1.0,
+ tokens: { input: 50_000, cache: { read: 100_000 } },
+ },
+ },
+ }
+ const result = computeStatsMetrics(stats, providers)
+ expect(result.noCache).toBe(1.0)
+ })
+ })
+
+ describe("formatRow", () => {
+ test("formats a row with label, cost pair, and hit rate", () => {
+ const metrics: Metrics = { actual: 0.4, noCache: 1.9, cacheHitPct: 85 }
+ const row = formatRow("Sess", metrics, 12)
+ expect(row).toContain("Sess")
+ expect(row).toContain("$0.4╱$1.9")
+ expect(row).toContain("⟐85%")
+ })
+
+ test("pads cost pair to max width", () => {
+ const small: Metrics = { actual: 0.1, noCache: 0.2, cacheHitPct: 50 }
+ const large: Metrics = { actual: 148, noCache: 412, cacheHitPct: 68 }
+ const maxWidth = `${formatCost(large.actual)}╱${formatCost(large.noCache)}`.length
+
+ const rowSmall = formatRow("Sess", small, maxWidth)
+ const rowLarge = formatRow("☽-ly", large, maxWidth)
+
+ // Both rows should have the same offset to ⟐
+ const hitIdx1 = rowSmall.indexOf("⟐")
+ const hitIdx2 = rowLarge.indexOf("⟐")
+ expect(hitIdx1).toBe(hitIdx2)
+ })
+
+ test("rounds cache hit percentage", () => {
+ const metrics: Metrics = { actual: 1.0, noCache: 2.0, cacheHitPct: 78.9 }
+ const row = formatRow("Sess", metrics, 12)
+ expect(row).toContain("⟐79%")
+ })
+
+ test("handles zero cache hit", () => {
+ const metrics: Metrics = { actual: 1.0, noCache: 1.0, cacheHitPct: 0 }
+ const row = formatRow("Sess", metrics, 12)
+ expect(row).toContain("⟐0%")
+ })
+
+ test("handles K suffix costs", () => {
+ const metrics: Metrics = { actual: 1200, noCache: 3400, cacheHitPct: 65 }
+ const row = formatRow("☽-ly", metrics, 15)
+ expect(row).toContain("$1.2K╱$3.4K")
+ })
+ })
+})