Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ async function run(cmd) {
stdio: ['pipe', 'pipe', 'ignore'],
});
return stdout.trim();
} catch (_e) { // eslint-disable-line @typescript-eslint/no-unused-vars
} catch (_e) {
// eslint-disable-line @typescript-eslint/no-unused-vars
return null;
}
}
Expand Down
1 change: 1 addition & 0 deletions docs/cli/settings.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ they appear in the UI.
| Hide Sandbox Status | `ui.footer.hideSandboxStatus` | Hide the sandbox status indicator in the footer. | `false` |
| Hide Model Info | `ui.footer.hideModelInfo` | Hide the model name and context usage in the footer. | `false` |
| Hide Context Window Percentage | `ui.footer.hideContextPercentage` | Hides the context window remaining percentage. | `true` |
| Show Token Usage | `ui.footer.showTokenUsage` | Show cumulative token usage per model in the footer. | `true` |
| Hide Footer | `ui.hideFooter` | Hide the footer from the UI. | `false` |
| Show Memory Usage | `ui.showMemoryUsage` | Display memory usage information in the UI. | `false` |
| Show Line Numbers | `ui.showLineNumbers` | Show line numbers in the chat. | `true` |
Expand Down
4 changes: 4 additions & 0 deletions docs/reference/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -266,6 +266,10 @@ their corresponding top-level category object in your `settings.json` file.
- **Description:** Hides the context window remaining percentage.
- **Default:** `true`

- **`ui.footer.showTokenUsage`** (boolean):
- **Description:** Show cumulative token usage per model in the footer.
- **Default:** `true`

- **`ui.hideFooter`** (boolean):
- **Description:** Hide the footer from the UI.
- **Default:** `false`
Expand Down
9 changes: 9 additions & 0 deletions packages/cli/src/config/settingsSchema.ts
Original file line number Diff line number Diff line change
Expand Up @@ -598,6 +598,15 @@ const SETTINGS_SCHEMA = {
description: 'Hides the context window remaining percentage.',
showInDialog: true,
},
showTokenUsage: {
type: 'boolean',
label: 'Show Token Usage',
category: 'UI',
requiresRestart: false,
default: true,
description: 'Show cumulative token usage per model in the footer.',
showInDialog: true,
},
},
},
hideFooter: {
Expand Down
42 changes: 42 additions & 0 deletions packages/cli/src/ui/components/Footer.test.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -477,6 +477,48 @@ describe('<Footer />', () => {
expect(lastFrame()).toMatch(/\d+% context left/);
unmount();
});

it('displays token usage when showTokenUsage is true and there are active models', async () => {
  // A single active model (total > 0) so the footer has usage to render.
  const proPreviewMetrics = {
    tokens: {
      total: 1500,
      input: 1000,
      candidates: 500,
      cached: 0,
      thoughts: 0,
      tool: 0,
      prompt: 1000,
    },
    api: { totalRequests: 1, totalErrors: 0, totalLatencyMs: 100 },
    roles: {},
  };
  const metricsWithModels = {
    ...mockSessionStats.metrics,
    models: { 'gemini-3-pro-preview': proPreviewMetrics },
  };

  const settings = createMockSettings({
    ui: {
      footer: {
        showTokenUsage: true,
      },
    },
  });

  const { lastFrame, waitUntilReady, unmount } = renderWithProviders(
    <Footer />,
    {
      width: 120,
      uiState: {
        sessionStats: { ...mockSessionStats, metrics: metricsWithModels },
      },
      settings,
    },
  );
  await waitUntilReady();
  // 'gemini-3-pro-preview' abbreviates to 'PP30'; 1500 tokens -> '1.5K'.
  expect(lastFrame()).toContain('PP30:1.5K');
  unmount();
});

it('renders complete footer in narrow terminal (baseline narrow)', async () => {
const { lastFrame, waitUntilReady, unmount } = renderWithProviders(
<Footer />,
Expand Down
3 changes: 3 additions & 0 deletions packages/cli/src/ui/components/Footer.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import process from 'node:process';
import { MemoryUsageDisplay } from './MemoryUsageDisplay.js';
import { ContextUsageDisplay } from './ContextUsageDisplay.js';
import { QuotaDisplay } from './QuotaDisplay.js';
import { TokenUsageIndicator } from './TokenUsageIndicator.js';
import { DebugProfiler } from './DebugProfiler.js';
import { isDevelopment } from '../../utils/installationInfo.js';
import { useUIState } from '../contexts/UIStateContext.js';
Expand Down Expand Up @@ -64,6 +65,7 @@ export const Footer: React.FC = () => {
const hideSandboxStatus = settings.merged.ui.footer.hideSandboxStatus;
const hideModelInfo = settings.merged.ui.footer.hideModelInfo;
const hideContextPercentage = settings.merged.ui.footer.hideContextPercentage;
const showTokenUsage = settings.merged.ui.footer.showTokenUsage;

const pathLength = Math.max(20, Math.floor(terminalWidth * 0.25));
const displayPath = shortenPath(tildeifyPath(targetDir), pathLength);
Expand Down Expand Up @@ -165,6 +167,7 @@ export const Footer: React.FC = () => {
</>
)}
</Text>
{showTokenUsage && <TokenUsageIndicator />}
{showMemoryUsage && <MemoryUsageDisplay />}
</Box>
<Box alignItems="center">
Expand Down
131 changes: 131 additions & 0 deletions packages/cli/src/ui/components/TokenUsageIndicator.test.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import { describe, it, expect } from 'vitest';
import { renderWithProviders } from '../../test-utils/render.js';
import { TokenUsageIndicator } from './TokenUsageIndicator.js';
import { ToolCallDecision } from '@google/gemini-cli-core';

// Per-model metrics entry; `prompt` mirrors `input` for both fixtures.
const makeModelEntry = (
  total: number,
  input: number,
  candidates: number,
  totalRequests: number,
  totalLatencyMs: number,
) => ({
  tokens: {
    total,
    input,
    candidates,
    cached: 0,
    thoughts: 0,
    tool: 0,
    prompt: input,
  },
  api: { totalRequests, totalErrors: 0, totalLatencyMs },
  roles: {},
});

// Two models with recorded usage: flash (500K tokens) dominates and should
// sort ahead of pro-preview (1.5K tokens) in the indicator.
const mockMetrics = {
  models: {
    'gemini-3-pro-preview': makeModelEntry(1500, 1000, 500, 1, 100),
    'gemini-2.5-flash': makeModelEntry(500000, 400000, 100000, 5, 500),
  },
  tools: {
    totalCalls: 0,
    totalSuccess: 0,
    totalFail: 0,
    totalDurationMs: 0,
    totalDecisions: {
      [ToolCallDecision.ACCEPT]: 0,
      [ToolCallDecision.REJECT]: 0,
      [ToolCallDecision.MODIFY]: 0,
      [ToolCallDecision.AUTO_ACCEPT]: 0,
    },
    byName: {},
  },
  files: {
    totalLinesAdded: 0,
    totalLinesRemoved: 0,
  },
};

describe('<TokenUsageIndicator />', () => {
  it('renders nothing when there are no active models', async () => {
    // Session stats with an empty model map: indicator must render nothing.
    const emptyStats = {
      sessionId: 'test',
      sessionStartTime: new Date(),
      lastPromptTokenCount: 0,
      promptCount: 0,
      metrics: {
        models: {},
        tools: mockMetrics.tools,
        files: mockMetrics.files,
      },
    };

    const { lastFrame, waitUntilReady, unmount } = renderWithProviders(
      <TokenUsageIndicator />,
      { uiState: { sessionStats: emptyStats } },
    );
    await waitUntilReady();
    expect(lastFrame({ allowEmpty: true })).toBe('');
    unmount();
  });

  it('renders token usage for active models with abbreviations', async () => {
    const populatedStats = {
      sessionId: 'test',
      sessionStartTime: new Date(),
      lastPromptTokenCount: 0,
      promptCount: 0,
      metrics: mockMetrics,
    };

    const { lastFrame, waitUntilReady, unmount } = renderWithProviders(
      <TokenUsageIndicator />,
      { uiState: { sessionStats: populatedStats } },
    );
    await waitUntilReady();
    const output = lastFrame();
    // Abbreviations: gemini-2.5-flash -> 'F25', gemini-3-pro-preview ->
    // 'PP30'; counts are compact-formatted with no space after ':'.
    expect(output).toContain('F25:500K');
    expect(output).toContain('PP30:1.5K');
    unmount();
  });

  it('sorts models by total usage descending', async () => {
    const populatedStats = {
      sessionId: 'test',
      sessionStartTime: new Date(),
      lastPromptTokenCount: 0,
      promptCount: 0,
      metrics: mockMetrics,
    };

    const { lastFrame, waitUntilReady, unmount } = renderWithProviders(
      <TokenUsageIndicator />,
      { uiState: { sessionStats: populatedStats } },
    );
    await waitUntilReady();
    const output = lastFrame();
    // flash (500K total) must appear before pro-preview (1.5K total).
    const flashIndex = output.indexOf('F25');
    const proIndex = output.indexOf('PP30');
    expect(flashIndex).toBeLessThan(proIndex);
    unmount();
  });
});
54 changes: 54 additions & 0 deletions packages/cli/src/ui/components/TokenUsageIndicator.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import type React from 'react';
import { Box, Text } from 'ink';
import { theme } from '../semantic-colors.js';
import { getShortDisplayString } from '@google/gemini-cli-core';
import { useUIState } from '../contexts/UIStateContext.js';

/**
 * Shared compact formatter, hoisted so it is built once rather than on
 * every call — formatTokens runs on each footer render frame.
 */
const compactTokenFormatter = Intl.NumberFormat('en-US', {
  notation: 'compact',
  maximumFractionDigits: 1,
});

/**
 * Formats token counts for compact display (e.g., 1.5K, 1M).
 *
 * Counts below 1000 are rendered verbatim; larger counts use en-US
 * compact notation with at most one fraction digit.
 */
const formatTokens = (tokens: number): string =>
  tokens < 1000 ? tokens.toString() : compactTokenFormatter.format(tokens);

/**
 * Footer widget showing cumulative token usage per model.
 *
 * Models with zero total tokens are omitted; the rest are listed in
 * descending order of total usage as `<short-name>:<count>`. Renders
 * nothing when no model has recorded any tokens.
 */
export const TokenUsageIndicator: React.FC = () => {
  const { sessionStats } = useUIState();

  const rankedModels = Object.entries(sessionStats.metrics.models)
    .filter(([, modelMetrics]) => modelMetrics.tokens.total > 0)
    .sort(([, a], [, b]) => b.tokens.total - a.tokens.total);

  if (rankedModels.length === 0) {
    return null;
  }

  const lastIndex = rankedModels.length - 1;

  return (
    <Box flexDirection="row" paddingLeft={1}>
      <Text color={theme.ui.comment}>| </Text>
      {rankedModels.map(([modelName, modelMetrics], i) => (
        <Box key={modelName} flexDirection="row">
          <Text color={theme.text.secondary}>
            {getShortDisplayString(modelName)}:
          </Text>
          <Text color={theme.text.primary}>
            {formatTokens(modelMetrics.tokens.total)}
          </Text>
          {i < lastIndex && <Text color={theme.text.secondary}> </Text>}
        </Box>
      ))}
    </Box>
  );
};
45 changes: 45 additions & 0 deletions packages/core/src/code_assist/server.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,51 @@ describe('CodeAssistServer', () => {
expect(results).toHaveLength(0);
});

it('should handle async iterable input in requestStreamingPost', async () => {
  const { server, mockRequest } = createTestServer();

  // Simulate a Web ReadableStream: async-iterable but not a Node Readable.
  const sseChunk =
    'data: {"response": {"candidates": [{"content": {"parts": [{"text": "Hello"}]}}]}}\n\n';
  const mockAsyncIterable = {
    async *[Symbol.asyncIterator]() {
      yield sseChunk;
    },
  };

  mockRequest.mockResolvedValue({ data: mockAsyncIterable });

  const stream = await server.requestStreamingPost('testStream', {});

  const received = [];
  for await (const chunk of stream) {
    received.push(chunk);
  }

  expect(received).toHaveLength(1);
  // Narrow the parsed payload shape just enough to reach the text part.
  const first = received[0] as {
    response: {
      candidates: Array<{ content: { parts: Array<{ text: string }> } }>;
    };
  };
  expect(first.response.candidates[0].content.parts[0].text).toBe('Hello');
});

it('should throw descriptive error for non-stream input in requestStreamingPost', async () => {
  const { server, mockRequest } = createTestServer();

  // A plain object is neither a Node Readable nor async-iterable, so
  // consuming the stream must fail with a descriptive message.
  mockRequest.mockResolvedValue({ data: { error: 'Something went wrong' } });

  const stream = await server.requestStreamingPost('testStream', {});

  const consume = async () => {
    for await (const _ of stream) {
      // drain; nothing is expected to arrive
    }
  };

  await expect(consume).rejects.toThrow(
    /Expected a stream but received object/,
  );
});

it('should call the onboardUser endpoint', async () => {
const { server } = createTestServer();

Expand Down
Loading