Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions integration-tests/api-resilience.responses
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"Part 1. "}],"role":"model"},"index":0}]},{"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":10,"totalTokenCount":110}},{"candidates":[{"content":{"parts":[{"text":"Part 2."}],"role":"model"},"index":0}],"finishReason":"STOP"}]}
50 changes: 50 additions & 0 deletions integration-tests/api-resilience.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';

describe('API Resilience E2E', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => {
    await rig.cleanup();
  });

  it('should not crash when receiving metadata-only chunks in a stream', async () => {
    // Canned stream fixture: a normal text chunk, a metadata-only chunk
    // (usage counts, no candidates), then a final text chunk.
    const responsesFile = join(
      dirname(fileURLToPath(import.meta.url)),
      'api-resilience.responses',
    );

    await rig.setup('api-resilience-metadata-only', {
      fakeResponsesPath: responsesFile,
      settings: {
        planSettings: { modelRouting: false },
      },
    });

    // Drive the CLI with a trivial prompt; gemini-3-pro-preview keeps
    // internal service calls to a minimum. If the CLI crashed on the
    // metadata-only middle chunk, rig.run would throw here.
    const result = await rig.run({
      args: ['hi', '--model', 'gemini-3-pro-preview'],
    });

    // Text from both normal chunks must survive the metadata-only chunk.
    expect(result).toContain('Part 1.');
    expect(result).toContain('Part 2.');

    // Telemetry for the prompt should still be emitted despite the odd stream.
    const hasUserPromptEvent = await rig.waitForTelemetryEvent('user_prompt');
    expect(hasUserPromptEvent).toBe(true);
  });
});
202 changes: 201 additions & 1 deletion packages/core/src/code_assist/server.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,14 @@
import { beforeEach, describe, it, expect, vi, afterEach } from 'vitest';
import { CodeAssistServer } from './server.js';
import { OAuth2Client } from 'google-auth-library';
import { UserTierId, ActionStatus } from './types.js';
import {
UserTierId,
ActionStatus,
type LoadCodeAssistResponse,
type GeminiUserTier,
type SetCodeAssistGlobalUserSettingRequest,
type CodeAssistGlobalUserSettingResponse,
} from './types.js';
import { FinishReason } from '@google/genai';
import { LlmRole } from '../telemetry/types.js';
import { logInvalidChunk } from '../telemetry/loggers.js';
Expand Down Expand Up @@ -678,6 +685,85 @@ describe('CodeAssistServer', () => {
expect(response).toEqual(mockResponse);
});

it('should call fetchAdminControls endpoint', async () => {
  const { server } = createTestServer();
  // Stub the transport; this test only verifies endpoint name and payload.
  const mockResponse = { adminControlsApplicable: true };
  const postSpy = vi
    .spyOn(server, 'requestPost')
    .mockResolvedValue(mockResponse);

  const request = { project: 'test-project' };
  const result = await server.fetchAdminControls(request);

  expect(postSpy).toHaveBeenCalledWith('fetchAdminControls', request);
  expect(result).toEqual(mockResponse);
});

it('should call getCodeAssistGlobalUserSetting endpoint', async () => {
  const { server } = createTestServer();
  // The GET wrapper takes no payload; verify only the endpoint name.
  const mockResponse: CodeAssistGlobalUserSettingResponse = {
    freeTierDataCollectionOptin: true,
  };
  const getSpy = vi
    .spyOn(server, 'requestGet')
    .mockResolvedValue(mockResponse);

  const result = await server.getCodeAssistGlobalUserSetting();

  expect(getSpy).toHaveBeenCalledWith(
    'getCodeAssistGlobalUserSetting',
  );
  expect(result).toEqual(mockResponse);
});

it('should call setCodeAssistGlobalUserSetting endpoint', async () => {
  const { server } = createTestServer();
  // The request body should be forwarded verbatim to the POST layer.
  const request: SetCodeAssistGlobalUserSettingRequest = {
    freeTierDataCollectionOptin: true,
  };
  const mockResponse: CodeAssistGlobalUserSettingResponse = {
    freeTierDataCollectionOptin: true,
  };
  const postSpy = vi
    .spyOn(server, 'requestPost')
    .mockResolvedValue(mockResponse);

  const result = await server.setCodeAssistGlobalUserSetting(request);

  expect(postSpy).toHaveBeenCalledWith(
    'setCodeAssistGlobalUserSetting',
    request,
  );
  expect(result).toEqual(mockResponse);
});

it('should call loadCodeAssist during refreshAvailableCredits', async () => {
  const { server } = createTestServer();

  // loadCodeAssist will report a paid tier that carries credit balances.
  const refreshedTier = {
    id: 'test-tier',
    name: 'tier',
    availableCredits: [{ creditType: 'G1', creditAmount: '50' }],
  };
  vi.spyOn(server, 'loadCodeAssist').mockResolvedValue(
    { paidTier: refreshedTier } as unknown as LoadCodeAssistResponse,
  );

  // Start from a paidTier that is missing availableCredits; the cast works
  // around the field's restricted visibility on the server instance.
  (server as unknown as { paidTier: GeminiUserTier }).paidTier = {
    id: 'test-tier',
    name: 'tier',
  };

  await server.refreshAvailableCredits();

  // The refresh must hit loadCodeAssist and adopt the returned balances.
  expect(server.loadCodeAssist).toHaveBeenCalled();
  expect(server.paidTier?.availableCredits).toEqual(
    refreshedTier.availableCredits,
  );
});

describe('robustness testing', () => {
it('should not crash on random error objects in loadCodeAssist (isVpcScAffectedUser)', async () => {
const { server } = createTestServer();
Expand Down Expand Up @@ -867,6 +953,46 @@ data: ${jsonString}
);
});

// Verifies that a syntactically invalid JSON payload spread across several
// SSE `data:` lines is dropped (no chunk yielded) and reported via
// logInvalidChunk, rather than crashing the streaming parser.
it('should handle malformed JSON within a multi-line data block', async () => {
  const config = makeFakeConfig();
  const mockRequest = vi.fn();
  const client = { request: mockRequest } as unknown as OAuth2Client;
  // NOTE(review): positional constructor args — the two `undefined` slots
  // presumably correspond to optional params before `config`; confirm
  // against the CodeAssistServer constructor signature.
  const server = new CodeAssistServer(
    client,
    'test-project',
    {},
    'test-session',
    UserTierId.FREE,
    undefined,
    undefined,
    config,
  );

  // A manually driven Readable lets the test control exactly when (and
  // what) bytes arrive on the wire.
  const { Readable } = await import('node:stream');
  const mockStream = new Readable({
    read() {},
  });

  mockRequest.mockResolvedValue({ data: mockStream });

  const stream = await server.requestStreamingPost('testStream', {});

  // Push the malformed multi-line data block only after the consumer has
  // started iterating; the blank line terminates the SSE event, and
  // push(null) ends the stream so the for-await loop completes.
  setTimeout(() => {
    mockStream.push('data: {\n');
    mockStream.push('data: "invalid": json\n');
    mockStream.push('data: }\n\n');
    mockStream.push(null);
  }, 0);

  const results = [];
  for await (const res of stream) {
    results.push(res);
  }

  // The broken payload must be swallowed (nothing yielded) but observable
  // through the invalid-chunk telemetry hook.
  expect(results).toHaveLength(0);
  expect(logInvalidChunk).toHaveBeenCalled();
});

it('should safely process random response streams in generateContentStream (consumed/remaining credits)', async () => {
const { mockRequest, client } = createTestServer();
const testServer = new CodeAssistServer(
Expand Down Expand Up @@ -914,5 +1040,79 @@ data: ${jsonString}
}
// Should not crash
});

// Verifies the streaming path's contract for metadata-only chunks: a chunk
// carrying only usageMetadata/credit info (no candidates) is still yielded
// to the caller — with an empty candidates list — instead of crashing or
// being dropped, and surrounding text chunks are unaffected.
it('should be resilient to metadata-only chunks without candidates in generateContentStream', async () => {
  const { mockRequest, client } = createTestServer();
  const testServer = new CodeAssistServer(
    client,
    'test-project',
    {},
    'test-session',
    UserTierId.FREE,
  );
  const { Readable } = await import('node:stream');

  // Chunk 2 is metadata-only, no candidates
  const streamResponses = [
    {
      traceId: '1',
      response: {
        candidates: [{ content: { parts: [{ text: 'Hello' }] }, index: 0 }],
      },
    },
    {
      traceId: '2',
      consumedCredits: [{ creditType: 'GOOGLE_ONE_AI', creditAmount: '5' }],
      response: {
        usageMetadata: { promptTokenCount: 10, totalTokenCount: 15 },
      },
    },
    {
      traceId: '3',
      response: {
        candidates: [
          { content: { parts: [{ text: ' World' }] }, index: 0 },
        ],
      },
    },
  ];

  // Serialize each response as one SSE event (`data: …\n\n`), then end the
  // stream so the for-await loop below terminates.
  const mockStream = new Readable({
    read() {
      for (const resp of streamResponses) {
        this.push(`data: ${JSON.stringify(resp)}\n\n`);
      }
      this.push(null);
    },
  });
  mockRequest.mockResolvedValueOnce({ data: mockStream });
  // Metrics recording is irrelevant here; stub it so the test stays hermetic.
  vi.spyOn(testServer, 'recordCodeAssistMetrics').mockResolvedValue(
    undefined,
  );

  const stream = await testServer.generateContentStream(
    { model: 'test-model', contents: [] },
    'user-prompt-id',
    LlmRole.MAIN,
  );

  const results = [];
  for await (const res of stream) {
    results.push(res);
  }

  // All three chunks — including the metadata-only one — must come through.
  expect(results).toHaveLength(3);
  expect(results[0].candidates).toHaveLength(1);
  expect(results[0].candidates?.[0].content?.parts?.[0].text).toBe('Hello');

  // Chunk 2 (metadata-only) should still be yielded but with empty candidates
  expect(results[1].candidates).toHaveLength(0);
  expect(results[1].usageMetadata?.promptTokenCount).toBe(10);

  expect(results[2].candidates).toHaveLength(1);
  expect(results[2].candidates?.[0].content?.parts?.[0].text).toBe(
    ' World',
  );
});
});
});
Loading