
Commit a90bbff

Merge pull request #229 from voideditor/model-selection
Minor usability updates
2 parents 2aec37f + 99ff885 commit a90bbff

26 files changed (+463 additions, -697 deletions)


build/win32/code.iss

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ AppMutex={code:GetAppMutex}
 SetupMutex={#AppMutex}setup
 ; this is a Void icon comment. Old: WizardImageFile="{#RepoDir}\resources\win32\inno-big-100.bmp,{#RepoDir}\resources\win32\inno-big-125.bmp,{#RepoDir}\resources\win32\inno-big-150.bmp,{#RepoDir}\resources\win32\inno-big-175.bmp,{#RepoDir}\resources\win32\inno-big-200.bmp,{#RepoDir}\resources\win32\inno-big-225.bmp,{#RepoDir}\resources\win32\inno-big-250.bmp"
 ; this is a Void icon comment. Old: WizardSmallImageFile="{#RepoDir}\resources\win32\inno-small-100.bmp,{#RepoDir}\resources\win32\inno-small-125.bmp,{#RepoDir}\resources\win32\inno-small-150.bmp,{#RepoDir}\resources\win32\inno-small-175.bmp,{#RepoDir}\resources\win32\inno-small-200.bmp,{#RepoDir}\resources\win32\inno-small-225.bmp,{#RepoDir}\resources\win32\inno-small-250.bmp"
-WizardImageFile="{#RepoDir}\resources\win32\inno-void.bmp"
+; WizardImageFile="{#RepoDir}\resources\win32\inno-void.bmp"
 WizardSmallImageFile="{#RepoDir}\resources\win32\inno-void.bmp"
 SetupIconFile={#RepoDir}\resources\win32\code.ico
 UninstallDisplayIcon={app}\{#ExeBasename}.exe

resources/win32/code.ico

145 KB (binary file not shown)

resources/win32/inno-void.bmp

-4.12 MB (binary file not shown)

src/vs/platform/void/common/llmMessageService.ts

Lines changed: 1 addition & 0 deletions
@@ -98,6 +98,7 @@ export class LLMMessageService extends Disposable implements ILLMMessageService
 		}
 		const { providerName, modelName } = modelSelection
 
+		// add ai instructions here because we don't have access to voidSettingsService on the other side of the proxy
 		const aiInstructions = this.voidSettingsService.state.globalSettings.aiInstructions
 		if (aiInstructions)
 			proxyParams.messages.unshift({ role: 'system', content: aiInstructions })
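
The added comment documents the design constraint: the user's global AI instructions are prepended client-side, because voidSettingsService is not reachable on the main-process side of the proxy. A minimal standalone sketch of the unshift's effect (the instruction string and message list are made up for illustration):

type Msg = { role: 'system' | 'user' | 'assistant'; content: string }

// assumed values; in Void these come from voidSettingsService and the chat thread
const aiInstructions = 'Always answer concisely.'
const proxyMessages: Msg[] = [{ role: 'user', content: 'Explain closures.' }]

if (aiInstructions)
	proxyMessages.unshift({ role: 'system', content: aiInstructions })

// proxyMessages is now:
// [ { role: 'system', content: 'Always answer concisely.' },
//   { role: 'user', content: 'Explain closures.' } ]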

src/vs/platform/void/common/llmMessageTypes.ts

Lines changed: 7 additions & 1 deletion
@@ -30,6 +30,12 @@ export type LLMMessage = {
 	content: string;
 }
 
+export type _InternalLLMMessage = {
+	role: 'user' | 'assistant';
+	content: string;
+}
+
+
 export type ServiceSendLLMFeatureParams = {
 	useProviderFor: 'Ctrl+K';
 	range: IRange;
@@ -80,7 +86,7 @@ export type EventLLMMessageOnFinalMessageParams = Parameters<OnFinalMessage>[0]
 export type EventLLMMessageOnErrorParams = Parameters<OnError>[0] & { requestId: string }
 
 export type _InternalSendLLMMessageFnType = (params: {
-	messages: LLMMessage[];
+	messages: _InternalLLMMessage[];
 	onText: OnText;
 	onFinalMessage: OnFinalMessage;
 	onError: OnError;
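
The point of the narrower _InternalLLMMessage type is that provider implementations can no longer be handed system messages; those are folded into the first user message upstream by cleanMessages in sendLLMMessage.ts (see below), and the compiler enforces the invariant. A sketch of the distinction, assuming LLMMessage's role union includes 'system':

type LLMMessage = { role: 'system' | 'user' | 'assistant'; content: string }
type _InternalLLMMessage = { role: 'user' | 'assistant'; content: string }

declare function sendToProvider(messages: _InternalLLMMessage[]): void

const raw: LLMMessage[] = [
	{ role: 'system', content: 'Be terse.' },
	{ role: 'user', content: 'Hi' },
]

// sendToProvider(raw) // compile error: 'system' is not assignable to 'user' | 'assistant'
sendToProvider(raw.filter(m => m.role !== 'system') as _InternalLLMMessage[]) // ok once filtered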

src/vs/platform/void/common/voidUpdateService.ts

Lines changed: 3 additions & 1 deletion
@@ -7,6 +7,7 @@ import { createDecorator } from '../../instantiation/common/instantiation.js';
 import { ProxyChannel } from '../../../base/parts/ipc/common/ipc.js';
 import { IMainProcessService } from '../../ipc/common/mainProcessService.js';
 import { InstantiationType, registerSingleton } from '../../instantiation/common/extensions.js';
+import { IMetricsService } from './metricsService.js';
 
 
 
@@ -27,16 +28,17 @@ export class VoidUpdateService implements IVoidUpdateService {
 
 	constructor(
 		@IMainProcessService mainProcessService: IMainProcessService, // (only usable on client side)
+		@IMetricsService private readonly metricsService: IMetricsService,
 	) {
 		// creates an IPC proxy to use metricsMainService.ts
 		this.voidUpdateService = ProxyChannel.toService<IVoidUpdateService>(mainProcessService.getChannel('void-channel-update'));
 	}
 
 
-
 	// anything transmitted over a channel must be async even if it looks like it doesn't have to be
 	check: IVoidUpdateService['check'] = async () => {
 		const res = await this.voidUpdateService.check()
+		this.metricsService.capture('Check for Updates', { ...res })
 		return res
 	}
 }
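
For context, the injected IMetricsService records the outcome of every update check, and spreading res means each field of the check result becomes a property of the metrics event. A minimal sketch (the interface and result shape here are assumptions, not Void's actual definitions):

interface Metrics { capture(event: string, properties: object): void }
declare const metricsService: Metrics

// assumed shape of the value returned by voidUpdateService.check()
const res = { hasUpdate: true, message: 'Void v1.0.3 is available' }

metricsService.capture('Check for Updates', { ...res })
// equivalent to:
// metricsService.capture('Check for Updates', { hasUpdate: true, message: 'Void v1.0.3 is available' })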

src/vs/platform/void/electron-main/llmMessage/anthropic.ts

Lines changed: 2 additions & 18 deletions
@@ -7,11 +7,6 @@ import Anthropic from '@anthropic-ai/sdk';
 import { _InternalSendLLMMessageFnType } from '../../common/llmMessageTypes.js';
 import { anthropicMaxPossibleTokens } from '../../common/voidSettingsTypes.js';
 
-// Anthropic
-type LLMMessageAnthropic = {
-	role: 'user' | 'assistant';
-	content: string;
-}
 export const sendAnthropicMsg: _InternalSendLLMMessageFnType = ({ messages, onText, onFinalMessage, onError, settingsOfProvider, modelName, _setAborter }) => {
 
 	const thisConfig = settingsOfProvider.anthropic
@@ -24,20 +19,9 @@ export const sendAnthropicMsg: _InternalSendLLMMessageFnType = ({ messages, onTe
 
 	const anthropic = new Anthropic({ apiKey: thisConfig.apiKey, dangerouslyAllowBrowser: true });
 
-	// find system messages and concatenate them
-	const systemMessage = messages
-		.filter(msg => msg.role === 'system')
-		.map(msg => msg.content)
-		.join('\n');
-
-	// remove system messages for Anthropic
-	const anthropicMessages = messages.filter(msg => msg.role !== 'system') as LLMMessageAnthropic[]
-
-
-
 	const stream = anthropic.messages.stream({
-		system: systemMessage,
-		messages: anthropicMessages,
+		// system: systemMessage,
+		messages: messages,
 		model: modelName,
 		max_tokens: maxTokens,
 	});
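
After this change the Anthropic path passes messages through unmodified and sends no system field at all; system content arrives already folded into the first user message by cleanMessages (see sendLLMMessage.ts below). A sketch of the resulting SDK call, assuming a valid API key and model name:

import Anthropic from '@anthropic-ai/sdk'

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY })

const stream = anthropic.messages.stream({
	model: 'claude-3-5-sonnet-latest', // assumed model name
	max_tokens: 1024,
	// no `system` field; instructions are already embedded in the first user message
	messages: [{ role: 'user', content: '<SYSTEM_MESSAGE>\nBe terse.\n</SYSTEM_MESSAGE>\nHi' }],
})
stream.on('text', (text) => process.stdout.write(text))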

src/vs/platform/void/electron-main/llmMessage/gemini.ts

Lines changed: 6 additions & 16 deletions
@@ -3,7 +3,7 @@
  * Licensed under the Apache License, Version 2.0. See LICENSE.txt for more information.
  *--------------------------------------------------------------------------------------*/
 
-import { Content, GoogleGenerativeAI, GoogleGenerativeAIFetchError } from '@google/generative-ai';
+import { Content, GoogleGenerativeAI } from '@google/generative-ai';
 import { _InternalSendLLMMessageFnType } from '../../common/llmMessageTypes.js';
 
 // Gemini
@@ -16,22 +16,17 @@ export const sendGeminiMsg: _InternalSendLLMMessageFnType = async ({ messages, o
 	const genAI = new GoogleGenerativeAI(thisConfig.apiKey);
 	const model = genAI.getGenerativeModel({ model: modelName });
 
-	// remove system messages that get sent to Gemini
-	// str of all system messages
-	const systemMessage = messages
-		.filter(msg => msg.role === 'system')
-		.map(msg => msg.content)
-		.join('\n');
-
 	// Convert messages to Gemini format
 	const geminiMessages: Content[] = messages
-		.filter(msg => msg.role !== 'system')
 		.map((msg, i) => ({
 			parts: [{ text: msg.content }],
 			role: msg.role === 'assistant' ? 'model' : 'user'
 		}))
 
-	model.generateContentStream({ contents: geminiMessages, systemInstruction: systemMessage, })
+	model.generateContentStream({
+		// systemInstruction: systemMessage,
+		contents: geminiMessages,
+	})
 		.then(async response => {
 			_setAborter(() => response.stream.return(fullText))
 
@@ -43,11 +38,6 @@ export const sendGeminiMsg: _InternalSendLLMMessageFnType = async ({ messages, o
 			onFinalMessage({ fullText });
 		})
 		.catch((error) => {
-			if (error instanceof GoogleGenerativeAIFetchError && error.status === 400) {
-				onError({ message: 'Invalid API key.', fullError: null });
-			}
-			else {
-				onError({ message: error + '', fullError: error });
-			}
+			onError({ message: error + '', fullError: error })
 		})
 }
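
With system messages handled upstream, the Gemini conversion reduces to mapping the assistant role to Gemini's 'model' role and wrapping each message's text in a parts array. A standalone sketch of that mapping (input messages are illustrative; Content is the SDK type used above):

import { Content } from '@google/generative-ai'

const messages = [
	{ role: 'user' as const, content: 'Hi' },
	{ role: 'assistant' as const, content: 'Hello! How can I help?' },
]

// Gemini calls the assistant role 'model' and expects text inside a parts array
const geminiMessages: Content[] = messages.map(msg => ({
	parts: [{ text: msg.content }],
	role: msg.role === 'assistant' ? 'model' : 'user',
}))
// [{ parts: [{ text: 'Hi' }], role: 'user' },
//  { parts: [{ text: 'Hello! How can I help?' }], role: 'model' }]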

src/vs/platform/void/electron-main/llmMessage/openai.ts

Lines changed: 1 addition & 2 deletions
@@ -79,7 +79,6 @@ export const sendOpenAIMsg: _InternalSendLLMMessageFnType = ({ messages, onText,
 		throw new Error(`providerName was invalid: ${providerName}`)
 	}
 
-	openai.models.list()
 	openai.chat.completions
 		.create(options)
 		.then(async response => {
@@ -98,7 +97,7 @@ export const sendOpenAIMsg: _InternalSendLLMMessageFnType = ({ messages, onText,
 				onError({ message: 'Invalid API key.', fullError: error });
 			}
 			else {
-				onError({ message: error, fullError: error });
+				onError({ message: error + '', fullError: error });
 			}
 		})
 
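The one-character change from error to error + '' matters because onError's payload crosses an IPC channel, and an Error object does not survive JSON serialization the way a string does. A quick illustration in plain Node semantics (not Void-specific):

const error = new Error('Rate limit exceeded')

// Error has no enumerable own properties, so it serializes to an empty object:
JSON.stringify({ message: error })      // '{"message":{}}'

// Coercing to a string first keeps the message readable on the receiving side:
JSON.stringify({ message: error + '' }) // '{"message":"Error: Rate limit exceeded"}'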

src/vs/platform/void/electron-main/llmMessage/sendLLMMessage.ts

Lines changed: 41 additions & 5 deletions
@@ -3,7 +3,7 @@
  * Licensed under the Apache License, Version 2.0. See LICENSE.txt for more information.
  *--------------------------------------------------------------------------------------*/
 
-import { LLMMMessageParams, OnText, OnFinalMessage, OnError } from '../../common/llmMessageTypes.js';
+import { LLMMMessageParams, OnText, OnFinalMessage, OnError, LLMMessage, _InternalLLMMessage } from '../../common/llmMessageTypes.js';
 import { IMetricsService } from '../../common/metricsService.js';
 
 import { sendAnthropicMsg } from './anthropic.js';
@@ -12,8 +12,43 @@ import { sendOpenAIMsg } from './openai.js';
 import { sendGeminiMsg } from './gemini.js';
 import { sendGroqMsg } from './groq.js';
 
+
+const cleanMessages = (messages: LLMMessage[]): _InternalLLMMessage[] => {
+	// trim message content (Anthropic and other providers give an error if there is trailing whitespace)
+	messages = messages.map(m => ({ ...m, content: m.content.trim() }))
+
+	// find system messages and concatenate them
+	const systemMessage = messages
+		.filter(msg => msg.role === 'system')
+		.map(msg => msg.content)
+		.join('\n') || undefined;
+
+	// remove all system messages
+	const noSystemMessages = messages
+		.filter(msg => msg.role !== 'system') as _InternalLLMMessage[]
+
+	// add system messages to first message (should be a user message)
+	if (systemMessage && (noSystemMessages.length !== 0)) {
+		const newFirstMessage = {
+			role: noSystemMessages[0].role,
+			content: (''
+				+ '<SYSTEM_MESSAGE>\n'
+				+ systemMessage
+				+ '\n'
+				+ '</SYSTEM_MESSAGE>\n'
+				+ noSystemMessages[0].content
+			)
+		}
+		noSystemMessages.splice(0, 1) // delete first message
+		noSystemMessages.unshift(newFirstMessage) // add new first message
+	}
+
+	return noSystemMessages
+}
+
+
 export const sendLLMMessage = ({
-	messages,
+	messages: messages_,
 	onText: onText_,
 	onFinalMessage: onFinalMessage_,
 	onError: onError_,
@@ -26,9 +61,7 @@ export const sendLLMMessage = ({
 
 	metricsService: IMetricsService
 ) => {
-
-	// trim message content (Anthropic and other providers give an error if there is trailing whitespace)
-	messages = messages.map(m => ({ ...m, content: m.content.trim() }))
+	const messages = cleanMessages(messages_)
 
 	// only captures number of messages and message "shape", no actual code, instructions, prompts, etc
 	const captureChatEvent = (eventId: string, extras?: object) => {
@@ -37,6 +70,9 @@ export const sendLLMMessage = ({
 			modelName,
 			numMessages: messages?.length,
 			messagesShape: messages?.map(msg => ({ role: msg.role, length: msg.content.length })),
+			origNumMessages: messages_?.length,
+			origMessagesShape: messages_?.map(msg => ({ role: msg.role, length: msg.content.length })),
+
 			...extras,
 		})
 	}
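
A worked example of what cleanMessages above does to a conversation (input values are illustrative): content is trimmed, all system messages are concatenated, and the result is wrapped in <SYSTEM_MESSAGE> tags prepended to the first remaining message:

const input: LLMMessage[] = [
	{ role: 'system', content: 'You are a helpful coding assistant. ' },
	{ role: 'user', content: 'Write hello world.' },
]

const out = cleanMessages(input)
// out is a single-element _InternalLLMMessage[]:
// [{
//   role: 'user',
//   content: '<SYSTEM_MESSAGE>\nYou are a helpful coding assistant.\n</SYSTEM_MESSAGE>\nWrite hello world.'
// }]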

0 commit comments
