6 changes: 6 additions & 0 deletions core/src/browser/extensions/engines/AIEngine.ts
@@ -240,6 +240,12 @@ export abstract class AIEngine extends BaseExtension {
EngineManager.instance().register(this)
}

/**
* Gets info for a single model.
* @param modelId The ID of the model to look up.
* @returns The model info, or undefined if the model is not found.
*/
abstract get(modelId: string): Promise<modelInfo | undefined>

/**
* Lists available models
*/
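
For context, the new abstract get() gives callers a direct lookup by model ID, complementing the existing list() that returns every available model. A minimal consumer sketch, assuming an AIEngine instance is already in hand; the describeModel helper and the @janhq/core import path are illustrative assumptions, not part of this PR:

import type { AIEngine, modelInfo } from '@janhq/core' // import path assumed

// Sketch: resolve a single model's metadata through the new abstract get()
// and fall back to a readable message when the provider does not know the ID.
async function describeModel(
  engine: AIEngine,
  modelId: string
): Promise<string> {
  const info: modelInfo | undefined = await engine.get(modelId)
  if (!info) {
    return `${modelId}: not available`
  }
  return `${info.id} (provider ${info.providerId}, ${info.sizeBytes} bytes)`
}
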
102 changes: 73 additions & 29 deletions extensions-web/src/jan-provider-web/provider.ts
@@ -22,7 +22,7 @@ export default class JanProviderWeb extends AIEngine {

override async onLoad() {
console.log('Loading Jan Provider Extension...')

try {
// Initialize authentication and fetch models
await janApiClient.initialize()
@@ -37,20 +37,43 @@ export default class JanProviderWeb extends AIEngine {

override async onUnload() {
console.log('Unloading Jan Provider Extension...')

// Clear all sessions
for (const sessionId of this.activeSessions.keys()) {
await this.unload(sessionId)
}

janProviderStore.reset()
console.log('Jan Provider Extension unloaded')
}

async get(modelId: string): Promise<modelInfo | undefined> {
return janApiClient
.getModels()
.then((list) => list.find((e) => e.id === modelId))
.then((model) =>
model
? {
id: model.id,
name: model.id, // Use ID as name for now
quant_type: undefined,
providerId: this.provider,
port: 443, // HTTPS port for API
sizeBytes: 0, // Size not provided by Jan API
tags: [],
path: undefined, // Remote model, no local path
owned_by: model.owned_by,
object: model.object,
capabilities: ['tools'], // Jan models support tool calls via MCP
}
: undefined
)
}

async list(): Promise<modelInfo[]> {
try {
const janModels = await janApiClient.getModels()

return janModels.map((model) => ({
id: model.id,
name: model.id, // Use ID as name for now
@@ -75,7 +98,7 @@ export default class JanProviderWeb extends AIEngine {
// For Jan API, we don't actually "load" models in the traditional sense
// We just create a session reference for tracking
const sessionId = `jan-${modelId}-${Date.now()}`

const sessionInfo: SessionInfo = {
pid: Date.now(), // Use timestamp as pseudo-PID
port: 443, // HTTPS port
@@ -85,8 +108,10 @@
}

this.activeSessions.set(sessionId, sessionInfo)

console.log(`Jan model session created: ${sessionId} for model ${modelId}`)

console.log(
`Jan model session created: ${sessionId} for model ${modelId}`
)
return sessionInfo
} catch (error) {
console.error(`Failed to load Jan model ${modelId}:`, error)
@@ -97,23 +122,23 @@
async unload(sessionId: string): Promise<UnloadResult> {
try {
const session = this.activeSessions.get(sessionId)

if (!session) {
return {
success: false,
error: `Session ${sessionId} not found`
error: `Session ${sessionId} not found`,
}
}

this.activeSessions.delete(sessionId)
console.log(`Jan model session unloaded: ${sessionId}`)

return { success: true }
} catch (error) {
console.error(`Failed to unload Jan session ${sessionId}:`, error)
return {
success: false,
error: error instanceof Error ? error.message : 'Unknown error'
error: error instanceof Error ? error.message : 'Unknown error',
}
}
}
@@ -136,9 +161,12 @@ export default class JanProviderWeb extends AIEngine {
}

// Convert core chat completion request to Jan API format
const janMessages: JanChatMessage[] = opts.messages.map(msg => ({
const janMessages: JanChatMessage[] = opts.messages.map((msg) => ({
role: msg.role as 'system' | 'user' | 'assistant',
content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
content:
typeof msg.content === 'string'
? msg.content
: JSON.stringify(msg.content),
}))

const janRequest = {
@@ -162,18 +190,18 @@
} else {
// Return single response
const response = await janApiClient.createChatCompletion(janRequest)

// Check if aborted after completion
if (abortController?.signal?.aborted) {
throw new Error('Request was aborted')
}

return {
id: response.id,
object: 'chat.completion' as const,
created: response.created,
model: response.model,
choices: response.choices.map(choice => ({
choices: response.choices.map((choice) => ({
index: choice.index,
message: {
role: choice.message.role,
Expand All @@ -182,7 +210,12 @@ export default class JanProviderWeb extends AIEngine {
reasoning_content: choice.message.reasoning_content,
tool_calls: choice.message.tool_calls,
},
finish_reason: (choice.finish_reason || 'stop') as 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call',
finish_reason: (choice.finish_reason || 'stop') as
| 'stop'
| 'length'
| 'tool_calls'
| 'content_filter'
| 'function_call',
})),
usage: response.usage,
}
@@ -193,7 +226,10 @@
}
}

private async *createStreamingGenerator(janRequest: any, abortController?: AbortController) {
private async *createStreamingGenerator(
janRequest: any,
abortController?: AbortController
) {
let resolve: () => void
let reject: (error: Error) => void
const chunks: any[] = []
@@ -231,7 +267,7 @@
object: chunk.object,
created: chunk.created,
model: chunk.model,
choices: chunk.choices.map(choice => ({
choices: chunk.choices.map((choice) => ({
index: choice.index,
delta: {
role: choice.delta.role,
@@ -261,14 +297,14 @@
if (abortController?.signal?.aborted) {
throw new Error('Request was aborted')
}

while (yieldedIndex < chunks.length) {
yield chunks[yieldedIndex]
yieldedIndex++
}

// Wait a bit before checking again
await new Promise(resolve => setTimeout(resolve, 10))
await new Promise((resolve) => setTimeout(resolve, 10))
}

// Yield any remaining chunks
@@ -291,24 +327,32 @@
}

async delete(modelId: string): Promise<void> {
throw new Error(`Delete operation not supported for remote Jan API model: ${modelId}`)
throw new Error(
`Delete operation not supported for remote Jan API model: ${modelId}`
)
}

async import(modelId: string, _opts: ImportOptions): Promise<void> {
throw new Error(`Import operation not supported for remote Jan API model: ${modelId}`)
throw new Error(
`Import operation not supported for remote Jan API model: ${modelId}`
)
}

async abortImport(modelId: string): Promise<void> {
throw new Error(`Abort import operation not supported for remote Jan API model: ${modelId}`)
throw new Error(
`Abort import operation not supported for remote Jan API model: ${modelId}`
)
}

async getLoadedModels(): Promise<string[]> {
return Array.from(this.activeSessions.values()).map(session => session.model_id)
return Array.from(this.activeSessions.values()).map(
(session) => session.model_id
)
}

async isToolSupported(modelId: string): Promise<boolean> {
// Jan models support tool calls via MCP
console.log(`Checking tool support for Jan model ${modelId}: supported`);
return true;
console.log(`Checking tool support for Jan model ${modelId}: supported`)
return true
}
}
}
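
The streaming path above bridges a callback-driven client to an async generator by buffering chunks and polling every 10 ms. A stripped-down sketch of that pattern, with generic and hypothetical names (Producer, pollStream); the full janApiClient callback signature is only partially visible in this hunk:

// Generic version of the buffer-and-poll pattern used by
// createStreamingGenerator: a callback-based producer pushes chunks into a
// queue and an async generator drains it, sleeping briefly between checks.
type Producer<T> = (
  onChunk: (chunk: T) => void,
  onDone: () => void,
  onError: (err: Error) => void
) => void

async function* pollStream<T>(
  start: Producer<T>,
  signal?: AbortSignal
): AsyncGenerator<T> {
  const chunks: T[] = []
  let done = false
  let error: Error | undefined

  start(
    (chunk) => chunks.push(chunk),
    () => {
      done = true
    },
    (err) => {
      error = err
      done = true
    }
  )

  let yielded = 0
  while (!done || yielded < chunks.length) {
    if (signal?.aborted) throw new Error('Request was aborted')
    while (yielded < chunks.length) {
      yield chunks[yielded++]
    }
    if (!done) {
      // Mirror the 10 ms wait between polls in the extension code.
      await new Promise((resolve) => setTimeout(resolve, 10))
    }
  }
  if (error) throw error
}

A push-based bridge (resolving a pending promise per chunk) would avoid the fixed poll interval, but buffering keeps the generator logic easy to follow, which appears to be the trade-off taken here.
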
32 changes: 30 additions & 2 deletions extensions/llamacpp-extension/src/index.ts
@@ -922,6 +922,30 @@ export default class llamacpp_extension extends AIEngine {
return hash
}

override async get(modelId: string): Promise<modelInfo | undefined> {
const modelPath = await joinPath([
await this.getProviderPath(),
'models',
modelId,
])
const path = await joinPath([modelPath, 'model.yml'])

if (!(await fs.existsSync(path))) return undefined

const modelConfig = await invoke<ModelConfig>('read_yaml', {
path,
})

return {
id: modelId,
name: modelConfig.name ?? modelId,
quant_type: undefined, // TODO: parse quantization type from model.yml or model.gguf
providerId: this.provider,
port: 0, // port is not known until the model is loaded
sizeBytes: modelConfig.size_bytes ?? 0,
} as modelInfo
}

// Implement the required LocalProvider interface methods
override async list(): Promise<modelInfo[]> {
const modelsDir = await joinPath([await this.getProviderPath(), 'models'])
@@ -1085,7 +1109,10 @@
const archiveName = await basename(path)
logger.info(`Installing backend from path: ${path}`)

if (!(await fs.existsSync(path)) || (!path.endsWith('tar.gz') && !path.endsWith('zip'))) {
if (
!(await fs.existsSync(path)) ||
(!path.endsWith('tar.gz') && !path.endsWith('zip'))
) {
logger.error(`Invalid path or file ${path}`)
throw new Error(`Invalid path or file ${path}`)
}
@@ -2601,7 +2628,8 @@
metadata: Record<string, string>
): Promise<number> {
// Extract vision parameters from metadata
const projectionDim = Math.floor(Number(metadata['clip.vision.projection_dim']) / 10) || 256
const projectionDim =
Math.floor(Number(metadata['clip.vision.projection_dim']) / 10) || 256

// Count images in messages
let imageCount = 0
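
For reference, the new llamacpp get() only reads two fields from the parsed model.yml. The full ModelConfig type lives elsewhere in the extension and is not part of this diff; a minimal sketch covering just the fields used here (the interface name and optionality are assumptions):

// Hypothetical minimal shape of the parsed model.yml; the real ModelConfig
// in the llamacpp extension likely carries more fields than the two read here.
interface ModelConfigSketch {
  name?: string // display name; get() falls back to the model ID when absent
  size_bytes?: number // on-disk size in bytes; get() falls back to 0 when absent
}
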