@@ -20,6 +20,9 @@ import {
   chatCompletionRequest,
   events,
 } from '@janhq/core'
+
+import { error, info, warn } from '@tauri-apps/plugin-log'
+
 import {
   listSupportedBackends,
   downloadBackend,
@@ -89,6 +92,24 @@ interface EmbeddingData {
   index: number
   object: string
 }
+/**
+ * Override the default app.log function to use Jan's logging system.
+ * @param args
+ */
+const logger = {
+  info: function (...args: any[]) {
+    console.log(...args)
+    info(args.map((arg) => `${arg}`).join(` `))
+  },
+  warn: function (...args: any[]) {
+    console.warn(...args)
+    warn(args.map((arg) => `${arg}`).join(` `))
+  },
+  error: function (...args: any[]) {
+    console.error(...args)
+    error(args.map((arg) => `${arg}`).join(` `))
+  },
+}
 
 /**
  * A class that implements the InferenceExtension interface from the @janhq/core package.
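For orientation, a minimal sketch of how the wrapper above behaves once it replaces direct console calls (the message text and the { port: 3544 } object are made-up values for illustration): the original arguments reach the devtools console unchanged, while @tauri-apps/plugin-log receives a single string in which each argument has been template-interpolated, so plain objects arrive in Tauri's log targets (typically Jan's app log file) as "[object Object]".

logger.info('llama-server started', { port: 3544 })
// -> console.log('llama-server started', { port: 3544 })   // devtools console, arguments unchanged
// -> info('llama-server started [object Object]')           // single interpolated string sent to plugin-log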
@@ -151,7 +172,7 @@ export default class llamacpp_extension extends AIEngine {
 
   async configureBackends(): Promise<void> {
     if (this.isConfiguringBackends) {
-      console.log(
+      logger.info(
         'configureBackends already in progress, skipping duplicate call'
       )
       return
@@ -165,16 +186,18 @@ export default class llamacpp_extension extends AIEngine {
     try {
       version_backends = await listSupportedBackends()
       if (version_backends.length === 0) {
-        console.warn(
+        throw new Error(
           'No supported backend binaries found for this system. Backend selection and auto-update will be unavailable.'
         )
-        return
       } else {
         version_backends.sort((a, b) => b.version.localeCompare(a.version))
       }
     } catch (error) {
-      console.error('Failed to fetch supported backends:', error)
-      return
+      throw new Error(
+        `Failed to fetch supported backends: ${
+          error instanceof Error ? error.message : error
+        }`
+      )
     }
 
     let bestAvailableBackendString =
@@ -208,11 +231,11 @@ export default class llamacpp_extension extends AIEngine {
         : bestAvailableBackendString || originalDefaultBackendValue
 
       backendSetting.controllerProps.value = initialUiDefault
-      console.log(
+      logger.info(
         `Initial UI default for version_backend set to: ${initialUiDefault}`
       )
     } else {
-      console.error(
+      logger.error(
         'Critical setting "version_backend" definition not found in SETTINGS.'
       )
       throw new Error('Critical setting "version_backend" not found.')
@@ -236,7 +259,7 @@ export default class llamacpp_extension extends AIEngine {
     if (!backendWasDownloaded) {
       await this.ensureFinalBackendInstallation(effectiveBackendString)
     } else {
-      console.log(
+      logger.info(
         'Skipping final installation check - backend was just downloaded during auto-update'
       )
     }
@@ -291,7 +314,7 @@ export default class llamacpp_extension extends AIEngine {
 
       if (matchingBackends.length > 0) {
         foundBestBackend = matchingBackends[0]
-        console.log(
+        logger.info(
           `Determined best available backend: ${foundBestBackend.version}/${foundBestBackend.backend} (Category: "${priorityCategory}")`
         )
         break
@@ -309,12 +332,12 @@ export default class llamacpp_extension extends AIEngine {
   private async handleAutoUpdate(
     bestAvailableBackendString: string
   ): Promise<{ wasUpdated: boolean; newBackend: string }> {
-    console.log(
+    logger.info(
       `Auto-update engine is enabled. Current backend: ${this.config.version_backend}. Best available: ${bestAvailableBackendString}`
     )
 
     if (!bestAvailableBackendString) {
-      console.warn(
+      logger.warn(
         'Auto-update enabled, but no best available backend determined'
       )
       return { wasUpdated: false, newBackend: this.config.version_backend }
@@ -327,21 +350,21 @@ export default class llamacpp_extension extends AIEngine {
 
     // Check if update is needed
     if (currentBackend === bestBackend && currentVersion === bestVersion) {
-      console.log('Auto-update: Already using the best available backend')
+      logger.info('Auto-update: Already using the best available backend')
       return { wasUpdated: false, newBackend: this.config.version_backend }
     }
 
     // Perform update
     try {
-      console.log(
+      logger.info(
         `Auto-updating from ${this.config.version_backend} to ${bestAvailableBackendString}`
       )
 
       // Download new backend first
       await this.ensureBackendReady(bestBackend, bestVersion)
 
       // Add a small delay on Windows to ensure file operations complete
-      if (process.platform === 'win32') {
+      if (IS_WINDOWS) {
         await new Promise((resolve) => setTimeout(resolve, 1000))
       }
 
@@ -359,19 +382,19 @@ export default class llamacpp_extension extends AIEngine {
         })
       )
 
-      console.log(
+      logger.info(
         `Successfully updated to backend: ${bestAvailableBackendString}`
       )
 
       // Clean up old backends (with additional delay on Windows)
-      if (process.platform === 'win32') {
+      if (IS_WINDOWS) {
         await new Promise((resolve) => setTimeout(resolve, 500))
       }
       await this.removeOldBackends(bestVersion, bestBackend)
 
       return { wasUpdated: true, newBackend: bestAvailableBackendString }
     } catch (error) {
-      console.error('Auto-update failed:', error)
+      logger.error('Auto-update failed:', error)
       return { wasUpdated: false, newBackend: this.config.version_backend }
     }
   }
@@ -413,23 +436,23 @@ export default class llamacpp_extension extends AIEngine {
             const toRemove = await joinPath([versionPath, backendTypeDir])
             try {
               await fs.rm(toRemove)
-              console.log(`Removed old backend: ${toRemove}`)
+              logger.info(`Removed old backend: ${toRemove}`)
             } catch (e) {
-              console.warn(`Failed to remove old backend: ${toRemove}`, e)
+              logger.warn(`Failed to remove old backend: ${toRemove}`, e)
             }
           }
         }
       }
     } catch (error) {
-      console.error('Error during old backend cleanup:', error)
+      logger.error('Error during old backend cleanup:', error)
     }
   }
 
   private async ensureFinalBackendInstallation(
     backendString: string
   ): Promise<void> {
     if (!backendString) {
-      console.warn('No backend specified for final installation check')
+      logger.warn('No backend specified for final installation check')
       return
     }
 
@@ -438,7 +461,7 @@ export default class llamacpp_extension extends AIEngine {
       .map((part) => part?.trim())
 
     if (!selectedVersion || !selectedBackend) {
-      console.warn(`Invalid backend format: ${backendString}`)
+      logger.warn(`Invalid backend format: ${backendString}`)
       return
     }
 
@@ -448,16 +471,16 @@ export default class llamacpp_extension extends AIEngine {
         selectedVersion
       )
       if (!isInstalled) {
-        console.log(`Final check: Installing backend ${backendString}`)
+        logger.info(`Final check: Installing backend ${backendString}`)
         await this.ensureBackendReady(selectedBackend, selectedVersion)
-        console.log(`Successfully installed backend: ${backendString}`)
+        logger.info(`Successfully installed backend: ${backendString}`)
       } else {
-        console.log(
+        logger.info(
           `Final check: Backend ${backendString} is already installed`
         )
       }
     } catch (error) {
-      console.error(
+      logger.error(
         `Failed to ensure backend ${backendString} installation:`,
         error
       )
@@ -481,7 +504,7 @@ export default class llamacpp_extension extends AIEngine {
       try {
         await this.unload(sInfo.model_id)
       } catch (error) {
-        console.error(`Failed to unload model ${sInfo.model_id}:`, error)
+        logger.error(`Failed to unload model ${sInfo.model_id}:`, error)
       }
     }
 
@@ -651,7 +674,7 @@ export default class llamacpp_extension extends AIEngine {
         : 'onFileDownloadStopped'
       events.emit(eventName, { modelId, downloadType: 'Model' })
     } catch (error) {
-      console.error('Error downloading model:', modelId, opts, error)
+      logger.error('Error downloading model:', modelId, opts, error)
       events.emit('onFileDownloadError', { modelId, downloadType: 'Model' })
       throw error
     }
@@ -726,16 +749,16 @@ export default class llamacpp_extension extends AIEngine {
         if (res.status === 503) {
           const body = await res.json()
           const msg = body?.error?.message ?? 'Model loading'
-          console.log(`waiting for model load... (${msg})`)
+          logger.info(`waiting for model load... (${msg})`)
         } else if (res.ok) {
           const body = await res.json()
           if (body.status === 'ok') {
             return
           } else {
-            console.warn('Unexpected OK response from /health:', body)
+            logger.warn('Unexpected OK response from /health:', body)
           }
         } else {
-          console.warn(`Unexpected status ${res.status} from /health`)
+          logger.warn(`Unexpected status ${res.status} from /health`)
         }
       } catch (e) {
         await this.unload(sInfo.model_id)
@@ -860,7 +883,7 @@ export default class llamacpp_extension extends AIEngine {
         args.push('--rope-freq-scale', String(cfg.rope_freq_scale))
       }
 
-      console.log('Calling Tauri command llama_load with args:', args)
+      logger.info('Calling Tauri command llama_load with args:', args)
       const backendPath = await getBackendExePath(backend, version)
       const libraryPath = await joinPath([await this.getProviderPath(), 'lib'])
 
@@ -878,7 +901,7 @@ export default class llamacpp_extension extends AIEngine {
 
       return sInfo
     } catch (error) {
-      console.error('Error loading llama-server:\n', error)
+      logger.error('Error loading llama-server:\n', error)
       throw new Error(`Failed to load llama-server: ${error}`)
     }
   }
@@ -898,14 +921,14 @@ export default class llamacpp_extension extends AIEngine {
       // If successful, remove from active sessions
       if (result.success) {
         this.activeSessions.delete(pid)
-        console.log(`Successfully unloaded model with PID ${pid}`)
+        logger.info(`Successfully unloaded model with PID ${pid}`)
       } else {
-        console.warn(`Failed to unload model: ${result.error}`)
+        logger.warn(`Failed to unload model: ${result.error}`)
       }
 
       return result
     } catch (error) {
-      console.error('Error in unload command:', error)
+      logger.error('Error in unload command:', error)
       return {
         success: false,
         error: `Failed to unload model: ${error}`,
@@ -935,22 +958,22 @@ export default class llamacpp_extension extends AIEngine {
 
     // Check if download is already in progress
     if (this.pendingDownloads.has(backendKey)) {
-      console.log(
+      logger.info(
         `Backend ${backendKey} download already in progress, waiting...`
       )
       await this.pendingDownloads.get(backendKey)
       return
     }
 
     // Start new download
-    console.log(`Backend ${backendKey} not installed, downloading...`)
+    logger.info(`Backend ${backendKey} not installed, downloading...`)
     const downloadPromise = downloadBackend(backend, version).finally(() => {
       this.pendingDownloads.delete(backendKey)
     })
 
     this.pendingDownloads.set(backendKey, downloadPromise)
     await downloadPromise
-    console.log(`Backend ${backendKey} download completed`)
+    logger.info(`Backend ${backendKey} download completed`)
   }
 
   private async *handleStreamingResponse(
@@ -1017,7 +1040,7 @@ export default class llamacpp_extension extends AIEngine {
           const chunk = data as chatCompletionChunk
           yield chunk
         } catch (e) {
-          console.error('Error parsing JSON from stream or server error:', e)
+          logger.error('Error parsing JSON from stream or server error:', e)
           // re‑throw so the async iterator terminates with an error
           throw e
         }