@@ -1082,17 +1082,18 @@ export default class llamacpp_extension extends AIEngine {
 
       // If we reach here, download completed successfully (including validation)
       // The downloadFiles function only returns successfully if all files downloaded AND validated
-      events.emit(DownloadEvent.onFileDownloadAndVerificationSuccess, {
-        modelId,
-        downloadType: 'Model'
+      events.emit(DownloadEvent.onFileDownloadAndVerificationSuccess, {
+        modelId,
+        downloadType: 'Model',
       })
     } catch (error) {
       logger.error('Error downloading model:', modelId, opts, error)
       const errorMessage =
         error instanceof Error ? error.message : String(error)
 
       // Check if this is a cancellation
-      const isCancellationError = errorMessage.includes('Download cancelled') ||
+      const isCancellationError =
+        errorMessage.includes('Download cancelled') ||
         errorMessage.includes('Validation cancelled') ||
         errorMessage.includes('Hash computation cancelled') ||
         errorMessage.includes('cancelled') ||
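
Not part of the commit: the multi-branch cancellation test above could also be folded into a small predicate. A minimal sketch of that idea, reusing the same message substrings (the helper name and shape are hypothetical):

// Hypothetical helper, not from this commit: treats any message that
// contains a known cancellation phrase as a user-initiated cancellation.
const CANCELLATION_MARKERS = [
  'Download cancelled',
  'Validation cancelled',
  'Hash computation cancelled',
  'cancelled', // generic marker; subsumes the specific phrases above
]

function isCancellationMessage(message: string): boolean {
  return CANCELLATION_MARKERS.some((marker) => message.includes(marker))
}

Note that the bare 'cancelled' check already matches the three specific phrases; keeping all four mirrors the original condition rather than minimizing it.
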
@@ -1372,7 +1373,7 @@ export default class llamacpp_extension extends AIEngine {
     envs['LLAMA_API_KEY'] = api_key
 
     // set user envs
-    this.parseEnvFromString(envs, this.llamacpp_env)
+    if (this.llamacpp_env) this.parseEnvFromString(envs, this.llamacpp_env)
 
     // model option is required
     // NOTE: model_path and mmproj_path can be either relative to Jan's data folder or absolute path
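
parseEnvFromString itself is not shown in this diff; judging from its call sites it merges user-supplied environment definitions into envs, and the new guard skips the call when llamacpp_env is unset. A rough sketch under that assumption (the KEY=VALUE parsing rules here are guesses, not the extension's actual behavior):

// Hypothetical sketch: merge 'KEY=VALUE' entries, separated by newlines
// or semicolons, into the target env record.
function parseEnvFromString(envs: Record<string, string>, raw: string): void {
  for (const entry of raw.split(/[\n;]/)) {
    const trimmed = entry.trim()
    if (!trimmed) continue
    const eq = trimmed.indexOf('=')
    if (eq <= 0) continue // skip malformed entries with no key
    envs[trimmed.slice(0, eq).trim()] = trimmed.slice(eq + 1).trim()
  }
}

Under that reading, the added if guard avoids passing an undefined llamacpp_env into a string parser when the user never configured extra environment variables.
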
@@ -1751,7 +1752,7 @@ export default class llamacpp_extension extends AIEngine {
     }
     // set envs
     const envs: Record<string, string> = {}
-    this.parseEnvFromString(envs, this.llamacpp_env)
+    if (this.llamacpp_env) this.parseEnvFromString(envs, this.llamacpp_env)
 
     // Ensure backend is downloaded and ready before proceeding
     await this.ensureBackendReady(backend, version)
@@ -1767,7 +1768,7 @@ export default class llamacpp_extension extends AIEngine {
       return dList
     } catch (error) {
       logger.error('Failed to query devices:\n', error)
-      throw new Error("Failed to load llamacpp backend")
+      throw new Error('Failed to load llamacpp backend')
     }
   }
 
@@ -1876,7 +1877,7 @@ export default class llamacpp_extension extends AIEngine {
         logger.info(
           `Using explicit key_length: ${keyLen}, value_length: ${valLen}`
         )
-        headDim = (keyLen + valLen)
+        headDim = keyLen + valLen
       } else {
         // Fall back to embedding_length estimation
         const embeddingLen = Number(meta[`${arch}.embedding_length`])
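
For context around the hunk above: when the GGUF metadata carries explicit key/value lengths, the code sums them into headDim; otherwise it falls back to an estimate based on `${arch}.embedding_length`. A sketch of that selection logic (the metadata key names for key/value length and the fallback formula are assumptions, not taken from this file):

// Hypothetical sketch of the metadata-driven head-dimension estimate.
function estimateHeadDim(meta: Record<string, unknown>, arch: string): number {
  const keyLen = Number(meta[`${arch}.attention.key_length`]) // assumed key name
  const valLen = Number(meta[`${arch}.attention.value_length`]) // assumed key name
  if (Number.isFinite(keyLen) && Number.isFinite(valLen) && keyLen > 0) {
    return keyLen + valLen
  }
  // Assumed fallback: treat K and V as each spanning the embedding width.
  const embeddingLen = Number(meta[`${arch}.embedding_length`])
  return 2 * embeddingLen
}
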