
Commit f0f9af3

Merge branch 'main' into bug/enumeration-chat
2 parents e36692f + d42b311 commit f0f9af3

File tree

29 files changed: +136 −86 lines changed

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@

# ADR 008: Extensible-Jan-with-Docker

## Changelog

- 2023-10-24: Initial draft

## Authors

- @vuonghoainam

## Status

Proposed

## Context

What is the issue that we're seeing that is motivating this decision or change?

- The AI world is moving fast, with many runtimes and prebaked environments. We and our builders cannot cover everything ourselves; instead, Jan should adopt these environments and facilitate them as much as possible.
- For `Run your own AI`: builders can build apps on Jan (Node.js environment) and connect to an external endpoint that serves the actual AI.
  - e.g. 1: Nitro acting as a proxy to `triton-inference-server` running in a Docker container controlled by the Jan app.
  - e.g. 2: Original models come in many formats (PyTorch, PaddlePaddle). To run one locally in its most optimized form, there must be a step that transpiles the model ([Ollama import model](https://github.com/jmorganca/ollama/blob/main/docs/import.md), TensorRT). Jan could prebuild these and let users pull them, but that comes later.
- For `Build your own AI`: users can fine-tune models locally (Jan can also help with remote fine-tuning, but later).

## Decision

What is the change that we're proposing and/or doing?

- Add a Docker client as a core module: [dockerode](https://github.com/apocas/dockerode)
- Two example AI apps (adr-002) to demonstrate it and put it to real use!

## Consequences

What becomes easier or more difficult to do because of this change?

- We can extend limitlessly :D

## Alternatives

## Reference
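
To make the Decision section concrete, here is a minimal sketch of what driving a container through [dockerode](https://github.com/apocas/dockerode) could look like. The image, port, and helper name are illustrative assumptions, not part of the ADR:

```ts
import Docker from "dockerode";

// Connects to the local Docker daemon via its default socket.
const docker = new Docker();

// Hypothetical helper: pull and start an inference server that Nitro
// could then proxy to, per e.g. 1 in the Context section above.
async function startInferenceContainer(
  image = "nvcr.io/nvidia/tritonserver:23.09-py3"
) {
  // docker.pull streams progress; wait for it to finish before creating.
  await new Promise<void>((resolve, reject) => {
    docker.pull(image, (err: Error | null, stream: NodeJS.ReadableStream) => {
      if (err) return reject(err);
      docker.modem.followProgress(stream, (doneErr: Error | null) =>
        doneErr ? reject(doneErr) : resolve()
      );
    });
  });

  const container = await docker.createContainer({
    Image: image,
    ExposedPorts: { "8000/tcp": {} },
    HostConfig: { PortBindings: { "8000/tcp": [{ HostPort: "8000" }] } },
  });
  await container.start();
  return container; // caller can later container.stop() / container.remove()
}
```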

electron/tests/my-models.e2e.spec.ts

Lines changed: 1 addition & 6 deletions
```diff
@@ -36,11 +36,6 @@ test.afterAll(async () => {
 
 test("shows my models", async () => {
   await page.getByTestId("My Models").first().click();
-  const header = await page
-    .getByRole("heading")
-    .filter({ hasText: "My Models" })
-    .first()
-    .isVisible();
-  expect(header).toBe(false);
+  await page.getByTestId("testid-mymodels-header").isVisible();
   // More test cases here...
 });
```
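
A note on the replacement assertion: Playwright's `isVisible()` resolves to a boolean and does not fail the test on its own. A hedged sketch of how the check could be turned into a real assertion (test scaffolding simplified relative to the spec file):

```ts
import { test, expect } from "@playwright/test";

test("shows my models", async ({ page }) => {
  await page.getByTestId("My Models").first().click();
  // toBeVisible() auto-waits and fails the test if the header never appears,
  // unlike a bare isVisible() call whose result is discarded.
  await expect(page.getByTestId("testid-mymodels-header")).toBeVisible();
});
```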

electron/tsconfig.json

Lines changed: 2 additions & 1 deletion
```diff
@@ -10,9 +10,10 @@
     "noEmitOnError": true,
     "baseUrl": ".",
     "allowJs": true,
+    "skipLibCheck": true,
     "paths": { "*": ["node_modules/*"] },
     "typeRoots": ["node_modules/@types"]
   },
   "include": ["./**/*.ts"],
-  "exclude": ["core", "build", "dist", "tests"]
+  "exclude": ["core", "build", "dist", "tests", "node_modules"]
 }
```

plugins/inference-plugin/index.ts

Lines changed: 3 additions & 2 deletions
```diff
@@ -67,7 +67,7 @@ function requestInference(
       }
       subscriber.complete();
     })
-      .catch(subscriber.error);
+      .catch((err) => subscriber.error(err));
   });
 }
 
@@ -143,7 +143,8 @@ async function handleMessageRequest(data: NewMessageRequest) {
     },
     error: async (err) => {
       message.message =
-        message.message.trim() + "\n" + "Error occurred: " + err;
+        message.message.trim() + "\n" + "Error occurred: " + err.message;
+      events.emit(EventName.OnMessageResponseUpdate, message);
       // TODO: Common collections should be able to access via core functions instead of store
       await store.updateOne("messages", message._id, message);
     },
```
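
The `.catch(subscriber.error)` change above fixes a classic unbound-method pitfall: passing the method by reference detaches it from `subscriber`, so `this` is undefined inside the handler. A small sketch of the failure mode (class and names hypothetical):

```ts
class Logger {
  prefix = "[err]";
  log(msg: string) {
    // `this` is undefined here when `log` is passed around unbound
    console.log(this.prefix, msg);
  }
}

const logger = new Logger();
Promise.reject("boom").catch((e) => logger.log(e)); // ok: wrapper preserves `this`
Promise.reject("boom").catch(logger.log); // TypeError: reading `prefix` of undefined
```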

plugins/inference-plugin/module.ts

Lines changed: 33 additions & 37 deletions
```diff
@@ -14,53 +14,49 @@ const initModel = (fileName) => {
     if (!fileName) {
       reject("Model not found, please download again.");
     }
-    if (subprocess) {
-      console.error("A subprocess is already running. Attempt to kill then reinit.");
-      killSubprocess();
-    }
     resolve(fileName);
   })
-    // Kill port process if it is already in use
-    .then((fileName) =>
-      tcpPortUsed
-        .waitUntilFree(PORT, 200, 3000)
-        .catch(() => killPortProcess(PORT))
-        .then(() => fileName)
-    )
     // Spawn Nitro subprocess to load model
     .then(() => {
-      let binaryFolder = path.join(__dirname, "nitro"); // Current directory by default
-      let binaryName;
+      return tcpPortUsed.check(PORT, "127.0.0.1").then((inUse) => {
+        if (!inUse) {
+          let binaryFolder = path.join(__dirname, "nitro"); // Current directory by default
+          let binaryName;
 
-      if (process.platform === "win32") {
-        // Todo: Need to check for CUDA support to switch between CUDA and non-CUDA binaries
-        binaryName = "nitro_start_windows.bat";
-      } else if (process.platform === "darwin") {
-        // Mac OS platform
-        binaryName = process.arch === "arm64" ? "nitro_mac_arm64" : "nitro_mac_intel";
-      } else {
-        // Linux
-        // Todo: Need to check for CUDA support to switch between CUDA and non-CUDA binaries
-        binaryName = "nitro_start_linux.sh"; // For other platforms
-      }
+          if (process.platform === "win32") {
+            // Todo: Need to check for CUDA support to switch between CUDA and non-CUDA binaries
+            binaryName = "nitro_start_windows.bat";
+          } else if (process.platform === "darwin") {
+            // Mac OS platform
+            binaryName =
+              process.arch === "arm64"
+                ? "nitro_mac_arm64"
+                : "nitro_mac_intel";
+          } else {
+            // Linux
+            // Todo: Need to check for CUDA support to switch between CUDA and non-CUDA binaries
+            binaryName = "nitro_start_linux.sh"; // For other platforms
+          }
 
-      const binaryPath = path.join(binaryFolder, binaryName);
+          const binaryPath = path.join(binaryFolder, binaryName);
 
-      // Execute the binary
-      subprocess = spawn(binaryPath, { cwd: binaryFolder });
+          // Execute the binary
+          subprocess = spawn(binaryPath, { cwd: binaryFolder });
 
-      // Handle subprocess output
-      subprocess.stdout.on("data", (data) => {
-        console.log(`stdout: ${data}`);
-      });
+          // Handle subprocess output
+          subprocess.stdout.on("data", (data) => {
+            console.log(`stdout: ${data}`);
+          });
 
-      subprocess.stderr.on("data", (data) => {
-        console.error(`stderr: ${data}`);
-      });
+          subprocess.stderr.on("data", (data) => {
+            console.error(`stderr: ${data}`);
+          });
 
-      subprocess.on("close", (code) => {
-        console.log(`child process exited with code ${code}`);
-        subprocess = null;
+          subprocess.on("close", (code) => {
+            console.log(`child process exited with code ${code}`);
+            subprocess = null;
+          });
+        }
       });
     })
     .then(() => tcpPortUsed.waitUntilUsed(PORT, 300, 30000))
```
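
The refactor above replaces the old "kill whatever holds the port" step with "only spawn Nitro when the port is free, then wait for it to come up". A condensed sketch of that flow using the same tcp-port-used calls (PORT value and paths hypothetical):

```ts
import tcpPortUsed from "tcp-port-used";
import { spawn, ChildProcess } from "child_process";

const PORT = 3928; // hypothetical; the real constant lives in module.ts
let subprocess: ChildProcess | null = null;

async function ensureNitro(binaryPath: string, binaryFolder: string) {
  // check(port, host) resolves to true when something already listens there,
  // in which case we assume a Nitro instance is running and reuse it.
  const inUse = await tcpPortUsed.check(PORT, "127.0.0.1");
  if (!inUse) {
    subprocess = spawn(binaryPath, { cwd: binaryFolder });
  }
  // Either way, block until the server accepts connections:
  // poll every 300 ms, give up after 30 s (the values from the diff).
  await tcpPortUsed.waitUntilUsed(PORT, 300, 30000);
}
```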

plugins/inference-plugin/package.json

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-plugin",
-  "version": "1.0.13",
+  "version": "1.0.14",
   "description": "Inference Plugin, powered by @janhq/nitro, bring a high-performance Llama model inference in pure C++.",
   "icon": "https://raw.githubusercontent.com/tailwindlabs/heroicons/88e98b0c2b458553fbadccddc2d2f878edc0387b/src/20/solid/command-line.svg",
   "main": "dist/index.js",
```

web/app/_components/ConversationalList/index.tsx

Lines changed: 1 addition & 1 deletion
```diff
@@ -14,7 +14,7 @@ const ConversationalList: React.FC<Props> = ({ models }) => (
       </span>
     </div>
     <div className="scroll mt-2 flex w-full gap-2 overflow-hidden overflow-x-scroll pl-6">
-      {models.map((item) => (
+      {models?.map((item) => (
         <ConversationalCard key={item._id} model={item} />
       ))}
     </div>
```

web/app/_components/CreateBotContainer/index.tsx

Lines changed: 1 addition & 2 deletions
```diff
@@ -13,15 +13,14 @@ import DraggableProgressBar from '../DraggableProgressBar'
 import { useSetAtom } from 'jotai'
 import { activeBotAtom } from '@helpers/atoms/Bot.atom'
 import {
-  leftSideBarExpandStateAtom,
   rightSideBarExpandStateAtom,
 } from '@helpers/atoms/SideBarExpand.atom'
 import {
   MainViewState,
   setMainViewStateAtom,
 } from '@helpers/atoms/MainView.atom'
-import { executeSerial } from '../../../../electron/core/plugin-manager/execution/extension-manager'
 import { DataService } from '@janhq/core'
+import { executeSerial } from '@services/pluginService'
 
 const CreateBotContainer: React.FC = () => {
   const { downloadedModels } = useGetDownloadedModels()
```

web/app/_components/HistoryItem/index.tsx

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,7 +1,6 @@
 import React from 'react'
 import { useAtomValue, useSetAtom } from 'jotai'
 import { ModelManagementService } from '@janhq/core'
-import { executeSerial } from '../../../../electron/core/plugin-manager/execution/extension-manager'
 import {
   getActiveConvoIdAtom,
   setActiveConvoIdAtom,
@@ -13,6 +12,7 @@ import {
 } from '@helpers/atoms/MainView.atom'
 import { displayDate } from '@utils/datetime'
 import { twMerge } from 'tailwind-merge'
+import { executeSerial } from '@services/pluginService'
 
 type Props = {
   conversation: Conversation
```
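
Both import moves here (and in CreateBotContainer above) swap a brittle `../../../../electron/...` relative path for the `@services` alias. That only resolves if the web app maps the alias in its TypeScript/bundler config; a hypothetical sketch of the kind of mapping assumed:

```jsonc
// web/tsconfig.json (hypothetical excerpt; the real mapping may differ)
{
  "compilerOptions": {
    "baseUrl": ".",
    "paths": {
      "@services/*": ["services/*"],
      "@helpers/*": ["helpers/*"]
    }
  }
}
```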

web/app/_components/LeftRibbonNav/index.tsx

Lines changed: 1 addition & 1 deletion
```diff
@@ -59,7 +59,7 @@ const LeftRibbonNav: React.FC = () => {
 
   const onBotListClick = async () => {
     const bots = await getAllBots()
-    if (bots.length === 0) {
+    if (bots?.length === 0) {
       alert('You have no bot')
       return
     }
```
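
These `?.` guards (here and in ConversationalList above) keep the handler from throwing when the awaited value is undefined. One subtlety worth flagging: `bots?.length === 0` evaluates to false when `bots` is undefined, so the "no bot" alert is silently skipped in that case. A small sketch of the semantics:

```ts
// Sketch: how `bots?.length === 0` behaves for undefined vs. empty input.
function warnIfEmpty(bots?: string[]) {
  console.log(bots?.length); // undefined when bots is undefined, no TypeError

  if (bots?.length === 0) {
    // fires only for a real empty array; undefined === 0 is false
    console.log("You have no bot");
  }

  if (!bots?.length) {
    // fires for both undefined and empty: a broader guard
    console.log("You have no bot (covers undefined too)");
  }
}

warnIfEmpty([]);        // both branches fire
warnIfEmpty(undefined); // only the second branch fires
```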
