diff --git a/deploy-local.sh b/deploy-local.sh new file mode 100644 index 00000000..e5a1a58f --- /dev/null +++ b/deploy-local.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT="$(cd "$(dirname "$0")" && pwd)" +cd "$ROOT" + +echo "==> Installing JS dependencies..." +pnpm install --frozen-lockfile 2>/dev/null || pnpm install + +echo "==> Downloading sona/ffmpeg binaries..." +uv run scripts/pre_build.py + +echo "==> Building Vibe (release)..." +cd desktop +pnpm tauri build 2>&1 | tail -20 + +# '|| true': a missing bundle dir makes find exit non-zero, which would trip set -e before the friendly error below +RPM=$(find src-tauri/target/release/bundle/rpm -name '*.rpm' -print -quit 2>/dev/null || true) + +if [ -z "$RPM" ]; then + echo "ERROR: No RPM found in src-tauri/target/release/bundle/rpm/" + exit 1 +fi + +echo "==> Installing $RPM..." +sudo rpm -U --force "$RPM" + +echo "==> Done! Vibe has been updated." +echo " Restart the app to use the new version." diff --git a/desktop/src/components/Params.tsx b/desktop/src/components/Params.tsx index 8231d3a5..fd9bc145 100644 --- a/desktop/src/components/Params.tsx +++ b/desktop/src/components/Params.tsx @@ -131,6 +131,7 @@ export default function ModelOptions({ options, setOptions }: ParamsProps) { openaiBaseUrl: llmConfig.openaiBaseUrl, openaiApiKey: llmConfig.openaiApiKey, enabled: llmConfig?.enabled ?? false, + prompt: llmConfig.prompt, }) }}> diff --git a/desktop/src/lib/llm/ollama.ts b/desktop/src/lib/llm/ollama.ts index 740e30fd..0445ccce 100644 --- a/desktop/src/lib/llm/ollama.ts +++ b/desktop/src/lib/llm/ollama.ts @@ -32,6 +32,8 @@ export class Ollama implements Llm { const body = JSON.stringify({ model: this.config.model, prompt, + // Override Modelfile system prompt so user's custom prompt takes effect + system: '', stream: false, }) const response = await fetch(`${this.config.ollamaBaseUrl}/api/generate`, {