diff --git a/docs/public/assets/images/changelog/mcp-linear.png b/docs/public/assets/images/changelog/mcp-linear.png new file mode 100644 index 0000000000..bca1f7539d Binary files /dev/null and b/docs/public/assets/images/changelog/mcp-linear.png differ diff --git a/docs/public/assets/images/changelog/mcplinear2.gif b/docs/public/assets/images/changelog/mcplinear2.gif new file mode 100644 index 0000000000..c013e8e3b6 Binary files /dev/null and b/docs/public/assets/images/changelog/mcplinear2.gif differ diff --git a/docs/src/pages/_meta.json b/docs/src/pages/_meta.json index 9508e506c3..351eb454c7 100644 --- a/docs/src/pages/_meta.json +++ b/docs/src/pages/_meta.json @@ -11,11 +11,6 @@ "type": "page", "title": "Documentation" }, - "cortex": { - "type": "page", - "title": "Cortex", - "display": "hidden" - }, "platforms": { "type": "page", "title": "Platforms", diff --git a/docs/src/pages/changelog/2025-08-14-general-improvs.mdx b/docs/src/pages/changelog/2025-08-14-general-improvs.mdx new file mode 100644 index 0000000000..0fbcd555ab --- /dev/null +++ b/docs/src/pages/changelog/2025-08-14-general-improvs.mdx @@ -0,0 +1,77 @@ +--- +title: "Jan v0.6.8: Engine fixes, new MCP tutorials, and cleaner docs" +version: 0.6.8 +description: "Llama.cpp stability upgrades, Linear/Todoist MCP tutorials, new model pages (Lucy, Jan‑v1), and docs reorganization" +date: 2025-08-14 +ogImage: "/assets/images/changelog/mcplinear2.gif" +--- + +import ChangelogHeader from "@/components/Changelog/ChangelogHeader" +import { Callout } from 'nextra/components' + + + +## Highlights 🎉 + +v0.6.8 focuses on stability and real workflows: major llama.cpp hardening, two new MCP productivity tutorials, new model pages, and a cleaner docs structure. 
+ + +### 🚀 New tutorials & docs + +- Linear MCP tutorial: create/update issues, projects, comments, cycles — directly from chat +- Todoist MCP tutorial: add, list, update, complete, and delete tasks via natural language +- New model pages: + - Lucy (1.7B) — optimized for web_search tool calling + - Jan‑v1 (4B) — strong SimpleQA (91.1%), solid tool use +- Docs updates: + - Reorganized landing and Products sections; streamlined QuickStart + - Ongoing Docs v2 (Astro) migration with handbook, blog, and changelog sections added and then removed + +### 🧱 Llama.cpp engine: stability & correctness + +- Structured error handling for llama.cpp extension +- Better argument handling, improved model path resolution, clearer error messages +- Device parsing tests; conditional Vulkan support; support for missing CUDA backends +- AVX2 instruction support check (Mac Intel) for MCP +- Server hang on model load — fixed +- Session management & port allocation moved to backend for robustness +- Recommended labels in settings; per‑model Jinja template customization +- Tensor buffer type override support +- “Continuous batching” description corrected + +### ✨ UX polish + +- Thread sorting fixed; assistant dropdown click reliability improved +- Responsive left panel text color; provider logo blur cleanup +- Show toast on download errors; context size error dialog restored +- Prevent accidental message submit for IME users +- Onboarding loop fixed; GPU detection brought back +- Connected MCP servers status stays in sync after JSON edits + +### 🔍 Hub & providers + +- Hugging Face token respected for repo search and private README visualization +- Deep links and model details fixed +- Factory reset unblocked; special chars in `modelId` handled +- Feature toggle for auto‑updater respected + +### 🧪 CI & housekeeping + +- Nightly/PR workflow tweaks; clearer API server logs +- Cleaned unused hardware APIs +- Release workflows updated; docs release paths consolidated + +### 🤖 Reasoning model 
fixes + +- gpt‑oss “thinking block” rendering fixed +- Reasoning text no longer included in chat completion requests + +## Thanks to new contributors + +· @cmppoon · @shmutalov · @B0sh + +--- + +Update your Jan or [download the latest](https://jan.ai/). + +For the complete list of changes, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.6.8). diff --git a/docs/src/pages/docs/_assets/chat_jan_v1.png b/docs/src/pages/docs/_assets/chat_jan_v1.png new file mode 100644 index 0000000000..173c02ecdc Binary files /dev/null and b/docs/src/pages/docs/_assets/chat_jan_v1.png differ diff --git a/docs/src/pages/docs/_assets/creative_bench_jan_v1.png b/docs/src/pages/docs/_assets/creative_bench_jan_v1.png new file mode 100644 index 0000000000..1444d15245 Binary files /dev/null and b/docs/src/pages/docs/_assets/creative_bench_jan_v1.png differ diff --git a/docs/src/pages/docs/_assets/download_janv1.png b/docs/src/pages/docs/_assets/download_janv1.png new file mode 100644 index 0000000000..00d5a78782 Binary files /dev/null and b/docs/src/pages/docs/_assets/download_janv1.png differ diff --git a/docs/src/pages/docs/_assets/enable_mcp.png b/docs/src/pages/docs/_assets/enable_mcp.png new file mode 100644 index 0000000000..88ad8c2abc Binary files /dev/null and b/docs/src/pages/docs/_assets/enable_mcp.png differ diff --git a/docs/src/pages/docs/_assets/jan_v1_demo.gif b/docs/src/pages/docs/_assets/jan_v1_demo.gif new file mode 100644 index 0000000000..9e4298c820 Binary files /dev/null and b/docs/src/pages/docs/_assets/jan_v1_demo.gif differ diff --git a/docs/src/pages/docs/_assets/jan_v1_serper.png b/docs/src/pages/docs/_assets/jan_v1_serper.png new file mode 100644 index 0000000000..ad06fb63f7 Binary files /dev/null and b/docs/src/pages/docs/_assets/jan_v1_serper.png differ diff --git a/docs/src/pages/docs/_assets/jan_v1_serper1.png b/docs/src/pages/docs/_assets/jan_v1_serper1.png new file mode 100644 index 0000000000..1b8dfd74d7 
Binary files /dev/null and b/docs/src/pages/docs/_assets/jan_v1_serper1.png differ diff --git a/docs/src/pages/docs/_assets/linear1.png b/docs/src/pages/docs/_assets/linear1.png new file mode 100644 index 0000000000..f260b55ff5 Binary files /dev/null and b/docs/src/pages/docs/_assets/linear1.png differ diff --git a/docs/src/pages/docs/_assets/linear2.png b/docs/src/pages/docs/_assets/linear2.png new file mode 100644 index 0000000000..fc059195bd Binary files /dev/null and b/docs/src/pages/docs/_assets/linear2.png differ diff --git a/docs/src/pages/docs/_assets/linear3.png b/docs/src/pages/docs/_assets/linear3.png new file mode 100644 index 0000000000..f3fbe9b78a Binary files /dev/null and b/docs/src/pages/docs/_assets/linear3.png differ diff --git a/docs/src/pages/docs/_assets/linear4.png b/docs/src/pages/docs/_assets/linear4.png new file mode 100644 index 0000000000..3482649d83 Binary files /dev/null and b/docs/src/pages/docs/_assets/linear4.png differ diff --git a/docs/src/pages/docs/_assets/linear5.png b/docs/src/pages/docs/_assets/linear5.png new file mode 100644 index 0000000000..82d1db180e Binary files /dev/null and b/docs/src/pages/docs/_assets/linear5.png differ diff --git a/docs/src/pages/docs/_assets/linear6.png b/docs/src/pages/docs/_assets/linear6.png new file mode 100644 index 0000000000..717e928d1c Binary files /dev/null and b/docs/src/pages/docs/_assets/linear6.png differ diff --git a/docs/src/pages/docs/_assets/linear7.png b/docs/src/pages/docs/_assets/linear7.png new file mode 100644 index 0000000000..30f992abc3 Binary files /dev/null and b/docs/src/pages/docs/_assets/linear7.png differ diff --git a/docs/src/pages/docs/_assets/linear8.png b/docs/src/pages/docs/_assets/linear8.png new file mode 100644 index 0000000000..4d0ea415a8 Binary files /dev/null and b/docs/src/pages/docs/_assets/linear8.png differ diff --git a/docs/src/pages/docs/_assets/lucy_demo.gif b/docs/src/pages/docs/_assets/lucy_demo.gif new file mode 100644 index 0000000000..824fa21119 
Binary files /dev/null and b/docs/src/pages/docs/_assets/lucy_demo.gif differ diff --git a/docs/src/pages/docs/_assets/mcplinear2.gif b/docs/src/pages/docs/_assets/mcplinear2.gif new file mode 100644 index 0000000000..c013e8e3b6 Binary files /dev/null and b/docs/src/pages/docs/_assets/mcplinear2.gif differ diff --git a/docs/src/pages/docs/_assets/mcptodoist_extreme.gif b/docs/src/pages/docs/_assets/mcptodoist_extreme.gif new file mode 100644 index 0000000000..6ae64050f0 Binary files /dev/null and b/docs/src/pages/docs/_assets/mcptodoist_extreme.gif differ diff --git a/docs/src/pages/docs/_assets/serper_janparams.png b/docs/src/pages/docs/_assets/serper_janparams.png new file mode 100644 index 0000000000..89c8ad5027 Binary files /dev/null and b/docs/src/pages/docs/_assets/serper_janparams.png differ diff --git a/docs/src/pages/docs/_assets/serper_page.png b/docs/src/pages/docs/_assets/serper_page.png new file mode 100644 index 0000000000..34c7adfb99 Binary files /dev/null and b/docs/src/pages/docs/_assets/serper_page.png differ diff --git a/docs/src/pages/docs/_assets/serper_playground.png b/docs/src/pages/docs/_assets/serper_playground.png new file mode 100644 index 0000000000..a52ac815e5 Binary files /dev/null and b/docs/src/pages/docs/_assets/serper_playground.png differ diff --git a/docs/src/pages/docs/_assets/simpleqa_jan_v1.png b/docs/src/pages/docs/_assets/simpleqa_jan_v1.png new file mode 100644 index 0000000000..6cb7b125a1 Binary files /dev/null and b/docs/src/pages/docs/_assets/simpleqa_jan_v1.png differ diff --git a/docs/src/pages/docs/_assets/simpleqa_lucy.png b/docs/src/pages/docs/_assets/simpleqa_lucy.png new file mode 100644 index 0000000000..204f08b078 Binary files /dev/null and b/docs/src/pages/docs/_assets/simpleqa_lucy.png differ diff --git a/docs/src/pages/docs/_assets/todoist1.png b/docs/src/pages/docs/_assets/todoist1.png new file mode 100644 index 0000000000..a1b98578db Binary files /dev/null and b/docs/src/pages/docs/_assets/todoist1.png 
differ diff --git a/docs/src/pages/docs/_assets/todoist2.png b/docs/src/pages/docs/_assets/todoist2.png new file mode 100644 index 0000000000..9e4164b893 Binary files /dev/null and b/docs/src/pages/docs/_assets/todoist2.png differ diff --git a/docs/src/pages/docs/_assets/todoist3.png b/docs/src/pages/docs/_assets/todoist3.png new file mode 100644 index 0000000000..ede276499f Binary files /dev/null and b/docs/src/pages/docs/_assets/todoist3.png differ diff --git a/docs/src/pages/docs/_assets/todoist4.png b/docs/src/pages/docs/_assets/todoist4.png new file mode 100644 index 0000000000..2c8e9c8167 Binary files /dev/null and b/docs/src/pages/docs/_assets/todoist4.png differ diff --git a/docs/src/pages/docs/_assets/todoist5.png b/docs/src/pages/docs/_assets/todoist5.png new file mode 100644 index 0000000000..5e761df7c4 Binary files /dev/null and b/docs/src/pages/docs/_assets/todoist5.png differ diff --git a/docs/src/pages/docs/_assets/toggle_tools.png b/docs/src/pages/docs/_assets/toggle_tools.png new file mode 100644 index 0000000000..53c7e3b055 Binary files /dev/null and b/docs/src/pages/docs/_assets/toggle_tools.png differ diff --git a/docs/src/pages/docs/_assets/turn_on_mcp.png b/docs/src/pages/docs/_assets/turn_on_mcp.png new file mode 100644 index 0000000000..9bce0e81be Binary files /dev/null and b/docs/src/pages/docs/_assets/turn_on_mcp.png differ diff --git a/docs/src/pages/docs/_meta.json b/docs/src/pages/docs/_meta.json index 3b78a26289..614c01e30b 100644 --- a/docs/src/pages/docs/_meta.json +++ b/docs/src/pages/docs/_meta.json @@ -4,20 +4,16 @@ "title": "Switcher" }, "index": "Overview", - "how-to-separator": { - "title": "HOW TO", + "getting-started-separator": { + "title": "GETTING STARTED", "type": "separator" }, + "quickstart": "QuickStart", "desktop": "Install 👋 Jan", - "threads": "Start Chatting", - "jan-models": "Use Jan Models", + "jan-models": "Models", "assistants": "Create Assistants", - - "tutorials-separators": { - "title": "TUTORIALS", - "type": 
"separator" - }, - "remote-models": "Connect to Remote Models", + "remote-models": "Cloud Providers", + "mcp-examples": "Tutorials", "explanation-separator": { "title": "EXPLANATION", @@ -38,7 +34,6 @@ }, "manage-models": "Manage Models", "mcp": "Model Context Protocol", - "mcp-examples": "MCP Examples", "localserver": { "title": "LOCAL SERVER", diff --git a/docs/src/pages/docs/index.mdx b/docs/src/pages/docs/index.mdx index 66a5023351..9f2bd26c47 100644 --- a/docs/src/pages/docs/index.mdx +++ b/docs/src/pages/docs/index.mdx @@ -1,17 +1,19 @@ --- title: Jan -description: Jan is an open-source ChatGPT-alternative and self-hosted AI platform - build and run AI on your own desktop or server. +description: Build, run, and own your AI. From laptop to superintelligence. keywords: [ Jan, Jan AI, - ChatGPT alternative, - OpenAI platform alternative, - local API, + open superintelligence, + AI ecosystem, local AI, private AI, - conversational AI, - no-subscription fee, + self-hosted AI, + llama.cpp, + Model Context Protocol, + MCP, + GGUF models, large language model, LLM, ] @@ -24,123 +26,152 @@ import FAQBox from '@/components/FaqBox' ![Jan's Cover Image](./_assets/jan-app-new.png) +## Jan's Goal -Jan is a ChatGPT alternative that runs 100% offline on your desktop and (*soon*) on mobile. Our goal is to -make it easy for anyone, with or without coding skills, to download and use AI models with full control and -[privacy](https://www.reuters.com/legal/legalindustry/privacy-paradox-with-ai-2023-10-31/). +> Jan's goal is to build superintelligence that you can self-host and use locally. -Jan is powered by [Llama.cpp](https://github.com/ggerganov/llama.cpp), a local AI engine that provides an OpenAI-compatible -API that can run in the background by default at `https://localhost:1337` (or your custom port). This enables you to power all sorts of -applications with AI capabilities from your laptop/PC. 
For example, you can connect local tools like [Continue](https://jan.ai/docs/server-examples/continue-dev) -and [Cline](https://cline.bot/) to Jan and power them using your favorite models. +## What is Jan? -Jan doesn't limit you to locally hosted models, meaning, you can create an API key from your favorite model provider, -add it to Jan via the configuration's page and start talking to your favorite models. +Jan is an open-source AI ecosystem that runs on your hardware. We're building towards open superintelligence - a complete AI platform you actually own. -### Features +### The Ecosystem -- Download popular open-source LLMs (Llama3, Gemma3, Qwen3, and more) from the HuggingFace [Model Hub](./docs/manage-models.mdx) -or import any GGUF files (the model format used by llama.cpp) available locally -- Connect to [cloud services](/docs/remote-models/openai) (OpenAI, Anthropic, Mistral, Groq, etc.) -- [Chat](./docs/threads.mdx) with AI models & [customize their parameters](/docs/model-parameters.mdx) via our -intuitive interface -- Use our [local API server](https://jan.ai/api-reference) with an OpenAI-equivalent API to power other apps. +**Models**: We build specialized models for real tasks, not general-purpose assistants: +- **Jan-Nano (32k/128k)**: 4B parameters designed for deep research with MCP. The 128k version processes entire papers, codebases, or legal documents in one go +- **Lucy**: 1.7B model that runs agentic web search on your phone. Small enough for CPU, smart enough for complex searches +- **Jan-v1**: 4B model for agentic reasoning and tool use, achieving 91.1% on SimpleQA -### Philosophy +We also integrate the best open-source models - from OpenAI's gpt-oss to community GGUF models on Hugging Face. The goal: make powerful AI accessible to everyone, not just those with server farms. 
-Jan is built to be [user-owned](about#-user-owned), this means that Jan is: -- Truly open source via the [Apache 2.0 license](https://github.com/menloresearch/jan/blob/dev/LICENSE) -- [Data is stored locally, following one of the many local-first principles](https://www.inkandswitch.com/local-first) -- Internet is optional, Jan can run 100% offline -- Free choice of AI models, both local and cloud-based -- We do not collect or sell user data. See our [Privacy Policy](./privacy). +**Applications**: Jan Desktop runs on your computer today. Web, mobile, and server versions coming in late 2025. Everything syncs, everything works together. + +**Tools**: Connect to the real world through [Model Context Protocol (MCP)](./mcp). Design with Canva, analyze data in Jupyter notebooks, control browsers, execute code in E2B sandboxes. Your AI can actually do things, not just talk about them. - You can read more about our [philosophy](/about#philosophy) here. +API keys are optional. No account needed. Just download and run. Bring your own API keys to connect your favorite cloud models. -### Inspirations +### Core Features -Jan is inspired by the concepts of [Calm Computing](https://en.wikipedia.org/wiki/Calm_technology), and the Disappearing Computer. 
+- **Run Models Locally**: Download any GGUF model from Hugging Face, use OpenAI's gpt-oss models, or connect to cloud providers +- **OpenAI-Compatible API**: Local server at `localhost:1337` works with tools like [Continue](./server-examples/continue-dev) and [Cline](https://cline.bot/) +- **Extend with MCP Tools**: Browser automation, web search, data analysis, design tools - all through natural language +- **Your Choice of Infrastructure**: Run on your laptop, self-host on your servers (soon), or use cloud when you need it -## Acknowledgements +### Growing MCP Integrations + +Jan connects to real tools through MCP: +- **Creative Work**: Generate designs with Canva +- **Data Analysis**: Execute Python in Jupyter notebooks +- **Web Automation**: Control browsers with Browserbase and Browser Use +- **Code Execution**: Run code safely in E2B sandboxes +- **Search & Research**: Access current information via Exa, Perplexity, and Octagon +- **More coming**: The MCP ecosystem is expanding rapidly -Jan is built on the shoulders of many open-source projects like: +## Philosophy + +Jan is built to be user-owned: +- **Open Source**: Apache 2.0 license - truly free +- **Local First**: Your data stays on your device. Internet is optional +- **Privacy Focused**: We don't collect or sell user data. See our [Privacy Policy](./privacy) +- **No Lock-in**: Export your data anytime. Use any model. Switch between local and cloud + + +We're building AI that respects your choices. Not another wrapper around someone else's API. + -- [Llama.cpp](https://github.com/ggerganov/llama.cpp/blob/master/LICENSE) -- [Scalar](https://github.com/scalar/scalar) +## Quick Start + +1. [Download Jan](./quickstart) for your operating system +2. Choose a model - download locally or add cloud API keys +3. Start chatting or connect tools via MCP +4. 
Build with our [API](https://jan.ai/api-reference) + +## Acknowledgements + +Jan is built on the shoulders of giants: +- [Llama.cpp](https://github.com/ggerganov/llama.cpp) for inference +- [Model Context Protocol](https://modelcontextprotocol.io) for tool integration +- The open-source community that makes this possible ## FAQs - Jan is a customizable AI assistant that can run offline on your computer - a privacy-focused alternative to tools like - ChatGPT, Anthropic's Claude, and Google Gemini, with optional cloud AI support. + Jan is an open-source AI ecosystem building towards superintelligence you can self-host. Today it's a desktop app that runs AI models locally. Tomorrow it's a complete platform across all your devices. - - Download Jan on your computer, download a model or add API key for a cloud-based one, and start chatting. For - detailed setup instructions, see our [Quick Start](/docs/quickstart) guide. + + Other platforms are models behind APIs you rent. Jan is a complete AI ecosystem you own. Run any model, use real tools through MCP, keep your data private, and never pay subscriptions for local use. 
- - Jan supports all major operating systems, - - [Mac](/docs/desktop/mac#compatibility) - - [Windows](/docs/desktop/windows#compatibility) - - [Linux](/docs/desktop/linux) - - Hardware compatibility includes: - - NVIDIA GPUs (CUDA) - - AMD GPUs (Vulkan) - - Intel Arc GPUs (Vulkan) - - Any GPU with Vulkan support + + **Jan Models:** + - Jan-Nano (32k/128k) - Deep research with MCP integration + - Lucy - Mobile-optimized agentic search (1.7B) + - Jan-v1 - Agentic reasoning and tool use (4B) + + **Open Source:** + - OpenAI's gpt-oss models (120b and 20b) + - Any GGUF model from Hugging Face + + **Cloud (with your API keys):** + - OpenAI, Anthropic, Mistral, Groq, and more - - Jan prioritizes privacy by: - - Running 100% offline with locally-stored data - - Using open-source models that keep your conversations private - - Storing all files and chat history on your device in the [Jan Data Folder](/docs/data-folder) - - Never collecting or selling your data - - - When using third-party cloud AI services through Jan, their data policies apply. Check their privacy terms. - - - You can optionally share anonymous usage statistics to help improve Jan, but your conversations are never - shared. See our complete [Privacy Policy](./docs/privacy). + + MCP (Model Context Protocol) lets AI interact with real applications. Instead of just generating text, your AI can create designs in Canva, analyze data in Jupyter, browse the web, and execute code - all through conversation. 
- - - Download optimized models from the [Jan Hub](/docs/manage-models) - - Import GGUF models from Hugging Face or your local files - - Connect to cloud providers like OpenAI, Anthropic, Mistral and Groq (requires your own API keys) + + **Supported OS**: + - [Windows 10+](/docs/desktop/windows#compatibility) + - [macOS 12+](/docs/desktop/mac#compatibility) + - [Linux (Ubuntu 20.04+)](/docs/desktop/linux) + + **Hardware**: + - Minimum: 8GB RAM, 10GB storage + - Recommended: 16GB RAM, GPU (NVIDIA/AMD/Intel), 50GB storage + - Works with: NVIDIA (CUDA), AMD (Vulkan), Intel Arc, Apple Silicon - - Jan is completely free and open-source with no subscription fees for local models and features. When using cloud-based - models (like GPT-4o or Claude Sonnet 3.7), you'll only pay the standard rates to those providers—we add no markup. + + **Local use**: Always free, no catches + **Cloud models**: You pay providers directly (we add no markup) + **Jan cloud**: Optional paid services coming 2025 + + The core platform will always be free and open source. - - Yes! Once you've downloaded a local model, Jan works completely offline with no internet connection needed. - + + - Runs 100% offline once models are downloaded + - All data stored locally in [Jan Data Folder](/docs/data-folder) + - No telemetry without explicit consent + - Open source code you can audit - - - Join our [Discord community](https://discord.gg/qSwXFx6Krr) to connect with other users - - Contribute through [GitHub](https://github.com/menloresearch/jan) (no permission needed!) - - Get troubleshooting help in our [Discord](https://discord.com/invite/FTk2MvZwJH) channel [#🆘|jan-help](https://discord.com/channels/1107178041848909847/1192090449725358130) - - Check our [Troubleshooting](./docs/troubleshooting) guide for common issues + + When using cloud providers through Jan, their privacy policies apply. + - Yes! We fully support the self-hosted movement. 
Either download Jan directly or fork it on - [GitHub repository](https://github.com/menloresearch/jan) and build it from source. + Yes. Download directly or build from [source](https://github.com/menloresearch/jan). Jan Server for production deployments coming late 2025. - - Jan stands for "Just a Name". We are, admittedly, bad at marketing 😂. + + - **Jan Web**: Beta late 2025 + - **Jan Mobile**: Late 2025 + - **Jan Server**: Late 2025 + + All versions will sync seamlessly. - - Yes! We love hiring from our community. Check out our open positions at [Careers](https://menlo.bamboohr.com/careers). + + - Code: [GitHub](https://github.com/menloresearch/jan) + - Community: [Discord](https://discord.gg/FTk2MvZwJH) + - Testing: Help evaluate models and report bugs + - Documentation: Improve guides and tutorials + + + Yes! We love hiring from our community. Check [Careers](https://menlo.bamboohr.com/careers). + \ No newline at end of file diff --git a/docs/src/pages/docs/jan-models/jan-v1.mdx b/docs/src/pages/docs/jan-models/jan-v1.mdx new file mode 100644 index 0000000000..f77e727fa1 --- /dev/null +++ b/docs/src/pages/docs/jan-models/jan-v1.mdx @@ -0,0 +1,129 @@ +--- +title: Jan-v1 +description: 4B parameter model with strong performance on reasoning benchmarks +keywords: + [ + Jan, + Jan-v1, + Jan Models, + reasoning, + SimpleQA, + tool calling, + GGUF, + 4B model, + ] +--- + +import { Callout } from 'nextra/components' + +# Jan-v1 + +## Overview + +Jan-v1 is a 4B parameter model based on Qwen3-4B-thinking, designed for reasoning and problem-solving tasks. The model achieves 91.1% accuracy on SimpleQA through model scaling and fine-tuning approaches. 
+ +## Performance + +### SimpleQA Benchmark + +Jan-v1 demonstrates strong factual question-answering capabilities: + +![Jan-v1 SimpleQA Performance](../_assets/simpleqa_jan_v1.png) + +At 91.1% accuracy, Jan-v1 outperforms several larger models on SimpleQA, including Perplexity's 70B model. This performance represents effective scaling and fine-tuning for a 4B parameter model. + +### Chat and Creativity Benchmarks + +Jan-v1 has been evaluated on conversational and creative tasks: + +![Jan-v1 Creativity Benchmarks](../_assets/creative_bench_jan_v1.png) + +These benchmarks (EQBench, CreativeWriting, and IFBench) measure the model's ability to handle conversational nuance, creative expression, and instruction following. + +## Requirements + +- **Memory**: + - Minimum: 8GB RAM (with Q4 quantization) + - Recommended: 16GB RAM (with Q8 quantization) +- **Hardware**: CPU or GPU +- **API Support**: OpenAI-compatible at localhost:1337 + +## Using Jan-v1 + +### Quick Start + +1. Download Jan Desktop +2. Select Jan-v1 from the model list +3. 
Start chatting - no additional configuration needed + +### Demo + +![Jan-v1 Demo](../_assets/jan_v1_demo.gif) + +### Deployment Options + +**Using vLLM:** +```bash +vllm serve janhq/Jan-v1-4B \ + --host 0.0.0.0 \ + --port 1234 \ + --enable-auto-tool-choice \ + --tool-call-parser hermes +``` + +**Using llama.cpp:** +```bash +llama-server --model jan-v1.gguf \ + --host 0.0.0.0 \ + --port 1234 \ + --jinja \ + --no-context-shift +``` + +### Recommended Parameters + +```yaml +temperature: 0.6 +top_p: 0.95 +top_k: 20 +min_p: 0.0 +max_tokens: 2048 +``` + +## What Jan-v1 Does Well + +- **Question Answering**: 91.1% accuracy on SimpleQA +- **Reasoning Tasks**: Built on thinking-optimized base model +- **Tool Calling**: Supports function calling through hermes parser +- **Instruction Following**: Reliable response to user instructions + +## Limitations + +- **Model Size**: 4B parameters limits complex reasoning compared to larger models +- **Specialized Tasks**: Optimized for Q&A and reasoning, not specialized domains +- **Context Window**: Standard context limitations apply + +## Available Formats + +### GGUF Quantizations + +- **Q4_K_M**: 2.5 GB - Good balance of size and quality +- **Q5_K_M**: 2.89 GB - Better quality, slightly larger +- **Q6_K**: 3.31 GB - Near-full quality +- **Q8_0**: 4.28 GB - Highest quality quantization + +## Models Available + +- [Jan-v1 on Hugging Face](https://huggingface.co/janhq/Jan-v1-4B) +- [Jan-v1 GGUF on Hugging Face](https://huggingface.co/janhq/Jan-v1-4B-GGUF) + +## Technical Notes + + +The model includes a system prompt in the chat template by default to match benchmark performance. A vanilla template without system prompt is available in `chat_template_raw.jinja`. 
+ + +## Community + +- **Discussions**: [HuggingFace Community](https://huggingface.co/janhq/Jan-v1-4B/discussions) +- **Support**: Available through Jan App at [jan.ai](https://jan.ai) \ No newline at end of file diff --git a/docs/src/pages/docs/jan-models/lucy.mdx b/docs/src/pages/docs/jan-models/lucy.mdx new file mode 100644 index 0000000000..ac4006359d --- /dev/null +++ b/docs/src/pages/docs/jan-models/lucy.mdx @@ -0,0 +1,122 @@ +--- +title: Lucy +description: Compact 1.7B model optimized for web search with tool calling +keywords: + [ + Jan, + Lucy, + Jan Models, + web search, + tool calling, + Serper API, + GGUF, + 1.7B model, + ] +--- + +import { Callout } from 'nextra/components' + +# Lucy + +## Overview + +Lucy is a 1.7B parameter model built on Qwen3-1.7B, optimized for web search through tool calling. The model has been trained to work effectively with search APIs like Serper, enabling web search capabilities in resource-constrained environments. + +## Performance + +### SimpleQA Benchmark + +Lucy achieves competitive performance on SimpleQA despite its small size: + +![Lucy SimpleQA Performance](../_assets/simpleqa_lucy.png) + +The benchmark shows Lucy (1.7B) compared against models ranging from 4B to 600B+ parameters. While larger models generally perform better, Lucy demonstrates that effective web search integration can partially compensate for smaller model size. + +## Requirements + +- **Memory**: + - Minimum: 4GB RAM (with Q4 quantization) + - Recommended: 8GB RAM (with Q8 quantization) +- **Search API**: Serper API key required for web search functionality +- **Hardware**: Runs on CPU or GPU + + +To use Lucy's web search capabilities, you'll need a Serper API key. Get one at [serper.dev](https://serper.dev). + + +## Using Lucy + +### Quick Start + +1. Download Jan Desktop +2. Download Lucy from the Hub +3. Configure Serper MCP with your API key +4. 
Start using web search through natural language + +### Demo + +![Lucy Demo](../_assets/lucy_demo.gif) + +### Deployment Options + +**Using vLLM:** +```bash +vllm serve Menlo/Lucy-128k \ + --host 0.0.0.0 \ + --port 1234 \ + --enable-auto-tool-choice \ + --tool-call-parser hermes \ + --rope-scaling '{"rope_type":"yarn","factor":3.2,"original_max_position_embeddings":40960}' \ + --max-model-len 131072 +``` + +**Using llama.cpp:** +```bash +llama-server model.gguf \ + --host 0.0.0.0 \ + --port 1234 \ + --rope-scaling yarn \ + --rope-scale 3.2 \ + --yarn-orig-ctx 40960 +``` + +### Recommended Parameters + +```yaml +Temperature: 0.7 +Top-p: 0.9 +Top-k: 20 +Min-p: 0.0 +``` + +## What Lucy Does Well + +- **Web Search Integration**: Optimized to call search tools and process results +- **Small Footprint**: 1.7B parameters means lower memory requirements +- **Tool Calling**: Reliable function calling for search APIs + +## Limitations + +- **Requires Internet**: Web search functionality needs active connection +- **API Costs**: Serper API has usage limits and costs +- **Context Processing**: While supporting 128k context, performance may vary with very long inputs +- **General Knowledge**: Limited by 1.7B parameter size for tasks beyond search + +## Models Available + +- [Lucy on Hugging Face](https://huggingface.co/Menlo/Lucy-128k) +- [Lucy GGUF on Hugging Face](https://huggingface.co/Menlo/Lucy-128k-gguf) + +## Citation + +```bibtex +@misc{dao2025lucyedgerunningagenticweb, + title={Lucy: edgerunning agentic web search on mobile with machine generated task vectors}, + author={Alan Dao and Dinh Bach Vu and Alex Nguyen and Norapat Buppodom}, + year={2025}, + eprint={2508.00360}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2508.00360}, +} +``` diff --git a/docs/src/pages/docs/mcp-examples/_meta.json b/docs/src/pages/docs/mcp-examples/_meta.json new file mode 100644 index 0000000000..ce948eb3ec --- /dev/null +++ 
b/docs/src/pages/docs/mcp-examples/_meta.json @@ -0,0 +1,20 @@ +{ + "browser": { + "title": "Browser Automation" + }, + "data-analysis": { + "title": "Data Analysis" + }, + "search": { + "title": "Search & Research" + }, + "design": { + "title": "Design Tools" + }, + "deepresearch": { + "title": "Deep Research" + }, + "productivity": { + "title": "Productivity" + } +} diff --git a/docs/src/pages/docs/mcp-examples/browser/_meta.json b/docs/src/pages/docs/mcp-examples/browser/_meta.json new file mode 100644 index 0000000000..faa76ef2a8 --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/browser/_meta.json @@ -0,0 +1,6 @@ +{ + "browserbase": { + "title": "Browserbase", + "href": "/docs/mcp-examples/browser/browserbase" + } +} diff --git a/docs/src/pages/docs/mcp-examples/data-analysis/_meta.json b/docs/src/pages/docs/mcp-examples/data-analysis/_meta.json new file mode 100644 index 0000000000..43561ec1a1 --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/data-analysis/_meta.json @@ -0,0 +1,10 @@ +{ + "e2b": { + "title": "E2B Code Sandbox", + "href": "/docs/mcp-examples/data-analysis/e2b" + }, + "jupyter": { + "title": "Jupyter Notebooks", + "href": "/docs/mcp-examples/data-analysis/jupyter" + } +} diff --git a/docs/src/pages/docs/mcp-examples/deepresearch/_meta.json b/docs/src/pages/docs/mcp-examples/deepresearch/_meta.json new file mode 100644 index 0000000000..c746d8e335 --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/deepresearch/_meta.json @@ -0,0 +1,6 @@ +{ + "octagon": { + "title": "Octagon Deep Research", + "href": "/docs/mcp-examples/deepresearch/octagon" + } +} diff --git a/docs/src/pages/docs/mcp-examples/design/_meta.json b/docs/src/pages/docs/mcp-examples/design/_meta.json new file mode 100644 index 0000000000..d9f00c5d2b --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/design/_meta.json @@ -0,0 +1,6 @@ +{ + "canva": { + "title": "Canva", + "href": "/docs/mcp-examples/design/canva" + } +} diff --git 
a/docs/src/pages/docs/mcp-examples/productivity/_meta.json b/docs/src/pages/docs/mcp-examples/productivity/_meta.json new file mode 100644 index 0000000000..a739472a53 --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/productivity/_meta.json @@ -0,0 +1,10 @@ +{ + "todoist": { + "title": "Todoist", + "href": "/docs/mcp-examples/productivity/todoist" + }, + "linear": { + "title": "Linear", + "href": "/docs/mcp-examples/productivity/linear" + } +} diff --git a/docs/src/pages/docs/mcp-examples/productivity/linear.mdx b/docs/src/pages/docs/mcp-examples/productivity/linear.mdx new file mode 100644 index 0000000000..01423e942f --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/productivity/linear.mdx @@ -0,0 +1,268 @@ +--- +title: Linear MCP +description: Manage software projects and issue tracking through natural language with Linear integration. +keywords: + [ + Jan, + MCP, + Model Context Protocol, + Linear, + project management, + issue tracking, + agile, + software development, + tool calling, + ] +--- + +import { Callout, Steps } from 'nextra/components' + +# Linear MCP + +[Linear MCP](https://linear.app) provides comprehensive project management capabilities through natural conversation. Transform your software development workflow by managing issues, projects, and team collaboration directly through AI. 
+ +## Available Tools + +Linear MCP offers extensive project management capabilities: + +### Issue Management +- `list_issues`: View all issues in your workspace +- `get_issue`: Get details of a specific issue +- `create_issue`: Create new issues with full details +- `update_issue`: Modify existing issues +- `list_my_issues`: See your assigned issues +- `list_issue_statuses`: View available workflow states +- `list_issue_labels`: See and manage labels +- `create_issue_label`: Create new labels + +### Project & Team +- `list_projects`: View all projects +- `get_project`: Get project details +- `create_project`: Start new projects +- `update_project`: Modify project settings +- `list_teams`: See all teams +- `get_team`: Get team information +- `list_users`: View team members + +### Documentation & Collaboration +- `list_documents`: Browse documentation +- `get_document`: Read specific documents +- `search_documentation`: Find information +- `list_comments`: View issue comments +- `create_comment`: Add comments to issues +- `list_cycles`: View sprint cycles + +## Prerequisites + +- Jan with experimental features enabled +- Linear account (free for up to 250 issues) +- Model with strong tool calling support +- Active internet connection + + +Linear offers a generous free tier perfect for small teams and personal projects. Unlimited users, 250 active issues, and full API access included. + + +## Setup + +### Create Linear Account + +1. Sign up at [linear.app](https://linear.app) +2. Complete the onboarding process + +![Linear signup page](../../_assets/linear1.png) + +Once logged in, you'll see your workspace: + +![Linear main dashboard](../../_assets/linear2.png) + +### Enable MCP in Jan + + +Enable **Experimental Features** in **Settings > General** if you don't see the MCP Servers option. + + +1. Go to **Settings > MCP Servers** +2. 
Toggle **Allow All MCP Tool Permission** ON + +### Configure Linear MCP + +Click the `+` button to add Linear MCP: + +**Configuration:** +- **Server Name**: `linear` +- **Command**: `npx` +- **Arguments**: `-y mcp-remote https://mcp.linear.app/sse` + +![Linear MCP configuration in Jan](../../_assets/linear3.png) + +### Authenticate with Linear + +When you first use Linear tools, a browser tab will open for authentication: + +![Linear authentication page](../../_assets/linear4.png) + +Complete the OAuth flow to grant Jan access to your Linear workspace. + +## Usage + +### Select a Model with Tool Calling + +For this example, we'll use kimi-k2 from Groq: + +1. Add the model in Groq settings: `moonshotai/kimi-k2-instruct` + +![Adding kimi-k2 model](../../_assets/linear6.png) + +2. Enable tools for the model: + +![Enable tools for kimi-k2](../../_assets/linear7.png) + +### Verify Available Tools + +You should see all Linear tools in the chat interface: + +![Linear tools available in chat](../../_assets/linear8.png) + +### Epic Project Management + +Watch AI transform mundane tasks into epic narratives: + +![Linear MCP creating Shakespearean war epic tasks](../../_assets/mcplinear2.gif) + +## Creative Examples + +### 🎭 Shakespearean Sprint Planning +``` +Create Linear tickets in the '👋Jan' team for my AGI project as battles in a Shakespearean war epic. Each sprint is a military campaign, bugs are enemy spies, and merge conflicts are sword fights between rival houses. Invent unique epic titles and dramatic descriptions with battle cries and victory speeches. Characterize bugs as enemy villains and developers as heroic warriors in this noble quest for AGI glory. Make tasks like model training, testing, and deployment sound like grand military campaigns with honor and valor. +``` + +### 🚀 Space Mission Development +``` +Transform our mobile app redesign into a NASA space mission. 
Create issues where each feature is a mission objective, bugs are space debris to clear, and releases are launch windows. Add dramatic mission briefings, countdown sequences, and astronaut logs. Priority levels become mission criticality ratings. +``` + +### 🏴‍☠️ Pirate Ship Operations +``` +Set up our e-commerce platform project as a pirate fleet adventure. Features are islands to conquer, bugs are sea monsters, deployments are naval battles. Create colorful pirate-themed tickets with treasure maps, crew assignments, and tales of high seas adventure. +``` + +### 🎮 Video Game Quest Log +``` +Structure our API refactoring project like an RPG quest system. Create issues as quests with XP rewards, boss battles for major features, side quests for minor tasks. Include loot drops (completed features), skill trees (learning requirements), and epic boss fight descriptions for challenging bugs. +``` + +### 🍳 Gordon Ramsay's Kitchen +``` +Manage our restaurant app project as if Gordon Ramsay is the head chef. Create brutally honest tickets criticizing code quality, demanding perfection in UX like a Michelin star dish. Bugs are "bloody disasters" and successful features are "finally, some good code." Include Kitchen Nightmares-style rescue plans. +``` + +## Practical Workflows + +### Sprint Planning +``` +Review all open issues in the Backend team, identify the top 10 by priority, and create a new sprint cycle called "Q1 Performance Sprint" with appropriate issues assigned. +``` + +### Bug Triage +``` +List all bugs labeled "critical" or "high-priority", analyze their descriptions, and suggest which ones should be fixed first based on user impact. Update their status to "In Progress" for the top 3. +``` + +### Documentation Audit +``` +Search our documentation for anything related to API authentication. Create issues for any gaps or outdated sections you find, labeled as "documentation" with detailed improvement suggestions. 
+``` + +### Team Workload Balance +``` +Show me all active issues grouped by assignee. Identify anyone with more than 5 high-priority items and suggest redistributions to balance the workload. +``` + +### Release Planning +``` +Create a project called "v2.0 Release" with milestones for: feature freeze, beta testing, documentation, and launch. Generate appropriate issues for each phase with realistic time estimates. +``` + +## Advanced Integration Patterns + +### Cross-Project Dependencies +``` +Find all issues labeled "blocked" across all projects. For each one, identify what they're waiting on and create linked issues for the blocking items if they don't exist. +``` + +### Automated Status Updates +``` +Look at all issues assigned to me that haven't been updated in 3 days. Add a comment with a status update based on their current state and any blockers. +``` + +### Smart Labeling +``` +Analyze all unlabeled issues in our workspace. Based on their titles and descriptions, suggest appropriate labels and apply them. Create any missing label categories we need. +``` + +### Sprint Retrospectives +``` +Generate a retrospective report for our last completed cycle. List what was completed, what was pushed to next sprint, and create discussion issues for any patterns you notice. 
+``` + +## Tips for Maximum Productivity + +- **Batch Operations**: Create multiple related issues in one request +- **Smart Templates**: Ask AI to remember your issue templates +- **Natural Queries**: "Show me what John is working on this week" +- **Context Awareness**: Reference previous issues in new requests +- **Automated Workflows**: Set up recurring management tasks + +## Troubleshooting + +**Authentication Issues:** +- Clear browser cookies for Linear +- Re-authenticate through the OAuth flow +- Check Linear workspace permissions +- Verify API access is enabled + +**Tool Calling Errors:** +- Ensure model supports multiple tool calls +- Try breaking complex requests into steps +- Verify all required fields are provided +- Check Linear service status + +**Missing Data:** +- Refresh authentication token +- Verify workspace access permissions +- Check if issues are in archived projects +- Ensure proper team selection + +**Performance Issues:** +- Linear API has rate limits (see dashboard) +- Break bulk operations into batches +- Cache frequently accessed data +- Use specific filters to reduce data + + +Linear's keyboard shortcuts work great alongside MCP! Use CMD+K for quick navigation while AI handles the heavy lifting. 
+ + +## Integration Ideas + +Combine Linear with other MCP tools: + +- **Serper + Linear**: Research technical solutions, then create implementation tickets +- **Jupyter + Linear**: Analyze project metrics, generate data-driven sprint plans +- **Todoist + Linear**: Sync personal tasks with work issues +- **E2B + Linear**: Run code tests, automatically create bug reports + +## Privacy & Security + +Linear MCP uses OAuth for authentication, meaning: +- Your credentials are never shared with Jan +- Access can be revoked anytime from Linear settings +- Data stays within Linear's infrastructure +- Only requested permissions are granted + +## Next Steps + +Linear MCP transforms project management from clicking through interfaces into natural conversation. Whether you're planning sprints, triaging bugs, or crafting epic development sagas, AI becomes your project management companion. + +Start with simple issue creation, then explore complex workflows like automated sprint planning and workload balancing. The combination of Linear's powerful platform with AI's creative capabilities makes project management both efficient and entertaining! \ No newline at end of file diff --git a/docs/src/pages/docs/mcp-examples/productivity/todoist.mdx b/docs/src/pages/docs/mcp-examples/productivity/todoist.mdx new file mode 100644 index 0000000000..2d7844bf0b --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/productivity/todoist.mdx @@ -0,0 +1,259 @@ +--- +title: Todoist MCP +description: Manage your tasks and todo lists through natural language with Todoist integration. +keywords: + [ + Jan, + MCP, + Model Context Protocol, + Todoist, + task management, + productivity, + todo list, + tool calling, + ] +--- + +import { Callout, Steps } from 'nextra/components' + +# Todoist MCP + +[Todoist MCP Server](https://github.com/abhiz123/todoist-mcp-server) enables AI models to manage your Todoist tasks through natural conversation. 
Instead of switching between apps, you can create, update, and complete tasks by simply chatting with your AI assistant. + +## Available Tools + +- `todoist_create_task`: Add new tasks to your todo list +- `todoist_get_tasks`: Retrieve and view your current tasks +- `todoist_update_task`: Modify existing tasks +- `todoist_complete_task`: Mark tasks as done +- `todoist_delete_task`: Remove tasks from your list + +## Prerequisites + +- Jan with experimental features enabled +- Todoist account (free or premium) +- Model with strong tool calling support +- Node.js installed + + +Todoist offers a generous free tier perfect for personal task management. Premium features add labels, reminders, and more projects. + + +## Setup + +### Create Todoist Account + +1. Sign up at [todoist.com](https://todoist.com) or log in if you have an account +2. Complete the onboarding process + +![Todoist welcome screen](../../_assets/todoist1.png) + +Once logged in, you'll see your main dashboard: + +![Todoist main dashboard](../../_assets/todoist2.png) + +### Get Your API Token + +1. Click **Settings** (gear icon) +2. Navigate to **Integrations** +3. Click on the **Developer** tab +4. Copy your API token (it's already generated for you) + +![Todoist API token in settings](../../_assets/todoist3.png) + +### Enable MCP in Jan + + +If you don't see the MCP Servers option, enable **Experimental Features** in **Settings > General** first. + + +1. Go to **Settings > MCP Servers** +2. 
Toggle **Allow All MCP Tool Permission** ON + +### Configure Todoist MCP + +Click the `+` button to add a new MCP server: + +**Configuration:** +- **Server Name**: `todoist` +- **Command**: `npx` +- **Arguments**: `-y @abhiz123/todoist-mcp-server` +- **Environment Variables**: + - Key: `TODOIST_API_TOKEN`, Value: `your_api_token_here` + +![Todoist MCP configuration in Jan](../../_assets/todoist4.png) + +## Usage + +### Select a Model with Tool Calling + +Open a new chat and select a model that excels at tool calling. Make sure tools are enabled for your chosen model. + +![Model selection with tools enabled](../../_assets/gpt5-add.png) + +### Verify Tools Available + +You should see the Todoist tools in the tools panel: + +![Todoist tools available in chat](../../_assets/todoist5.png) + +### Start Managing Tasks + +Now you can manage your todo list through natural conversation: + +![Todoist MCP in action](../../_assets/mcptodoist_extreme.gif) + +## Example Prompts + +### Blog Writing Workflow +``` +I need to write a blog post about AI and productivity tools today. Please add some tasks to my todo list to make sure I have a good set of steps to accomplish this task. +``` + +The AI will create structured tasks like: +- Research AI productivity tools +- Create blog outline +- Write introduction +- Draft main sections +- Add examples and screenshots +- Edit and proofread +- Publish and promote + +### Weekly Meal Planning +``` +Help me plan meals for the week. Create a grocery shopping list and cooking schedule for Monday through Friday, focusing on healthy, quick dinners. +``` + +### Home Improvement Project +``` +I'm renovating my home office this weekend. Break down the project into manageable tasks including shopping, prep work, and the actual renovation steps. +``` + +### Study Schedule +``` +I have a statistics exam in 2 weeks. Create a study plan with daily tasks covering all chapters, practice problems, and review sessions. 
+``` + +### Fitness Goals +``` +Set up a 30-day fitness challenge for me. Include daily workout tasks, rest days, and weekly progress check-ins. +``` + +### Event Planning +``` +I'm organizing a surprise birthday party for next month. Create a comprehensive task list covering invitations, decorations, food, entertainment, and day-of coordination. +``` + +## Advanced Usage + +### Task Management Commands + +**View all tasks:** +``` +Show me all my pending tasks for today +``` + +**Update priorities:** +``` +Make "Write blog introduction" high priority and move it to the top of my list +``` + +**Bulk completion:** +``` +Mark all my morning routine tasks as complete +``` + +**Clean up:** +``` +Delete all completed tasks from last week +``` + +### Project Organization + +Todoist supports projects, though the MCP may have limitations. Try: +``` +Create a new project called "Q1 Goals" and add 5 key objectives as tasks +``` + +### Recurring Tasks + +Set up repeating tasks: +``` +Add a daily task to review my calendar at 9 AM +Add a weekly task for meal prep on Sundays +Add a monthly task to pay bills on the 1st +``` + +## Creative Use Cases + +### 🎮 Game Development Sprint +``` +I'm participating in a 48-hour game jam. Create an hour-by-hour task schedule covering ideation, prototyping, art creation, programming, testing, and submission. +``` + +### 📚 Book Writing Challenge +``` +I'm doing NaNoWriMo (writing a novel in a month). Break down a 50,000-word goal into daily writing tasks with word count targets and plot milestones. +``` + +### 🌱 Garden Planning +``` +It's spring planting season. Create a gardening schedule for the next 3 months including soil prep, planting dates for different vegetables, watering reminders, and harvest times. +``` + +### 🎂 Baking Business Launch +``` +I'm starting a home bakery. Create tasks for getting permits, setting up social media, creating a menu, pricing strategy, and first week's baking schedule. 
+``` + +### 🏠 Moving Checklist +``` +I'm moving to a new apartment next month. Generate a comprehensive moving checklist including utilities setup, packing by room, change of address notifications, and moving day logistics. +``` + +## Tips for Best Results + +- **Be specific**: "Add task: Call dentist tomorrow at 2 PM" works better than "remind me about dentist" +- **Use natural language**: The AI understands context, so chat naturally +- **Batch operations**: Ask to create multiple related tasks at once +- **Review regularly**: Ask the AI to show your tasks and help prioritize +- **Iterate**: If the tasks aren't quite right, ask the AI to modify them + +## Troubleshooting + +**Tasks not appearing in Todoist:** +- Verify API token is correct +- Check Todoist website/app and refresh +- Ensure MCP server shows as active + +**Tool calling errors:** +- Confirm model supports tool calling +- Enable tools in model settings +- Try a different model (Claude 3.5+ or GPT-4o recommended) + +**Connection issues:** +- Check internet connectivity +- Verify Node.js installation +- Restart Jan after configuration + +**Rate limiting:** +- Todoist API has rate limits +- Space out bulk operations +- Wait a moment between large task batches + + +Todoist syncs across all devices. Tasks created through Jan instantly appear on your phone, tablet, and web app! + + +## Privacy Note + +Your tasks are synced with Todoist's servers. While the MCP runs locally, task data is stored in Todoist's cloud for sync functionality. Review Todoist's privacy policy if you're handling sensitive information. + +## Next Steps + +Combine Todoist MCP with other tools for powerful workflows: +- Use Serper MCP to research topics, then create action items in Todoist +- Generate code with E2B, then add testing tasks to your todo list +- Analyze data with Jupyter, then create follow-up tasks for insights + +Task management through natural language makes staying organized effortless. 
Let your AI assistant handle the overhead while you focus on getting things done! \ No newline at end of file diff --git a/docs/src/pages/docs/mcp-examples/search/_meta.json b/docs/src/pages/docs/mcp-examples/search/_meta.json new file mode 100644 index 0000000000..0ba01c4ace --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/search/_meta.json @@ -0,0 +1,10 @@ +{ + "exa": { + "title": "Exa Search", + "href": "/docs/mcp-examples/search/exa" + }, + "serper": { + "title": "Serper Search", + "href": "/docs/mcp-examples/search/serper" + } +} diff --git a/docs/src/pages/docs/mcp-examples/search/serper.mdx b/docs/src/pages/docs/mcp-examples/search/serper.mdx new file mode 100644 index 0000000000..2661dbbc7f --- /dev/null +++ b/docs/src/pages/docs/mcp-examples/search/serper.mdx @@ -0,0 +1,179 @@ +--- +title: Serper Search MCP +description: Connect Jan to real-time web search with Google results through Serper API. +keywords: + [ + Jan, + MCP, + Model Context Protocol, + Serper, + Google search, + web search, + real-time search, + tool calling, + Jan v1, + ] +--- + +import { Callout, Steps } from 'nextra/components' + +# Serper Search MCP + +[Serper](https://serper.dev) provides Google search results through a simple API, making it perfect for giving AI models access to current web information. The Serper MCP integration enables Jan models to search the web and retrieve real-time information. + +## Available Tools + +- `google_search`: Search Google and retrieve results with snippets +- `scrape`: Extract content from specific web pages + +## Prerequisites + +- Jan with experimental features enabled +- Serper API key from [serper.dev](https://serper.dev) +- Model with tool calling support (recommended: Jan v1) + + +Serper offers 2,500 free searches upon signup - enough for extensive testing and personal use. + + +## Setup + +### Enable Experimental Features + +1. Go to **Settings** > **General** +2. 
Toggle **Experimental Features** ON + +![Enable experimental features](../../_assets/enable_mcp.png) + +### Enable MCP + +1. Go to **Settings** > **MCP Servers** +2. Toggle **Allow All MCP Tool Permission** ON + +![Turn on MCP](../../_assets/turn_on_mcp.png) + +### Get Serper API Key + +1. Visit [serper.dev](https://serper.dev) +2. Sign up for a free account +3. Copy your API key from the playground + +![Serper homepage](../../_assets/serper_page.png) + +![Serper playground with API key](../../_assets/serper_playground.png) + +### Configure MCP Server + +Click `+` in MCP Servers section: + +**Configuration:** +- **Server Name**: `serper` +- **Command**: `npx` +- **Arguments**: `-y serper-search-scrape-mcp-server` +- **Environment Variables**: + - Key: `SERPER_API_KEY`, Value: `your-api-key` + +![Serper MCP configuration in Jan](../../_assets/serper_janparams.png) + +### Download Jan v1 + +Jan v1 is optimized for tool calling and works excellently with Serper: + +1. Go to the **Hub** tab +2. Search for **Jan v1** +3. Choose your preferred quantization +4. Click **Download** + +![Download Jan v1 from Hub](../../_assets/download_janv1.png) + +### Enable Tool Calling + +1. Go to **Settings** > **Model Providers** > **Llama.cpp** +2. Find Jan v1 in your models list +3. Click the edit icon +4. Toggle **Tools** ON + +![Enable tools for Jan v1](../../_assets/toggle_tools.png) + +## Usage + +### Start a New Chat + +With Jan v1 selected, you'll see the available Serper tools: + +![Chat view with Serper tools](../../_assets/chat_jan_v1.png) + +### Example Queries + +**Current Information:** +``` +What are the latest developments in quantum computing this week? +``` + +**Comparative Analysis:** +``` +What are the main differences between the Rust programming language and C++? Be spicy, hot takes are encouraged. 
😌 +``` +![Jan v1 using Serper for web search](../../_assets/jan_v1_serper.png) + +![Jan v1 using Serper for web search](../../_assets/jan_v1_serper1.png) + +**Research Tasks:** +``` +Find the current stock price of NVIDIA and recent news about their AI chips. +``` + +**Fact-Checking:** +``` +Is it true that the James Webb telescope found signs of life on an exoplanet? What's the latest? +``` + +**Local Information:** +``` +What restaurants opened in San Francisco this month? Focus on Japanese cuisine. +``` + +## How It Works + +1. **Query Processing**: Jan v1 analyzes your question and determines what to search +2. **Web Search**: Calls Serper API to get Google search results +3. **Content Extraction**: Can scrape specific pages for detailed information +4. **Synthesis**: Combines search results into a comprehensive answer + +## Tips for Best Results + +- **Be specific**: "Tesla Model 3 2024 price Australia" works better than "Tesla price" +- **Request recent info**: Add "latest", "current", or "2024/2025" to get recent results +- **Ask follow-ups**: Jan v1 maintains context for deeper research +- **Combine with analysis**: Ask for comparisons, summaries, or insights + +## Troubleshooting + +**No search results:** +- Verify API key is correct +- Check remaining credits at serper.dev +- Ensure MCP server shows as active + +**Tools not appearing:** +- Confirm experimental features are enabled +- Verify tool calling is enabled for your model +- Restart Jan after configuration changes + +**Poor search quality:** +- Use more specific search terms +- Try rephrasing your question +- Check if Serper service is operational + + +Each search query consumes one API credit. Monitor usage at serper.dev dashboard. 
+ + +## API Limits + +- **Free tier**: 2,500 searches +- **Paid plans**: Starting at $50/month for 50,000 searches +- **Rate limits**: 100 requests per second + +## Next Steps + +Serper MCP enables Jan v1 to access current web information, making it a powerful research assistant. Combine with other MCP tools for even more capabilities - use Serper for search, then E2B for data analysis, or Jupyter for visualization. diff --git a/docs/src/pages/docs/quickstart.mdx b/docs/src/pages/docs/quickstart.mdx new file mode 100644 index 0000000000..813b2529a0 --- /dev/null +++ b/docs/src/pages/docs/quickstart.mdx @@ -0,0 +1,158 @@ +--- +title: QuickStart +description: Get started with Jan and start chatting with AI in minutes. +keywords: + [ + Jan, + local AI, + LLM, + chat, + threads, + models, + download, + installation, + conversations, + ] +--- + +import { Callout, Steps } from 'nextra/components' +import { SquarePen, Pencil, Ellipsis, Paintbrush, Trash2, Settings } from 'lucide-react' + +# QuickStart + +Get up and running with Jan in minutes. This guide will help you install Jan, download a model, and start chatting immediately. + + + +### Step 1: Install Jan + +1. [Download Jan](/download) +2. Install the app ([Mac](/docs/desktop/mac), [Windows](/docs/desktop/windows), [Linux](/docs/desktop/linux)) +3. Launch Jan + +### Step 2: Download Jan v1 + +We recommend starting with **Jan v1**, our 4B parameter model optimized for reasoning and tool calling: + +1. Go to the **Hub Tab** +2. Search for **Jan v1** +3. Choose a quantization that fits your hardware: + - **Q4_K_M** (2.5 GB) - Good balance for most users + - **Q8_0** (4.28 GB) - Best quality if you have the RAM +4. Click **Download** + +![Download Jan v1](./_assets/download_janv1.png) + + +Jan v1 achieves 91.1% accuracy on SimpleQA and excels at tool calling, making it perfect for web search and reasoning tasks. + + +**HuggingFace models:** Some require an access token. 
Add yours in **Settings > Model Providers > Llama.cpp > Hugging Face Access Token**. + +![Add HF Token](./_assets/hf_token.png) + +### Step 3: Enable GPU Acceleration (Optional) + +For Windows/Linux with compatible graphics cards: + +1. Go to **() Settings** > **Hardware** +2. Toggle **GPUs** to ON + +![Turn on GPU acceleration](./_assets/gpu_accl.png) + + +Install required drivers before enabling GPU acceleration. See setup guides for [Windows](/docs/desktop/windows#gpu-acceleration) & [Linux](/docs/desktop/linux#gpu-acceleration). + + +### Step 4: Start Chatting + +1. Click **New Chat** () icon +2. Select your model in the input field dropdown +3. Type your message and start chatting + +![Create New Thread](./_assets/threads-new-chat-updated.png) + +Try asking Jan v1 questions like: +- "Explain quantum computing in simple terms" +- "Help me write a Python function to sort a list" +- "What are the pros and cons of electric vehicles?" + + +**Want to give Jan v1 access to current web information?** Check out our [Serper MCP tutorial](/docs/mcp-examples/search/serper) to enable real-time web search with 2,500 free searches! + + + + +## Managing Conversations + +Jan organizes conversations into threads for easy tracking and revisiting. + +### View Chat History + +- **Left sidebar** shows all conversations +- Click any chat to open the full conversation +- **Favorites**: Pin important threads for quick access +- **Recents**: Access recently used threads + +![Favorites and Recents](./_assets/threads-favorites-and-recents-updated.png) + +### Edit Chat Titles + +1. Hover over a conversation in the sidebar +2. Click **three dots** () icon +3. Click **Rename** +4. Enter new title and save + +![Context Menu](./_assets/threads-context-menu-updated.png) + +### Delete Threads + + +Thread deletion is permanent. No undo available. + + +**Single thread:** +1. Hover over thread in sidebar +2. Click **three dots** () icon +3. Click **Delete** + +**All threads:** +1. 
Hover over `Recents` category +2. Click **three dots** () icon +3. Select **Delete All** + +## Advanced Features + +### Custom Assistant Instructions + +Customize how models respond: + +1. Use the assistant dropdown in the input field +2. Or go to the **Assistant tab** to create custom instructions +3. Instructions work across all models + +![Assistant Instruction](./_assets/assistant-dropdown.png) + +![Add an Assistant Instruction](./_assets/assistant-edit-dialog.png) + +### Model Parameters + +Fine-tune model behavior: +- Click the **Gear icon** next to your model +- Adjust parameters in **Assistant Settings** +- Switch models via the **model selector** + +![Chat with a Model](./_assets/model-parameters.png) + +### Connect Cloud Models (Optional) + +Connect to OpenAI, Anthropic, Groq, Mistral, and others: + +1. Open any thread +2. Select a cloud model from the dropdown +3. Click the **Gear icon** beside the provider +4. Add your API key (ensure sufficient credits) + +![Connect Remote APIs](./_assets/quick-start-03.png) + +For detailed setup, see [Remote APIs](/docs/remote-models/openai). 
diff --git a/website/astro.config.mjs b/website/astro.config.mjs index da1d2f48b0..5395b8e9f0 100644 --- a/website/astro.config.mjs +++ b/website/astro.config.mjs @@ -15,30 +15,6 @@ const __dirname = dirname(__filename) export default defineConfig({ // Deploy to the new v2 subdomain site: 'https://v2.jan.ai', - vite: { - resolve: { - alias: { - '@': path.resolve(__dirname, './src'), - '@/components': path.resolve(__dirname, './src/components'), - '@/layouts': path.resolve(__dirname, './src/layouts'), - '@/assets': path.resolve(__dirname, './src/assets'), - '@/content': path.resolve(__dirname, './src/content'), - '@/styles': path.resolve(__dirname, './src/styles'), - '@/utils': path.resolve(__dirname, './src/utils'), - }, - }, - assetsInclude: [ - '**/*.jpg', - '**/*.jpeg', - '**/*.png', - '**/*.gif', - '**/*.svg', - '**/*.webp', - ], - optimizeDeps: { - exclude: ['@astrojs/starlight'], - }, - }, integrations: [ mermaid({ theme: 'default', @@ -51,219 +27,142 @@ export default defineConfig({ plugins: [ // starlightThemeRapide(), starlightThemeNext(), - starlightSidebarTopics( - [ - { - label: 'Jan Desktop', - link: '/jan/', - icon: 'rocket', - items: [ - { - label: 'HOW TO', - items: [ - { - label: 'Install 👋 Jan', - collapsed: false, - autogenerate: { directory: 'jan/installation' }, - }, - { label: 'Start Chatting', slug: 'jan/threads' }, - { - label: 'Use Jan Models', - collapsed: true, - autogenerate: { directory: 'jan/jan-models' }, - }, - { label: 'Assistants', slug: 'jan/assistants' }, - ], - }, - { - label: 'Cloud Providers', - items: [ - { label: 'Anthropic', slug: 'jan/remote-models/anthropic' }, - { label: 'OpenAI', slug: 'jan/remote-models/openai' }, - { label: 'Gemini', slug: 'jan/remote-models/google' }, - { - label: 'OpenRouter', - slug: 'jan/remote-models/openrouter', - }, - { label: 'Cohere', slug: 'jan/remote-models/cohere' }, - { label: 'Mistral', slug: 'jan/remote-models/mistralai' }, - { label: 'Groq', slug: 'jan/remote-models/groq' }, - ], - }, 
- { - label: 'EXPLANATION', - items: [ - { - label: 'Local AI Engine', - slug: 'jan/explanation/llama-cpp', - }, - { - label: 'Model Parameters', - slug: 'jan/explanation/model-parameters', - }, - ], - }, - { - label: 'ADVANCED', - items: [ - { label: 'Manage Models', slug: 'jan/manage-models' }, - { label: 'Model Context Protocol', slug: 'jan/mcp' }, - { - label: 'MCP Examples', - collapsed: true, - items: [ - { - label: 'Browser Control (Browserbase)', - slug: 'jan/mcp-examples/browser/browserbase', - }, - { - label: 'Code Sandbox (E2B)', - slug: 'jan/mcp-examples/data-analysis/e2b', - }, - { - label: 'Design Creation (Canva)', - slug: 'jan/mcp-examples/design/canva', - }, - { - label: 'Deep Research (Octagon)', - slug: 'jan/mcp-examples/deepresearch/octagon', - }, - { - label: 'Web Search with Exa', - slug: 'jan/mcp-examples/search/exa', - }, - ], - }, - ], - }, - { - label: 'Local Server', - items: [ - { - label: 'All', - collapsed: true, - autogenerate: { directory: 'local-server' }, - }, - ], - }, - { - label: 'REFERENCE', - items: [ - { label: 'Settings', slug: 'jan/settings' }, - { label: 'Jan Data Folder', slug: 'jan/data-folder' }, - { label: 'Troubleshooting', slug: 'jan/troubleshooting' }, - { label: 'Privacy Policy', slug: 'jan/privacy' }, - ], - }, - ], - }, - { - label: 'Jan Mobile', - link: '/mobile/', - badge: { text: 'Soon', variant: 'caution' }, - icon: 'phone', - items: [{ label: 'Overview', slug: 'mobile' }], - }, - { - label: 'Jan Server', - link: '/server/', - badge: { text: 'Soon', variant: 'caution' }, - icon: 'forward-slash', - items: [{ label: 'Overview', slug: 'server' }], - }, - { - label: 'Handbook', - link: '/handbook/', - icon: 'open-book', - items: [ - { label: 'Welcome', slug: 'handbook' }, - { - label: 'About Jan', - items: [ - { - label: 'Why does Jan Exist?', - collapsed: true, - autogenerate: { directory: 'handbook/why' }, - }, - { - label: 'How we make Money', - collapsed: true, - autogenerate: { directory: 'handbook/money' }, 
- }, - { - label: 'Who We Hire', - collapsed: true, - autogenerate: { directory: 'handbook/who' }, - }, - { - label: "Jan's Philosophies", - collapsed: true, - autogenerate: { directory: 'handbook/philosophy' }, - }, - { - label: 'Brand & Identity', - collapsed: true, - autogenerate: { directory: 'handbook/brand' }, - }, - ], - }, - { - label: 'How We Work', - items: [ - { - label: 'Team Roster', - collapsed: true, - autogenerate: { directory: 'handbook/team' }, - }, - { - label: "Jan's Culture", - collapsed: true, - autogenerate: { directory: 'handbook/culture' }, - }, - { - label: 'How We Build', - collapsed: true, - autogenerate: { directory: 'handbook/how' }, - }, - { - label: 'How We Sell', - collapsed: true, - autogenerate: { directory: 'handbook/sell' }, - }, - ], - }, - { - label: 'HR', - items: [ - { - label: 'HR Lifecycle', - collapsed: true, - autogenerate: { directory: 'handbook/lifecycle' }, - }, - { - label: 'HR Policies', - collapsed: true, - autogenerate: { directory: 'handbook/hr' }, - }, - { - label: 'Compensation', - collapsed: true, - autogenerate: { directory: 'handbook/comp' }, - }, - ], - }, - ], - }, - ], + starlightSidebarTopics([ { - exclude: [ - '/prods', - '/api-reference', - '/products', - '/products/**/*', + label: 'Jan Desktop', + link: '/', + icon: 'rocket', + items: [ + { + label: 'GETTING STARTED', + items: [ + { + label: 'Install 👋 Jan', + collapsed: false, + autogenerate: { directory: 'jan/installation' }, + }, + { label: 'QuickStart', slug: 'jan/quickstart' }, + { + label: 'Models', + collapsed: true, + autogenerate: { directory: 'jan/jan-models' }, + }, + { label: 'Assistants', slug: 'jan/assistants' }, + { + label: 'Cloud Providers', + collapsed: true, + items: [ + { + label: 'Anthropic', + slug: 'jan/remote-models/anthropic', + }, + { label: 'OpenAI', slug: 'jan/remote-models/openai' }, + { label: 'Gemini', slug: 'jan/remote-models/google' }, + { + label: 'OpenRouter', + slug: 'jan/remote-models/openrouter', + }, + { label: 
'Cohere', slug: 'jan/remote-models/cohere' }, + { + label: 'Mistral', + slug: 'jan/remote-models/mistralai', + }, + { label: 'Groq', slug: 'jan/remote-models/groq' }, + ], + }, + ], + }, + { + label: 'TUTORIALS', + items: [ + { + label: 'MCP Examples', + collapsed: true, + items: [ + { + label: 'Browser Control (Browserbase)', + slug: 'jan/mcp-examples/browser/browserbase', + }, + { + label: 'Code Sandbox (E2B)', + slug: 'jan/mcp-examples/data-analysis/e2b', + }, + { + label: 'Design Creation (Canva)', + slug: 'jan/mcp-examples/design/canva', + }, + { + label: 'Deep Research (Octagon)', + slug: 'jan/mcp-examples/deepresearch/octagon', + }, + { + label: 'Serper Search', + slug: 'jan/mcp-examples/search/serper', + }, + { + label: 'Web Search (Exa)', + slug: 'jan/mcp-examples/search/exa', + }, + ], + }, + ], + }, + { + label: 'EXPLANATION', + items: [ + { + label: 'Local AI Engine', + slug: 'jan/explanation/llama-cpp', + }, + { + label: 'Model Parameters', + slug: 'jan/explanation/model-parameters', + }, + ], + }, + { + label: 'ADVANCED', + items: [ + { label: 'Manage Models', slug: 'jan/manage-models' }, + { label: 'Model Context Protocol', slug: 'jan/mcp' }, + ], + }, + { + label: 'Local Server', + items: [ + { + label: 'All', + collapsed: true, + autogenerate: { directory: 'local-server' }, + }, + ], + }, + { + label: 'REFERENCE', + items: [ + { label: 'Settings', slug: 'jan/settings' }, + { label: 'Jan Data Folder', slug: 'jan/data-folder' }, + { label: 'Troubleshooting', slug: 'jan/troubleshooting' }, + { label: 'Privacy Policy', slug: 'jan/privacy' }, + ], + }, ], - } - ), + }, + { + label: 'Jan Mobile', + link: '/mobile/', + badge: { text: 'Soon', variant: 'caution' }, + icon: 'phone', + items: [{ label: 'Overview', slug: 'mobile' }], + }, + { + label: 'Jan Server', + link: '/server/', + badge: { text: 'Soon', variant: 'caution' }, + icon: 'forward-slash', + items: [{ label: 'Overview', slug: 'server' }], + }, + ]), ], social: [ { @@ -282,9 +181,6 @@ export 
default defineConfig({ href: 'https://discord.com/invite/FTk2MvZwJH', }, ], - components: { - Header: './src/components/CustomNav.astro', - }, }), ], }) diff --git a/website/public/gifs/lucy_demo.gif b/website/public/gifs/lucy_demo.gif new file mode 100644 index 0000000000..824fa21119 Binary files /dev/null and b/website/public/gifs/lucy_demo.gif differ diff --git a/website/scripts/fix-blog-images.js b/website/scripts/fix-blog-images.js deleted file mode 100644 index 0412289751..0000000000 --- a/website/scripts/fix-blog-images.js +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env node - -import fs from 'fs' -import path from 'path' -import { fileURLToPath } from 'url' -import { dirname } from 'path' - -const __filename = fileURLToPath(import.meta.url) -const __dirname = dirname(__filename) - -const blogDir = path.join(__dirname, '..', 'src', 'content', 'blog') - -// Function to convert filename to a valid JavaScript variable name -function toVariableName(filename) { - // Remove extension and special characters, convert to camelCase - const base = path.basename(filename, path.extname(filename)) - let varName = base - .replace(/[-_\s]+(.)?/g, (_, c) => (c ? 
c.toUpperCase() : '')) - .replace(/[^a-zA-Z0-9]/g, '') - .replace(/^./, (c) => c.toLowerCase()) - - // If the variable name starts with a number, prefix with 'img' - if (/^[0-9]/.test(varName)) { - varName = 'img' + varName.charAt(0).toUpperCase() + varName.slice(1) - } - - return varName -} - -// Function to process a single MDX file -function processMDXFile(filePath) { - console.log(`Processing: ${filePath}`) - - let content = fs.readFileSync(filePath, 'utf-8') - - // Find all image references - const imageRegex = /!\[([^\]]*)\]\((\.\/_assets\/[^)]+)\)/g - const images = [] - let match - - while ((match = imageRegex.exec(content)) !== null) { - const altText = match[1] - const imagePath = match[2] - const filename = path.basename(imagePath) - const varName = toVariableName(filename) + 'Img' - - // Check if we already have this image - if (!images.find((img) => img.varName === varName)) { - images.push({ - varName, - path: imagePath, - altText, - originalMatch: match[0], - }) - } - } - - if (images.length === 0) { - console.log(` No images found in ${path.basename(filePath)}`) - return - } - - console.log(` Found ${images.length} images`) - - // Find where to insert imports (after existing imports or frontmatter) - const frontmatterEnd = content.indexOf('---', content.indexOf('---') + 3) + 3 - let importInsertPosition = frontmatterEnd - - // Check if there are already imports - const existingImportRegex = /^import\s+.*$/gm - const imports = content.match(existingImportRegex) - - if (imports && imports.length > 0) { - // Find the last import - const lastImport = imports[imports.length - 1] - importInsertPosition = content.indexOf(lastImport) + lastImport.length - } - - // Generate import statements - const importStatements = images - .map((img) => `import ${img.varName} from '${img.path}';`) - .join('\n') - - // Insert imports - if (imports && imports.length > 0) { - // Add to existing imports - content = - content.slice(0, importInsertPosition) + - '\n' + - 
importStatements + - content.slice(importInsertPosition) - } else { - // Add new import section after frontmatter - content = - content.slice(0, frontmatterEnd) + - '\n\n' + - importStatements + - '\n' + - content.slice(frontmatterEnd) - } - - // Replace all image references with JSX img tags - images.forEach((img) => { - // Create regex for this specific image - const specificImageRegex = new RegExp( - `!\\[([^\\]]*)\\]\\(${img.path.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}\\)`, - 'g' - ) - - content = content.replace(specificImageRegex, (match, altText) => { - return `${altText || img.altText}` - }) - }) - - // Write the updated content back - fs.writeFileSync(filePath, content) - console.log(` ✓ Updated ${path.basename(filePath)}`) -} - -// Process all MDX files in the blog directory -function processAllBlogPosts() { - const files = fs.readdirSync(blogDir) - const mdxFiles = files.filter((file) => file.endsWith('.mdx')) - - console.log(`Found ${mdxFiles.length} MDX files in blog directory\n`) - - mdxFiles.forEach((file) => { - const filePath = path.join(blogDir, file) - try { - processMDXFile(filePath) - } catch (error) { - console.error(`Error processing ${file}:`, error.message) - } - }) - - console.log('\n✨ All blog posts processed!') -} - -// Run the script -processAllBlogPosts() diff --git a/website/src/assets/blog/3090s.jpg b/website/src/assets/blog/3090s.jpg deleted file mode 100644 index 3a62b3f6f6..0000000000 Binary files a/website/src/assets/blog/3090s.jpg and /dev/null differ diff --git a/website/src/assets/blog/4070s.jpg b/website/src/assets/blog/4070s.jpg deleted file mode 100644 index 3d86223470..0000000000 Binary files a/website/src/assets/blog/4070s.jpg and /dev/null differ diff --git a/website/src/assets/blog/4090s.png b/website/src/assets/blog/4090s.png deleted file mode 100644 index 2c49a3248a..0000000000 Binary files a/website/src/assets/blog/4090s.png and /dev/null differ diff --git a/website/src/assets/blog/ai-locally-llama.cpp.jpg 
b/website/src/assets/blog/ai-locally-llama.cpp.jpg deleted file mode 100644 index 967b63bf73..0000000000 Binary files a/website/src/assets/blog/ai-locally-llama.cpp.jpg and /dev/null differ diff --git a/website/src/assets/blog/catastrophic-demo.png b/website/src/assets/blog/catastrophic-demo.png deleted file mode 100644 index 7c869fc0e7..0000000000 Binary files a/website/src/assets/blog/catastrophic-demo.png and /dev/null differ diff --git a/website/src/assets/blog/chat-with-docs-prompt.jpg b/website/src/assets/blog/chat-with-docs-prompt.jpg deleted file mode 100644 index df47dd4ef7..0000000000 Binary files a/website/src/assets/blog/chat-with-docs-prompt.jpg and /dev/null differ diff --git a/website/src/assets/blog/chat-with-your-docs-offline-ai.jpg b/website/src/assets/blog/chat-with-your-docs-offline-ai.jpg deleted file mode 100644 index efcda0f079..0000000000 Binary files a/website/src/assets/blog/chat-with-your-docs-offline-ai.jpg and /dev/null differ diff --git a/website/src/assets/blog/chat-with-your-docs2.jpg b/website/src/assets/blog/chat-with-your-docs2.jpg deleted file mode 100644 index 1577b3f5c9..0000000000 Binary files a/website/src/assets/blog/chat-with-your-docs2.jpg and /dev/null differ diff --git a/website/src/assets/blog/deepseek-r1-locally-jan.jpg b/website/src/assets/blog/deepseek-r1-locally-jan.jpg deleted file mode 100644 index 2168b8986c..0000000000 Binary files a/website/src/assets/blog/deepseek-r1-locally-jan.jpg and /dev/null differ diff --git a/website/src/assets/blog/download-jan.jpg b/website/src/assets/blog/download-jan.jpg deleted file mode 100644 index f799260c7f..0000000000 Binary files a/website/src/assets/blog/download-jan.jpg and /dev/null differ diff --git a/website/src/assets/blog/egpu.jpg b/website/src/assets/blog/egpu.jpg deleted file mode 100644 index 9f631d4fd8..0000000000 Binary files a/website/src/assets/blog/egpu.jpg and /dev/null differ diff --git a/website/src/assets/blog/gradient-decent.gif 
b/website/src/assets/blog/gradient-decent.gif deleted file mode 100644 index 9828f2fe94..0000000000 Binary files a/website/src/assets/blog/gradient-decent.gif and /dev/null differ diff --git a/website/src/assets/blog/hugging-face-jan-model-download.jpg b/website/src/assets/blog/hugging-face-jan-model-download.jpg deleted file mode 100644 index c6cfa8ea5a..0000000000 Binary files a/website/src/assets/blog/hugging-face-jan-model-download.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-hf-model-download.jpg b/website/src/assets/blog/jan-hf-model-download.jpg deleted file mode 100644 index 929acf2ffe..0000000000 Binary files a/website/src/assets/blog/jan-hf-model-download.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-hub-deepseek-r1.jpg b/website/src/assets/blog/jan-hub-deepseek-r1.jpg deleted file mode 100644 index 12c0c66404..0000000000 Binary files a/website/src/assets/blog/jan-hub-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-hub-download-deepseek-r1-2.jpg b/website/src/assets/blog/jan-hub-download-deepseek-r1-2.jpg deleted file mode 100644 index 24be4bd25d..0000000000 Binary files a/website/src/assets/blog/jan-hub-download-deepseek-r1-2.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-hub-download-deepseek-r1.jpg b/website/src/assets/blog/jan-hub-download-deepseek-r1.jpg deleted file mode 100644 index 83d9ab3701..0000000000 Binary files a/website/src/assets/blog/jan-hub-download-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-hub-for-ai-models.jpg b/website/src/assets/blog/jan-hub-for-ai-models.jpg deleted file mode 100644 index a158499b43..0000000000 Binary files a/website/src/assets/blog/jan-hub-for-ai-models.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-library-deepseek-r1.jpg b/website/src/assets/blog/jan-library-deepseek-r1.jpg deleted file mode 100644 index 6a54082dc1..0000000000 Binary files 
a/website/src/assets/blog/jan-library-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-local-ai.jpg b/website/src/assets/blog/jan-local-ai.jpg deleted file mode 100644 index 2c8c145ff5..0000000000 Binary files a/website/src/assets/blog/jan-local-ai.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-model-download.jpg b/website/src/assets/blog/jan-model-download.jpg deleted file mode 100644 index 7e949403d0..0000000000 Binary files a/website/src/assets/blog/jan-model-download.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-model-selection.jpg b/website/src/assets/blog/jan-model-selection.jpg deleted file mode 100644 index b630c800ec..0000000000 Binary files a/website/src/assets/blog/jan-model-selection.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-runs-deepseek-r1-distills.jpg b/website/src/assets/blog/jan-runs-deepseek-r1-distills.jpg deleted file mode 100644 index 02ce847f4f..0000000000 Binary files a/website/src/assets/blog/jan-runs-deepseek-r1-distills.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan-system-prompt-deepseek-r1.jpg b/website/src/assets/blog/jan-system-prompt-deepseek-r1.jpg deleted file mode 100644 index f79e71af06..0000000000 Binary files a/website/src/assets/blog/jan-system-prompt-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/assets/blog/jan.ai.jpg b/website/src/assets/blog/jan.ai.jpg deleted file mode 100644 index d635d1ab9d..0000000000 Binary files a/website/src/assets/blog/jan.ai.jpg and /dev/null differ diff --git a/website/src/assets/blog/local-ai-model-parameters.jpg b/website/src/assets/blog/local-ai-model-parameters.jpg deleted file mode 100644 index 1d26fc4a5c..0000000000 Binary files a/website/src/assets/blog/local-ai-model-parameters.jpg and /dev/null differ diff --git a/website/src/assets/blog/offline-chatgpt-alternative-ai-without-internet.jpg b/website/src/assets/blog/offline-chatgpt-alternative-ai-without-internet.jpg 
deleted file mode 100644 index 6dffb1e952..0000000000 Binary files a/website/src/assets/blog/offline-chatgpt-alternative-ai-without-internet.jpg and /dev/null differ diff --git a/website/src/assets/blog/offline-chatgpt-alternatives-jan.jpg b/website/src/assets/blog/offline-chatgpt-alternatives-jan.jpg deleted file mode 100644 index 065b336365..0000000000 Binary files a/website/src/assets/blog/offline-chatgpt-alternatives-jan.jpg and /dev/null differ diff --git a/website/src/assets/blog/og-4090s.webp b/website/src/assets/blog/og-4090s.webp deleted file mode 100644 index 6db1b10b28..0000000000 Binary files a/website/src/assets/blog/og-4090s.webp and /dev/null differ diff --git a/website/src/assets/blog/open-source-ai-quantization.jpg b/website/src/assets/blog/open-source-ai-quantization.jpg deleted file mode 100644 index fe605c3cdc..0000000000 Binary files a/website/src/assets/blog/open-source-ai-quantization.jpg and /dev/null differ diff --git a/website/src/assets/blog/openchat-bench-0106.png b/website/src/assets/blog/openchat-bench-0106.png deleted file mode 100644 index 9fa37960f1..0000000000 Binary files a/website/src/assets/blog/openchat-bench-0106.png and /dev/null differ diff --git a/website/src/assets/blog/qwen3-in-jan-hub.jpeg b/website/src/assets/blog/qwen3-in-jan-hub.jpeg deleted file mode 100644 index e58c5beaba..0000000000 Binary files a/website/src/assets/blog/qwen3-in-jan-hub.jpeg and /dev/null differ diff --git a/website/src/assets/blog/qwen3-settings-in-jan.jpeg b/website/src/assets/blog/qwen3-settings-in-jan.jpeg deleted file mode 100644 index 82d7540a76..0000000000 Binary files a/website/src/assets/blog/qwen3-settings-in-jan.jpeg and /dev/null differ diff --git a/website/src/assets/blog/qwen3-settings-jan-ai.jpeg b/website/src/assets/blog/qwen3-settings-jan-ai.jpeg deleted file mode 100644 index 7fc432e382..0000000000 Binary files a/website/src/assets/blog/qwen3-settings-jan-ai.jpeg and /dev/null differ diff --git 
a/website/src/assets/blog/replay.png b/website/src/assets/blog/replay.png deleted file mode 100644 index 8ada6ce84b..0000000000 Binary files a/website/src/assets/blog/replay.png and /dev/null differ diff --git a/website/src/assets/blog/run-ai-locally-with-jan.jpg b/website/src/assets/blog/run-ai-locally-with-jan.jpg deleted file mode 100644 index 942ab38ba5..0000000000 Binary files a/website/src/assets/blog/run-ai-locally-with-jan.jpg and /dev/null differ diff --git a/website/src/assets/blog/run-deepseek-r1-locally-in-jan.jpg b/website/src/assets/blog/run-deepseek-r1-locally-in-jan.jpg deleted file mode 100644 index aa69805856..0000000000 Binary files a/website/src/assets/blog/run-deepseek-r1-locally-in-jan.jpg and /dev/null differ diff --git a/website/src/assets/blog/throughput_Comparison.png b/website/src/assets/blog/throughput_Comparison.png deleted file mode 100644 index 6bb63d03ca..0000000000 Binary files a/website/src/assets/blog/throughput_Comparison.png and /dev/null differ diff --git a/website/src/assets/chat_jan_v1.png b/website/src/assets/chat_jan_v1.png new file mode 100644 index 0000000000..fd438d143a Binary files /dev/null and b/website/src/assets/chat_jan_v1.png differ diff --git a/website/src/assets/creative_bench_jan_v1.png b/website/src/assets/creative_bench_jan_v1.png new file mode 100644 index 0000000000..1444d15245 Binary files /dev/null and b/website/src/assets/creative_bench_jan_v1.png differ diff --git a/website/src/assets/download_janv1.png b/website/src/assets/download_janv1.png new file mode 100644 index 0000000000..00d5a78782 Binary files /dev/null and b/website/src/assets/download_janv1.png differ diff --git a/website/src/assets/enable_mcp.png b/website/src/assets/enable_mcp.png new file mode 100644 index 0000000000..88ad8c2abc Binary files /dev/null and b/website/src/assets/enable_mcp.png differ diff --git a/website/src/assets/lucy.jpeg b/website/src/assets/lucy.jpeg new file mode 100644 index 0000000000..6085967e1a Binary files 
/dev/null and b/website/src/assets/lucy.jpeg differ diff --git a/website/src/assets/serper_janparams.png b/website/src/assets/serper_janparams.png new file mode 100644 index 0000000000..89c8ad5027 Binary files /dev/null and b/website/src/assets/serper_janparams.png differ diff --git a/website/src/assets/serper_page.png b/website/src/assets/serper_page.png new file mode 100644 index 0000000000..34c7adfb99 Binary files /dev/null and b/website/src/assets/serper_page.png differ diff --git a/website/src/assets/serper_playground.png b/website/src/assets/serper_playground.png new file mode 100644 index 0000000000..a52ac815e5 Binary files /dev/null and b/website/src/assets/serper_playground.png differ diff --git a/website/src/assets/simpleqa_jan_v1.png b/website/src/assets/simpleqa_jan_v1.png new file mode 100644 index 0000000000..6cb7b125a1 Binary files /dev/null and b/website/src/assets/simpleqa_jan_v1.png differ diff --git a/website/src/assets/simpleqa_lucy.png b/website/src/assets/simpleqa_lucy.png new file mode 100644 index 0000000000..204f08b078 Binary files /dev/null and b/website/src/assets/simpleqa_lucy.png differ diff --git a/website/src/assets/toggle_tools b/website/src/assets/toggle_tools new file mode 100644 index 0000000000..53c7e3b055 Binary files /dev/null and b/website/src/assets/toggle_tools differ diff --git a/website/src/assets/toggle_tools.png b/website/src/assets/toggle_tools.png new file mode 100644 index 0000000000..53c7e3b055 Binary files /dev/null and b/website/src/assets/toggle_tools.png differ diff --git a/website/src/assets/turn_on_mcp.png b/website/src/assets/turn_on_mcp.png new file mode 100644 index 0000000000..9bce0e81be Binary files /dev/null and b/website/src/assets/turn_on_mcp.png differ diff --git a/website/src/components/Blog/BlogImage.astro b/website/src/components/Blog/BlogImage.astro deleted file mode 100644 index cb3190d566..0000000000 --- a/website/src/components/Blog/BlogImage.astro +++ /dev/null @@ -1,230 +0,0 @@ ---- -export 
interface Props { - src: string; - alt: string; - caption?: string; - width?: number; - height?: number; - loading?: 'lazy' | 'eager'; - class?: string; -} - -const { - src, - alt, - caption, - width, - height, - loading = 'lazy', - class: className = '' -} = Astro.props; - -// Handle different image path formats -let imageSrc = src; - -// If the path starts with ./ or ../, it's a relative path from the MDX file -if (src.startsWith('./') || src.startsWith('../')) { - // Remove the leading ./ or ../ - imageSrc = src.replace(/^\.\.?\//, ''); - - // Prepend the blog content path if it doesn't include it - if (!imageSrc.includes('/content/blog/')) { - imageSrc = `/src/content/blog/${imageSrc}`; - } -} else if (!src.startsWith('http') && !src.startsWith('/')) { - // For paths without ./ prefix, assume they're relative to blog content - imageSrc = `/src/content/blog/${src}`; -} ---- - -
- {alt} - {caption && ( -
{caption}
- )} -
- - - - diff --git a/website/src/components/Blog/CTABlog.astro b/website/src/components/Blog/CTABlog.astro deleted file mode 100644 index 3d1ab10440..0000000000 --- a/website/src/components/Blog/CTABlog.astro +++ /dev/null @@ -1,87 +0,0 @@ ---- -export interface Props { - title?: string; - description?: string; - buttonText?: string; - buttonLink?: string; - variant?: 'primary' | 'secondary' | 'gradient'; - align?: 'left' | 'center' | 'right'; -} - -const { - title = "Ready to get started?", - description = "Download Jan and start running AI models locally on your device.", - buttonText = "Download Jan", - buttonLink = "https://jan.ai", - variant = 'primary', - align = 'center' -} = Astro.props; - -const variantClasses = { - primary: 'bg-blue-50 dark:bg-blue-900/20 border-blue-200 dark:border-blue-800', - secondary: 'bg-gray-50 dark:bg-gray-900/20 border-gray-200 dark:border-gray-800', - gradient: 'bg-gradient-to-r from-blue-50 to-purple-50 dark:from-blue-900/20 dark:to-purple-900/20 border-purple-200 dark:border-purple-800' -}; - -const alignClasses = { - left: 'text-left', - center: 'text-center', - right: 'text-right' -}; - -const buttonVariantClasses = { - primary: 'bg-blue-600 hover:bg-blue-700 text-white', - secondary: 'bg-gray-800 hover:bg-gray-900 dark:bg-gray-200 dark:hover:bg-gray-300 text-white dark:text-gray-900', - gradient: 'bg-gradient-to-r from-blue-600 to-purple-600 hover:from-blue-700 hover:to-purple-700 text-white' -}; ---- - -
-
- {title && ( -

- {title} -

- )} - - {description && ( -

- {description} -

- )} - - - {buttonText} - - - - -
-
- - diff --git a/website/src/components/Callout.astro b/website/src/components/Callout.astro deleted file mode 100644 index 5329bf57aa..0000000000 --- a/website/src/components/Callout.astro +++ /dev/null @@ -1,85 +0,0 @@ ---- -export interface Props { - type?: 'info' | 'warning' | 'error' | 'success' | 'note'; - emoji?: string; - children?: any; -} - -const { type = 'note', emoji } = Astro.props; - -const typeConfig = { - info: { - bgColor: 'bg-blue-50 dark:bg-blue-900/20', - borderColor: 'border-blue-200 dark:border-blue-800', - textColor: 'text-blue-900 dark:text-blue-200', - defaultEmoji: 'ℹ️' - }, - warning: { - bgColor: 'bg-yellow-50 dark:bg-yellow-900/20', - borderColor: 'border-yellow-200 dark:border-yellow-800', - textColor: 'text-yellow-900 dark:text-yellow-200', - defaultEmoji: '⚠️' - }, - error: { - bgColor: 'bg-red-50 dark:bg-red-900/20', - borderColor: 'border-red-200 dark:border-red-800', - textColor: 'text-red-900 dark:text-red-200', - defaultEmoji: '🚨' - }, - success: { - bgColor: 'bg-green-50 dark:bg-green-900/20', - borderColor: 'border-green-200 dark:border-green-800', - textColor: 'text-green-900 dark:text-green-200', - defaultEmoji: '✅' - }, - note: { - bgColor: 'bg-gray-50 dark:bg-gray-900/20', - borderColor: 'border-gray-200 dark:border-gray-800', - textColor: 'text-gray-900 dark:text-gray-200', - defaultEmoji: '📝' - } -}; - -const config = typeConfig[type] || typeConfig.note; -const displayEmoji = emoji || config.defaultEmoji; ---- - -
-
- -
- -
-
-
- - diff --git a/website/src/components/Changelog/ChangelogHeader.astro b/website/src/components/Changelog/ChangelogHeader.astro deleted file mode 100644 index 57212fb9bf..0000000000 --- a/website/src/components/Changelog/ChangelogHeader.astro +++ /dev/null @@ -1,36 +0,0 @@ ---- -export interface Props { - title: string; - date: string; - ogImage?: string; -} - -const { title, date, ogImage } = Astro.props; - -// Format the date nicely -const formattedDate = new Date(date).toLocaleDateString('en-US', { - year: 'numeric', - month: 'long', - day: 'numeric' -}); ---- - -
- {ogImage && ( -
- {title} -
- )} -
- - diff --git a/website/src/components/CustomNav.astro b/website/src/components/CustomNav.astro deleted file mode 100644 index 62300644c3..0000000000 --- a/website/src/components/CustomNav.astro +++ /dev/null @@ -1,661 +0,0 @@ ---- -// Custom navigation component to add Products and API Reference links -// This overrides the default Starlight Header component -import Search from '@astrojs/starlight/components/Search.astro'; -import ThemeSelect from '@astrojs/starlight/components/ThemeSelect.astro'; -import { Icon } from '@astrojs/starlight/components'; - -// Determine if we're on a docs page based on the current path -const currentPath = Astro.url.pathname; -const isDocsPage = currentPath.startsWith('/jan/') || - currentPath.startsWith('/mobile/') || - currentPath.startsWith('/server/') || - currentPath.startsWith('/local-server/') || - currentPath === '/' || - currentPath === '/index' || - currentPath === '/docs' || - currentPath === '/docs/'; ---- - -
- -
- - - - diff --git a/website/src/components/DownloadButton.astro b/website/src/components/DownloadButton.astro deleted file mode 100644 index ba4a5d49a3..0000000000 --- a/website/src/components/DownloadButton.astro +++ /dev/null @@ -1,233 +0,0 @@ ---- -export interface Props { - class?: string; - showStats?: boolean; - downloadCount?: string; -} - -const { class: className, showStats = false, downloadCount = '3.8M+' } = Astro.props; - -// Download links for different platforms -const downloadLinks = { - 'mac-intel': 'https://github.com/janhq/jan/releases/download/v0.5.14/jan-mac-x64-0.5.14.dmg', - 'mac-arm': 'https://github.com/janhq/jan/releases/download/v0.5.14/jan-mac-arm64-0.5.14.dmg', - 'windows': 'https://github.com/janhq/jan/releases/download/v0.5.14/jan-win-x64-0.5.14.exe', - 'linux-deb': 'https://github.com/janhq/jan/releases/download/v0.5.14/jan-linux-amd64-0.5.14.deb', - 'linux-appimage': 'https://github.com/janhq/jan/releases/download/v0.5.14/jan-linux-x86_64-0.5.14.AppImage' -}; ---- - -
-
- - - - -
- - - - - - - - -
-
- - {showStats && ( -

- {downloadCount} downloads | Free & Open Source -

- )} -
- - - - diff --git a/website/src/components/ReleaseDatabase.astro b/website/src/components/ReleaseDatabase.astro deleted file mode 100644 index 13f5c5a1d6..0000000000 --- a/website/src/components/ReleaseDatabase.astro +++ /dev/null @@ -1,671 +0,0 @@ ---- -export interface Props { - className?: string; -} - -const { className = '' } = Astro.props; ---- - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Release Database
Release VersionTarget DateStatus
- v0.6.7 - Web Search - ReleasedLive
-
-
-
User Stories
-
    -
  • As a user, I want to search the web directly from Jan chat for real-time information
  • -
  • As a developer, I want better API documentation for integration
  • -
  • As a researcher, I want source attribution for all search results
  • -
-
-
-
New Features
-
    -
  • Privacy-respecting web search integration
  • -
  • Multiple search engine support
  • -
  • Improved model downloading experience
  • -
  • Enhanced error handling and user feedback
  • -
-
-
-
Documentation
-
    -
  • Web Search API documentation
  • -
  • Privacy policy updates
  • -
  • Search configuration guide
  • -
-
-
-
- v0.6.5 - Desktop Stability - ReleasedLive
-
-
-
Improvements
-
    -
  • Enhanced memory management for large models
  • -
  • Improved GPU utilization
  • -
  • Better cross-platform compatibility
  • -
-
-
-
Bug Fixes
-
    -
  • Fixed model loading crashes on Windows
  • -
  • Resolved memory leaks in long conversations
  • -
  • Fixed UI freezing during model downloads
  • -
-
-
-
Performance
-
    -
  • 30% faster model loading times
  • -
  • Reduced memory footprint by 25%
  • -
  • Improved response streaming
  • -
-
-
-
- v0.7.0 - Deep Research - January 2025Building
-
-
-
User Stories
-
    -
  • As a researcher, I want to conduct deep research with multiple sources and citations
  • -
  • As a user, I want Jan Nano to work seamlessly on mobile devices
  • -
  • As a team lead, I want to preview jan.ai for my organization
  • -
-
-
-
New Features
-
    -
  • Deep research capabilities with multi-source analysis
  • -
  • jan.ai web platform beta launch
  • -
  • Improved Jan Nano performance and mobile optimization
  • -
  • Citation management and research reports
  • -
-
-
-
Documentation
-
    -
  • Research methodology guide
  • -
  • jan.ai platform documentation
  • -
  • Mobile optimization best practices
  • -
-
-
-
- v0.8.0 - Browser Automation - March 2025Building
-
-
-
User Stories
-
    -
  • As a user, I want Jan to automate web tasks for me safely and efficiently
  • -
  • As a business user, I want to automate repetitive web workflows
  • -
  • As a developer, I want browser automation APIs for custom integrations
  • -
-
-
-
New Features
-
    -
  • Browser automation capabilities (Beta)
  • -
  • Form filling and data extraction
  • -
  • Website navigation and interaction
  • -
  • Safety policies and user confirmation flows
  • -
-
-
-
Documentation
-
    -
  • Browser automation safety guide
  • -
  • Web scraping best practices
  • -
  • API reference for automation tools
  • -
-
-
-
- v0.9.0 - Mobile Launch - Q1 2025Planned
-
-
-
User Stories
-
    -
  • As a mobile user, I want Jan AI available on my iOS/Android device
  • -
  • As a commuter, I want to seamlessly switch between desktop and mobile
  • -
  • As a privacy-conscious user, I want local AI on my phone
  • -
-
-
-
New Features
-
    -
  • iOS and Android mobile applications
  • -
  • Three adaptive modes (Desktop, Server, Local)
  • -
  • Voice-first interface
  • -
  • Cross-device synchronization
  • -
  • Jan Nano on-device processing
  • -
-
-
-
Documentation
-
    -
  • Mobile setup and configuration guide
  • -
  • Voice interaction best practices
  • -
  • Device syncing tutorial
  • -
-
-
-
- v1.0.0 - Server Edition - Q2 2025Planned
-
-
-
User Stories
-
    -
  • As an IT admin, I want to deploy Jan for my entire organization
  • -
  • As a team lead, I want centralized AI with team collaboration features
  • -
  • As a CTO, I want enterprise-grade security and compliance
  • -
-
-
-
New Features
-
    -
  • Multi-user server deployment
  • -
  • Enterprise authentication (SSO, LDAP)
  • -
  • Team collaboration and shared conversations
  • -
  • Docker and Kubernetes deployment options
  • -
  • Admin dashboard and user management
  • -
-
-
-
Documentation
-
    -
  • Enterprise deployment guide
  • -
  • Security and compliance documentation
  • -
  • Admin management tutorials
  • -
-
-
-
- v1.1.0 - AI Agents - Q1 2026 (Planned)
-
-
-
User Stories
-
    -
  • As a user, I want autonomous agents to handle complex multi-step tasks
  • -
  • As a business owner, I want AI that can coordinate multiple tools automatically
  • -
  • As a power user, I want to create custom agent workflows
  • -
-
-
-
New Features
-
    -
  • Autonomous AI agents
  • -
  • Multi-step task planning and execution
  • -
  • Tool orchestration and coordination
  • -
  • Goal-oriented intelligent reasoning
  • -
  • Custom agent workflow builder
  • -
-
-
-
Documentation
-
    -
  • Agent development guide
  • -
  • Workflow automation tutorials
  • -
  • Safety and oversight best practices
  • -
-
-
-
- v1.2.0 - Lucy Multimodal - Q2 2025 (Planned)
-
-
-
User Stories
-
    -
  • As a content creator, I want AI that can understand and process images
  • -
  • As a researcher, I want to analyze documents with both text and visuals
  • -
  • As a designer, I want AI that can help with visual content creation
  • -
-
-
-
New Features
-
    -
  • Lucy multimodal model release
  • -
  • Image understanding and analysis
  • -
  • Document processing with OCR
  • -
  • Visual reasoning capabilities
  • -
  • Audio processing (planned)
  • -
-
-
-
Documentation
-
    -
  • Multimodal AI usage guide
  • -
  • Image processing tutorials
  • -
  • Visual reasoning examples
  • -
-
-
-
-
- - - - diff --git a/website/src/components/SimpleFloatingNav.astro b/website/src/components/SimpleFloatingNav.astro deleted file mode 100644 index 8c43fd6d17..0000000000 --- a/website/src/components/SimpleFloatingNav.astro +++ /dev/null @@ -1,200 +0,0 @@ ---- -export interface Props { - currentPage?: string; -} - -const { currentPage = 'products' } = Astro.props; ---- - - - - - - diff --git a/website/src/components/SimpleTOC.astro b/website/src/components/SimpleTOC.astro deleted file mode 100644 index 94f0418dca..0000000000 --- a/website/src/components/SimpleTOC.astro +++ /dev/null @@ -1,338 +0,0 @@ ---- -export interface Props { - sections?: Array<{ - id: string; - title: string; - level?: number; - }>; -} - -const { - sections = [ - { id: 'what-were-building', title: 'What We\'re Building' }, - { id: 'two-modes-one-experience', title: 'Two Modes, One Experience' }, - { id: 'our-product-principles', title: 'Our Product Principles' }, - { id: 'available-on-every-device', title: 'Available on Every Device' }, - { id: 'jan-desktop', title: 'Jan Desktop', level: 2 }, - { id: 'jan-web', title: 'Jan Web', level: 2 }, - { id: 'jan-mobile', title: 'Jan Mobile', level: 2 }, - { id: 'jan-server', title: 'Jan Server', level: 2 }, - { id: 'jan-mobile-three-modes-one-experience', title: 'Jan Mobile: Three Modes' }, - { id: 'what-makes-jan-different', title: 'What Makes Jan Different' }, - { id: 'development-timeline', title: 'Development Timeline' } - ] -} = Astro.props; ---- - - - - - - diff --git a/website/src/components/StatusIndicator.astro b/website/src/components/StatusIndicator.astro deleted file mode 100644 index f60b234541..0000000000 --- a/website/src/components/StatusIndicator.astro +++ /dev/null @@ -1,147 +0,0 @@ ---- -export interface Props { - status: 'active' | 'warning' | 'success' | 'error' | 'idle'; - label: string; - pulse?: boolean; - size?: 'small' | 'medium' | 'large'; -} - -const { status, label, pulse = true, size = 'medium' } = Astro.props; - -const 
statusColors = { - active: '#00ff41', - warning: '#ffb000', - success: '#00ff41', - error: '#ff0040', - idle: '#888888' -}; - -const statusColor = statusColors[status]; ---- - -
-
- {label} -
- - diff --git a/website/src/components/Steps.astro b/website/src/components/Steps.astro deleted file mode 100644 index 3abeff7e53..0000000000 --- a/website/src/components/Steps.astro +++ /dev/null @@ -1,112 +0,0 @@ ---- -export interface Props { - class?: string; -} - -const { class: className } = Astro.props; ---- - -
- -
- - diff --git a/website/src/components/YouTube.astro b/website/src/components/YouTube.astro deleted file mode 100644 index 6459addcb6..0000000000 --- a/website/src/components/YouTube.astro +++ /dev/null @@ -1,60 +0,0 @@ ---- -export interface Props { - id: string; - title?: string; - class?: string; -} - -const { id, title = 'YouTube video player', class: className } = Astro.props; - -// Extract video ID and handle both formats: -// - Simple ID: "4mvHgLy_YV8" -// - ID with params: "4mvHgLy_YV8?si=74cmdMmcH3gmpv0R" -const videoId = id.split('?')[0]; -const params = id.includes('?') ? '?' + id.split('?')[1] : ''; ---- - -
- -
- - diff --git a/website/src/content.config.ts b/website/src/content.config.ts index 3c8b69d82b..1945fdee89 100644 --- a/website/src/content.config.ts +++ b/website/src/content.config.ts @@ -1,38 +1,11 @@ -import { defineCollection, z } from 'astro:content'; -import { docsLoader } from '@astrojs/starlight/loaders'; -import { docsSchema } from '@astrojs/starlight/schema'; -import { videosSchema } from 'starlight-videos/schemas'; - -const changelogSchema = z.object({ - title: z.string(), - description: z.string(), - date: z.date(), - version: z.string().optional(), - image: z.string().optional(), - gif: z.string().optional(), - video: z.string().optional(), - featured: z.boolean().default(false), -}); - -const blogSchema = z.object({ - title: z.string(), - description: z.string(), - date: z.date(), - tags: z.string().optional(), - categories: z.string().optional(), - author: z.string().optional(), - ogImage: z.string().optional(), - featured: z.boolean().default(false), -}); +import { defineCollection, z } from 'astro:content' +import { docsLoader } from '@astrojs/starlight/loaders' +import { docsSchema } from '@astrojs/starlight/schema' +import { videosSchema } from 'starlight-videos/schemas' export const collections = { - docs: defineCollection({ loader: docsLoader(), schema: docsSchema({ extend: videosSchema }) }), - changelog: defineCollection({ - type: 'content', - schema: changelogSchema, - }), - blog: defineCollection({ - type: 'content', - schema: blogSchema, - }), -}; + docs: defineCollection({ + loader: docsLoader(), + schema: docsSchema({ extend: videosSchema }), + }), +} diff --git a/website/src/content/blog/_assets/3090s.jpg b/website/src/content/blog/_assets/3090s.jpg deleted file mode 100644 index 3a62b3f6f6..0000000000 Binary files a/website/src/content/blog/_assets/3090s.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/4070s.jpg b/website/src/content/blog/_assets/4070s.jpg deleted file mode 100644 index 3d86223470..0000000000 
Binary files a/website/src/content/blog/_assets/4070s.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/4090s.png b/website/src/content/blog/_assets/4090s.png deleted file mode 100644 index 2c49a3248a..0000000000 Binary files a/website/src/content/blog/_assets/4090s.png and /dev/null differ diff --git a/website/src/content/blog/_assets/ai-locally-llama.cpp.jpg b/website/src/content/blog/_assets/ai-locally-llama.cpp.jpg deleted file mode 100644 index 967b63bf73..0000000000 Binary files a/website/src/content/blog/_assets/ai-locally-llama.cpp.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/catastrophic-demo.png b/website/src/content/blog/_assets/catastrophic-demo.png deleted file mode 100644 index 7c869fc0e7..0000000000 Binary files a/website/src/content/blog/_assets/catastrophic-demo.png and /dev/null differ diff --git a/website/src/content/blog/_assets/chat-with-docs-prompt.jpg b/website/src/content/blog/_assets/chat-with-docs-prompt.jpg deleted file mode 100644 index df47dd4ef7..0000000000 Binary files a/website/src/content/blog/_assets/chat-with-docs-prompt.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/chat-with-your-docs-offline-ai.jpg b/website/src/content/blog/_assets/chat-with-your-docs-offline-ai.jpg deleted file mode 100644 index efcda0f079..0000000000 Binary files a/website/src/content/blog/_assets/chat-with-your-docs-offline-ai.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/chat-with-your-docs2.jpg b/website/src/content/blog/_assets/chat-with-your-docs2.jpg deleted file mode 100644 index 1577b3f5c9..0000000000 Binary files a/website/src/content/blog/_assets/chat-with-your-docs2.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/deepseek-r1-locally-jan.jpg b/website/src/content/blog/_assets/deepseek-r1-locally-jan.jpg deleted file mode 100644 index 2168b8986c..0000000000 Binary files a/website/src/content/blog/_assets/deepseek-r1-locally-jan.jpg 
and /dev/null differ diff --git a/website/src/content/blog/_assets/download-jan.jpg b/website/src/content/blog/_assets/download-jan.jpg deleted file mode 100644 index f799260c7f..0000000000 Binary files a/website/src/content/blog/_assets/download-jan.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/egpu.jpg b/website/src/content/blog/_assets/egpu.jpg deleted file mode 100644 index 9f631d4fd8..0000000000 Binary files a/website/src/content/blog/_assets/egpu.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/gradient-decent.gif b/website/src/content/blog/_assets/gradient-decent.gif deleted file mode 100644 index 9828f2fe94..0000000000 Binary files a/website/src/content/blog/_assets/gradient-decent.gif and /dev/null differ diff --git a/website/src/content/blog/_assets/hugging-face-jan-model-download.jpg b/website/src/content/blog/_assets/hugging-face-jan-model-download.jpg deleted file mode 100644 index c6cfa8ea5a..0000000000 Binary files a/website/src/content/blog/_assets/hugging-face-jan-model-download.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-hf-model-download.jpg b/website/src/content/blog/_assets/jan-hf-model-download.jpg deleted file mode 100644 index 929acf2ffe..0000000000 Binary files a/website/src/content/blog/_assets/jan-hf-model-download.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-hub-deepseek-r1.jpg b/website/src/content/blog/_assets/jan-hub-deepseek-r1.jpg deleted file mode 100644 index 12c0c66404..0000000000 Binary files a/website/src/content/blog/_assets/jan-hub-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-hub-download-deepseek-r1-2.jpg b/website/src/content/blog/_assets/jan-hub-download-deepseek-r1-2.jpg deleted file mode 100644 index 24be4bd25d..0000000000 Binary files a/website/src/content/blog/_assets/jan-hub-download-deepseek-r1-2.jpg and /dev/null differ diff --git 
a/website/src/content/blog/_assets/jan-hub-download-deepseek-r1.jpg b/website/src/content/blog/_assets/jan-hub-download-deepseek-r1.jpg deleted file mode 100644 index 83d9ab3701..0000000000 Binary files a/website/src/content/blog/_assets/jan-hub-download-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-hub-for-ai-models.jpg b/website/src/content/blog/_assets/jan-hub-for-ai-models.jpg deleted file mode 100644 index a158499b43..0000000000 Binary files a/website/src/content/blog/_assets/jan-hub-for-ai-models.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-library-deepseek-r1.jpg b/website/src/content/blog/_assets/jan-library-deepseek-r1.jpg deleted file mode 100644 index 6a54082dc1..0000000000 Binary files a/website/src/content/blog/_assets/jan-library-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-local-ai.jpg b/website/src/content/blog/_assets/jan-local-ai.jpg deleted file mode 100644 index 2c8c145ff5..0000000000 Binary files a/website/src/content/blog/_assets/jan-local-ai.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-model-download.jpg b/website/src/content/blog/_assets/jan-model-download.jpg deleted file mode 100644 index 7e949403d0..0000000000 Binary files a/website/src/content/blog/_assets/jan-model-download.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-model-selection.jpg b/website/src/content/blog/_assets/jan-model-selection.jpg deleted file mode 100644 index b630c800ec..0000000000 Binary files a/website/src/content/blog/_assets/jan-model-selection.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan-runs-deepseek-r1-distills.jpg b/website/src/content/blog/_assets/jan-runs-deepseek-r1-distills.jpg deleted file mode 100644 index 02ce847f4f..0000000000 Binary files a/website/src/content/blog/_assets/jan-runs-deepseek-r1-distills.jpg and /dev/null differ diff --git 
a/website/src/content/blog/_assets/jan-system-prompt-deepseek-r1.jpg b/website/src/content/blog/_assets/jan-system-prompt-deepseek-r1.jpg deleted file mode 100644 index f79e71af06..0000000000 Binary files a/website/src/content/blog/_assets/jan-system-prompt-deepseek-r1.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/jan.ai.jpg b/website/src/content/blog/_assets/jan.ai.jpg deleted file mode 100644 index d635d1ab9d..0000000000 Binary files a/website/src/content/blog/_assets/jan.ai.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/local-ai-model-parameters.jpg b/website/src/content/blog/_assets/local-ai-model-parameters.jpg deleted file mode 100644 index 1d26fc4a5c..0000000000 Binary files a/website/src/content/blog/_assets/local-ai-model-parameters.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/offline-chatgpt-alternative-ai-without-internet.jpg b/website/src/content/blog/_assets/offline-chatgpt-alternative-ai-without-internet.jpg deleted file mode 100644 index 6dffb1e952..0000000000 Binary files a/website/src/content/blog/_assets/offline-chatgpt-alternative-ai-without-internet.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/offline-chatgpt-alternatives-jan.jpg b/website/src/content/blog/_assets/offline-chatgpt-alternatives-jan.jpg deleted file mode 100644 index 065b336365..0000000000 Binary files a/website/src/content/blog/_assets/offline-chatgpt-alternatives-jan.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/og-4090s.webp b/website/src/content/blog/_assets/og-4090s.webp deleted file mode 100644 index 6db1b10b28..0000000000 Binary files a/website/src/content/blog/_assets/og-4090s.webp and /dev/null differ diff --git a/website/src/content/blog/_assets/open-source-ai-quantization.jpg b/website/src/content/blog/_assets/open-source-ai-quantization.jpg deleted file mode 100644 index fe605c3cdc..0000000000 Binary files 
a/website/src/content/blog/_assets/open-source-ai-quantization.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/openchat-bench-0106.png b/website/src/content/blog/_assets/openchat-bench-0106.png deleted file mode 100644 index 9fa37960f1..0000000000 Binary files a/website/src/content/blog/_assets/openchat-bench-0106.png and /dev/null differ diff --git a/website/src/content/blog/_assets/qwen3-in-jan-hub.jpeg b/website/src/content/blog/_assets/qwen3-in-jan-hub.jpeg deleted file mode 100644 index e58c5beaba..0000000000 Binary files a/website/src/content/blog/_assets/qwen3-in-jan-hub.jpeg and /dev/null differ diff --git a/website/src/content/blog/_assets/qwen3-settings-in-jan.jpeg b/website/src/content/blog/_assets/qwen3-settings-in-jan.jpeg deleted file mode 100644 index 82d7540a76..0000000000 Binary files a/website/src/content/blog/_assets/qwen3-settings-in-jan.jpeg and /dev/null differ diff --git a/website/src/content/blog/_assets/qwen3-settings-jan-ai.jpeg b/website/src/content/blog/_assets/qwen3-settings-jan-ai.jpeg deleted file mode 100644 index 7fc432e382..0000000000 Binary files a/website/src/content/blog/_assets/qwen3-settings-jan-ai.jpeg and /dev/null differ diff --git a/website/src/content/blog/_assets/replay.png b/website/src/content/blog/_assets/replay.png deleted file mode 100644 index 8ada6ce84b..0000000000 Binary files a/website/src/content/blog/_assets/replay.png and /dev/null differ diff --git a/website/src/content/blog/_assets/run-ai-locally-with-jan.jpg b/website/src/content/blog/_assets/run-ai-locally-with-jan.jpg deleted file mode 100644 index 942ab38ba5..0000000000 Binary files a/website/src/content/blog/_assets/run-ai-locally-with-jan.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/run-deepseek-r1-locally-in-jan.jpg b/website/src/content/blog/_assets/run-deepseek-r1-locally-in-jan.jpg deleted file mode 100644 index aa69805856..0000000000 Binary files 
a/website/src/content/blog/_assets/run-deepseek-r1-locally-in-jan.jpg and /dev/null differ diff --git a/website/src/content/blog/_assets/throughput_Comparison.png b/website/src/content/blog/_assets/throughput_Comparison.png deleted file mode 100644 index 6bb63d03ca..0000000000 Binary files a/website/src/content/blog/_assets/throughput_Comparison.png and /dev/null differ diff --git a/website/src/content/blog/benchmarking-nvidia-tensorrt-llm.mdx b/website/src/content/blog/benchmarking-nvidia-tensorrt-llm.mdx deleted file mode 100644 index 75bb2099eb..0000000000 --- a/website/src/content/blog/benchmarking-nvidia-tensorrt-llm.mdx +++ /dev/null @@ -1,321 +0,0 @@ ---- -title: Benchmarking NVIDIA TensorRT-LLM -description: This post compares the performance of TensorRT-LLM and llama.cpp on consumer NVIDIA GPUs, highlighting the trade-offs among speed, resource usage, and convenience. -tags: Nvidia, TensorRT-LLM, llama.cpp, rtx3090, rtx4090, "inference engine" -categories: research -ogImage: assets/images/general/og-throughput-benchmark.png -date: 2024-04-29 ---- - -import { Aside } from '@astrojs/starlight/components' - - -import throughputComparison from '@/assets/blog/throughput_Comparison.png'; -import img4090s from '@/assets/blog/4090s.png'; -import og4090s from '@/assets/blog/og-4090s.webp'; -import img3090s from '@/assets/blog/3090s.jpg'; -import img4070s from '@/assets/blog/4070s.jpg'; -import egpu from '@/assets/blog/egpu.jpg'; - - -Jan now supports [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) in addition to [llama.cpp](https://github.com/ggerganov/llama.cpp), making Jan multi-engine and ultra-fast for users with Nvidia GPUs. - -We've been excited for TensorRT-LLM for a while, and [had a lot of fun implementing it](https://github.com/menloresearch/nitro-tensorrt-llm). As part of the process, we've run some benchmarks, to see how TensorRT-LLM fares on consumer hardware (e.g. 
[4090s](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/), [3090s](https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/)) we commonly see in the [Jan's hardware community](https://discord.com/channels/1107178041848909847/1201834752206974996). - - - - - -## Key Findings - -image - -TensorRT-LLM was: - -- **30-70% faster** than llama.cpp on the same hardware -- **Consumes less memory on consecutive runs** and **marginally more GPU VRAM utilization** than llama.cpp -- **20%+ smaller compiled model sizes** than llama.cpp -- **Less convenient** as models have to be compiled for a specific OS and GPU architecture, vs. llama.cpp's "Compile once, run everywhere" portability -- **Less accessible** as it does not support older-generation NVIDIA GPUs - -## Why TensorRT-LLM? - -[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) is Nvidia's open-source inference library that incorporates Nvidia's proprietary optimizations beyond the open-source [cuBLAS](https://developer.nvidia.com/cublas) library. - -As compared to [llama.cpp](https://github.com/ggerganov/llama.cpp), which today dominates Desktop AI as a cross-platform inference engine, TensorRT-LLM is highly optimized for Nvidia GPUs. While llama.cpp compiles models into a [single, generalizable CUDA "backend"](https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda.cu) that can run on a wide range of Nvidia GPUs, TensorRT-LLM compiles models into a [GPU-specific execution graph](https://www.baseten.co/blog/high-performance-ml-inference-with-nvidia-tensorrt/) that is highly optimized for that specific GPU's Tensor Cores, CUDA cores, VRAM and memory bandwidth. - -TensorRT-LLM is typically used in datacenter-grade GPUs, where it produces a [face-melting 10,000 tokens/s](https://nvidia.github.io/TensorRT-LLM/blogs/H100vsA100.html) on [NVIDIA H100 Tensor Core GPUs](https://www.nvidia.com/en-us/data-center/h100/). 
We were curious for how TensorRT-LLM performs on consumer-grade GPUs, and gave it a spin. - -| Llama.cpp | TensorRT-LLM | -| ------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | -| Baseline | Blazing fast (30-70% faster) | -| Compile once, run cross-platform | Compiled and highly optimized for specific GPU architecture | -| Generalizable and Portable | Highly-optimized | -| Model compiles to [single, generalizable CUDA "backend"](https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda.cu) | Model compiles to [GPU-specific execution graph](https://www.baseten.co/blog/high-performance-ml-inference-with-nvidia-tensorrt/) | - -## Experiment Setup - -We ran the experiment using standardized inference requests in a sandboxed environment: -- **Model**: Mistral 7b model, compiled and quantized at a comparable `int4` quantization. -- **Test runs**: 5 batches of 10 runs each, per inference engine, on a bare metal PC with no other applications. -- **Parameters**: User defaults, i.e. `batch_size 1`, `input_len 2048` and `output_len 512` -- **Measurements**: - - CPU, memory from Jan system monitor - - GPU VRAM utilization metrics from `nvidia-smi`, and taken over an interval of 14 seconds. - - Throughput (token/sec) using [Jan's built-in Tokens/sec perf stat](https://github.com/search?q=repo%3Ajanhq%2Fjan%20timeDiffInSeconds&type=code). 
- - - - - -### Hardware Selection - -We chose the following GPUs based on our users' preferences: - -| NVIDIA GPU | VRAM (GB) | CUDA Cores | Tensor Cores | Memory Bus Width (bit) | Memory Bandwidth (GB/s) | Connection (GB/s) | -| --------------------------------- | --------- | ---------- | ------------ | ---------------------- | ----------------------- | -------------------------------------------- | -| GeForce RTX 4090 (Ada) | 24 | 16,384 | 512 | 384 | ~1000 | PCIe4.0 x16 (~32) | -| GeForce RTX 3090 (Ampere) | 24 | 10,496 | 328 | 384 | 935.8 | PCIe4.0 x16 (~32) | -| GeForce RTX 4070 Laptop GPU (Ada) | 8 | 7680 | 144 | 192 | 272 | PCIe4.0 x4 (~8) | -| GeForce RTX 4090 eGPU (Ada) | 24 | 16,384 | 512 | 384 | ~1000 | Thunderbolt 3 connected to a USB4 USB-C port ([~1.25-5?](https://www.cablematters.com/Blog/Thunderbolt/usb4-vs-thunderbolt-3)) | - -### llama.cpp Setup - -- llama.cpp commit [15499eb](https://github.com/ggerganov/llama.cpp/commit/15499eb94227401bdc8875da6eb85c15d37068f7) -- We used `Mistral-7b-q4_k_m` in `GGUF` with `ngl` at `100` - - - -### TensorRT-LLM Setup - -- TensorRT-LLM version [0.7.1](https://github.com/NVIDIA/TensorRT-LLM/releases/tag/v0.7.1) and build on Windows -- For TensorRT-LLM, we used `Mistral-7b-int4 AWQ` -- We ran TensorRT-LLM with `free_gpu_memory_fraction` to test it with the lowest VRAM consumption -- Note: We picked AWQ for TensorRT-LLM to be a closer comparison to GGUF's Q4. - -## Results - -### NVIDIA GeForce RTX 4090 GPU - -image -*Jan is built on this Dual-4090 workstation, which recently got upgraded to a nice case* - -image -*The original case (or lack thereof) for our Dual-4090 cluster, as posted on [r/localllama](https://www.reddit.com/r/LocalLLaMA/comments/16lxt6a/case_for_dual_4090s/)* - - - -For this test, we used Jan's [Dual-4090 workstation](https://www.reddit.com/r/LocalLLaMA/comments/16lxt6a/case_for_dual_4090s/), which our engineers timeshare to build Jan. 
- -The [NVIDIA GeForce RTX 4090](https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/) is the latest top-of-the-line desktop GPU, with an MSRP of $1,599, and uses the Ada architecture. It has a ~1000 GB/s memory bandwidth within VRAM, and a PCIe4 x16 lane (~32 GB/s) between the GPU and the CPU. - -| Metrics | GGUF (using CPU) | GGUF (using GPU) | TensorRT-LLM | How TensorRT-LLM Compares | -| ------------------------ | ---------------- | ---------------- | ------------ | ------------------------- | -| Throughput (token/s) | 14.0 | 100.43 | 170.63 | ✅ 69.89% faster | -| Max GPU Utilization (%) | N/A | 83.50 | 88.50 | 5.99% more | -| Max VRAM Utilization (%) | N/A | 64 | 72.1 | 12.66% more | -| Avg RAM Used (GB) | 0.611 | 7.105 | 4.98 | ✅ 29.88% less | -| Disk Size (GB) | 4.07 | 4.06 | 3.05 | ✅ 24.88% smaller | - -TensorRT-LLM was almost 70% faster than llama.cpp by building the model for the GeForce RTX 4090 GPU’s Ada architecture for optimal graph execution, fully utilizing the 512 Tensor Cores, 16,384 CUDA cores, and 1,000 GB/s of memory bandwidth. - -The intuition for why llama.cpp is slower is because it compiles a model into a [single, generalizable CUDA “backend”](https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda.cu) that can run on many NVIDIA GPUs. Doing so requires llama.cpp to sacrifice all the optimizations that TensorRT-LLM makes with its compilation to a GPU-specific execution graph. - -### NVIDIA GeForce RTX 3090 GPU - -image -*Our 3090 Machine, now used by one of our engineers to build Jan* - - - -The [NVIDIA's GeForce RTX 3090](https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/) is a popular desktop GPU, and retails for approximately $1,500 (as of April 24). It uses the NVIDIA Ampere architecture. As compared to its successor GeForce RTX 4090, it has 33% fewer CUDA cores (10,496) and Tensor Cores (328) and 7% less memory bandwidth (~930 GB/s). 
- -| Metrics | GGUF (using CPU) | GGUF (using GPU) | TensorRT-LLM | How TensorRT-LLM Compares | -| ------------------------ | ---------------- | ---------------- | ------------ | ------------------------- | -| Throughput (token/s) | 11.42 | 88.70 | 144.19 | ✅ 62.57% faster | -| Max GPU Utilization (%) | N/A | 80.40 | 89.10 | 10.82% more | -| Max VRAM Utilization (%) | N/A | 66.80 | 76.20 | 14.07% more | -| Avg RAM Used (GB) | 0.611 | 2.60 | 0.98 | 62.41%% less | -| Disk Size (GB) | 4.07 | 4.06 | 3.05 | ✅ 24.88% smaller | - -Interestingly, the GeForce RTX 3090 was only 16.6% slower compared with the GeForce RTX 4090. On TPS, TensorRT-LLM outperformed llama.cpp by 62.57%. Curiously, it also used negligible RAM for subsequent inference requests after the initial model warmup. - -### NVIDIA GeForce RTX 4070 Laptop GPU - -image - - - -We also benchmarked an NVIDIA GeForce RTX 4070 Laptop GPU with 8gb of VRAM, which is a popular configuration among Jan users. Laptop GPUs are less powerful than their desktop counterparts, as they trade portability for reduced energy consumption and thermal constraints. - -| Metrics | GGUF (using CPU) | GGUF (using GPU) | TensorRT-LLM | Difference on GPU | -| ------------------------ | ---------------- | ---------------- | ------------ | ----------------- | -| Throughput (token/s) | 11.57 | 39.70 | 51.57 | ✅ 29.9% faster | -| Max GPU Utilization (%) | N/A | 80.00 | 84.67 | 5.83% more | -| Max VRAM Utilization (%) | N/A | 72.78 | 81.22 | 11.60% more | -| Avg RAM Used (GB) | 4.49 | 4.44 | 1.04 | ✅ 76.55%% less | -| Disk Size (GB) | 4.07 | 4.06 | 3.05 | ✅ 24.88% smaller | - -TensorRT-LLM on the laptop dGPU was 29.9% faster in tokens per second throughput than llama.cpp, but significantly slower than the desktop GPUs. - -The intuition for this is fairly simple: the GeForce RTX 4070 Laptop GPU has 53.1% fewer CUDA cores and Tensor Cores (compared to the 4090), and less VRAM (8gb vs. 24gb). 
This reduces the surface area for GPU-specific optimizations for TensorRT-LLM. - -The GeForce RTX 4070 Laptop GPU is also ~70% slower than the GeForce RTX 4090 desktop GPU, showing the hardware effect of less electricity draw, less VRAM, and thermal constraints on inference speed. - -### Laptop with NVIDIA GeForce RTX 4090 eGPU - -image - - - -Our last benchmark was to experiment with an [Asus RTX 4090 eGPU](https://www.gigabyte.com/Graphics-Card/GV-N4090IXEB-24GD), that was connected via a [Thunderbolt 3 port](https://www.gigabyte.com/Graphics-Card/GV-N4090IXEB-24GD) to the [Razer Blade 14's USB4 port](https://www.razer.com/sg-en/gaming-laptops/razer-blade-14). Theoretically, the results should be fairly similar to the GeForce RTX 4090 desktop GPU as they have identical underlying GPUs, but with very different connection speeds. - -We thought it would be an interesting to see how TensorRT-LLM handles a 68.4% reduction in communication bandwidth between the CPU and GPU: -- Thunderbolt 3 connection (1.25-5 GB/s?) for eGPUs -- PCIe 4.0 x4 (~8 GB/s) for "on device" desktops - - - -Overall, we used mid-to-high-end NVIDIA desktop GPUs for our tests, as TensorRT-LLM’s performance enhancements are most apparent on bigger VRAMs. For users with lower-spec machines, llama.cpp is better. - -| Metrics | GGUF (using CPU) | GGUF (using GPU) | TensorRT-LLM | Difference on GPU | -| ------------------------ | ---------------- | ---------------- | ------------ | ----------------- | -| Throughput (token/s) | 11.56 | 62.22 | 104.95 | ✅ 68.66% faster | -| Max VRAM Utilization (%) | 0 | 65 | 99 | 52.31% more | -| RAM Used (GB) | 0.611 | 5.38 | 4.11 | ✅ 23.61% less | -| Disk Size (GB) | 4.07 | 4.06 | 3.05 | ✅ 24.88% smaller | - -The Thunderbolt 3 eGPU had a 38.5% lower tokens/s as compared to the PCIe4.0 x16 connected GPU. But the % speedup vs. llama.cpp was similar, at around 69%. - -Interestingly, the VRAM used with the eGPU was variably higher. 
Our hypothesis is that the slower communication bandwidth results in more VRAM being allocated, as memory is released mostly slowly as well. - -## Conclusion - -### Token Speed - -image - -| Throughput (Higher is Better) | TensorRT-LLM | Llama.cpp | % Difference | -| ---------------------------------- | --------------- | ----------- | ------------- | -| GeForce RTX 4090 desktop GPU | ✅ 170.63t/s | 100.43t/s | 69.89% faster | -| GeForce RTX 3090 desktop GPU | ✅ 144.19t/s | 88.70t/s | 62.57% faster | -| GeForce RTX 4090 eGPU | ✅ 104.95t/s | 62.22t/s | 68.66% faster | -| GeForce RTX 4070 Laptop GPU | ✅ 51.57t/s | 39.70t/s | 29.90% faster | -| Laptop AMD Ryzen™ 9 8945HS, 8C/16T | (Not supported) | ✅ 11.57t/s | | - -- TensorRT-LLM is up to **70% faster** than llama.cpp on desktop GPUs (e.g. 3090 GPU, 4090 GPUs) while using less RAM & CPU (but more fully utilizing VRAM) -- TensorRT-LLM is up to **30% faster** on laptop GPUs (e.g. 4070 GPUs) with smaller VRAM - -### Max VRAM Utilization - -| Average VRAM utilization % | TensorRT-LLM | Llama.cpp | % Difference | -| ---------------------------- | ------------ | --------- | ------------ | -| GeForce RTX 4090 desktop GPU | 72.10 | 64.00 | 12.66% more | -| GeForce RTX 3090 desktop GPU | 76.20 | 66.80 | 14.07% more | -| GeForce RTX 4070 Laptop GPU | 81.22 | 72.78 | 11.06% more | -| GeForce RTX 4090 eGPU | N/A | N/A | N/A | - -- TensorRT-LLM used marginally more average VRAM utilization at peak utilization vs. llama.cpp (up to 14%). Though this could have interesting implications on consuming more electricity over time. 
-- Note: we used comparable (but not identical) quantizations, and TensorRT-LLM’s `AWQ INT4` is implemented differently from llama.cpp’s `q4_k_m` - -### Max RAM Usage - -| Max RAM utilization | TensorRT-LLM | Llama.cpp | % Difference | -| ---------------------------- | ------------ | --------- | ---------------- | -| GeForce RTX 4090 desktop GPU | ✅ 4.98 | 7.11 | ✅ 29.88% less | -| GeForce RTX 3090 desktop GPU | ✅ 0.98 | 2.60 | ✅ 62.41% less | -| GeForce RTX 4070 Laptop GPU | ✅ 1.04 | 4.44 | ✅ 76.55%% less | -| GeForce RTX 4090 eGPU | ✅ 4.11 | 5.38 | ✅ 23.61% less | - -TensorRT-LLM uses a lot less Max RAM vs. llama.cpp on slower connection (PCIe 3.0 or Thunderbolt 3) due to better memory management and efficient delegation to VRAM. On faster connection, it’s at least equal to llama.cpp. - -### Compiled Model Size and Number of Files -- Contrary to popular belief, TensorRT-LLM prebuilt models turned out to not be that huge -- Mistral 7b int4 was actually 25% smaller in TensorRT-LLM, at 3.05gb vs. 4.06gb -- Note: These are approximate comparisons, as TensorRT-LLM’s AWQ INT4 is implemented differently from llama.cpp’s q4_k_m -- The bigger takeaway is that the Compiled model sizes are roughly in the same ballpark, while the number of files for TensorRT-LLM is 7x the GGUF number of files. 
- -| Model size (Lower is better) | TensorRT-LLM AWQ int4 | Llama.cpp GGUF Q4 | % Difference | -| ---------------------------- | --------------------- | ----------------- | ----------------- | -| Mistral 7B | ✅ 3.05GB | 4.06GB | ✅ 24.88% smaller | - -### Convenience -- Llama.cpp still wins on cross-platform versatility and convenience of a “compile once, run everywhere” approach -- TensorRT-LLM still requires compilation to specific OS and architecture, though this could be solved by pre-compiling and publishing models on [Nvidia's NGC Model Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/ai-foundation/collections/codellama) - -### Accessibility -- Llama.cpp unsurprisingly beats TensorRT-LLM in terms of accessibility -- TensorRT-LLM does not support older NVIDIA GPUs and won’t work well on smaller VRAM cards (e.g. 2-4GB VRAM) - -## Final Notes - -Our benchmarking is not perfect. We evaluated over a dozen tools ([llmperf](https://github.com/ray-project/llmperf), [psutil](https://pypi.org/project/psutil/), [gpustat](https://github.com/wookayin/gpustat), native utilities, and more) and found that everyone measures TPS and other common metrics differently. We eventually settled on using our own tools in Jan, which are consistent across any inference engine and hardware. As for runtime parameters, we went with default settings, likely representing the typical user experience. - -We also did not overclock for this benchmark, as it is not a default setting for most users. But we've measured in our tests that TensorRT-LLM can go even faster with a few tweaks. We see this as a pretty exciting future direction. - - - -We're also publishing the underlying [raw experimental data](https://drive.google.com/file/d/1rDwd8XD8erKt0EgIKqOBidv8LsCO6lef/view?usp=sharing), and would encourage the community to scrutinize and help us improve. 
- -Special thanks to Asli Sabanci Demiroz, Annamalai Chockalingam, Jordan Dodge from Nvidia, and Georgi Gerganov from llama.cpp for feedback, review and suggestions. diff --git a/website/src/content/blog/bitdefender.mdx b/website/src/content/blog/bitdefender.mdx deleted file mode 100644 index 27a28e4b23..0000000000 --- a/website/src/content/blog/bitdefender.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: 'Bitdefender False Positive Flag' -description: "10th January 2024, Jan's 0.4.4 Release on Windows triggered Bitdefender to incorrectly flag it as infected with Gen:Variant.Tedy.258323, leading to automatic quarantine warnings on users' computers." -date: 2024-01-10 -tags: postmortem, bitdefender -categories: building-jan -keywords: - [ - postmortem, - bitdefender, - false positive, - antivirus, - jan, - nitro, - incident, - incident response, - supply chain security, - user communication, - documentation, - antivirus compatibility, - cross-platform testing, - proactive incident response, - user education, - lessons learned, - ] ---- - -import CTABlog from '@/components/Blog/CTABlog.astro'; - - -# Bitdefender False Positive Flag - -Following the recent incident related to Jan version 0.4.4 triggering Bitdefender on Windows with Gen:Variant.Tedy.258323 on January 10, 2024, we wanted to provide a comprehensive postmortem and outline the necessary follow-up actions. - -## Incident Overview - -### Bug Description - -Jan 0.4.4 installation on Windows triggered Bitdefender to flag it as infected with Gen:Variant.Tedy.258323, leading to automatic quarantine. - -### Affected Antivirus - -- McAfee / Microsoft Defender was unaffected -- Bitdefender consistently flagged the issue. - -### Incident Timeline - -- _10 Jan, 2:18 am SGT:_ Hawke flags up Malware antivirus errors for 0.4.4 installation on Windows computers. -- _10 Jan, 2:21 am SGT:_ @0xSage responds in Discord. -- _10 Jan, 2:35 am SGT:_ Hawke confirms multiple people have experienced this error on fresh installs. 
-- _10 Jan, 2:41 am SGT:_ @louis-jan and @dan-jan revert 0.4.4 out of an abundance of caution. -- _Incident ongoing:_ To triage and investigate the next day. -- _10 Jan, 11:36 am SGT:_ @Hien has investigated all versions of Nitro and conducted scans using Bitdefender. Only the 2 latest versions raised warnings (0.2.7, 0.2.8). -- _10 Jan, 12:44 pm SGT:_ @Hien tested 0.2.6 again and suggested using 0.2.6 for now; the 2 remaining Nitro versions (0.2.7, 0.2.8) will be under further investigation. -- The team started testing on the fixed build. -- _10 Jan, 3:22 pm SGT:_ Diagnosis found that it's most likely a false positive. @Hien has only found a solution by attempting to build Nitro Windows CPU on a GitHub-hosted runner and hasn't identified the root cause yet. -- _10 Jan, 5:24 pm SGT:_ @Hien is testing two scenarios and still trying to understand the workings of Bitdefender. -- _11 Jan, 5:46 pm SGT:_ Postmortem meeting - -## Investigation Update - -- @Hien has investigated all versions of Nitro and conducted scans using Bitdefender, and only the 2 latest versions raised warnings from Bitdefender. Nitro 0.2.6, which is the highest version without the issue, was tested again, and it no longer triggers a warning from Bitdefender. -- We have observed that Nitro versions up to 0.2.6 remain unaffected. However, Bitdefender flags versions 0.2.7 and 0.2.8 as infected, leading to the deletion. In order to proceed with the current release, Hien suggests downgrading Nitro to version 0.2.6 and conducting tests with this version. Simultaneously, he will investigate why Bitdefender is flagging versions 0.2.7 and 0.2.8. -- It's essential to note that between versions 0.2.6, 0.2.7, and 0.2.8, only minor changes were made, which should not trigger a malicious code warning. We can refer to the changelog between 0.2.7 and 0.2.8 to pinpoint these changes. 
-- Our primary message is to convey that we did not introduce malicious code into Jan (indicating a false positive), and the investigation aims to understand the root cause behind Bitdefender flagging versions 0.2.7 and 0.2.8. -- The current diagnosis looks like a false positive but it's still under investigation. Reference link: [here](https://stackoverflow.com/questions/75886428/fake-positive-bit-defender-problem-genvariant-tedy-304469), [here](https://stackoverflow.com/questions/58010466/bitdefender-detects-my-console-application-as-genvariant-ursu-56053), and [here](https://www.cisa.gov/sites/default/files/2023-06/mar-10365227.r1.v1.clear_.pdf). -- @Hien testing two scenarios and still trying to understand the workings of Bitdefender. Still under investigation: is the issue with the code or the CI? - - In Case 1, using the same CI agent for tags 0.2.6 and 0.2.8, after PRs by Alan and myself, Bitdefender flagged the Nitro CPU binary build. Naturally, one would conclude this is due to the code. - - However, I proceeded with a further experiment: for the 0.2.8 code, instead of using our CI agent, I used a GitHub hosted agent. This time, Bitdefender did not flag our binary build. -- We've identified the Bitdefender warning was not an attack. There is no malicious code -- We've isolated the event to originate from a CI agent, which resulted in a BitDefender false positive alert. - -## Follow-ups and Action Items - -1. **Reproduce Bitdefender Flag in Controlled Environment [Done]:** - - - _Objective:_ To replicate the issue in a controlled environment to understand the triggers and specifics of Bitdefender's detection. - -2. **Investigate Malicious Code or False Positive:** - - - _Objective:_ Determine whether the flagged issue is a result of actual malicious code or a false positive. If it's a false positive, work towards resolution while communicating with Bitdefender. - -3. 
**Supply Chain Attack Assessment:** - - - _Objective:_ Evaluate the possibility of a supply chain attack. Investigate whether the Nitro 0.4.4 distribution was compromised or tampered with during the release process. - -4. **Testing after the Hotfix:** - - - _Objective:_ In addition to verifying the issue after the fix, it is essential to conduct comprehensive testing across related areas, ensuring compatibility across different operating systems and antivirus software (latest version / free version only). - -5. **Process Improvement for Future Releases:** - - - _Objective:_ Identify and implement improvements to our release process to prevent similar incidents in the future. This may include enhanced testing procedures, code analysis, and collaboration with antivirus software providers during the pre-release phase. Additionally, we should add verification of the latest antivirus software to the release checklist. - -6. **Documentation of Tested Antivirus Versions:** - - _Objective:_ Create a document that outlines the testing conducted, including a matrix that correlates Jan versions with the tested antivirus versions. - - _Sample list:_ for consideration purposes - - Bitdefender - - McAfee - - Avira - - Kaspersky - - Norton - - Microsoft Defender - - AVG - - TotalAV - -## Next Steps - -- The team should follow up on each action item with clear ownership, priority, and deadlines. - -- Communicate progress transparently with the community and clients through appropriate channels. If you have any insights or suggestions, share them within the dedicated channels. - -- Update internal documentation and procedures based on the lessons learned from this incident. - -## Lessons Learned - -1. **Antivirus Compatibility Awareness:** - - - _Observation:_ The incident underscored the significance of recognizing and testing for antivirus compatibility, particularly with widely-used solutions like Bitdefender. 
- - _Lesson Learned:_ In the future, we will integrate comprehensive checks for compatibility with various antivirus software, including both antivirus and "Malicious Code Detection," into our CI or QA checklist. This proactive measure aims to minimize false positive detections during the release and testing processes. - -2. **Cross-Platform Testing:** - - - _Observation:_ The problem did not occur on MacOS and Linux systems, implying a potential oversight in cross-platform testing during our release procedures. - - _Lesson Learned:_ Clarification — This observation is not directly related to antivirus testing. Instead, it underscores the necessity to improve our testing protocols, encompassing multiple operating systems. This ensures a thorough evaluation of potential issues on diverse platforms, considering the various antivirus software and differences in architectures on Mac and Linux systems. - -3. **User Communication and Documentation:** - - - _Observation:_ Due to the timely response from Nicole, who was still active on Discord and Github at 2 am, this quick response facilitated our ability to assess the impact accurately. - - _Lesson Learned:_ While our communication with users was effective in this instance, it was mainly due to Nicole's presence during the incident. To improve our overall response capability, we should prioritize "24/7 rapid triage and response." This involves ensuring continuous availability or establishing a reliable rotation of team members for swift user communication and issue documentation, further enhancing our incident response efficiency. - -4. **Proactive Incident Response:** - - - _Observation:_ The incident response, while involving a prompt version rollback, experienced a slight delay due to the release occurring at midnight. This delay postponed the initiation of the investigation until the next working hours. 
- - _Lesson Learned:_ Recognizing the importance of swift incident response, particularly in time-sensitive situations, we acknowledge that releasing updates during off-hours can impact the immediacy of our actions. Moving forward, we will strive to optimize our release schedules to minimize delays and ensure that investigations can commence promptly regardless of the time of day. This may involve considering alternative release windows or implementing automated responses to critical incidents, ensuring a more proactive and timely resolution. - -5. **Supply Chain Security Measures:** - - - _Observation:_ While the incident prompted consideration of a potential supply chain attack, it's crucial to emphasize that this was not the case. Nonetheless, the incident underscored the importance of reviewing our supply chain security measures. - - _Lesson Learned:_ Going forward, we should strengthen supply chain security by introducing additional verification steps to uphold the integrity of our release process. Collaborating with distribution channels is essential for enhancing security checks and ensuring a robust supply chain. - - _Longer-term:_ Exploring options for checking Jan for malicious code and incorporating antivirus as part of our CI/CD pipeline should be considered for a more comprehensive and proactive approach. - -6. **User Education on False Positives:** - - _Observation:_ Users reported Bitdefender automatically "disinfecting" the flagged Nitro version without allowing any user actions. - - _Lesson Learned:_ Educate users about the possibility of false positives and guide them on how to whitelist or report such incidents to their antivirus provider (if possible). Provide clear communication on steps users can take in such situations. - -These lessons learned will serve as a foundation for refining our processes and ensuring a more resilient release and incident response framework in the future. 
Continuous improvement is key to maintaining the reliability and security of our software. - -Thank you for your dedication and cooperation in resolving this matter promptly. - - \ No newline at end of file diff --git a/website/src/content/blog/data-is-moat.mdx b/website/src/content/blog/data-is-moat.mdx deleted file mode 100644 index 5e238103a2..0000000000 --- a/website/src/content/blog/data-is-moat.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "The Invisible Moat around Open-Source LLM" -description: "Uncover the pivotal role of data ownership in training the next iteration of LLM." -tags: OpenAI has a moat, Catastrophic forgetting, ChatGPT -date: 2024-03-25 -unlisted: true -categories: research ---- - -import CTABlog from '@/components/Blog/CTABlog.astro'; - -import catastrophicDemo from '@/assets/blog/catastrophic-demo.png'; -import gradientDecent from '@/assets/blog/gradient-decent.gif'; -import replay from '@/assets/blog/replay.png'; -import openchatBench0106 from '@/assets/blog/openchat-bench-0106.png'; - - -# The Invisible Moat around Open-Source LLM - -In the crowded AI landscape, OpenAI's ChatGPT stands out, not just for its capabilities but for its unique access to the pre-trained dataset. This post explores the vital role of data in maintaining a competitive edge, focusing on OpenAI's strategic advantage through data ownership. - -## Data: The Secret Weapon -OpenAI, with ChatGPT, has carved a distinct advantage. By harnessing user interactions, it gains invaluable insights into diverse use cases, enabling precise model refinements. The cornerstone of this advantage lies in the "pre-trained dataset." This treasure trove of data empowers OpenAI to cater to specific needs, ensuring sustained improvement and differentiation. - -## The rise of the opensource - -``` -- How they/Mistral/Llama make money? 
--> around having pretrained data -> finetuning -First para: -Rise of Open Source LLMs like Mistral, Llama2, Llama3 -People think they don't have a moat = everything is open source -Second para: -We actually think these guys have an "invisible moat" -Pre-training data is not released, and makes a huge difference in fine-tuning efficacy -``` - -### Why pretrained data is important? - -> *Owning the pre-trained dataset is crucial as it represents the original distribution.* -Access to the pre-trained dataset acts as a master key to address the critical issue of ["Catastrophic forgetting"](https://en.wikipedia.org/wiki/Catastrophic_interference) in Language Learning Models (LLMs). This phenomenon describes how LLMs lose hold of prior knowledge upon learning new information. Access to the foundational dataset allows for effective fine-tuning, balancing the introduction of new data with the retention of existing knowledge. - -Catastrophic forgetting - -**Figure 1.** Demonstrates the catastrophic forgetting issue: without mixing datasets, AI overfits on new tasks, impairing normal communication. - -### Illustrating Catastrophic Forgetting - -``` -What is fine-tuning -Process of Finetuning (pretrain, instruct, finetune) -Fine-tuning datasets -Risk of catastrophic forgetting -"Why is Pre-trained data important?" -What is pre-training dataset -How does fine-tuning with pre-training dataset differ from when you don't have it -How does it avoid catastrophic forgetting -``` - -Catastrophic forgetting can be visualized as a ball in a multidimensional landscape, where moving towards new knowledge risks losing grasp on the old. -Pre-trained data acts as a map, guiding fine-tuning in a way that incorporates new information while safeguarding existing knowledge. 
- -Gradient descent - -**Figure 2.** [Gradient descent demonstration](https://en.wikipedia.org/wiki/Gradient_descent) - -### Smoothing Distribution Shifts - -As described above, the mixture of the pre-trained dataset ensures smoother distribution shifts when introducing new information, as it embodies a comprehensive spectrum of prior knowledge. - -This continuity in knowledge transition helps in maintaining the robustness of the model against sudden changes, akin to providing a more gradual learning curve where the new information is incrementally integrated with the existing knowledge base. - -This concept is supported by [EleutherAI's research](https://arxiv.org/abs/2403.08763) highlighting the importance of how tasks are sequenced in the learning process, suggesting that introducing dissimilar tasks early on can expand the network's capacity for new information. - -**Table 1.** Final results for English-only 405M parameter models trained with different replay amounts show models with more replay perform better in balancing learning and forgetting (measured as AVG Loss). Notably, just a 1% mix with a pre-trained dataset significantly lowers AVG loss, effectively shifting model knowledge from English (the Pile) to German. - -Replay method - -*Note:* **Replay** is a method that involves combining the training dataset from the pre-trained model with new task datasets. - -### Acting as a Noise Mask - -The pre-trained data can also serve as a form of "noise masking", similar to techniques used in training [early computer vision models](https://arxiv.org/abs/1911.04252). - -This approach introduces a level of ["noise"](https://arxiv.org/abs/2310.05914) during training, which can prevent the model from overfitting to the new dataset. By retaining a mix of original and new data, the model is exposed to a broader range of scenarios, enhancing its generalization capabilities and robustness across tasks. 
- -## Solutions - -### Overwhelming approach - -Overcoming these challenges requires a balanced approach. One partial method involves inundating the model with extensive, curated data, allowing for comprehensive fine-tuning. While effective, this approach demands significant computational resources, a comprehensive filtering process for low-quality inputs, and an extraordinarily high cost associated with gathering millions of high-quality responses. - -In the open-source community, two notable examples of fine-tuning with Mistral as a base model on large datasets collected from top-rated GPT-4 and human responses demonstrate a distribution shift that enhances model performance, including [OpenChat](https://huggingface.co/openchat/openchat-3.5-0106) and [Hermes-Pro](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B). - -Openchat results - -**Figure 3.** After fine-tuning with a large amount of data samples, the model's performance improved, outperforming ChatGPT and Grok-1 in some benchmarks. - -### Fully open source model - -- Example: Dolma + OLMo from AllenAI - -## Conclusion - -The ownership and strategic use of pre-trained data serve as an invisible moat. It not only enables the tackling of complex challenges like catastrophic forgetting but also provides a baseline for continuous, targeted improvements. Although there is a solution to decentralize, the cost remains reasonably high. 
- -Fully open pretrained + open weight - -## Reference -- [Catastrophic forgetting](https://arxiv.org/abs/2308.08747) -- [Simple and Scalable Strategies to Continually Pre-train Large Language Models](https://arxiv.org/abs/2403.08763) -- [Gradient descent](https://en.wikipedia.org/wiki/Gradient_descent) -- [Neftune](https://arxiv.org/abs/2310.05914) -- [Self-training with Noisy Student improves ImageNet classification](https://arxiv.org/abs/1911.04252) - - \ No newline at end of file diff --git a/website/src/content/blog/deepseek-r1-locally.mdx b/website/src/content/blog/deepseek-r1-locally.mdx deleted file mode 100644 index 5c7ae8f8db..0000000000 --- a/website/src/content/blog/deepseek-r1-locally.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: "Run DeepSeek R1 locally on your device (Beginner-Friendly Guide)" -description: "A straightforward guide to running DeepSeek R1 locally regardless of your background." -tags: DeepSeek, R1, local AI, Jan, GGUF, Qwen, Llama -categories: guides -date: 2025-01-31 -ogImage: assets/deepseek-r1-locally-jan.jpg -twitter: - card: summary_large_image - site: "@jandotai" - title: "Run DeepSeek R1 locally on your device (Beginner-Friendly Guide)" - description: "A straightforward guide to running DeepSeek R1 locally regardless of your background." 
- image: assets/deepseek-r1-locally-jan.jpg ---- - -import CTABlog from '@/components/Blog/CTABlog.astro'; -import { Aside } from '@astrojs/starlight/components' - - -import deepseekR1LocallyJan from '@/assets/blog/deepseek-r1-locally-jan.jpg'; -import downloadJan from '@/assets/blog/download-jan.jpg'; -import janLibraryDeepseekR1 from '@/assets/blog/jan-library-deepseek-r1.jpg'; -import janHubDeepseekR1 from '@/assets/blog/jan-hub-deepseek-r1.jpg'; -import janRunsDeepseekR1Distills from '@/assets/blog/jan-runs-deepseek-r1-distills.jpg'; - - -# Run DeepSeek R1 locally on your device (Beginner-Friendly Guide) - -DeepSeek R1 running locally in Jan AI interface, showing the chat interface and model settings - -DeepSeek R1 is one of the best open-source models in the market right now, and you can run DeepSeek R1 on your own computer! - - - -DeepSeek R1 requires data-center level computers to run at its full potential, and we'll use a smaller version that works great on regular computers. - -Why use an optimized version? -- Efficient performance on standard hardware -- Faster download and initialization -- Optimized storage requirements -- Maintains most of the original model's capabilities - -## Quick Steps at a Glance -1. Download [Jan](https://jan.ai/) -2. Select a model version -3. Choose settings -4. Set up the prompt template & start using DeepSeek R1 - -Let's walk through each step with detailed instructions. - -## Step 1: Download Jan -[Jan](https://jan.ai/) is an open-source application that enables you to run AI models locally. It's available for Windows, Mac, and Linux. For beginners, Jan is the best choice to get started. - -Jan AI interface, showing the download button - -1. Visit [jan.ai](https://jan.ai) -2. Download the appropriate version for your operating system -3. Install the app - -## Step 2: Choose Your DeepSeek R1 Version - -To run AI models like DeepSeek R1 on your computer, you'll need something called VRAM (Video Memory). 
Think of VRAM as your computer's special memory for handling complex tasks like gaming or, in our case, running AI models. It's different from regular RAM - VRAM is part of your graphics card (GPU). - - - -Let's first check how much VRAM your computer has. Don't worry if it's not much - DeepSeek R1 has versions for all kinds of computers! - -Finding your VRAM is simple: -- On Windows: Press `Windows + R`, type `dxdiag`, hit Enter, and look under the "Display" tab -- On Mac: Click the Apple menu, select "About This Mac", then "More Info", and check under "Graphics/Displays" -- On Linux: Open Terminal and type `nvidia-smi` for NVIDIA GPUs, or `lspci -v | grep -i vga` for other graphics cards - - - -Once you know your VRAM, here's what version of DeepSeek R1 will work best for you. If you have: -- 6GB VRAM: Go for the 1.5B version - it's fast and efficient -- 8GB VRAM: You can run the 7B or 8B versions, which offer great capabilities -- 16GB or more VRAM: You have access to the larger models with enhanced features - -Available versions and basic requirements for DeepSeek R1 distills: - -| Version | Model Link | Required VRAM | -|---------|------------|---------------| -| Qwen 1.5B | [DeepSeek-R1-Distill-Qwen-1.5B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF) | 6GB+ | -| Qwen 7B | [DeepSeek-R1-Distill-Qwen-7B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF) | 8GB+ | -| Llama 8B | [DeepSeek-R1-Distill-Llama-8B-GGUF](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF) | 8GB+ | -| Qwen 14B | [DeepSeek-R1-Distill-Qwen-14B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF) | 16GB+ | -| Qwen 32B | [DeepSeek-R1-Distill-Qwen-32B-GGUF](https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF) | 16GB+ | -| Llama 70B | [DeepSeek-R1-Distill-Llama-70B-GGUF](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-70B-GGUF) | 48GB+ | - -To download your chosen model: - -Launch Jan and 
navigate to Jan Hub using the sidebar - -Jan AI interface, showing the model library - -3. Input the model link in this field: - -Jan AI interface, showing the model link input field - -## Step 3: Configure Model Settings -When configuring your model, you'll encounter quantization options: - - - -## Step 4: Configure Prompt Template -Final configuration step: - -1. Access Model Settings via the sidebar -2. Locate the Prompt Template configuration -3. Use this specific format: - - - -This template is for proper communication between you and the model. - -You're now ready to interact with DeepSeek R1: - -Jan interface, showing DeepSeek R1 running locally - -## Need Assistance? - - - - diff --git a/website/src/content/blog/offline-chatgpt-alternative.mdx b/website/src/content/blog/offline-chatgpt-alternative.mdx deleted file mode 100644 index d6fa07e93c..0000000000 --- a/website/src/content/blog/offline-chatgpt-alternative.mdx +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: "Offline ChatGPT: You can't run ChatGPT offline, do this instead" -description: "Learn how to use AI offline with Jan - a free, open-source alternative to ChatGPT that works 100% offline on your computer." -tags: AI, ChatGPT alternative, offline AI, Jan, local AI, privacy -categories: guides -date: 2025-02-08 -ogImage: _assets/offline-chatgpt-alternatives-jan.jpg -twitter: - card: summary_large_image - site: "@jandotai" - title: "Offline ChatGPT: You can't run ChatGPT offline, do this instead" - description: "Want to use ChatGPT offline? Learn how to run AI models locally with Jan - free, open-source, and works without internet." 
- image: _assets/offline-chatgpt-alternatives-jan.jpg ---- - -import CTABlog from '@/components/Blog/CTABlog.astro'; -import { Aside } from '@astrojs/starlight/components' - - -import offlineChatgptAlternativeAiWithoutInternet from '@/assets/blog/offline-chatgpt-alternative-ai-without-internet.jpg'; - - -# Offline ChatGPT: You can't run ChatGPT offline, do this instead - -ChatGPT is a cloud-based service that requires internet access. However, it's not the only way to use AI. You can run AI models offline on your device with [Jan](https://jan.ai/). It's completely free, open-source, and gives you 100% offline capability. You can even use AI on a plane! - - - -## Jan as an offline ChatGPT alternative - -Use Jan to chat with AI models without internet access -*Jan lets you use AI offline - no internet connection needed* - -Here's how to get started with offline AI in 3 simple steps: - -### 1. Download Jan - -Go to [jan.ai](https://jan.ai) and download the version for your computer (Mac, Windows, or Linux). It's completely free. - -![Download Jan for offline AI use](./_assets/jan.ai.jpg "Get Jan for free and start using AI offline") - -### 2. Download an AI model - -You'll need an AI model to use AI offline, so download a model from Jan. Once it's on your computer, you don't need internet anymore. - -![Choose an AI model that works offline](./_assets/jan-model-selection.jpg "Find the perfect AI model for offline use") -*Select an AI model that matches your needs and computer capabilities* - - - -### 3. 
Start using AI offline - -![Chat with AI offline using Jan's interface](./_assets/run-ai-locally-with-jan.jpg "Experience ChatGPT-like interactions without internet") -*Use Jan's clean interface to chat with AI - no internet required* - -Once downloaded, you can use AI anywhere, anytime: - -- Chat like you do with ChatGPT -- Work on documents offline -- Get coding help without internet -- Keep your conversations private -- Use AI even when servers are down - -## How to chat with your docs in Jan? - -To chat with your docs in Jan, you need to activate experimental mode. - -![Activate experimental mode in Jan's settings](./_assets/chat-with-your-docs-offline-ai.jpg "Enable experimental features to chat with your documents") -*Turn on experimental mode in settings to chat with your docs* - -After activating experimental mode, simply add your files and ask questions about them. - -![Chat with your documents using Jan](./_assets/chat-with-docs-prompt.jpg "Ask questions about your documents offline") -*Chat with your documents privately - no internet needed* - -I did this for you and got a reply from a 7B parameter model. If you'd like to learn what "7B" means and understand other local AI terms, check our [guide on running AI models locally](/blog/run-ai-models-locally). - -A response from AI, Qwen2.5 7B Instruct Q4: - -`This document appears to be about the benefits and advantages of running artificial intelligence (AI) models locally on your device rather than using cloud-based or remote AI services. The key points it highlights include data privacy, offline functionality, freedom from paywalls and restrictions, and giving users full control over their AI models. Additionally, the text mentions that local AI is becoming a new trend and provides a link to a guide for beginners who want to learn more about this topic.` - -Local AI makes possible offline AI use, so Jan is going to be your first step to get started. - -## Why choose Jan over ChatGPT? - -1. 
**True Offline Use:** Unlike ChatGPT, Jan works without internet -2. **100% Private:** Your data never leaves your computer -3. **Free Forever:** No subscriptions or API costs -4. **No Server Issues:** No more "ChatGPT is at capacity" -5. **Your Choice of Models:** Use newer models as they come out - -**"Is it really free? What's the catch?"** -Yes, it's completely free and open source. Jan is built by developers who believe in making AI accessible to everyone. - -**"How does it compare to ChatGPT?"** -Modern open-source models like DeepSeek and Mistral are very capable. While they might not match GPT-4, they're perfect for most tasks and getting better every month. - -**"Do I need a powerful computer?"** -If your computer is from the last 5 years, it will likely work fine. You need about 8GB of RAM and 10GB of free space for comfortable usage. - -**"What about my privacy?"** -Everything stays on your computer. Your conversations, documents, and data never leave your device unless you choose to share them. - -Want to learn more about the technical side? Check our detailed [guide on running AI models locally](/blog/run-ai-models-locally). It's not required to [use AI offline](https://jan.ai/) but helps understand how it all works. - -## Need help? - - - - diff --git a/website/src/content/blog/qwen3-settings.mdx b/website/src/content/blog/qwen3-settings.mdx deleted file mode 100644 index 0fee2ecc0f..0000000000 --- a/website/src/content/blog/qwen3-settings.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: "Best Settings to Run Qwen3-30B-A3B Locally" -description: "If you're running Qwen3-30B-A3B locally, don't guess your way through the settings. This guide tells you what actually works based on Qwen's own documentation and what we've seen hold up in practice." 
-tags: Qwen3, local AI, model settings, Jan, offline AI -categories: guides -date: 2025-05-10 -ogImage: assets/images/general/qwen3-30b-settings.jpg ---- - -import CTABlog from '@/components/Blog/CTABlog.astro'; -import { Aside } from '@astrojs/starlight/components' - - -import qwen3SettingsJanAi from '@/assets/blog/qwen3-settings-jan-ai.jpeg'; -import qwen3InJanHub from '@/assets/blog/qwen3-in-jan-hub.jpeg'; -import qwen3SettingsInJan from '@/assets/blog/qwen3-settings-in-jan.jpeg'; - - -# Best Settings to Run Qwen3-30B-A3B Locally - -If you're running Qwen3-30B-A3B locally, don't guess your way through the settings. This guide tells you what actually works based on Qwen's own documentation and what we've seen hold up in practice. - - - -Qwen3 comes with a unique toggle: `enable_thinking`. When it's on, the model "thinks", it breaks down problems, reasons step-by-step, and wraps part of its output in a `<think>...</think>` block. When it's off, the model skips all that and just gives you an answer. - -That changes how you configure it. - ---- - -### Thinking mode (`enable_thinking=True`) - -This is the mode for reasoning, math, coding, logic — anything that benefits from step-by-step generation. - -**Use these generation settings:** - -``` -Temperature: 0.6 -TopP: 0.95 -TopK: 20 -Max tokens: 32,768 -Do not use greedy decoding -``` - - - - -## Quick summary - -Qwen3 settings - -### Non-thinking mode (`enable_thinking=False`) - -This is for fast, general-purpose replies. Instruction following, chat, creative writing — no `<think>` block, no extra steps. - -**Use these settings:** - -```makefile -Temperature: 0.7 -TopP: 0.8 -TopK: 20 -``` - - - - - -## Soft vs. hard switch - -You can toggle thinking dynamically in the prompt using: - -``` -/think # turns thinking ON -/no_think # turns it OFF -``` - -This works only if `enable_thinking=True` is set in the code. If you set it to False, the soft switch won't do anything.
- - - -### What most people miss - -- **Don't log the `think` block in chat history.** Qwen recommends keeping only the final answer. Otherwise, the next reply gets bloated and off-topic. -- **Greedy decoding is a trap.** It's tempting to use for consistency, but Qwen3's output gets worse - and sometimes broken - without sampling. -- **YaRN isn't always needed.** The model supports up to 32k context by default. Use YaRN only if you regularly go beyond that. - ---- - -## Running Qwen3 locally with Jan - -The easiest way to run Qwen3-30B-A3B locally is through Jan. - -1. Download and install [Jan](https://jan.ai) -2. Open Jan and navigate to Jan Hub -3. Find `Qwen3` and `Qwen3-30B-A3B` in the model list -4. Click "Download" to get the model - -### Qwen3 in Jan Hub - -You can easily find Qwen3 models in Jan Hub: - -Qwen3 in Jan Hub - -Once downloaded, Jan handles all the technical setup, so you can focus on using the model rather than configuring it. The settings we covered in this guide are automatically applied when you use Qwen3 through Jan. - -### How to customize Qwen3-30B-A3B settings in Jan - -You can also customize these settings anytime by opening the right panel in Jan and adjusting the parameters to match your needs. - -Qwen3 settings in Jan app - -## Bottom Line - -If you're running Qwen3-30B-A3B locally, treat it like two models in one. Flip the thinking mode based on the task, adjust the generation settings accordingly, and let it work how it was meant to. - -## Need help? 
- - diff --git a/website/src/content/blog/rag-is-not-enough.mdx b/website/src/content/blog/rag-is-not-enough.mdx deleted file mode 100644 index c163b55884..0000000000 --- a/website/src/content/blog/rag-is-not-enough.mdx +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: "RAG is not enough: Lessons from Beating GPT-3.5 on Specialized Tasks with Mistral 7B" -description: We present a straightforward approach to customizing small, open-source models using fine-tuning and RAG that outperforms GPT-3.5 for specialized use cases. -tags: RAG, opensource chatgpt alternative, outperform ChatGPT, Mistral -date: 2024-03-25 -unlisted: true -categories: research ---- - -import CTABlog from '@/components/Blog/CTABlog.astro'; - - -# RAG is not enough: Lessons from Beating GPT-3.5 on Specialized Tasks with Mistral 7B - -## Abstract - -We present a straightforward approach to customizing small, open-source models using fine-tuning and RAG that outperforms GPT-3.5 for specialized use cases. With it, we achieved superior Q&A results of [technical documentation](https://nitro.jan.ai/docs) for a small codebase [codebase](https://github.com/menloresearch/nitro). - -In short, (1) extending a general foundation model like [Mistral](https://huggingface.co/mistralai/Mistral-7B-v0.1) with strong math and coding, and (2) training it over a high-quality, synthetic dataset generated from the intended corpus, and (3) adding RAG capabilities, can lead to significant accuracy improvements. - -Problems still arise with catastrophic forgetting in general tasks, commonly observed during specialized domain fine-tuning. In our case, this is likely exacerbated by our lack of access to Mistral’s original training dataset and various compression techniques used in our approach to keep the model small. 
- -## Selecting a strong foundation model - -[Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) outshines both [Meta's Llama-2 7B](https://huggingface.co/meta-llama/Llama-2-7b) and [Google's Gemma 7B](https://huggingface.co/google/gemma-7b) in key benchmarks, making it our choice for a base model. Starting with a strong foundation like Mistral allowed us to achieve greater accuracy in our specialized adaptations. - -![image](https://hackmd.io/_uploads/S1TN64kTa.png) - -*Figure 1. Mistral 7B excels in benchmarks, ranking among the top foundational models.* - -*Note: We are not sponsored by the Mistral team, though lots of folks like to run Mistral locally using [Jan](https://jan.ai/)., our desktop client.* - -## Cost effectively improving the base model - -Our technical use case required excellent math capabilities, an area where Mistral can underperform. Thus, we tested a number of Mistral model variants, from foundation models to finetunes to model merges, to find a stronger base model before proceeding to finetuning. - -![image](https://hackmd.io/_uploads/SkYBaVk6a.png) - -*Figure 2: The merged model, Stealth, doubles the mathematical capabilities of its foundational model while retaining the performance in other tasks.* - -We found merging models is quick and cost-effective, enabling fast adjustments based on the result of each iteration. - -We ended up with [Stealth 7B v1.1](https://huggingface.co/jan-hq/stealth-v1.1), a [SLERP](https://github.com/Digitous/LLM-SLERP-Merge) merge of Mistral with the following: - -- [WizardMath](https://huggingface.co/WizardLM/WizardMath-7B-V1.1) for its math capabilities. -- [WizardCoder](https://huggingface.co/WizardLM/WizardCoder-Python-7B-V1.0) for its coding capabilities. -- Our own [Trinity](https://huggingface.co/jan-hq/trinity-v1.2) model for its versatility across general tasks. 
- -This particular combination yielded the best tradeoff across mathematical & technical reasoning while retaining the most pre-merge performance on general tasks. - -## **DPO finetuning** - -Merging different LLMs can lead to a mixed answering style because each model was originally trained on different types of data. - -Thus, we applied Direct Preference Optimization ([DPO](https://arxiv.org/abs/2305.18290)) using the [Intel's Orca DPO pairs](https://huggingface.co/datasets/Intel/orca_dpo_pairs) dataset, chosen for its helpful answering style in general, math and coding concentration. - -This approach produced a final model - [Stealth 7B v1.2](https://huggingface.co/jan-hq/stealth-v1.2), aligned to our technical preferences and demonstrating minimal loss. - -## **Using our own technical documentation** - -With the base model ready, we started on our specific use case. - -Jan is an open-source project enjoying strong growth, but at one point we began receiving a new support ticket every minute, which quickly overwhelmed our bootstrapped resources. - -So, we directed our efforts toward training a model to answer user questions based on existing technical documentation. - -Specifically, we trained it on the [Nitro documentation](https://nitro.jan.ai/docs). For context, Nitro is the default inference engine for Jan. It’s an enterprise-ready server implementation of LlamaCPP, written in C++, with multimodal, queues, and other production-level server capabilities. - -It made an interesting corpus because it was rife with post-2023 technical jargon, edge cases, and poor informational layout. - -## Generating training data - -The first step was to transform Nitro’s unstructured format into a synthetic Q&A dataset designed for [instruction tuning](https://arxiv.org/pdf/2109.01652.pdf). - -The text was split into chunks of 300-token segments with 30-token overlaps.
This helped to avoid a [lost-in-the-middle](https://arxiv.org/abs/2307.03172) problem where LLM can’t use context efficiently to answer given questions. - -The chunks were then given to GPT-4 with 8k context length to generate 3800 Q&A pairs. The [training dataset](https://huggingface.co/datasets/jan-hq/nitro_binarized_v2) is available on HuggingFace. - -## **Training** - -Training was done with supervised finetuning (SFT) from the [Hugging Face's alignment-handbook](https://github.com/huggingface/alignment-handbook), per [Huggingface's Zephyr Beta](https://github.com/huggingface/alignment-handbook/tree/main/recipes/zephyr-7b-beta) guidelines. - -We used consumer-grade, dual Nvidia RTX 4090s for the training. The end-to-end training took 18 minutes. We found optimal hyperparameters in LoRA for this specific task to be `r = 256` and `alpha = 512`. - -This final model can be found [here on Huggingface](https://huggingface.co/jan-hq/nitro-v1.2-e3). - -![image](https://hackmd.io/_uploads/SJyDTVk6p.png) - -*Figure 3. Using the new finetuned model in [Jan](https://jan.ai/)* - -## Improving results with RAG - -As an additional step, we also added [Retrieval Augmented Generation (RAG)](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/) as an experiment parameter. - -A simple RAG setup was done using **[Llamaindex](https://www.llamaindex.ai/)** and the **[bge-en-base-v1.5 embedding](https://huggingface.co/BAAI/bge-base-en-v1.5)** model for efficient documentation retrieval and question-answering. You can find the RAG implementation [here](https://github.com/menloresearch/open-foundry/blob/main/rag-is-not-enough/rag/nitro_rag.ipynb). - -## Benchmarking the Results - -We curated a new set of [50 multiple-choice questions](https://github.com/menloresearch/open-foundry/blob/main/rag-is-not-enough/rag/mcq_nitro.csv) (MCQ) based on the Nitro docs. 
The questions had varying levels of difficulty and had trick components that challenged the model's ability to discern misleading information. - -![image](https://hackmd.io/_uploads/By9vaE1Ta.png) - -*Figure 4. Comparison between finetuned model and OpenAI's GPT* - -**Results** - -- GPT-3.5 with RAG: 56.7% -- GPT-4 with RAG: 64.3% -- Merged 7B Model ([Stealth 7B](https://huggingface.co/jan-hq/stealth-v1.3)) with RAG: 47.7% -- Finetuned 7B Model (Nitro 7B) with RAG: 57.8% - -This indicates that with task-specific training, we can improve an open-source, Small Language Model to the level of GPT-3.5 on domain knowledge. - -Notably, the finetuned + RAG approach also demonstrated more consistency across benchmarking, as indicated by its lower standard deviation. - -## Conclusion - -We conclude that this combination of model merging + finetuning + RAG yields promise. This finding is relevant for teams and individuals that need specialized, technical small language models that need to run in resource-constrained or highly secured environments, where GPT may not be an option. - -Anecdotally, we’ve had some success using this model in practice to onboard new team members to the Nitro codebase. - -A full research report with more statistics can be found [here](https://github.com/menloresearch/open-foundry/blob/main/rag-is-not-enough/README.md). 
- -# References - -- [Catastrophic forgetting](https://arxiv.org/abs/2308.08747) -- [Math specialization](https://arxiv.org/abs/2308.09583) -- [Code specialization](https://arxiv.org/abs/2306.08568) -- [Search specialization](https://github.com/SciPhi-AI/agent-search) -- [Evol Instruct](https://github.com/nlpxucan/WizardLM) -- [Lost in the middle](https://arxiv.org/abs/2307.03172) -- [Instruction tuning](https://arxiv.org/pdf/2109.01652.pdf) - - \ No newline at end of file diff --git a/website/src/content/blog/run-ai-models-locally.mdx b/website/src/content/blog/run-ai-models-locally.mdx deleted file mode 100644 index 99934dfab4..0000000000 --- a/website/src/content/blog/run-ai-models-locally.mdx +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: "How to run AI models locally as a beginner?" -description: "A straightforward guide to running AI models locally on your computer, regardless of your background." -tags: AI, local models, Jan, GGUF, privacy, local AI -categories: guides -date: 2025-01-31 -ogImage: assets/run-ai-locally-with-jan.jpg -twitter: - card: summary_large_image - site: "@jandotai" - title: "How to run AI models locally as a beginner?" - description: "Learn how to run AI models locally on your computer for enhanced privacy and control. Perfect for beginners!" - image: assets/run-ai-locally-with-jan.jpg ---- - -import CTABlog from '@/components/Blog/CTABlog.astro'; - -import { Aside } from '@astrojs/starlight/components' - - -# How to run AI models locally as a beginner? - -Most people think running AI models locally is complicated. It's not. Anyone can run powerful AI models like DeepSeek, Llama, and Mistral on their own computer. This guide will show you how, even if you've never written a line of code. - -## Quick steps: -### 1. 
Download [Jan](https://jan.ai) - -![Jan AI's official website showing the download options](./_assets/jan.ai.jpg "Download Jan from the official website - it's free and open source") -*Download Jan from [jan.ai](https://jan.ai) - it's free and open source.* - -### 2. Choose a model that fits your hardware - -![Jan's model selection interface showing various AI models](./_assets/jan-model-selection.jpg "Jan helps you pick the right AI model for your computer") -*Jan helps you pick the right AI model for your computer.* - -### 3. Start using AI locally - -That's all to run your first AI model locally! - -![Jan's simple and clean chat interface for local AI](./_assets/run-ai-locally-with-jan.jpg "Jan's easy-to-use chat interface after installation") -*Jan's easy-to-use chat interface after installation.* - -Keep reading to learn key terms of local AI and the things you should know before running AI models locally. - -## How Local AI Works - -Before diving into the details, let's understand how AI runs on your computer: - - - -![llama.cpp GitHub repository showing its popularity and wide adoption](./_assets/ai-locally-llama.cpp.jpg "llama.cpp is widely used and trusted in the AI community") -*llama.cpp helps millions of people run AI locally on their computers.* - - - -## Understanding AI Models - -Think of AI models like apps on your computer - some are light and quick to use, while others are bigger but can do more things. When you're choosing an AI model to run on your computer, you'll see names like "Llama-3-8B" or "Mistral-7B". Let's break down what this means in simple terms. - - - -![Jan Hub interface showing model sizes and types](./_assets/jan-hub-for-ai-models.jpg "Jan Hub makes it easy to understand different model sizes and versions") -*Jan Hub makes it easy to understand different model sizes and versions* - -**Good news:** Jan helps you pick the right model size for your computer automatically! 
You don't need to worry about the technical details - just choose a model that matches what Jan recommends for your computer. - -## What You Can Do with Local AI - - - -## Hardware Requirements - -Before downloading an AI model, consider checking if your computer can run it. Here's a basic guide: - -**The basics your computer needs:** -- A decent processor (CPU) - most computers from the last 5 years will work fine -- At least 8GB of RAM - 16GB or more is better -- Some free storage space - at least 5GB recommended - -### What Models Can Your Computer Run? - -| | | | -|---|---|---| -| Regular Laptop | 3B-7B models | Good for chatting and writing. Like having a helpful assistant | -| Gaming Laptop | 7B-13B models | More capable. Better at complex tasks like coding and analysis | -| Powerful Desktop | 13B+ models | Better performance. Great for professional work and advanced tasks | - - - -## Getting Started with Models - -### Model Versions - -When browsing models in Jan, you'll see terms like "Q4", "Q6", or "Q8". Here's what that means in simple terms: - - - -**Pro tip**: Start with Q4 versions - they work great for most people and run smoothly on regular computers! - -### Getting Models from Hugging Face - -You'll often see links to "Hugging Face" when downloading AI models. Think of Hugging Face as the "GitHub for AI" - it's where the AI community shares their models. Jan makes it super easy to use: - -1. Jan has a built-in connection to Hugging Face -2. You can download models right from Jan's interface -3. No need to visit the Hugging Face website unless you want to explore more options - -## Setting up your local AI - -### Getting Models from Hugging Face - -You'll often see links to "Hugging Face" when downloading AI models. Think of Hugging Face as the "GitHub for AI" - it's where the AI community shares their models. This sounds technical, but Jan makes it super easy to use: - -1. Jan has a built-in connection to Hugging Face -2. 
You can download models right from Jan's interface -3. No need to visit the Hugging Face website unless you want to explore more options - - - -### 1. Get Started -Download Jan from [jan.ai](https://jan.ai) - it sets everything up for you. - -### 2. Get an AI Model - -You can get models two ways: - -#### 1. Use Jan Hub (Recommended): - - Click "Download Model" in Jan - - Pick a recommended model - - Choose one that fits your computer - -![AI model parameters explained](./_assets/jan-model-download.jpg "Jan Hub makes it easy to download AI models") -*Use Jan Hub to download AI models* - -#### 2. Use Hugging Face: - - - -##### Step 1: Get the model link -Find and copy a GGUF model link from [Hugging Face](https://huggingface.co) - -![Finding a GGUF model on Hugging Face](./_assets/hugging-face-jan-model-download.jpg "Find GGUF models on Hugging Face") -*Look for models with "GGUF" in their name* - -##### Step 2: Open Jan -Launch Jan and go to the Models tab - -![Opening Jan's model section](./_assets/jan-library-deepseek-r1.jpg "Navigate to the Models section in Jan") -*Navigate to the Models section in Jan* - -##### Step 3: Add the model -Paste your Hugging Face link into Jan - -![Adding a model from Hugging Face](./_assets/jan-hub-deepseek-r1.jpg "Paste your GGUF model link here") -*Paste your GGUF model link here* - -##### Step 4: Download -Select your quantization and start the download - -![Downloading the model](./_assets/jan-hf-model-download.jpg "Choose your preferred model size and download") -*Choose your preferred model size and download* - -### Common Questions - -**"My computer doesn't have a graphics card - can I still use AI?"** - -Yes! It will run slower but still work. Start with 7B models. - -**"Which model should I start with?"** - -Try a 7B model first - it's the best balance of smart and fast. - -**"Will it slow down my computer?"** - -Only while you're using the AI. Close other big programs for better speed. - -## Need help? 
- - diff --git a/website/src/content/changelog/2023-12-21-faster-inference-across-platform.mdx b/website/src/content/changelog/2023-12-21-faster-inference-across-platform.mdx deleted file mode 100644 index 00515bd5b2..0000000000 --- a/website/src/content/changelog/2023-12-21-faster-inference-across-platform.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Faster inference across: Mac, Windows, Linux, and GPUs" -version: 0.4.3 -description: "" -date: 2023-12-21 -ogImage: "/assets/images/changelog/Jan_v0.4.3.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- Custom models: `Trinity`, `Pandora` (great for general use). -- Faster inference across: Mac, Windows, Linux, and GPUs. -- Connect to remote OpenAI models like GPT4 via API key. \ No newline at end of file diff --git a/website/src/content/changelog/2024-01-16-settings-options-right-panel.mdx b/website/src/content/changelog/2024-01-16-settings-options-right-panel.mdx deleted file mode 100644 index b48fd46012..0000000000 --- a/website/src/content/changelog/2024-01-16-settings-options-right-panel.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Thread settings options in the right panel" -version: 0.4.4 -description: "" -date: 2024-01-16 -ogImage: "" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- You can now see whether the model is compatible with running on your device. -- You can switch model mid-threads. -- More thread settings options in the right panel. -- CI automation, anti-virus checks. -- Social media access to Jan's Discord & Github from the app for further user support. -- Fixed major bugs, more stability. 
\ No newline at end of file diff --git a/website/src/content/changelog/2024-01-29-local-api-server.mdx b/website/src/content/changelog/2024-01-29-local-api-server.mdx deleted file mode 100644 index d90ae00b20..0000000000 --- a/website/src/content/changelog/2024-01-29-local-api-server.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Local API server" -version: 0.4.5 -description: "" -date: 2024-01-29 -ogImage: "/assets/images/changelog/Jan_v0.4.5.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- Local API Server: Experience Jan's dashboard for the local API server to make your data processing smoother and more efficient. -- HTTP Proxy Support: Now, downloading and connecting are seamless, just like browsing Hugging Face in your browser. -- Updated Settings Page: Find what you need faster! We've updated the settings page. - -### Fixes 💫 - -- Auto Update: Enjoy smoother updates. We've fixed the glitches. -- Swagger API Page: Full documentation, no more blanks. -- GPU for Models: Your imported models now fully leverage GPU power. \ No newline at end of file diff --git a/website/src/content/changelog/2024-02-05-jan-data-folder.mdx b/website/src/content/changelog/2024-02-05-jan-data-folder.mdx deleted file mode 100644 index caf9dec982..0000000000 --- a/website/src/content/changelog/2024-02-05-jan-data-folder.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Jan Data Folder" -version: 0.4.6 -description: "" -date: 2024-02-05 -ogImage: "/assets/images/changelog/jan_product_update_feature.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- `Changing Jan Data Folder`: Now, moving your data across different folders is just a click away, making organization simpler. So you can even use an external drive. -- Factory Settings: You can reset all of Jan's usage data for a fresh start. 
- -### Fixes 💫 - -- Easily see each thread's last update time, like WhatsApp, keeping you informed. -- A new loading screen during data migration ensures the app is responsive. -- Enhanced notifications for clearer feedback on model runs or errors. diff --git a/website/src/content/changelog/2024-02-10-jan-is-more-stable.mdx b/website/src/content/changelog/2024-02-10-jan-is-more-stable.mdx deleted file mode 100644 index 35e96f35f9..0000000000 --- a/website/src/content/changelog/2024-02-10-jan-is-more-stable.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Jan is more stable 👋" -version: 0.5.5 -description: "Jan supports Llama 3.2 and Qwen 2.5" -date: 2024-10-02 -ogImage: "/assets/images/changelog/jan-v0.5.5.jpeg" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Highlights 🎉 - -- Meta's Llama 3.2 and Alibaba's Qwen 2.5 added to the hub -- Improved starter screen -- Better local vs. cloud model navigation - -Fixes 💫 - -- Solved GPU acceleration for GGUF models -- Improved model caching & threading -- Resolved input & toolbar overlaps - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.5). 
\ No newline at end of file diff --git a/website/src/content/changelog/2024-02-26-home-servers-with-helm.mdx b/website/src/content/changelog/2024-02-26-home-servers-with-helm.mdx deleted file mode 100644 index 768c3f7bd8..0000000000 --- a/website/src/content/changelog/2024-02-26-home-servers-with-helm.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "Run Jan on your home-servers with Helm" -version: 0.4.7 -description: "" -date: 2024-02-26 -ogImage: "" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- Run Jan on your `home-servers` with `Helm` -- Use Jan headless or with a Web UI -- `Intel Arc` & `AMD GPU` support through `Vulkan` & `LlamaCPP` - - -### Features & Fixes 💫 - -- 48 fixes, refactoring and stability improvements. -- Conversation threads are auto-summarized & messages are editable. -- Encountering an error? We've replaced vague alerts with a troubleshooting assistant. \ No newline at end of file diff --git a/website/src/content/changelog/2024-03-06-ui-revamp-settings.mdx b/website/src/content/changelog/2024-03-06-ui-revamp-settings.mdx deleted file mode 100644 index dd48c7fbb9..0000000000 --- a/website/src/content/changelog/2024-03-06-ui-revamp-settings.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "New UI & Codestral Support" -version: 0.5.0 -description: "Revamped Jan's UI to make it clearer and more user-friendly" -date: 2024-06-03 -ogImage: "/assets/images/changelog/jan_v0.5.0.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Revamped Jan's UI to make it clearer and more user-friendly. - -- Updated Assistant, Model, and Tools sections -- Categorized customization options for easier control -- New settings for models, APIs, and experimental features - -## New Model: Codestral - -Jan now supports Mistral's new model Codestral. Thanks [Bartowski](https://huggingface.co/bartowski) for the GGUF model. You can download the model from the hub. 
- -## More GGUF models - -More GGUF models can run in Jan - we rebased to llama.cpp b3012.Big thanks to [ggerganov](https://github.com/ggerganov) - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.0). diff --git a/website/src/content/changelog/2024-03-11-import-models.mdx b/website/src/content/changelog/2024-03-11-import-models.mdx deleted file mode 100644 index 1d51fe087a..0000000000 --- a/website/src/content/changelog/2024-03-11-import-models.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Import models directly using the UI" -version: 0.4.8 -description: "" -date: 2024-03-11 -ogImage: "" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- Import models directly using the UI -- A revamped system monitor - -### Features & Fixes 💫 - -- Install Jan using Homebrew on Mac Silicon (thanks to https://github.com/chenrui333 (I'll tag you when I find your Discord handle! :D)). -- Fixed an HTTP Proxy issue causing download errors. -- UI Improvements and more. \ No newline at end of file diff --git a/website/src/content/changelog/2024-03-19-nitro-tensorrt-llm-extension.mdx b/website/src/content/changelog/2024-03-19-nitro-tensorrt-llm-extension.mdx deleted file mode 100644 index 817c5a0919..0000000000 --- a/website/src/content/changelog/2024-03-19-nitro-tensorrt-llm-extension.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "Nitro-Tensorrt-LLM Extension" -version: 0.4.9 -description: "" -date: 2024-03-19 -ogImage: "" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- Nitro-Tensorrt-LLM Extension. -- Update models.json. -- Move tensorrt executable to the engine. 
- diff --git a/website/src/content/changelog/2024-04-02-groq-api-integration.mdx b/website/src/content/changelog/2024-04-02-groq-api-integration.mdx deleted file mode 100644 index aabcb0ef15..0000000000 --- a/website/src/content/changelog/2024-04-02-groq-api-integration.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Groq API Integration" -version: 0.4.10 -description: "" -date: 2024-04-02 -ogImage: "/assets/images/changelog/jan_update_groq.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- Groq API Integration -- Enhanced hardware troubleshooting guide - -### Features & Fixes 💫 - -- Improved Jan data folder's functionality. -- Fixed URI malformed and `stop` parameter error. -- VRAM-aware model recommendations. \ No newline at end of file diff --git a/website/src/content/changelog/2024-04-15-new-mistral-extension.mdx b/website/src/content/changelog/2024-04-15-new-mistral-extension.mdx deleted file mode 100644 index 97fd9541ef..0000000000 --- a/website/src/content/changelog/2024-04-15-new-mistral-extension.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "New Mistral Extension" -version: 0.4.11 -description: "Jan has a new Mistral Extension letting you chat with larger Mistral models via Mistral API" -date: 2024-04-15 -ogImage: "/assets/images/changelog/jan_mistral_api.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -### Highlights 🎉 - -- Jan has a new `Mistral Extension` letting you chat with larger Mistral models via Mistral API. You can still run smaller Mistral models locally, but now there's a remote option. -- 3rd party extensions can register their own settings pages in the app without any code. -- You can now change set API Prefix for the local API server. -- You can now customize your Assistant's name in Thread Settings. 
diff --git a/website/src/content/changelog/2024-04-25-llama3-command-r-hugginface.mdx b/website/src/content/changelog/2024-04-25-llama3-command-r-hugginface.mdx deleted file mode 100644 index a2cbdc5b38..0000000000 --- a/website/src/content/changelog/2024-04-25-llama3-command-r-hugginface.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: 'Jan now supports Llama3 and Command R+' -version: 0.4.12 -description: "Jan has added compatibility with Llama3 & Command R+" -date: 2024-04-25 -ogImage: "/assets/images/changelog/jan_llama3.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Jan has added compatibility with Meta’s open-source language model, `Llama3`, through the integration with `llamacpp` (thanks to [@ggerganov](https://github.com/ggerganov)). - -Additionally, `Command R+` is now supported. It is the first open-source model to surpass GPT-4 on the [LMSys leaderboard](https://chat.lmsys.org/?leaderboard). - -![Commandr](/assets/images/changelog/jan_cohere_commandr.gif) - -## Import Huggingface models directly - -Users can now import Huggingface models into Jan. Simply copy the model’s link from Huggingface and paste it into the search bar on Jan Hub. - -![HugginFace](/assets/images/changelog/jan_hugging_face.gif) - -## Enhanced LaTeX understanding - -Jan now understands LaTeX, allowing users to process and understand complex mathematical expressions more effectively. - -![Latex](/assets/images/changelog/jan_update_latex.gif) - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.4.12). 
diff --git a/website/src/content/changelog/2024-05-20-llamacpp-upgrade-new-remote-models.mdx b/website/src/content/changelog/2024-05-20-llamacpp-upgrade-new-remote-models.mdx deleted file mode 100644 index 531e5948ef..0000000000 --- a/website/src/content/changelog/2024-05-20-llamacpp-upgrade-new-remote-models.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "Jan now supports more GGUF models" -version: 0.4.13 -description: "We rebased to llamacpp b2865." -date: 2024-05-20 -ogImage: "/assets/images/changelog/jan_v0.4.13_update.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -With this release, more GGUF models should work now! We rebased to llamacpp b2865! - -## New remote models: Anthropic & Cohere APIs - -Jan now supports `Cohere`'s models `Command R` and `Command R+`, along with `Anthropic API`'s `Claude 3 Opus`, `Claude 3 Sonnet`, and `Claude 3 Haiku`. - -## New integrations: Martian and OpenRouter - -Jan supports `Martian`, a dynamic LLM router that routes between multiple models and allows users to reduce costs by 20% to 97%. Jan also supports `OpenRouter`, helping users select the best model for each query. - -![New_Integrations](/assets/images/changelog/jan_v0.4.13_update.gif) - -## GPT-4o Access - -Users can now connect to OpenAI's new model GPT-4o. 
- -![GPT4o](/assets/images/changelog/jan_v0_4_13_openai_gpt4o.gif) - -For more details, see the [GitHub release notes.](https://github.com/menloresearch/jan/releases/tag/v0.4.13) diff --git a/website/src/content/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium.mdx b/website/src/content/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium.mdx deleted file mode 100644 index 8c976106a2..0000000000 --- a/website/src/content/changelog/2024-05-28-cohere-aya-23-8b-35b-phi-3-medium.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "Jan now compatible with Aya 23 8B & 35B and Phi-3-Medium" -version: 0.4.14 -description: "Jan now supports Cohere's Aya 23 8B & 35B and Microsoft's Phi-3-Medium." -date: 2024-05-28 -ogImage: "/assets/images/changelog/jan-v0-4-14-phi3.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Jan now supports `Cohere`'s new models `Aya 23 (8B)` & `Aya 23 (35B)` and `Microsoft`'s `Phi-3-Medium`. - -More GGUF models can run in Jan - we rebased to llama.cpp b2961. - -Huge shoutouts to [ggerganov](https://github.com/ggerganov) and contributors for llama.cpp, and [Bartowski](https://huggingface.co/bartowski) for GGUF models. - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.4.14). 
diff --git a/website/src/content/changelog/2024-06-21-nvidia-nim-support.mdx b/website/src/content/changelog/2024-06-21-nvidia-nim-support.mdx deleted file mode 100644 index 13191a8692..0000000000 --- a/website/src/content/changelog/2024-06-21-nvidia-nim-support.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Jan supports NVIDIA NIM" -version: 0.5.1 -description: "Jan has integrated NVIDIA NIM and supports Qwen 2 7B" -date: 2024-06-21 -ogImage: "/assets/images/changelog/jan_nvidia_nim_support.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -## NVIDIA NIM - -We've integrated NVIDIA NIM support. - -## Qwen 2 7B - -You can now access Qwen 2 7B directly in the Jan Hub. - -We've updated to llama.cpp b3088 for better performance - thanks to [GG](https://github.com/ggerganov) - -## Fixes - -- Fixed Anthropic API error -- Reduced chat font weight (back to normal!) -- Restored the maximize button - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.1). diff --git a/website/src/content/changelog/2024-07-15-claude-3-5-support.mdx b/website/src/content/changelog/2024-07-15-claude-3-5-support.mdx deleted file mode 100644 index d13228a43f..0000000000 --- a/website/src/content/changelog/2024-07-15-claude-3-5-support.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Jan supports Claude 3.5 Sonnet" -version: 0.5.2 -description: "You can run Claude 3.5 Sonnet in Jan" -date: 2024-07-15 -ogImage: "/assets/images/changelog/jan_supports_claude_3_5.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -## Claude 3.5 Sonnet - -We've integrated support for Claude 3.5 Sonnet, you can run Anthropic's latest model in Jan. - -Plus, You can now use optional spell check for chats. There's also a new shortcut for app logs in System Monitor. 
- -## Fixes - -In this hotfix, we've addressed several issues to improve your Jan experience: - -### Gemma 2B Stability - -Gemma 2B now runs without any issues. - -### Tooltip Hover Functionality - -We've restored the tooltip hover functionality, which makes it easier to access helpful information without any glitches. - -### Right-click Options for Thread Settings - -The right-click options for thread settings are now fully operational again. You can now manage your threads with this fix. - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.2). diff --git a/website/src/content/changelog/2024-09-01-llama3-1-gemma2-support.mdx b/website/src/content/changelog/2024-09-01-llama3-1-gemma2-support.mdx deleted file mode 100644 index debe207bf1..0000000000 --- a/website/src/content/changelog/2024-09-01-llama3-1-gemma2-support.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "v0.5.3 is out with stability improvements!" -version: 0.5.3 -description: "You can run Llama 3.1 and Gemma 2 in Jan" -date: 2024-08-29 -ogImage: "/assets/images/changelog/janv0.5.3.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -## Llama 3.1 and Gemma 2 Support - -Jan finally supports Meta's Llama 3.1 and Google's Gemma 2. Thanks for the patience folks! - -We've been working on stability issues over the last few weeks. Jan is now more stable. - -### Additional Notes - -- Upgraded the inference engine for better performance -- Model settings now persist across new threads -- Fixed the GPU memory utilization bar -- Some UX and copy improvements - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.3). 
diff --git a/website/src/content/changelog/2024-09-17-improved-cpu-performance.mdx b/website/src/content/changelog/2024-09-17-improved-cpu-performance.mdx deleted file mode 100644 index 6951c86470..0000000000 --- a/website/src/content/changelog/2024-09-17-improved-cpu-performance.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Jan is faster now" -version: 0.5.4 -description: "Jan has faster CPU inference." -date: 2024-09-17 -ogImage: "/assets/images/changelog/jan-v0.5.4.jpg" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Over the last few weeks, we've been working on improving Jan's stability. Every update helps us make Jan's experience faster and smoother. -With version 0.5.4, you’ll notice AI running quicker on CPU - better performance all around. - -### Model Downloads -You can now download models directly from the model selector in Threads. No more jumping between different tabs – just pick, download, and get started. - -### Fast CPU Inference -We've addressed the slower inference speeds on CPU, so you'll notice faster processing times, especially when using larger models. - -### Model Starts -We tackled the notorious "model can't start / The specified module could not be found" error. -Plus, Phi-3 models are now working smoothly even if you're using an outdated version. - -### Consistent Warnings -Performance warnings are now aligned between Model Hub and Threads, giving you more reliable insights no matter where you're working. - -### Persistent Thread Settings -Switching between threads used to reset your instruction settings. That’s fixed now! Your settings will stay intact as you jump between old and new threads. - -### Minor UI Tweaks & Bug Fixes -We’ve also resolved issues with the input slider on the right panel and tackled several smaller bugs to keep everything running smoothly. - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.4). 
diff --git a/website/src/content/changelog/2024-10-24-jan-stable.mdx b/website/src/content/changelog/2024-10-24-jan-stable.mdx deleted file mode 100644 index 9acc7f31e1..0000000000 --- a/website/src/content/changelog/2024-10-24-jan-stable.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Jan has Stable, Beta and Nightly versions" -version: 0.5.7 -description: "This release is mostly focused on bug fixes." -date: 2024-10-24 -ogImage: "/assets/images/changelog/jan-v0.5.7.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Highlights 🎉 - -- Jan has Stable, Beta and Nightly versions -- Saving instructions for new threads is now stable - -Fixes 💫 - -- Fixed broken links, hardware issues, and multi-modal download -- Resolved text overlap, scrolling, and multi-monitor reset problems -- Adjusted LLava model EOS token and context input - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.7). \ No newline at end of file diff --git a/website/src/content/changelog/2024-11-22-jan-bugs.mdx b/website/src/content/changelog/2024-11-22-jan-bugs.mdx deleted file mode 100644 index 3d10202172..0000000000 --- a/website/src/content/changelog/2024-11-22-jan-bugs.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "Model downloads & running issues fixed" -version: 0.5.9 -description: "Jan v0.5.9 is here: fixing what needed fixing." 
-date: 2024-11-22 -ogImage: "/assets/images/changelog/jan-v0.5.9.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Jan v0.5.9 is here: fixing what needed fixing - -### Highlights 🎉 - -- Model downloads & running issues fixed -- Document upload bugs resolved -- System glitches addressed: Factory Reset, HTTP Proxy, Hugging Face tokens -- Fixed issues with code blocks in streaming responses -- Improved the UX of the Local API Server page - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.9). \ No newline at end of file diff --git a/website/src/content/changelog/2024-11.14-jan-supports-qwen-coder.mdx b/website/src/content/changelog/2024-11.14-jan-supports-qwen-coder.mdx deleted file mode 100644 index 430316da9e..0000000000 --- a/website/src/content/changelog/2024-11.14-jan-supports-qwen-coder.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "Jan supports Qwen2.5-Coder 14B & 32B" -version: 0.5.8 -description: "Jan v0.5.8 is out: Jan supports Qwen2.5-Coder 14B & 32B through Cortex" -date: 2024-11-14 -ogImage: "/assets/images/changelog/jan-v0.5.8.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Jan v0.5.8 is out: Jan supports Qwen2.5-Coder 14B & 32B through Cortex - -### Highlights 🎉 - -- A new engine: Jan now runs models via [Cortex](https://cortex.so) - -- Supports Alibaba Qwen's Coder 14B & 32B - -- Supports markdown rendering on user messages - -and various UI/UX enhancements 💫 - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.8). 
\ No newline at end of file diff --git a/website/src/content/changelog/2024-12-03-jan-is-faster.mdx b/website/src/content/changelog/2024-12-03-jan-is-faster.mdx deleted file mode 100644 index 79f05b12dd..0000000000 --- a/website/src/content/changelog/2024-12-03-jan-is-faster.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Jan v0.5.10 is live" -version: 0.5.10 -description: "Jan is faster, smoother, and more reliable." -date: 2024-12-03 -ogImage: "/assets/images/changelog/jan-v0.5.10.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Jan v0.5.10 is live: Jan is faster, smoother, and more reliable. - -### Highlights 🎉 - -- Resolved model startup issues, memory leaks, and improved token limits -- Clearer error messages and subtle UX improvements - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.10). \ No newline at end of file diff --git a/website/src/content/changelog/2024-12-05-jan-hot-fix-mac.mdx b/website/src/content/changelog/2024-12-05-jan-hot-fix-mac.mdx deleted file mode 100644 index d79c77b03f..0000000000 --- a/website/src/content/changelog/2024-12-05-jan-hot-fix-mac.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Jan v0.5.11 is here!" -version: 0.5.11 -description: "Critical issues fixed, Mac installation updated." -date: 2024-12-05 -ogImage: "/assets/images/changelog/jan-v0.5.11.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Jan v0.5.11 is here - critical issues fixed, Mac installation updated. 
- -### Highlights 🎉 - -- Crashes (markdown & code highlighting) -- Thread switching & auto-scroll -- Syntax highlighting bugs -- API issues (Anthropic, OpenRouter) -- Title glitches with special characters -- Model settings inconsistencies - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.11). \ No newline at end of file diff --git a/website/src/content/changelog/2024-12-30-jan-new-privacy.mdx b/website/src/content/changelog/2024-12-30-jan-new-privacy.mdx deleted file mode 100644 index 3f29dd2455..0000000000 --- a/website/src/content/changelog/2024-12-30-jan-new-privacy.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "Jan gives you full control over your privacy" -version: 0.5.12 -description: "Improved Privacy settings to give full control over analytics" -date: 2024-12-30 -ogImage: "/assets/images/changelog/jan-v0.5.12.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -Jan v0.5.12 is here - critical issues fixed, Mac installation updated. - -### Highlights 🎉 - -- Updated privacy settings with opt-in/out options for Jan Analytics -- Adjustable chat width -- The right sidebar and input box are now optimized for new users - -### Fixes 💫 -- Updated privacy settings with opt-in/out options for Jan Analytics -- Adjustable chat width -- The right sidebar and input box are now optimized for new users - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.12). 
\ No newline at end of file diff --git a/website/src/content/changelog/2025-01-06-key-issues-resolved.mdx b/website/src/content/changelog/2025-01-06-key-issues-resolved.mdx deleted file mode 100644 index 40e541c2a5..0000000000 --- a/website/src/content/changelog/2025-01-06-key-issues-resolved.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "A few key issues have been solved!" -version: 0.5.13 -description: "Jan v0.5.13 is here: A few key issues have been solved." -date: 2025-01-06 -ogImage: "/assets/images/changelog/jan-v0-5-13.jpg" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -👋 Jan v0.5.13 is here: A few key issues have been solved! - -### Highlights 🎉 - -- Resolved model loading issues on MacOS Intel -- Fixed app resetting max_tokens to 8192 on new threads - now uses model settings -- Fixed Vulkan settings visibility for some users - -Update your product or download the latest: https://jan.ai - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.13). diff --git a/website/src/content/changelog/2025-01-23-deepseek-r1-jan.mdx b/website/src/content/changelog/2025-01-23-deepseek-r1-jan.mdx deleted file mode 100644 index 3006588b88..0000000000 --- a/website/src/content/changelog/2025-01-23-deepseek-r1-jan.mdx +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "Run DeepSeek R1 Distills error-free!" -version: 0.5.14 -description: "Jan v0.5.14 is out: Run DeepSeek R1 Distills error-free!" -date: 2025-01-23 -ogImage: "/assets/images/changelog/jan-v0-5-14-deepseek-r1.jpg" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -👋 Jan v0.5.14 is out: Run DeepSeek R1 Distills error-free! - -You can run DeepSeek R1 distills in Jan error-free. Follow our [step-by-step guide to run DeepSeek R1 locally](/blog/deepseek-r1-locally) and get this AI model running on your device in minutes. 
- -llama.cpp version updated via Cortex—thanks to GG & llama.cpp community! - -- Paste GGUF links into Jan Hub to download -- Already downloaded the model but facing issues? Update Jan. - -Models: - -Qwen -- DeepSeek-R1-Distill-Qwen-1.5B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF -- DeepSeek-R1-Distill-Qwen-7B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF -- DeepSeek-R1-Distill-Qwen-14B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF -- DeepSeek-R1-Distill-Qwen-32B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF - -Llama -- DeepSeek-R1-Distill-Llama-8B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-8B-GGUF -- DeepSeek-R1-Distill-Llama-70B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-70B-GGUF - -Update your Jan or [download the latest](https://jan.ai/). - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.14). diff --git a/website/src/content/changelog/2025-02-14-enterprise-security.mdx b/website/src/content/changelog/2025-02-14-enterprise-security.mdx deleted file mode 100644 index 00d973bb26..0000000000 --- a/website/src/content/changelog/2025-02-14-enterprise-security.mdx +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: "Jan Enterprise: Security & Compliance" -version: 0.6.2 -description: "Enterprise-grade security features, compliance tools, and team management capabilities" -date: 2025-02-14 -image: "/assets/images/changelog/enterprise-security.png" -featured: false ---- - -## Enterprise Ready 🏢 - -Jan v0.6.2 introduces comprehensive enterprise features designed for organizations that need advanced security, compliance, and management capabilities. Deploy AI with confidence across your entire organization. 
- -### 🔐 Advanced Security Framework - -Military-grade security for your AI infrastructure: -- **Zero-Trust Architecture**: Verify every request, trust nothing by default -- **End-to-End Encryption**: Data encrypted in transit and at rest -- **Certificate Management**: Full PKI support with automatic rotation -- **Network Isolation**: Separate AI workloads from corporate networks -- **Audit Logging**: Comprehensive logs for security analysis and compliance - -### 👥 Team Management - -Sophisticated user and access control: -- **Role-Based Access**: Granular permissions for different user types -- **Single Sign-On**: Integration with Active Directory, SAML, and OAuth -- **Multi-Factor Authentication**: Required 2FA with hardware token support -- **Session Management**: Control session duration and concurrent logins -- **User Provisioning**: Automated user creation and deactivation - -### 📊 Compliance Dashboard - -Meet regulatory requirements with confidence: -- **SOC 2 Compliance**: Built-in controls for SOC 2 Type II certification -- **GDPR Tools**: Data subject rights management and privacy controls -- **HIPAA Ready**: Healthcare-specific security and privacy features -- **ISO 27001**: Information security management system alignment -- **Custom Frameworks**: Adapt to your specific compliance requirements - -### 🏗️ Infrastructure Management - -Deploy and scale AI across your organization: -- **Containerized Deployment**: Docker and Kubernetes ready -- **Load Balancing**: Distribute AI workloads across multiple instances -- **Auto-Scaling**: Automatically scale based on demand -- **Health Monitoring**: Real-time system health and performance tracking -- **Disaster Recovery**: Automated backups and failover capabilities - -### 🎯 Data Governance - -Complete control over your data: -- **Data Classification**: Automatically tag and classify sensitive information -- **Retention Policies**: Automated data lifecycle management -- **Data Loss Prevention**: Prevent sensitive 
data from leaving your environment -- **Geographic Controls**: Control where data is processed and stored -- **Right to Deletion**: Complete data removal on request - -### 🔍 Advanced Analytics - -Insights into AI usage across your organization: -- **Usage Analytics**: Understand how teams use AI capabilities -- **Cost Analysis**: Track AI costs by department, team, or user -- **Performance Metrics**: Monitor AI performance and quality -- **Adoption Reports**: Measure AI adoption across the organization -- **Custom Dashboards**: Create reports tailored to your needs - -### 🛡️ Threat Protection - -Advanced protection against AI-specific threats: -- **Prompt Injection Detection**: Identify and block malicious prompts -- **Content Filtering**: Prevent inappropriate content generation -- **Rate Limiting**: Protect against abuse and resource exhaustion -- **Anomaly Detection**: Identify unusual usage patterns -- **Incident Response**: Automated response to security events - -### 🌐 Integration Capabilities - -Connect Jan to your existing enterprise systems: -- **API Gateway**: Secure API access with rate limiting and authentication -- **Webhook Support**: Real-time notifications to external systems -- **Database Connections**: Direct integration with enterprise databases -- **Workflow Integration**: Connect to existing business processes -- **Custom Connectors**: Build integrations specific to your needs - -### 📋 Policy Management - -Implement and enforce AI governance policies: -- **Usage Policies**: Define acceptable use of AI capabilities -- **Content Policies**: Control what types of content can be generated -- **Model Policies**: Restrict access to specific AI models -- **Data Policies**: Control how data is processed and stored -- **Approval Workflows**: Require approval for sensitive operations - -### 🔧 Administrative Tools - -Powerful tools for IT administrators: -- **Centralized Configuration**: Manage settings across all deployments -- **Bulk Operations**: 
Perform actions across multiple users or systems -- **Migration Tools**: Move data and settings between environments -- **Backup Management**: Automated and manual backup capabilities -- **System Diagnostics**: Comprehensive troubleshooting tools - -### 📞 Enterprise Support - -Dedicated support for mission-critical deployments: -- **24/7 Support**: Round-the-clock assistance for critical issues -- **Dedicated Success Manager**: Personal point of contact for your organization -- **Training Programs**: Comprehensive training for administrators and users -- **Implementation Services**: Professional deployment and configuration -- **Custom Development**: Tailored features for unique requirements - -## Deployment Options - -Choose the deployment model that fits your needs: -- **On-Premises**: Complete control with on-site deployment -- **Private Cloud**: Dedicated cloud environment just for you -- **Hybrid**: Combine on-premises and cloud capabilities -- **Multi-Region**: Deploy across multiple geographic regions -- **Air-Gapped**: Completely isolated environments for maximum security - -## Getting Started - -Ready to deploy Jan Enterprise? - -1. **Assessment**: Our team evaluates your requirements -2. **Pilot Program**: Start with a small-scale deployment -3. **Training**: Comprehensive training for your team -4. **Full Deployment**: Roll out to your entire organization -5. **Ongoing Support**: Continuous support and optimization - -Transform how your organization uses AI. Contact our enterprise team to learn more. 
- -[Contact Enterprise Sales](mailto:enterprise@jan.ai) • [Enterprise Documentation](/docs/enterprise) • [Security Whitepaper](/security-whitepaper) \ No newline at end of file diff --git a/website/src/content/changelog/2025-02-18-advanced-llama.cpp-settings.mdx b/website/src/content/changelog/2025-02-18-advanced-llama.cpp-settings.mdx deleted file mode 100644 index b24c72908b..0000000000 --- a/website/src/content/changelog/2025-02-18-advanced-llama.cpp-settings.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "You can now tweak llama.cpp settings, and add any cloud model!" -version: 0.5.15 -description: "Jan v0.5.15 is out: Advanced llama.cpp settings and cloud model support" -date: 2025-02-18 -ogImage: "/assets/images/changelog/jan-v0-5-15-llamacpp-settings.jpg" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -👋 Jan v0.5.15 is out with exciting new features and improvements! - -## Highlights 🎉 - -- Improved llama.cpp settings for better model control and performance -- Install and use any cloud model seamlessly -- Enhanced hardware controls for better resource management -- New models supported: - - DeepSeek AI - - Google Gemini - - OpenAI o3-mini - - R1 distills - -Update your Jan or [download the latest](https://jan.ai/). - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.15). 
diff --git a/website/src/content/changelog/2025-03-05-ui-redesign.mdx b/website/src/content/changelog/2025-03-05-ui-redesign.mdx deleted file mode 100644 index fb47b7b216..0000000000 --- a/website/src/content/changelog/2025-03-05-ui-redesign.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: "Jan v0.6.3: UI Renaissance" -version: 0.6.3 -description: "Complete interface redesign with improved UX, dark mode, and accessibility features" -date: 2025-03-05 -image: "/assets/images/changelog/ui-redesign.png" -featured: false ---- - -## A Beautiful New Beginning 🎨 - -Jan v0.6.3 introduces our most comprehensive UI overhaul yet. Every pixel has been reconsidered, every interaction refined. The result? A beautiful, intuitive interface that gets out of your way and lets you focus on what matters - conversations with AI. - -### 🎯 Design Philosophy - -Our new design principles: -- **Clarity First**: Remove visual noise, highlight what's important -- **Consistent Language**: Unified design patterns throughout the app -- **Accessibility Focus**: Usable by everyone, regardless of ability -- **Performance Minded**: Beautiful interfaces that are also fast -- **Future Ready**: Scalable design system for upcoming features - -### 🌙 Enhanced Dark Mode - -Dark mode, reimagined: -- **True Black Option**: Perfect for OLED displays and low-light use -- **Adaptive Contrast**: Automatically adjusts based on ambient light -- **Custom Accent Colors**: Choose your preferred highlight color -- **Smart Switching**: Follows system preferences or custom schedule -- **Reduced Eye Strain**: Carefully calibrated colors for long usage sessions - -### 💬 Conversation Experience - -Completely redesigned chat interface: -- **Improved Message Bubbles**: Better readability and visual hierarchy -- **Smart Typography**: Optimal font sizes and spacing for every device -- **Code Highlighting**: Syntax highlighting for 200+ programming languages -- **Math Rendering**: Beautiful LaTeX math equation display -- **Image 
Gallery**: Enhanced image viewing with zoom and navigation - -### 🎛️ Settings Overhaul - -Settings that make sense: -- **Organized Categories**: Logical grouping of related options -- **Search Settings**: Find any setting instantly -- **Visual Previews**: See changes before applying them -- **Quick Actions**: Common tasks accessible with fewer clicks -- **Import/Export**: Backup and restore your entire configuration - -### 📱 Responsive Design - -Perfect on every screen: -- **Mobile Optimized**: Touch-friendly interface for tablets and phones -- **Desktop Polish**: Take advantage of larger screens and precise input -- **Window Management**: Better handling of multiple windows and panels -- **Flexible Layouts**: Adapt to any screen size or orientation -- **High DPI Support**: Crisp on retina and 4K displays - -### ♿ Accessibility Improvements - -Jan for everyone: -- **Screen Reader Support**: Full compatibility with assistive technologies -- **Keyboard Navigation**: Complete interface control without a mouse -- **High Contrast Mode**: Enhanced visibility for low-vision users -- **Font Scaling**: Respect system font size preferences -- **Motion Controls**: Reduced motion options for sensitive users - -### 🎨 Theming System - -Express your style: -- **Built-in Themes**: 12 carefully crafted color schemes -- **Custom Themes**: Create your own with our theme editor -- **Theme Sharing**: Import themes created by the community -- **Seasonal Themes**: Special themes for holidays and events -- **Auto-Theming**: Themes that change based on time of day - -### 🔍 Improved Navigation - -Find everything faster: -- **Global Search**: Search conversations, settings, and help instantly -- **Breadcrumbs**: Always know where you are in the app -- **Quick Switcher**: Jump between conversations with keyboard shortcuts -- **Recent Items**: Quick access to your most-used features -- **Favorites System**: Pin important conversations and tools - -### 🎪 Animation & Transitions - -Delightful 
micro-interactions: -- **Smooth Transitions**: Fluid movement between screens and states -- **Loading Animations**: Engaging feedback during wait times -- **Hover Effects**: Subtle responses to mouse interaction -- **Focus Indicators**: Clear visual feedback for keyboard users -- **Performance Optimized**: 60fps animations that don't drain battery - -### 📊 Visual Data - -Information design that informs: -- **Usage Charts**: Beautiful visualizations of your AI usage -- **Performance Graphs**: Real-time system performance monitoring -- **Progress Indicators**: Clear feedback for long-running operations -- **Status Displays**: At-a-glance system health information -- **Comparison Views**: Side-by-side analysis of models and settings - -### 🚀 Performance Improvements - -Beauty with brains: -- **Faster Rendering**: 40% improvement in interface responsiveness -- **Memory Efficiency**: Reduced RAM usage for smoother operation -- **Bundle Optimization**: Smaller app size, faster loading -- **Asset Loading**: Progressive loading for smoother startup -- **Animation Performance**: Hardware-accelerated animations - -## Migration Guide - -Your existing data and settings are automatically preserved. Some visual elements may look different, but all functionality remains the same or improved. - -**New Users**: Welcome to the most beautiful Jan yet! -**Existing Users**: Your themes and customizations will be migrated automatically. - -Experience the new Jan. Clean, beautiful, and more powerful than ever. 
- -[Download Jan v0.6.3](https://jan.ai/) • [UI Guide](/docs/interface) • [Accessibility Documentation](/docs/accessibility) \ No newline at end of file diff --git a/website/src/content/changelog/2025-03-14-jan-security-patch.mdx b/website/src/content/changelog/2025-03-14-jan-security-patch.mdx deleted file mode 100644 index 19d5bbf9e6..0000000000 --- a/website/src/content/changelog/2025-03-14-jan-security-patch.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Security fixes and UI improvements" -version: 0.5.16 -description: "Jan v0.5.16 is out: Security fixes and major improvements to Model Hub and chat experience" -date: 2025-03-14 -ogImage: "/assets/images/changelog/jan-v0-5-16-security-patch.jpg" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -👋 Jan v0.5.16 is out: Security fixes (found in Cortex) and a few improvements! - -## Highlights 🎉 - -- Redesigned Model Hub for better user experience -- Faster chat response times -- Cleaner layout and improved model picker -- New model support: - - GPT-4.5 preview - - Claude 3.7 Sonnet - - Gemma 3 - -⚠️ Important: This release includes critical security fixes. We'll share more details about these security improvements soon. - -Update your Jan or [download the latest](https://jan.ai/). - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.16). 
diff --git a/website/src/content/changelog/2025-04-10-mobile-launch.mdx b/website/src/content/changelog/2025-04-10-mobile-launch.mdx deleted file mode 100644 index 1f75b15a97..0000000000 --- a/website/src/content/changelog/2025-04-10-mobile-launch.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: "Jan Mobile: AI in Your Pocket" -version: 1.0.0 -description: "Introducing Jan Mobile - full-featured AI assistant for iOS and Android with cloud sync" -date: 2025-04-10 -image: "/assets/images/changelog/jan-mobile.png" -featured: true ---- - -## AI Goes Mobile 📱 - -After months of development, we're thrilled to announce Jan Mobile - bringing the full power of Jan's AI capabilities to your smartphone. Chat with AI anywhere, sync across devices, and never miss a conversation. - -### 📱 Native Mobile Experience - -Built from the ground up for mobile: -- **Native Performance**: Smooth, responsive interface optimized for touch -- **Offline Capable**: Continue conversations even without internet -- **Battery Optimized**: Efficient background processing preserves battery life -- **Dark Mode**: Beautiful dark theme that's easy on the eyes -- **Haptic Feedback**: Tactile responses for better interaction - -### ☁️ Seamless Cloud Sync - -Your conversations follow you everywhere: -- **Real-time Sync**: Conversations update instantly across all devices -- **Conflict Resolution**: Smart merging when editing on multiple devices -- **Selective Sync**: Choose which conversations to sync to mobile -- **End-to-End Encryption**: Your data remains private and secure -- **Offline Queue**: Messages sync when connection returns - -### 🎯 Mobile-First Features - -Designed for how you use your phone: -- **Voice Input**: Speak your messages instead of typing -- **Voice Output**: AI responses read aloud with natural voices -- **Quick Actions**: Swipe gestures for common tasks -- **Share Integration**: Share content to Jan from any app -- **Widget Support**: Quick access from your home screen - -### 🔒 
Privacy & Security - -Your privacy, protected: -- **Local Processing**: Sensitive conversations can stay on-device -- **Biometric Lock**: Secure Jan with fingerprint or face recognition -- **Auto-Lock**: Automatically locks after inactivity -- **Private Mode**: Conversations that don't sync to cloud -- **Data Controls**: Full control over what data is stored where - -### 🤖 Full Model Support - -Access all your favorite models: -- **Cloud Models**: GPT-4, Claude, Gemini, and more -- **Local Models**: Run smaller models directly on your phone -- **Model Switching**: Change models mid-conversation -- **Smart Routing**: Automatically choose the best model for your query -- **Offline Models**: Basic AI capabilities without internet - -### 📸 Rich Media Support - -Beyond just text: -- **Image Analysis**: Upload photos for AI to analyze and discuss -- **Camera Integration**: Take photos directly in Jan for analysis -- **Voice Messages**: Send and receive voice messages -- **File Sharing**: Share documents, PDFs, and more -- **Link Previews**: Rich previews for shared links - -### 🎨 Personalization - -Make Jan your own: -- **Custom Themes**: Choose colors and appearance -- **Chat Backgrounds**: Personalize your conversation view -- **Notification Settings**: Control when and how you're notified -- **Assistant Personalities**: Different AI personalities for different contexts -- **Quick Replies**: Set up common responses - -### 🔄 Cross-Platform Features - -Unified experience across desktop and mobile: -- **Universal Search**: Find conversations across all devices -- **Shared Assistants**: Use the same AI assistants everywhere -- **Unified Settings**: Preferences sync between devices -- **File Access**: Access files shared from desktop -- **Continuous Conversations**: Start on desktop, continue on mobile - -### 📊 Usage Analytics - -Understand your AI usage: -- **Conversation Stats**: See your most active conversations -- **Model Usage**: Track which models you use most -- 
**Time Analytics**: Understand your usage patterns -- **Export Data**: Download your conversation history -- **Privacy Dashboard**: See exactly what data is stored - -### 🌟 Launch Features - -Available from day one: -- **Free Tier**: Full functionality with generous usage limits -- **Pro Features**: Enhanced models and advanced features -- **Family Sharing**: Share Pro features with family members -- **Student Discount**: Special pricing for students -- **Enterprise Options**: Advanced security and management - -## Platform Availability - -- **iOS**: Available on the App Store (iOS 15.0+) -- **Android**: Available on Google Play (Android 8.0+) -- **Cross-Platform**: Full feature parity between platforms - -## Getting Started - -1. Download Jan Mobile from your app store -2. Sign in with your Jan account (or create one) -3. Your desktop conversations automatically appear -4. Start chatting with AI on the go! - -Your AI assistant is now truly everywhere. Download Jan Mobile today and experience AI without boundaries. - -[Download for iOS](https://apps.apple.com/app/jan-ai) • [Download for Android](https://play.google.com/store/apps/jan) • [Mobile Guide](/docs/mobile) \ No newline at end of file diff --git a/website/src/content/changelog/2025-05-14-jan-qwen3-patch.mdx b/website/src/content/changelog/2025-05-14-jan-qwen3-patch.mdx deleted file mode 100644 index 24d7841534..0000000000 --- a/website/src/content/changelog/2025-05-14-jan-qwen3-patch.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Qwen3 support is now more reliable." 
-version: 0.5.17 -description: "Jan v0.5.17 is out: Qwen3 support is now more reliable" -date: 2025-05-14 -ogImage: "/assets/images/changelog/jan-v0-5-17-gemm3-patch.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -👋 Jan v0.5.17 is out: Qwen3 support is now more reliable - -## Highlights 🎉 - -- Improved Qwen3 support with cleaner token output -- Clearer install and quickstart docs -- UI polish and bug fixes throughout - -Update your Jan or [download the latest](https://jan.ai/). - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.5.17). diff --git a/website/src/content/changelog/2025-05-20-performance-boost.mdx b/website/src/content/changelog/2025-05-20-performance-boost.mdx deleted file mode 100644 index 489bb8eea9..0000000000 --- a/website/src/content/changelog/2025-05-20-performance-boost.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "Jan v0.6.4: Performance Powerhouse" -version: 0.6.4 -description: "Massive speed improvements, GPU optimization, and streamlined model management" -date: 2025-05-20 -image: "/assets/images/changelog/performance-boost.png" -featured: false ---- - -## Speed Like Never Before ⚡ - -Jan v0.6.4 delivers our biggest performance update yet. Models load faster, inference is smoother, and memory usage is dramatically reduced. This is the Jan you've been waiting for. 
- -### 🚀 Inference Speed Improvements - -Dramatic performance gains across the board: -- **3x Faster Model Loading**: Optimized model initialization reduces wait times -- **50% Faster Inference**: Improved CUDA kernels and memory management -- **Instant Model Switching**: Switch between models with near-zero delay -- **Background Preloading**: Frequently used models stay ready in memory -- **Smart Caching**: Intelligent context caching reduces repeated work - -### 🎯 GPU Optimization Revolution - -Completely rewritten GPU acceleration: -- **Auto-GPU Detection**: Automatically finds and uses your best GPU -- **Multi-GPU Support**: Distribute model layers across multiple GPUs -- **Memory Optimization**: 40% reduction in VRAM usage -- **Dynamic Offloading**: Automatically balance between GPU and CPU -- **CUDA 12 Support**: Latest NVIDIA drivers and optimizations - -### 🧠 Smarter Memory Management - -Revolutionary memory handling: -- **Adaptive Memory**: Automatically adjusts to available system memory -- **Memory Pressure Detection**: Gracefully handles low-memory situations -- **Efficient Model Unloading**: Frees memory when models aren't needed -- **Context Length Optimization**: Handle longer conversations without slowdown -- **Memory Usage Dashboard**: Real-time visibility into memory consumption - -### 📱 Startup Speed Breakthrough - -Jan now starts in seconds, not minutes: -- **Cold Start Optimization**: 5x faster first launch -- **Background Services**: Core services start in parallel -- **Lazy Loading**: Only load components when needed -- **Configuration Caching**: Settings load instantly -- **Progressive Initialization**: UI appears immediately, features load progressively - -### 🔧 Model Management Overhaul - -Streamlined model experience: -- **One-Click Downloads**: Simplified model acquisition -- **Download Resume**: Interrupted downloads continue automatically -- **Parallel Downloads**: Download multiple models simultaneously -- **Storage Optimization**: 
Automatic cleanup of unused model files -- **Model Recommendations**: AI suggests optimal models for your hardware - -### 💾 Storage Efficiency - -Dramatic reduction in disk usage: -- **Model Compression**: 30% smaller model files without quality loss -- **Duplicate Detection**: Automatically removes duplicate models -- **Incremental Updates**: Only download model changes, not entire files -- **Smart Cleanup**: Removes temporary files and caches automatically -- **Storage Analytics**: See exactly what's using your disk space - -### 🌐 Network Optimizations - -Faster downloads and better connectivity: -- **CDN Integration**: Download models from the closest server -- **Connection Pooling**: Efficient network resource usage -- **Retry Logic**: Automatic recovery from network interruptions -- **Bandwidth Adaptation**: Adjusts download speed to network conditions -- **Offline Mode**: Better handling when internet is unavailable - -### 🔍 Performance Monitoring - -New tools to understand performance: -- **Real-time Metrics**: See inference speed, memory usage, GPU utilization -- **Performance History**: Track performance over time -- **Bottleneck Detection**: Identify what's slowing down your system -- **Benchmark Tools**: Compare performance across different configurations -- **Performance Profiles**: Save optimal settings for different use cases - -### 🐛 Critical Fixes - -Major stability improvements: -- Fixed memory leaks during long conversations -- Resolved GPU driver compatibility issues -- Eliminated random crashes during model switching -- Fixed model corruption during interrupted downloads -- Resolved race conditions in multi-threaded operations - -## Technical Details - -This release includes fundamental changes to our inference engine, memory management, and GPU acceleration systems. While backwards compatible, you may notice different memory usage patterns and significantly improved performance. - -Experience the fastest Jan ever. 
Download v0.6.4 and feel the difference. - -[Download Jan v0.6.4](https://jan.ai/) • [Performance Guide](/docs/performance) • [Release Notes](https://github.com/menloresearch/jan/releases/tag/v0.6.4) \ No newline at end of file diff --git a/website/src/content/changelog/2025-06-15-mcp-revolution.mdx b/website/src/content/changelog/2025-06-15-mcp-revolution.mdx deleted file mode 100644 index 3bf1b03e4f..0000000000 --- a/website/src/content/changelog/2025-06-15-mcp-revolution.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Jan v0.6.5: MCP Revolution - Connect AI to Everything" -version: 0.6.5 -description: "Introducing Model Context Protocol support, browser automation, and powerful tool integrations" -date: 2025-06-15 -image: "../../assets/images/changelog/mcp-revolution.gif" -featured: true ---- - -## The MCP Era Begins 🚀 - -Jan v0.6.5 introduces groundbreaking Model Context Protocol (MCP) support, transforming Jan from a simple chat interface into a powerful AI automation platform. Connect your AI to browsers, APIs, databases, and countless tools. 
- -### 🔗 Model Context Protocol Integration - -MCP opens up infinite possibilities: -- **Universal Tool Access**: Connect to any service that supports MCP -- **Real-time Data**: Access live information from APIs and databases -- **Browser Automation**: Control web browsers directly through AI commands -- **File System Access**: Read, write, and manipulate files with AI assistance -- **Custom Tools**: Build your own MCP servers for specialized workflows - -### 🌐 Built-in MCP Servers - -Launch with powerful integrations: -- **Browser Control**: Automate web tasks, scrape data, fill forms -- **File Management**: AI-powered file operations and organization -- **API Integration**: Connect to REST APIs, GraphQL endpoints -- **Database Access**: Query and update databases through natural language -- **Git Operations**: Manage repositories with AI assistance - -### 🎯 Smart Tool Discovery - -Jan automatically discovers and configures MCP tools: -- **Auto-detection**: Finds available MCP servers on your system -- **One-click Setup**: Enable tools with simple toggle switches -- **Permission Control**: Fine-grained control over tool access -- **Usage Analytics**: Track which tools your AI uses most - -### 🛡️ Enhanced Security Framework - -Built with security as a priority: -- **Sandboxed Execution**: Tools run in isolated environments -- **Permission System**: Explicit approval for sensitive operations -- **Audit Logging**: Complete history of tool usage and permissions -- **Safe Defaults**: Conservative permissions that you can expand - -### 🎨 Redesigned Tool Interface - -Beautiful new interface for tool management: -- **Visual Tool Cards**: See available tools at a glance -- **Real-time Status**: Know when tools are active or inactive -- **Interactive Setup**: Guided configuration for complex tools -- **Usage Insights**: Understand how your AI uses different tools - -### 🔧 Developer Experience - -For MCP server developers: -- **Local Development**: Test MCP servers directly 
in Jan -- **Debug Tools**: Built-in logging and error reporting -- **Hot Reload**: Changes to MCP servers update instantly -- **Protocol Validation**: Ensure your servers follow MCP standards - -### 🚀 Performance Improvements - -Under the hood optimizations: -- **Faster Tool Loading**: MCP servers start 3x faster -- **Memory Efficiency**: Reduced memory usage for tool operations -- **Connection Pooling**: Efficient management of tool connections -- **Async Operations**: Non-blocking tool execution - -### 🌟 Coming Next - -The MCP ecosystem is just getting started: -- More built-in integrations (Slack, Discord, GitHub) -- Visual workflow builder for complex automations -- Community marketplace for sharing MCP servers -- Enterprise-grade security and compliance features - -## Breaking Changes - -- Tool permissions now require explicit user approval -- Some legacy integrations have been migrated to MCP -- Configuration format updated for better security - -Transform how you work with AI. Download Jan v0.6.5 and enter the MCP era. - -For technical details, see our [MCP documentation](/docs/mcp) and [GitHub release](https://github.com/menloresearch/jan/releases/tag/v0.6.5). diff --git a/website/src/content/changelog/2025-06-19-jan-ui-revamp.mdx b/website/src/content/changelog/2025-06-19-jan-ui-revamp.mdx deleted file mode 100644 index ae9e2d4f0f..0000000000 --- a/website/src/content/changelog/2025-06-19-jan-ui-revamp.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Jan v0.6.1 is here: It's a whole new vibe!" -version: 0.6.1 -description: "Are you ready for the sexiest UI ever?" -date: 2025-06-19 -ogImage: "/assets/images/changelog/jan-v0.6.1-ui-revamp.png" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -## Highlights 🎉 - -- Jan's been redesigned to be faster, cleaner, and easier to use. -- You can now create assistants with custom instructions and settings from a dedicated tab. 
-- You can now use Jan with Menlo's models. - -Update your Jan or [download the latest](https://jan.ai/). - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.6.1). \ No newline at end of file diff --git a/website/src/content/changelog/2025-06-26-jan-nano-mcp.mdx b/website/src/content/changelog/2025-06-26-jan-nano-mcp.mdx deleted file mode 100644 index 9380c22725..0000000000 --- a/website/src/content/changelog/2025-06-26-jan-nano-mcp.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Jan v0.6.3 brings new features and models!" -version: 0.6.3 -description: "Unlocking MCP for everyone and bringing our latest model to Jan!" -date: 2025-06-26 -ogImage: "/assets/images/changelog/jn128.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -## Highlights 🎉 - -- We have added Model Context Protocol (MCP) support to the stable build of Jan. It needs to be enabled in the General Settings tab. -- Jan now supports Menlo's latest model, Jan-Nano-128k. -- Some hot fixes and improvements. - -Update your Jan or [download the latest](https://jan.ai/). - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.6.3). \ No newline at end of file diff --git a/website/src/content/changelog/2025-07-17-responsive-ui.mdx b/website/src/content/changelog/2025-07-17-responsive-ui.mdx deleted file mode 100644 index ac27d3630e..0000000000 --- a/website/src/content/changelog/2025-07-17-responsive-ui.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Jan v0.6.5 brings responsive UI and MCP examples!" -version: 0.6.5 -description: "New MCP examples, updated pages, and bug fixes!" 
-date: 2025-07-17 -ogImage: "/assets/images/changelog/release_v0_6_5.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -## Highlights 🎉 - -Jan v0.6.5 brings responsive UI improvements, enhanced model provider management, and better Linux compatibility alongside -new MCP examples. - -- Support responsive UI on Jan -- Rework of Model Providers UI -- Bump version of llama.cpp -- Fix the bug where fetching models from custom provider can cause app to crash -- AppImage can now render on wayland + mesa - -Update your Jan or [download the latest](https://jan.ai/). - -For more details, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.6.5). \ No newline at end of file diff --git a/website/src/content/changelog/2025-07-31-llamacpp-tutorials.mdx b/website/src/content/changelog/2025-07-31-llamacpp-tutorials.mdx deleted file mode 100644 index fc0310174d..0000000000 --- a/website/src/content/changelog/2025-07-31-llamacpp-tutorials.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: "Jan v0.6.6: Enhanced llama.cpp integration and smarter model management" -version: 0.6.6 -description: "Major llama.cpp improvements, Hugging Face provider support, and refined MCP experience" -date: 2025-07-31 -ogImage: "/assets/images/changelog/changelog0.6.6.gif" ---- - -import ChangelogHeader from '@/components/Changelog/ChangelogHeader.astro' - - - -## Highlights 🎉 - -Jan v0.6.6 delivers significant improvements to the llama.cpp backend, introduces Hugging Face as a -built-in provider, and brings smarter model management with auto-unload capabilities. This release -also includes numerous MCP refinements and platform-specific enhancements. 
- -### 🚀 Major llama.cpp Backend Overhaul - -We've completely revamped the llama.cpp integration with: -- **Smart Backend Management**: The backend now auto-updates and persists your settings properly -- **Device Detection**: Jan automatically detects available GPUs and hardware capabilities -- **Direct llama.cpp Access**: Models now interface directly with llama.cpp (previously hidden behind Cortex) -- **Automatic Migration**: Your existing models seamlessly move from Cortex to direct llama.cpp management -- **Better Error Handling**: Clear error messages when models fail to load, with actionable solutions -- **Per-Model Overrides**: Configure specific settings for individual models - -### 🤗 Hugging Face Cloud Router Integration - -Connect to Hugging Face's new cloud inference service: -- Access pre-configured models running on various providers (Fireworks, Together AI, and more) -- Hugging Face handles the routing to the best available provider -- Simplified setup with just your HF token -- Non-deletable provider status to prevent accidental removal -- Note: Direct model ID search in Hub remains available as before - -### 🧠 Smarter Model Management - -New intelligent features to optimize your system resources: -- **Auto-Unload Old Models**: Automatically free up memory by unloading unused models -- **Persistent Settings**: Your model capabilities and settings now persist across app restarts -- **Zero GPU Layers Support**: Set N-GPU Layers to 0 for CPU-only inference -- **Memory Calculation Improvements**: More accurate memory usage reporting - -### 🎯 MCP Refinements - -Enhanced MCP experience with: -- Tool approval dialog improvements with scrollable parameters -- Better experimental feature edge case handling -- Fixed tool call button disappearing issue -- JSON editing tooltips for easier configuration -- Auto-focus on "Always Allow" action for smoother workflows - -### 📚 New MCP Integration Tutorials - -Comprehensive guides for powerful MCP integrations: -- 
**Canva MCP**: Create and manage designs through natural language - generate logos, presentations, and marketing materials directly from chat -- **Browserbase MCP**: Control cloud browsers with AI - automate web tasks, extract data, and monitor sites without complex scripting -- **Octagon Deep Research MCP**: Access finance-focused research capabilities - analyze markets, investigate companies, and generate investment insights - -### 🖥️ Platform-Specific Improvements - -**Windows:** -- Fixed terminal windows popping up during model loading -- Better process termination handling -- VCRuntime included in installer for compatibility -- Improved NSIS installer with app running checks - -**Linux:** -- AppImage now works properly with newest Tauri version and it went from almost 1GB to less than 200MB -- Better Wayland compatibility - -**macOS:** -- Improved build process and artifact naming - -### 🎨 UI/UX Enhancements - -Quality of life improvements throughout: -- Fixed rename thread dialog showing incorrect thread names -- Assistant instructions now have proper defaults -- Download progress indicators remain visible when scrolling -- Better error pages with clearer messaging -- GPU detection now shows accurate backend information -- Improved clickable areas for better usability - -### 🔧 Developer Experience - -Behind the scenes improvements: -- New automated QA system using CUA (Computer Use Automation) -- Standardized build process across platforms -- Enhanced error stream handling and parsing -- Better proxy support for the new downloader -- Reasoning format support for advanced models - -### 🐛 Bug Fixes - -Notable fixes include: -- Factory reset no longer fails with access denied errors -- OpenRouter provider stays selected properly -- Model search in Hub shows latest data only -- Temporary download files are cleaned up on cancel -- Legacy threads no longer appear above new threads -- Fixed encoding issues on various platforms - -## Breaking Changes - -- Models 
previously managed by Cortex now interface directly with llama.cpp (automatic migration included) -- Some sampling parameters have been removed from the llama.cpp extension for consistency -- Cortex extension is deprecated in favor of direct llama.cpp integration - -## Coming Next - -We're working on expanding MCP capabilities, improving model download speeds, and adding more provider -integrations. Stay tuned! - -Update your Jan or [download the latest](https://jan.ai/). - -For the complete list of changes, see the [GitHub release notes](https://github.com/menloresearch/jan/releases/tag/v0.6.6). diff --git a/website/src/content/docs/handbook/brand/index.mdx b/website/src/content/docs/handbook/brand/index.mdx deleted file mode 100644 index 74f723dc79..0000000000 --- a/website/src/content/docs/handbook/brand/index.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Brand & Identity -asIndexPage: true ---- \ No newline at end of file diff --git a/website/src/content/docs/handbook/comp/bonus.mdx b/website/src/content/docs/handbook/comp/bonus.mdx deleted file mode 100644 index e303215797..0000000000 --- a/website/src/content/docs/handbook/comp/bonus.mdx +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "Handbook" -description: "Jan Team Handbook" ---- - diff --git a/website/src/content/docs/handbook/comp/esop.mdx b/website/src/content/docs/handbook/comp/esop.mdx deleted file mode 100644 index 53f719ee71..0000000000 --- a/website/src/content/docs/handbook/comp/esop.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "ESOP" -description: "Employee Stock Options" ---- - - -## Shares and Equity - -Jan is [employee-owned](/handbook/why/ownership). The people building Jan own a meaningful percentage of the company over time. 
- -### Distributions - -Every 6 months, Jan distributes 1% of company equity among its team members: - -- Dec 2023: 1% distributed among 10 team members (1 part-time) -- June 2024: 1% distributed among 15 team members (4 part-time) -- Dec 2024: 1% distributed among 18 team members (5 part-time) - -Distributions are performance-based, and cover both full-time and part-time team members and open source contributors. - -This schedule is subject to change based on the discretion of the board. diff --git a/website/src/content/docs/handbook/comp/index.mdx b/website/src/content/docs/handbook/comp/index.mdx deleted file mode 100644 index 63e59206a7..0000000000 --- a/website/src/content/docs/handbook/comp/index.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Compensation" -description: "Payment and benefits schemes." -asIndexPage: true ---- - - -### Pay - -Everyone at Jan is on the same payscale, with cost of living adjustments based on [location](#location). - -Your base cash pay is determined based on realized impact, not titles. Workstream owners, responsible for the success/failure of a project & team, are compensated more than individual contributors. Folks at the same contribution levels receive equitable base pay. - -Based on your [progression](progression) speed, we have been known to adjust pay frequently and aggressively. - -### Location - -We provide a location adjustment for folks living in a high cost of living area. We use the [Numbeo](https://www.numbeo.com/cost-of-living/) index to arrive at a fair & equitable total. - -### Bonus - -Depending on location, we pay a discretionary cash bonus twice per year. The bonus typically ranges from 5-17% of your base pay, and is based on the company's performance and your individual contribution. - -### ESOP - -We offer an employee stock ownership plan to all full-time employees. The ESOP is a key part of our compensation package and is designed to align everyone's incentives with the long-term success of the company. 
Read our [ESOP policy](esop). - -### Benefits - -- **Equipment**: After the probation period, you get a $1000-$1500 USD budget for equipment. Eligible items include: laptop, monitors, keyboard, mouse, and noise cancelling headphones. Please see our [spending policy](spending) for more details. - -- **AI Subscriptions**: We are an AI native team - the expectation is to use AI to 100x your productivity. You get $100/month towards AI subscriptions and tools. Search `AI API Key Access Instructions` in Discord to get access. - -- **Language & Presentation Skills**: We grant a $100 yearly budget to improve in language skills (eg [Italki](https://www.italki.com/), [Elsa](https://elsaspeak.com/en/), [Cambly](https://www.cambly.com/english?lang=en) tutors, classes and books) diff --git a/website/src/content/docs/handbook/culture/communicate.mdx b/website/src/content/docs/handbook/culture/communicate.mdx deleted file mode 100644 index c651ad38f4..0000000000 --- a/website/src/content/docs/handbook/culture/communicate.mdx +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: How We Communicate -description: Building a global AI platform requires clear, open communication ---- - -We're a fully-remote team building open superintelligence across continents -and cultures. Clear communication isn't just nice to have—it's how we ship fast, -stay aligned, and build trust with millions of users. 
- -## Core Communication Principles - -### Default to Open - -- **Build in Public**: Development discussions happen in open GitHub issues -- **Share Progress**: Daily updates in Discord keep everyone informed -- **Document Decisions**: Important choices are recorded for future reference -- **Learn Together**: Share discoveries, failures, and insights with the team - -### Async First, Sync When Needed - -As a global team, we optimize for asynchronous communication: - -- **GitHub**: Technical discussions, code reviews, feature planning -- **Discord**: Quick questions, daily updates, community engagement -- **Documentation**: Long-form thinking, architectural decisions -- **Meetings**: Only when real-time collaboration adds value - -### Write for Clarity - -With team members from different linguistic backgrounds: - -- **Simple English**: Clear over clever, direct over diplomatic -- **Context Included**: Assume readers lack your specific context -- **Examples Help**: Show, don't just tell -- **Visual Aids**: Diagrams, screenshots, and code samples - -> Good: "The model fails to load on M1 Macs. Here's the error log and steps to reproduce..." -> Bad: "It's broken on Mac." - -## Communication Channels - -### Where to Communicate What - -| Type | Channel | Examples | -|------|---------|----------| -| **Feature Development** | GitHub Issues | New features, bug reports, technical discussions | -| **Daily Updates** | Discord #daily-updates | What you worked on, blockers, discoveries | -| **Quick Questions** | Discord team channels | "Anyone know why X happens?" 
| -| **Long-form Thinking** | GitHub Discussions / Docs | Architecture proposals, post-mortems | -| **User Support** | Discord #support | Helping users with Jan | -| **Urgent Issues** | Discord + @mention | Production bugs, security issues | - -### Response Time Expectations - -- **GitHub Issues**: Within 24-48 hours -- **Discord Questions**: Best effort, timezone dependent -- **User Bug Reports**: Acknowledge within 24 hours -- **Security Issues**: Immediate escalation - -## Communication Best Practices - -### For Engineers - -``` -✅ DO: -- Comment your code like someone will read it at 3am -- Update documentation when you change behavior -- Share WIP early for feedback -- Close the loop: report back on solutions - -❌ DON'T: -- Assume context that isn't written down -- DM technical discussions (keep them public) -- Wait until perfect to share progress -- Leave PRs without description -``` - -### For Everyone - -**Assume Positive Intent** -- We're all working toward the same goal -- Language barriers can cause misunderstandings -- Ask for clarification before assuming - -**Be Specific** -- "The download button doesn't work" → "The download button on Windows returns 404 for model X" -- "It's slow" → "Model loading takes 45 seconds on 8GB RAM" -- "Users want this" → "15 users requested this in issue #123" - -**Close Loops** -- Follow up on questions you ask -- Update issues with solutions -- Thank people who help you -- Share outcomes of discussions - -## Meetings That Work - -We minimize meetings but when we have them: - -### Before the Meeting -- Share agenda 24 hours prior -- Include pre-read materials -- State desired outcome clearly -- Invite only necessary people - -### During the Meeting -- Start with 5-minute silent reading of materials -- Stick to agenda -- Assign action items with owners -- End 5 minutes early - -### After the Meeting -- Post summary in relevant channel -- Create GitHub issues for action items -- Share recording if applicable - -## Writing 
Culture - -### Pull Requests - -```markdown -## What -Brief description of changes - -## Why -Context and motivation - -## How -Technical approach - -## Testing -How to verify it works - -## Screenshots -If UI changes -``` - -### Daily Updates - -Keep them brief but informative: - -``` -**Yesterday**: Shipped GGUF loader optimization (30% faster) -**Today**: Working on Windows installer bug #456 -**Blockers**: Need review on PR #789 -**TIL**: Quantization affects different models differently -``` - -### Documentation - -- Write docs as you code -- Include examples users can copy -- Explain the why, not just the how -- Keep it up to date or delete it - -## Community Communication - -When engaging with our open-source community: - -### Be Helpful -- No question is too basic -- Provide context and examples -- Point to relevant documentation -- Follow up on issues - -### Be Humble -- We don't have all the answers -- Users often have great ideas -- Mistakes happen, own them -- Thank contributors publicly - -### Be Human -- Personality is welcome -- Celebrate wins together -- Share the journey -- Build relationships - -## Global Team Considerations - -### Time Zones -- Post updates at consistent times -- Record important discussions -- Rotate meeting times fairly -- Respect off hours - -### Cultural Awareness -- Direct feedback styles vary by culture -- Silence doesn't mean agreement -- Ask if unsure about interpretation -- Celebrate diverse perspectives - -### Language -- English is second language for many -- Avoid idioms and slang -- Use simple, clear language -- Be patient with communication - -## Red Flags to Avoid - -- **Information Hoarding**: Share knowledge freely -- **Private Discussions**: Keep technical talk public -- **Assuming Context**: Document everything -- **Delayed Responses**: Acknowledge even if you can't solve immediately -- **Unclear Communication**: If confused, ask for clarification - -## The Jan Way - -Our communication style reflects our 
values: -- **Open**: Like our code -- **Inclusive**: Like our community -- **Clear**: Like our mission -- **Async**: Like our architecture -- **Global**: Like our vision - -Good communication at Jan isn't about perfection—it's about clarity, openness, and building together across any distance. - ---- - -*"The best code is documented code. The best decisions are documented decisions. The best team is one that communicates openly."* diff --git a/website/src/content/docs/handbook/culture/fight.mdx b/website/src/content/docs/handbook/culture/fight.mdx deleted file mode 100644 index 86fae1b201..0000000000 --- a/website/src/content/docs/handbook/culture/fight.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -title: How We Fight ---- \ No newline at end of file diff --git a/website/src/content/docs/handbook/how/sprints.mdx b/website/src/content/docs/handbook/how/sprints.mdx deleted file mode 100644 index f5588ee7d7..0000000000 --- a/website/src/content/docs/handbook/how/sprints.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: "Sprints" -description: "Jan Team Handbook" ---- - -[Jan sprints](https://github.com/orgs/menloresearch/projects/5/views/25) are every 2 weeks. - -Releases are QA'd prior to release. We never release on Fridays, unless ya'll -wanna come back on Saturday. diff --git a/website/src/content/docs/handbook/how/tools.mdx b/website/src/content/docs/handbook/how/tools.mdx deleted file mode 100644 index b54947305b..0000000000 --- a/website/src/content/docs/handbook/how/tools.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Internal Tools" -description: "Jan Team Handbook" ---- - -We like to make AI do the boring stuff. We also automate and dogfood our own tools as much as possible. - -### Tools - -- **Github**: For engineering specs, sprint planning, and documentation. -- **Discord**: For chat, voice, and video calls. -- **Google Hangouts**: For a short, daily standup. -- **Gmail**: For external communication. 
-- **HackMD/Obsidian**: For ad hoc ideas and meeting notes. -- **Excalidraw**: For whiteboarding. -- **Password Manager**: You will be invited. -- **Jan/Cortex**: A personal, private copilot. - -### Infrastructure - -We use a mix of cloud providers and build our on-premises hardware to cut costs and optimize performance. - -- Singapore Cluster: For training and inference. -- Hanoi Cluster: For CI/CD and data pipelines. -- Burst Compute: We like Runpod for larger training runs. - -See [Infra](/handbook/infra/infra) for more details. - -### CI/CD - -We have a dedicated devops/mlops team. Ping in the `infra-internal` channel for any requests. - -### Automations - -We have a dedicated automations engineer. Her goal is to automate yesterday's tasks today, so that you are doing something different tomorrow. - -Ping in the `#automations` channel for any requests. diff --git a/website/src/content/docs/handbook/hr/1-on-1s.mdx b/website/src/content/docs/handbook/hr/1-on-1s.mdx deleted file mode 100644 index e19e83fd1d..0000000000 --- a/website/src/content/docs/handbook/hr/1-on-1s.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "One on Ones" -description: "One on Ones." ---- - -We don't schedule recurring 1-on-1s, because in our experience, engineers and researchers [hate it](https://www.reddit.com/r/cscareerquestions/comments/14rkbwv/does_anybody_else_hate_their_11_meetings/). We believe it's better to address issues and provide feedback as they arise. - -If you need to discuss something with us, simply book a time on the founder or team lead's calendar. **We're always available to chat.** - -✅ Do: -- Schedule check-ins as soon as you need them -- Provide honest feedback and criticism (we appreciate it!) - -❌ Don't: -- Wait for a scheduled one-on-one to raise issues -- Ask for confirmation before scheduling a check-in - -### Exceptions - -If you'd like to establish regular check-ins, please let us know and we're very happy to set one up. 
diff --git a/website/src/content/docs/handbook/hr/interviewing.mdx b/website/src/content/docs/handbook/hr/interviewing.mdx deleted file mode 100644 index 696f44f8c1..0000000000 --- a/website/src/content/docs/handbook/hr/interviewing.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: "Interviewing" -description: "How to interview and hire outliers." ---- - -Our talent pipeline is an organizational moat. This guide walks you through our interviewing process. - -### Process - -Jan's interview process is designed to be self-serve. - -1. **Application**: Candidates apply through our [careers page](https://homebrew.bamboohr.com/careers). We use a combination of AI and human review to shortlist candidates. -2. **Initial Screening**: Shortlisted candidates are invited to a 30-minute initial screening, usually with HR/founder. -3. **2-3 Interviews**: Candidates are interviewed by 2-3 team members. Each interview is 30-45 minutes long. -4. **Founder interview**: The final interview is with one of the founders. This is a 30-minute interview. -5. **Deliberation**: Candidates are evaluated based on their interviews and portfolio. We aim to make a decision within 1 week. - -We aim to be data-driven, and each open role is tracked in our ATS (BambooHR). We use this data to improve our hiring process. - -> Historically, our acceptance rate has been less than 1% of all applications. For direct referrals, this rate is higher at >10%. - -### Preparing for Interviews - -To start interviewing, please follow these steps: - -- [ ] Indicate your interest in helping with interviews to HR. -- [ ] Shadow existing interviews (2-3) to understand our process and ask questions. -- [ ] Create a [Google Appointments link](https://workspace.google.com/intl/en_sg/resources/appointment-scheduling/) for your interviews. Make sure to set a default Google Hangouts link. Share the link with HR. -- [ ] New candidates will automatically schedule interviews with you using the Google Appointments link. 
-- [ ] If you have an upcoming interview, review their resume and prepare thoughtful questions. -- [ ] Input **detailed evaluations in BambooHR ATS** after each interview. - -### Evaluation - -We do not use a scoring system for interviews and prefer to encourage natural conversation. - -However, if you do need an evaluation template, you can use the following: - -| Criteria | Description | Response | Notes | -|--------------|----------------------------------------|--------------|--------------------------------------------| -| Technical Skills | Proficiency in relevant technical areas (AI, robotics, programming, etc.) indicated in the resume ||| -| Fundamentals | Understanding of core concepts in the candidate's field of expertise (math, statistics, ML, physics, etc.) | | | -| Problem-Solving Ability | Ability to approach and solve complex problems ||| -| Communication Skills | Clarity and effectiveness in communication ||| -| Cultural Fit | Alignment with company [values](/handbook/philosophy/humanity) and [culture](/handbook/who/curiosity) ||| -| Overall Impression | General impression of the candidate's suitability for the role ||| diff --git a/website/src/content/docs/handbook/hr/leave.mdx b/website/src/content/docs/handbook/hr/leave.mdx deleted file mode 100644 index ffbe11cb27..0000000000 --- a/website/src/content/docs/handbook/hr/leave.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Time Off" -description: "Vacation and unplanned leave policy." ---- - -We offer 14 base vacation days, unlimited sick leave, and additional public holidays based on your respective region. - -### Planning Leave - -1. Submit the days in BambooHR at least 1-2 weeks in advance -2. Inform your team as early as possible - -### Public Holidays - -We observe public holidays in our respective countries of residence. You do not need to use vacation days. - -### Illness & Unplanned Leave - -Please submit all sick leave requests in BambooHR with a medical certificate (when applicable). 
For sick leave longer than 1 week, please inform HR and your team. - -### Parental - -We offer parental leave for eligible staff. Please inform HR to coordinate parental leave. diff --git a/website/src/content/docs/handbook/hr/progression.mdx b/website/src/content/docs/handbook/hr/progression.mdx deleted file mode 100644 index d4105e07b8..0000000000 --- a/website/src/content/docs/handbook/hr/progression.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "One on Ones" -description: "One on Ones." ---- - -### Promotions - -We don't believe in titles. We believe in fair compensation and fast progression. - -> Previously, we have been known to organically promote team members 2x within 6 months of joining. - -### Perks - -- Your contributions are Open Source and public -- Speaking opportunities at conferences (we get invited often) -- Direct access to a large network of advisors -- No shortage of hard problems (you don't need to "prove" yourself before working on something interesting) -- Visibility across our hardware, software, and research projects -- Real, blunt feedback, at scale (our users are not shy!) -- Hard, tough, evergreen problems diff --git a/website/src/content/docs/handbook/hr/retreats.mdx b/website/src/content/docs/handbook/hr/retreats.mdx deleted file mode 100644 index 2560a7bd86..0000000000 --- a/website/src/content/docs/handbook/hr/retreats.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Retreats" -description: "Annual Team Retreat." ---- - -## Retreats - -We host an annual team retreat to bring the team together, bond, and plan for the year ahead. - -Previous retreats have been held in: -- 2023 (8 people): Ho Chi Minh City, Vietnam -- 2024 (16 people): Singapore -- 2025: TBA! - -## Meetups - -We encourage team members to meet up in person whenever possible. Remote work can get lonely, and we offer a small budget for regional get-togethers. 
diff --git a/website/src/content/docs/handbook/hr/side-gigs.mdx b/website/src/content/docs/handbook/hr/side-gigs.mdx deleted file mode 100644 index a5e9a73dc6..0000000000 --- a/website/src/content/docs/handbook/hr/side-gigs.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Side Gigs" -description: "Best practices around external engagements." ---- - -We have a permissive policy regarding side gigs, similar to [Posthog's](https://posthog.com/handbook/people/side-gigs). - -However, our main ask is for Jan to be your "main gig", as you will be taking a crucial role in success/failure of Jan. Manage your time in a way that is fair to the company and your colleagues. - -At the end of the day, you all own (or will own) shares of the company. Handle company IP as you would your own property. - -### Declaration - -**For work in related industries, we do ask for an upfront declaration**, so that we can clear potential conflicts of interest. - -Please fill out the following form and submit it to: `hr@menlo.ai` - -| Dates | Organization | Nature of Work | Hours/week | -|-------------|--------------|----------------|------------| -| | | | | -| | | | | -| | | | | - -### A Note on IP - -Startups & careers die because of IP disputes from moonlighting employees. Please respect all party's IP - never intermingle assets (like laptops and logins) between engagements. diff --git a/website/src/content/docs/handbook/hr/spending.mdx b/website/src/content/docs/handbook/hr/spending.mdx deleted file mode 100644 index 48e72dabff..0000000000 --- a/website/src/content/docs/handbook/hr/spending.mdx +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "Spending" -description: "Spending and reimbursement policy." ---- - - -We are incredibly frugal and intend to be around in 50 years. However, we avoid being "penny-wise & pound-foolish" and spend when it matters. 
- -### Expenses - -#### Software - -For software subscriptions and other expenses incurred as a part of [benefits](compensation#benefits), please save receipts and submit it as a reimbursement [request](#reimbursements). - -#### Large Purchases - -For larger purchases like equipment, please first submit a purchase request through `#admin-requests` in Discord. - -We can directly order for you or reimburse you for the purchase. - -#### Incidentals - -We don't have a policy for incidentals. We trust you to make the right decisions, while keeping in mind that we are a startup and every dollar counts. - -### Reimbursements - -Reimbursements are processed at the end of every month with payroll. - -Email all receipts (required) & the following form to `finance@menlo.ai`: - -| Date | Description | Amount | Currency | Exchange Rate | Total (USD) | -|------------|-------------|--------|----------|---------------|-------------| -| | | | | | | -| | | | | | | -| | | | | | | -| | | | | Grand Total | | diff --git a/website/src/content/docs/handbook/hr/travel.mdx b/website/src/content/docs/handbook/hr/travel.mdx deleted file mode 100644 index a446435cc4..0000000000 --- a/website/src/content/docs/handbook/hr/travel.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "Travel" -description: "Travel policy." ---- - - -We encourage team members to represent Jan at conferences and travel for in-person alignment - -For all work travel, we ask that you get approval from HR before confirming your trip & booking flights/hotels. - -### Bookings - -We prefer to directly book flights & hotels on your behalf, through our travel portals. - -If you need to arrange travel by yourself, please obtain a travel budget first. Save all receipts for reimbursement. - -### Per Diem - -Depending on the location, we provide a $20 USD per diem for meals and incidentals. This amount is adjustable based on the location. 
diff --git a/website/src/content/docs/handbook/index.mdx b/website/src/content/docs/handbook/index.mdx deleted file mode 100644 index f023d7bc3d..0000000000 --- a/website/src/content/docs/handbook/index.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: "Jan Team Handbook" -description: "Building superintelligence that you can own and run anywhere." ---- - -{/* TODO: Replace with Astro-compatible icons: FaDiscord, FaMap */}; -import Steps from '@/components/Steps.astro'; - -## Jan Handbook - -> Jan's Handbook is inspired by [Posthog](https://posthog.com/handbook) and [Gitlab](https://handbook.gitlab.com/). -> Thank you for showing us the way 🙏 - -### Welcome! - -This handbook explains how [Jan](https://jan.ai) works, and is public. - -We're building superintelligence that you can self-host and use locally. Not as a limitation, but as a feature. Your AI should work wherever you need it - on your laptop during a flight, on your company's servers for compliance, or in the cloud for scale. - -Jan's Handbook is a [living document](https://en.wikipedia.org/wiki/Living_document), constantly evolving as we build the future of AI ownership. - -## 1. Chapter 1: Why does Jan exist? - -- [What problem are we solving?](/handbook/why/problem) - Why AI needs to be ownable, not just rentable -- [Who we are building for](/handbook/why/icp) - People who want flexibility and control -- [Our vision for open superintelligence](/handbook/why/ownership) - An ecosystem you can own - -## 2. Chapter 2: How we make money - -- [How we make money](/handbook/money/how) - Open core + optional services -- [What is our moat](/handbook/money/moat) - Community, trust, and aligned incentives - -## 3. 
Chapter 3: Who we hire - -- [The Fast and the Curious](/handbook/who/curiosity) - Ship today, learn for tomorrow -- [Underdogs Welcome](/handbook/who/underdogs) - Talent is everywhere, opportunity isn't -- [dy/dx > y-intercept](/handbook/who/dydx) - Growth rate beats starting position -- [Obsession](/handbook/who/obsession) - We seek those who can't not build this -- [Team, not family](/handbook/who/team) - Pro sports team approach - -## 4. Chapter 4: How we work - -- [Remote-first](/handbook/how/remote) - Global team, local impact -- [Open-source](/handbook/how/open-source) - Build in public -- [How we coordinate](/handbook/how/tools) - Tools and processes -- [Sprints](/handbook/how/sprints) - Ship every week -- [How we communicate](/handbook/culture/communicate) - Clear, open, async-first - -## 5. Chapter 5: Jan's Philosophy - -- [Open Superintelligence Platform](/handbook/philosophy/open-superintelligence) - Not one model, but an ecosystem -- [Lateral Thinking with Proven Technology](/handbook/philosophy/lateral) - Innovation through integration -- [Humanity-aligned](/handbook/philosophy/humanity) - Technology that unites -- [Perpetual Optimism](/handbook/philosophy/optimism) - Building the future we want to live in - -## 6. 
Chapter 6: Team & Operations - -- [Team Structure](/handbook/team) - How we're organized -- [Compensation](/handbook/comp) - Fair pay, meaningful equity -- [HR & Culture](/handbook/hr/onboarding) - Joining and growing with Jan - ---- - -## Quick Links - -- **For new team members**: Start with [Onboarding](/handbook/hr/onboarding) -- **For contributors**: Check out our [GitHub](https://github.com/janhq) and [Discord](https://discord.gg/FTk2MvZwJH) -- **For the curious**: Read about [our vision](/handbook/why/ownership) - -## Our North Star - -We're building superintelligence that: -- **Works anywhere**: From your laptop to your data center -- **Belongs to you**: Download it, own it, modify it -- **Scales infinitely**: One person or ten thousand, same platform -- **Improves constantly**: Community-driven development - -This isn't just about making AI accessible. It's about ensuring the most transformative technology in human history can be owned by those who use it. - ---- - -*"The future of AI isn't about choosing between local or cloud. It's about having both, and everything in between, working perfectly together."* \ No newline at end of file diff --git a/website/src/content/docs/handbook/lifecycle/onboarding.mdx b/website/src/content/docs/handbook/lifecycle/onboarding.mdx deleted file mode 100644 index 9bf1bbceab..0000000000 --- a/website/src/content/docs/handbook/lifecycle/onboarding.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "Onboarding to Jan" -description: "Onboarding Checklist for New Hires." -asIndexPage: true ---- - -Welcome to Jan! We are excited to have you join our team. This guide walks you through the onboarding process. - -> You will receive an onboarding invitation morning of your first day. Prior to the scheduled onboarding call, please send your Github & Discord handles to `hr@menlo.ai`. - -### Expectations - -| Expectation | Description | -|-------------|-------------| -| **Take Initiative** | Take ownership of an area. 
If you see a problem, take it and own it to completion. Your work will often not be defined. Take the initiative to figure out what needs to be done, seek clarification, and then communicate what your plan to the team. | -| **Bias to Action** | There are many problems to solve. Don't ask for permission or try to build consensus - just take action. After 2-3 months, you should be able to show clear results having turned around a problem-filled area. | -| **Communication** | Clear and timely communication is key. If unsure, ask questions. We are a global team and respect is paramount. Disrespectful behavior is not tolerated. Focus on problem-solving, not personal attacks. Assume Hanlon’s Razor: “Never attribute to malice that which is adequately explained by lack of ~~stupidity~~ communication or ambiguity.” | -| **Mastery** | In this frontier industry, expertise comes from doing. Own your area and build mastery. | - -For more details, see our guides on [remote work](/handbook/how/remote). - -### Code of Conduct - -- **Availability and Communication**: Ensure you are available and engaged during designated work hours and scheduled meetings. -- **Work Environment**: Strive to create a quiet and distraction-free workspace whenever possible, especially on video calls. -- **Video Participation**: Video participation is expected unless there are exceptional circumstances. -- **Dress Code**: Casual attire is perfectly acceptable for meetings, but please exercise good judgment (e.g. no pajamas). -- **Vacations:** Communicate leave at least 2 weeks (1 sprint) in advance via Bamboo. -- **Emergency Leave:** Please inform Daniel, Nicole and HR in your #hr-channel if you require emergency leave. -- **Side Gigs Policy**: See [side-gigs](side-gigs). Please consult with HR on engagements with potential legal & IP implications. 
-- **Sexual harassment:** We have a zero tolerance policy against behavior of a sexual nature that could reasonably be expected to cause offense or humiliation, e.g. verbal, nonverbal, or physical conduct, via written and electronic communications. - -### Tools - -As much as possible, we build-in-public and use the following tools to asynchronously collaborate: - -- [Github](https://github.com/menloresearch) -- [Jan Discord](https://discord.gg/VSbRN3vwCD) -- [Google Workspace](https://workspace.google.com) -- [Hugging Face](https://huggingface.co/menloresearch) -- Password Manager: You will be invited -- AI Tools and API keys: Coding with AI is heavily encouraged - -### Checklists - -#### Day 1 - -- Sign all HR documents. -- Download and access all tools. -- Check calendar invites: daily standups and TGIF. -- Introduce yourself in the [`#welcome`](https://discord.gg/VSbRN3vwCD) Discord channel. -- Set up your [BambooHR](https://homebrew.bamboohr.com/home) account. -- Set up VPN. Search `VPN access instructions` in Discord for the latest instructions. -- Check out the current sprint in [Github](https://github.com/orgs/menloresearch/projects/5) -- Ask questions in your private `#hr-NAME` channel. - -
-Import **Who's out** (on leave) calendar from BambooHR - -- Go to https://homebrew.bamboohr.com/calendar. Login if needed. -- Click on the gear icon, select **iCal Feeds...** -- Select **Create Calendar Feed** under **Who's Out**. Copy the generated link. -- In Google Calendar, you can import the new calendar from URL. - -
- -#### Week 1 - -- Post daily updates in the [`#daily-updates`](https://discord.gg/AxypHJRQxd) channel prior to standup. -- Review this [Jan Handbook](https://menlo.ai/handbook). -- Push 1 PR into this Handbook. This a living document! -- Disclose side gigs with potential legal & IP implications to HR. -- Attend TGIF demos on Friday afternoon (6PM GMT+8). diff --git a/website/src/content/docs/handbook/money/how.mdx b/website/src/content/docs/handbook/money/how.mdx deleted file mode 100644 index a49701b1c5..0000000000 --- a/website/src/content/docs/handbook/money/how.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: "How we make money" -description: "Building a sustainable business around open superintelligence" ---- - -Jan is building a sustainable business that aligns our incentives with our users' -success. We believe the best way to build beneficial AI is to ensure our business -model reinforces our values: privacy, ownership, and universal access. - -## Our Business Model - -### 1. Open Core Foundation - -**Jan Desktop is free forever** -- Download and run AI models locally -- No subscriptions, no accounts required -- Full privacy and ownership of your data -- Community-driven development - -This isn't a loss leader—it's our foundation. A thriving open-source community creates: -- Trust through transparency -- Innovation through collaboration -- Distribution without marketing spend -- Feedback loops that improve the product - -### 2. 
Premium Features & Services - -We monetize through optional add-ons that enhance the Jan experience: - -**Jan Cloud (Coming Soon)** -- Optional cloud sync across devices -- Managed model hosting for teams -- Advanced collaboration features -- Pay only for what you use - -**Enterprise Support** -- Priority support and SLAs -- Custom model deployment -- Air-gapped installations -- Compliance and audit tools - -**Professional Tools** -- Advanced model fine-tuning interfaces -- Team management and permissions -- Analytics and usage insights -- API rate limit increases - -### 3. Model Marketplace - -**Curated Model Hub** -- Premium access to specialized models -- Early access to new model releases -- Commercial licensing for enterprise models -- Revenue sharing with model creators - -We use **Jan Exam** to ensure quality—whether models are free or paid, they must meet our standards. - -### 4. Hardware Integration - -**Optimized Hardware Solutions** -- Pre-configured devices for AI inference -- Plug-and-play AI workstations -- Edge deployment solutions -- Partnership with hardware manufacturers - -### 5. Training & Certification - -**Jan Certified Program** -- Professional certification for developers -- Enterprise training workshops -- Educational institution partnerships -- Community education initiatives - -## Revenue Philosophy - -### What We'll Never Do - -- **Sell your data**: Your conversations remain private -- **Lock you in**: Always provide export and migration tools -- **Paywall core features**: Local AI remains free -- **Compromise on privacy**: No ads, no tracking, no surveillance - -### What We Always Do - -- **Align incentives**: We succeed when you succeed with AI -- **Transparent pricing**: Clear, predictable costs -- **Community first**: Free tier remains fully functional -- **Open standards**: No proprietary lock-in - -## Unit Economics - -Our approach creates sustainable unit economics: - -1. **Zero marginal cost** for open-source users -2. 
**High-margin** cloud and enterprise services -3. **Network effects** as community grows -4. **Reduced CAC** through open-source distribution - -## The Flywheel - -``` -Open Source Adoption → Community Growth → Better Models → -More Users → Enterprise Adoption → Revenue → Investment in Open Source -``` - -Each turn of the wheel strengthens the next: -- More users improve models through feedback -- Better models attract enterprise customers -- Enterprise revenue funds open development -- Open development attracts more users - -## Long-term Vision - -We're building for a future where: - -- Every organization runs their own AI -- Privacy is the default, not premium -- Open models outperform closed ones -- AI infrastructure is as common as web servers - -Our business model ensures we'll be here to see it through. - -## Current Status - -As of 2024: -- ✅ Sustainable open-source project -- ✅ Growing community adoption -- 🚧 Enterprise features in development -- 🚧 Cloud services in beta -- 📅 Hardware partnerships planned - -## Join Our Mission - -Whether you're a: -- **User**: Your adoption drives our mission -- **Contributor**: Your code shapes our platform -- **Customer**: Your success funds development -- **Partner**: Your integration expands possibilities - -You're part of building the open superintelligence economy. - ---- - -*"The best business model is one where doing the right thing is also the profitable thing. That's what we're building at Jan."* diff --git a/website/src/content/docs/handbook/money/moat.mdx b/website/src/content/docs/handbook/money/moat.mdx deleted file mode 100644 index 7faedf2db2..0000000000 --- a/website/src/content/docs/handbook/money/moat.mdx +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: "What is our moat?" -description: "Building defensibility through community, trust, and aligned incentives" ---- - -Traditional moats rely on keeping secrets, locking in users, or maintaining -technical advantages. 
Jan's moat is different—it's built on openness, community, -and aligned values that are impossible to replicate with a closed approach. - -## Community as Competitive Advantage - -Our strongest moat is our community. While others guard their code, we share everything: - -### Network Effects at Scale -- Every user improves Jan for everyone else -- Bug reports, feature requests, and contributions compound -- Community support scales infinitely -- Local communities solve local problems - -### Trust Through Transparency -- Open source code earns trust closed systems can't buy -- Users verify our privacy claims themselves -- No hidden agendas or dark patterns -- Mistakes are public, fixes are collaborative - -### Innovation Velocity -- Thousands of contributors vs hundreds of employees -- Global perspectives vs Silicon Valley groupthink -- 24/7 development across time zones -- Passionate users become passionate builders - -## Technical Moats - -### Local-First Architecture -While others race to build bigger data centers, we're optimizing for edge computing: -- Years of optimization for consumer hardware -- Deep integration with local systems -- Efficient model quantization expertise -- Cross-platform compatibility knowledge - -### Privacy-Preserving Design -Privacy isn't a feature we added—it's our foundation: -- Architecture that makes surveillance impossible -- No user data to monetize or leak -- Local-first eliminates attack surfaces -- Trust that compounds over time - -### Model Agnostic Platform -We're not tied to any single model: -- Support for all open models -- Jan Exam ensures quality across providers -- Users aren't locked to our models -- Best-in-class always available - -## Business Model Alignment - -Our moat strengthens because our business model reinforces our values: - -### No Conflict of Interest -- We don't profit from user data -- No ads means no perverse incentives -- Success comes from user success -- Premium features enhance, not gatekeep - -### 
Sustainable Growth -- Open source distribution = zero CAC -- Community support = reduced support costs -- User contributions = free R&D -- Word of mouth = organic growth - -## Cultural Moats - -### Values Alignment -Users choose Jan because they share our values: -- Privacy is non-negotiable -- Ownership matters -- Local-first is the future -- Community over corporation - -### Mission-Driven Team -- We attract talent that believes in the mission -- Lower salaries offset by meaningful work -- Equity alignment with long-term vision -- Cultural fit over pure technical skills - -## The Anti-Moat Strategy - -Paradoxically, our moat comes from not trying to build traditional moats: - -### No Lock-In Creates Loyalty -- Easy to leave = users choose to stay -- Export everything = trust in the platform -- No switching costs = genuine preference -- Freedom of choice = actual choice - -### Open Source as Defense -- Can't be acquired and shut down -- Can't be feature-frozen by new management -- Community can fork if we lose our way -- Immortal through decentralization - -## Compounding Advantages - -Our moats compound over time: - -``` -Year 1: Build trust through transparency -Year 2: Community starts contributing significantly -Year 3: Network effects accelerate development -Year 4: Ecosystem becomes self-sustaining -Year 5: Platform effect makes leaving costly (by choice) -``` - -## What Can't Be Copied - -Competitors can't replicate: - -### Time and Trust -- Years of consistent privacy-first decisions -- Track record of putting users first -- Community relationships built over time -- Reputation for reliability and openness - -### Community Culture -- Shared values and mission -- Collaborative problem-solving approach -- Global perspective on AI needs -- Bottom-up innovation mindset - -### Architectural Decisions -- Local-first can't be bolted onto cloud-first -- Privacy can't be added to surveillance systems -- Community-driven can't be faked by corporations -- Open source 
commitment can't be half-hearted - -## The Ultimate Moat - -Our ultimate moat is simple: **we're building what we'd want to use**. - -- We're users of our own platform -- We feel the pain points personally -- We can't betray users without betraying ourselves -- Our incentives perfectly align with our community - -## Sustainable Defensibility - -Traditional moats erode: -- Patents expire -- Technical advantages get copied -- Network effects can shift platforms -- Regulations can break monopolies - -Jan's moats strengthen: -- Trust compounds daily -- Community grows stronger -- Values attract like-minded people -- Open source ensures immortality - -## Conclusion - -Our moat isn't about keeping others out—it's about bringing everyone in. By building in the open, aligning our incentives with users, and creating genuine value, we're building defensibility that transcends traditional business strategy. - -The question isn't "How do we prevent competition?" but rather "How do we make competition irrelevant by building something that can't exist any other way?" - -That's our moat: being the platform that puts users first, not because it's good business, but because it's the only way to build the AI future we want to live in. - ---- - -*"The strongest moat is a community that would rebuild you if you disappeared."* diff --git a/website/src/content/docs/handbook/philosophy/humanity.mdx b/website/src/content/docs/handbook/philosophy/humanity.mdx deleted file mode 100644 index c8a1e68077..0000000000 --- a/website/src/content/docs/handbook/philosophy/humanity.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Humanity-aligned" -description: "Jan exists because technology lets humanity work as one." ---- - - -![Our Blessed Kingdom](../../../../assets/tom_gauld.png) - -> "The imagined community of the nation is always limited and sovereign, yet human connections and cooperation stretch far beyond borders." — Benedict Anderson - -## Why this matters - -- AI is geopolitical. 
Nation-states compete to control it -- We are concerned about those using AI to stoke division, profit from lies, and spur conflict -- Some of us come from places where war isn't distant—it's family history -- Technology should unite, not divide - -## Who we are - -- Remote team across 7 countries -- Diverse backgrounds, united by mission -- Many of us are [third culture kids](https://en.wikipedia.org/wiki/Third_culture_kid)—growing up between worlds -- We often find ourselves misunderstood: an Asian-majority team, English-speaking, influenced by global philosophies -- Crossroad cultures shape us: - - Turkiye: Europe, Asia, Middle East converge - - Singapore: Trade hub bridging worlds -- We respect each other's cultures and build on shared values - -## Jan's stance - -- Humanity first. We build for people, not governments or factions -- AI should enable: - - Shared prosperity - - Universal education - - Peaceful collaboration -- Technology **must** empower humanity to do more—together - -## The bigger picture - -- Human history is one of scaling cooperation: - - Small-scale [kin groups](https://www.sciencedirect.com/science/article/pii/S0960982219303343) → diverse political formations → [modern nation-states](https://en.wikipedia.org/wiki/Westphalian_system) → global networks - - Empires rose and fell. Nationalism united and divided. Globalization connected and excluded -- History doesn't progress. It moves—messy, cyclical, and full of contradiction -- Technology changes the terrain: - - Like ant colonies forming complex systems from simple interactions, humans have always built networks beyond central control - - Complexity emerges from countless small decisions—but unlike ants, we carry ideologies, ambitions, and fears -- AI is another fork in the road. It can reinforce old hierarchies or dismantle them. 
It can be used to surveil or to liberate - -## Why we exist - -- 30 people, from different countries, met online to build together -- The internet enables connections that were impossible a generation ago -- Ideas cross borders: an anthropologist in Turkiye collaborates with a roboticist in Saigon -- Jan exists because technology lets humanity work as one - -## Our vision - -- AI can accelerate global coordination and shared progress -- Our goal: help humanity align, collaborate, and solve collective challenges -- Success = contributing to humanity's long arc toward unity - -If our work helps the world coordinate better—even slightly—we've done something that matters diff --git a/website/src/content/docs/handbook/philosophy/lateral.mdx b/website/src/content/docs/handbook/philosophy/lateral.mdx deleted file mode 100644 index b783a08805..0000000000 --- a/website/src/content/docs/handbook/philosophy/lateral.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: "Lateral Thinking with Proven Technology" -description: "Building the future with battle-tested tools" ---- - -> "Innovation is not about using the newest technology, but about using technology in new ways." — Adapted from Nintendo's philosophy - -## Our Approach - -At Jan, we don't chase the bleeding edge for its own sake. We take proven, battle-tested technologies and combine them in innovative ways to solve real problems. This philosophy shapes everything we build. - -## Why Proven Technology Wins - -### Stability Over Novelty -- **GGUF Format**: We didn't invent a new model format. We adopted the community standard that already works across platforms -- **llama.cpp**: Years of optimization by brilliant minds—why reinvent when we can build upon? 
-- **Tauri + Web Tech**: Proven UI stack that works everywhere, letting us focus on AI innovation -- **SQLite**: The world's most deployed database for local data—perfect for privacy-first architecture - -### Accessibility Through Maturity -When we use proven technology: -- Hardware requirements are well understood -- Optimization paths are clear -- Community knowledge exists -- Documentation is comprehensive -- Edge cases are known - -This means Jan works on more devices, for more people, with fewer surprises. - -## Lateral Innovation Examples - -### Local AI That Actually Works -**Traditional approach**: Build massive cloud infrastructure, require constant internet -**Our approach**: Use proven quantization techniques to run powerful models on consumer hardware - -### Privacy Without Compromise -**Traditional approach**: Complex encryption and privacy theater -**Our approach**: Simply don't collect data. Use local SQLite. Problem solved. - -### Universal Model Support -**Traditional approach**: Proprietary model formats and exclusive partnerships -**Our approach**: Support the open GGUF standard that the community already uses - -### Cross-Platform Without Complexity -**Traditional approach**: Native apps for each platform, massive development overhead -**Our approach**: One codebase using Electron, works everywhere, ships faster - -## The Power of Constraints - -Working with proven technology creates healthy constraints: - -### Resource Efficiency -- Can't throw infinite compute at problems -- Must optimize for consumer hardware -- Forces elegant solutions over brute force -- Makes us accessible globally, not just in rich markets - -### Clear Boundaries -- Known limitations guide design decisions -- Realistic about what's possible today -- Focus on solving real problems now -- Build stepping stones to the future - -### Community Alignment -- Use formats the community knows -- Build on protocols people trust -- Contribute improvements back upstream -- Stand on 
the shoulders of giants - -## Innovation Through Integration - -Our innovation comes from how we combine proven pieces: - -``` -llama.cpp (proven inference) - + GGUF (standard format) - + Electron (cross-platform UI) - + SQLite (local data) - + MCP (extensibility) - = Jan (accessible AI for everyone) -``` - -Each component is battle-tested. The magic is in the combination. - -## Real Problems, Real Solutions - -This philosophy keeps us grounded: - -### What We Build -- AI that runs on the laptop you already own -- Privacy that doesn't require a PhD to understand -- Tools that work offline in rural areas -- Features users actually need today - -### What We Don't Build -- Exotic architectures that need exotic hardware -- Complex systems that break in simple ways -- Features that demo well but fail in practice -- Solutions looking for problems - -## The Compound Effect - -Using proven technology creates compounding benefits: - -1. **Faster Development**: Less time debugging novel approaches -2. **Better Reliability**: Years of battle-testing by others -3. **Easier Adoption**: Users already understand the patterns -4. **Stronger Ecosystem**: Can leverage existing tools and knowledge -5. **Lower Costs**: Commodity hardware and proven optimizations - -## Learning from History - -Great innovations often use "old" technology in new ways: -- The iPhone used existing touchscreen tech -- Tesla started with laptop batteries -- SpaceX used proven rocket designs more efficiently -- The web succeeded using simple protocols - -Jan follows this tradition: proven AI inference, standard formats, and simple principles—combined in a way that makes AI accessible to everyone. 
- -## Building for the Long Term - -Proven technology has staying power: -- Standards that last decades -- Protocols that survive company failures -- Formats that outlive their creators -- Communities that maintain forever - -By building on these foundations, Jan can focus on what matters: making AI useful, private, and accessible to everyone. - -## Our Commitment - -We commit to: -- **Practical over Perfect**: Ship what works today -- **Simple over Sophisticated**: Elegant solutions to real problems -- **Proven over Promising**: Build on solid foundations -- **Accessible over Advanced**: Reach everyone, not just enthusiasts - -## The Future is Already Here - -The technologies needed for private, local AI already exist. They're proven, optimized, and ready. Our job isn't to invent new technology—it's to combine what exists in ways that serve humanity. - -That's lateral thinking. That's how we build the future: not by reaching for tomorrow's technology, but by using today's technology in tomorrow's ways. - ---- - -*"The best technology is invisible. It just works, everywhere, for everyone."* diff --git a/website/src/content/docs/handbook/philosophy/open-superintelligence.mdx b/website/src/content/docs/handbook/philosophy/open-superintelligence.mdx deleted file mode 100644 index 2fb1ecd359..0000000000 --- a/website/src/content/docs/handbook/philosophy/open-superintelligence.mdx +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: "Open Superintelligence Platform" -description: "Building superintelligence as an ecosystem you can own and deploy anywhere" ---- - - -> "Superintelligence isn't one massive model. It's an ecosystem of specialized models, tools, and applications working together - and you should own all of it." — Jan Philosophy - -## What is Open Superintelligence? - -Open superintelligence is AI that adapts to how you want to work, not the other way around. It's not about forcing you to choose between local or cloud, powerful or private, easy or extensible. 
It's about having it all, and owning it. - -### The Ecosystem Approach - -We're not trying to build GPT-5. We're building something better: specialized models that excel at specific tasks, tools that extend their capabilities, and applications that tie it all together. - -``` -Models (specialized for tasks) - + Tools (extend capabilities) - + Applications (work everywhere) - = Superintelligence you own -``` - -## Why Not One Giant Model? - -The "one model to rule them all" approach has fundamental flaws: - -- **Inefficient**: Using the same weights for poetry and mathematics -- **Inflexible**: Can't optimize for specific use cases -- **Expensive**: Massive compute for simple tasks -- **Monolithic**: Single point of failure and control - -### Our Approach: Specialized Excellence - -- **Jan-Search**: Knows how to find and synthesize information -- **Jan-Write**: Understands structure, tone, and creativity -- **Jan-Analyze**: Excels at reasoning and data interpretation -- **Jan-Code**: Optimized for programming tasks - -Each model does one thing brilliantly. Together, they form superintelligence. - -## The Three Pillars - -### 1. Models That Adapt - -Not just our models, but any model you need: -- **Jan Models**: Purpose-built for specific tasks -- **Community Models**: Any GGUF from Hugging Face -- **Cloud Models**: Connect to OpenAI, Anthropic when needed -- **Your Models**: Fine-tune and deploy your own - -### 2. Tools That Extend - -Models need capabilities beyond conversation: -- **Search**: Get answers, not just links -- **DeepResearch**: Autonomous multi-step investigation -- **BrowserUse**: Let AI interact with the web -- **MCP Protocol**: Connect any data source or API - -### 3. 
Applications That Scale - -Same experience, infinite deployment options: -- **Jan Desktop**: Your local AI workstation -- **Jan.ai**: Browser-based, no setup required -- **Jan Mobile**: AI that follows you (coming soon) -- **Jan Server**: Self-hosted for teams (coming soon) - -## Ownership Through Flexibility - -True ownership means having choices: - -### Choose Your Deployment -- **Full Local**: Everything on your hardware, works offline -- **Full Cloud**: We host it, you just use it -- **Hybrid**: Local for sensitive work, cloud for heavy compute -- **Self-Hosted**: Your servers, your control - -### Choose Your Models -- Use our models when they're best -- Use community models when they fit -- Use cloud models when you need them -- Train your own when you want to - -### Choose Your Scale -- **Personal**: Run on your laptop -- **Team**: Deploy on your server -- **Enterprise**: Scale across infrastructure -- **Global**: Distribute to the edge - -## Building in the Open - -We don't just open source our code. 
We open source our entire development process: - -### Watch Us Train -Live at [train.jan.ai](https://train.jan.ai): -- See models training in real-time -- View our datasets and methods -- Learn from our failures -- Track actual progress - -### Test With Us -Evaluate at [eval.jan.ai](https://eval.jan.ai): -- Compare model outputs -- Vote on what works -- Suggest improvements -- Access all evaluation data - -### Build With Us -Everything on [GitHub](https://github.com/janhq): -- Contribute features -- Report issues -- Fork and modify -- Join discussions - -## The Network Effect of Openness - -Open systems create compound benefits: - -### For Users -- More models to choose from -- More tools to extend capabilities -- More deployment options -- More control over your AI - -### For Developers -- Build on a stable platform -- Access to growing ecosystem -- No vendor lock-in fears -- Community support - -### For Organizations -- Deploy how you need -- Customize for your use case -- Control your costs -- Own your infrastructure - -## Quality Without Compromise - -**Jan Exam** ensures excellence across the ecosystem: -- Objective benchmarks -- Real-world testing -- Community validation -- Transparent results - -Whether it's our model or someone else's, if it performs well, it belongs on Jan. - -## The Path Forward - -### Today (Available Now) -- Jan Desktop with local and cloud models -- Basic tools via MCP -- Growing model ecosystem -- Active community - -### Next 12 Months -- Jan v1 specialized models -- Advanced tools (search, research, browser) -- Jan Server for teams -- Mobile applications - -### The Vision -- Models that understand your context -- Tools that act autonomously -- Applications that work everywhere -- An ecosystem owned by its users - -## Why This Wins - -### Against Closed Platforms -- No vendor lock-in vs. total dependence -- Own forever vs. rent forever -- Infinite flexibility vs. their way only -- Community innovation vs. 
corporate roadmap - -### Against DIY Open Source -- Complete ecosystem vs. fragmented tools -- Works out of box vs. endless configuration -- Unified experience vs. duct-tape integration -- Professional polish vs. research prototypes - -## Join the Revolution - -This isn't just about building better AI. It's about ensuring AI serves humanity, not the other way around. - -When you use Jan, you're not just a user. You're part of a movement building: -- AI that works anywhere -- Intelligence you can own -- Tools that adapt to you -- A future without gatekeepers - -## Our Promise - -We promise to build superintelligence that: - -1. **Works Everywhere**: From laptop to data center to edge -2. **Belongs to You**: Download it, modify it, own it forever -3. **Stays Open**: Core will always be open source -4. **Keeps Improving**: Community-driven development -5. **Respects Choice**: Your deployment, your rules - -## The Bottom Line - -Superintelligence shouldn't be controlled by a few companies. It shouldn't force you to choose between power and ownership. It shouldn't lock you into one way of working. - -With Jan, it doesn't. - -You get superintelligence that adapts to how you work, runs where you need it, and belongs to you completely. - -That's open superintelligence. That's what we're building. - ---- - -*"The best AI platform is one where choosing local or cloud is like choosing between laptop or desktop - same experience, different form factor, your choice."* diff --git a/website/src/content/docs/handbook/philosophy/optimism.mdx b/website/src/content/docs/handbook/philosophy/optimism.mdx deleted file mode 100644 index a0221e0268..0000000000 --- a/website/src/content/docs/handbook/philosophy/optimism.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "Perpetual Optimism is a Force Multiplier" -description: "We believe in perpetual optimism." 
---- - -import YouTube from '@/components/YouTube.astro' - - - -> "Success consists of going from failure to failure without loss of enthusiasm." — Winston Churchill - -In 1903, [we barely left the ground](https://en.wikipedia.org/wiki/Wright_Flyer). By 1969, [we stood on the moon](https://en.wikipedia.org/wiki/Apollo_11). What once seemed impossible became reality through persistence and optimism. - -## Augmenting Humanity - -- We reject AI doomerism. Focus on possibilities, not fear -- Children represent our hope. We build for their future -- Humanity progresses faster than it feels -- AI is a tool—like electricity or the telephone. It's not the first revolution, nor the last -- History shows resilience. We adapt, mitigate risks, and move forward -- Airplanes once terrified—yet they helped humanity reach the moon and connect the world - -## AI Safety - -- Safety is non-negotiable. Protecting people is the baseline - - AI safety == human safety. If we haven’t solved human safety, we haven’t solved AI safety. - - AI alignment == human alignment. Misaligned societies can’t build aligned systems. -- AI safety requires human alignment first -- Fear and progress must be balanced—panic stalls; awareness guides -- Building for our kids ensures safety is built-in, purpose drives caution -- Airplanes once terrified—yet they helped humanity reach the moon and connect the world - -## Why we believe in optimism - -- Optimism drives solutions; fear paralyzes -- Hope fuels persistence. Failures aren't endpoints -- Every breakthrough began as a dream. 
We build toward better because we believe in it -- Perpetual optimism multiplies effort and impact diff --git a/website/src/content/docs/handbook/sell/marketing.mdx b/website/src/content/docs/handbook/sell/marketing.mdx deleted file mode 100644 index 71b4bd77b4..0000000000 --- a/website/src/content/docs/handbook/sell/marketing.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Marketing" -description: "How we structure marketing to drive revenue and growth" -asIndexPage: true ---- - -import YouTube from '@/components/YouTube.astro'; - -> "Marketing is about values." - Steve Jobs - - - -Without a strategy, marketing is: -- throwing social media posts with reach anxiety -- starting podcasts that no one listens -- writing newsletter that even team members don't read - -Marketing is telling your own words in some channels for a purpose. Without a purpose, it's just noise - like how some do. - -Before starting we should align on some terms: - -- **Framework**: The blueprint that organizes our marketing efforts -- **Purpose**: The fundamental reason behind our marketing activities -- **Campaign**: Marketing actions -- **Goal**: The specific, measurable result we aim to achieve through our marketing activities -- **Brief**: The document outlining how we're executing a campaign - -## Framework(s) - -Marketings looks like art, must be managed like math. At Jan, we follow some frameworks for alignment. - -Our marketing efforts consist of 3 purposes and each marketing action must target at least one: - -1. Grow the market -2. Increase the market share -3. Capture market share in a more efficient way - -Each purpose requires campaigns with clear goals. 
Goal types: - -- KPI Goals -- Project & Campaign Goals -- Experiment Goals -- Hiring Goals - -Campaign executions must leave no questions, so each marketing campaign requires a brief format: - -- **Goals**: KPIs, timeline, relevant OKRs -- **Audience**: Who we're targeting -- **Creatives**: Messaging & creative assets -- **Channels**: Distribution -- **Ownership**: Who is involved - -## Positioning - -Marketing starts with positioning - we always think thorough where to sit in the market before new launches. - -No one cares about product functions, it's all about what you provide. If your positioning requires explanation, it isn't working. We never talk about what our product does until we've established what problem it eliminates. - -We start with a positioning: - -- What is our product/service/platform? - - In customer language, what is it? - - What pain point do we eliminate? - - What we improve? -- Who is this for? - - Who benefits most from this solution? - - What characteristics define this segment? -- Why is it better? - - What are the other players? - - How do we outperform alternatives? - - What makes us uniquely valuable here? - -## Big no's on marketing - -We're playing our game, not theirs. 
- -- Throwing out marketing activities to see what sticks -- Burning money at Ads -- Random posts -- Copying what others do -- Actions without planning or goals -- Prioritizing paid activities over organic -- Jumping on hypes over strategy - -## Big yes's on marketing - -- Growing together with others -- Playing our game in the highest level -- Listening selectively - Absorb market feedback, but filter it through strategy -- Adding value to what we're working on -- Repurposing content -- Being opinionated about the differentiation and why we're doing -- Understanding the technical aspects at a level that explains a child -- Being aware of the target audience and the messaging diff --git a/website/src/content/docs/handbook/team/index.mdx b/website/src/content/docs/handbook/team/index.mdx deleted file mode 100644 index 1465f6e86c..0000000000 --- a/website/src/content/docs/handbook/team/index.mdx +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: "Team Structure" -description: "How Jan's team is organized" ---- - -Jan is an open-source AI platform built by a distributed team focused on making AI -accessible, private, and locally-run. - -Jan is currently ~8 people working across different areas to build the open -superintelligence platform. - -## Jan Desktop & Platform - -Building the core Jan application and platform infrastructure. - -### Engineering -- **Core Platform**: Building Jan Desktop and core infrastructure -- **Dev Relations**: Ramon - Community engagement and developer experience -- **QA & Release**: Ensuring quality and stability across releases - -### Model Team - -Focused on making the best models available to Jan users. 
- -| Focus Area | Team Members | Description | -|------------|--------------|-------------| -| **Model Hub** | Thien | Leading model quantization and evaluation for Jan's Local Model Hub - ensuring Jan supports the latest and best open source models | -| **Jan Models** | Alan, Alex, Bách, Warren | Developing Jan's flagship model to be the default in Jan Desktop | -| **Advanced Features** | Thinh, Norapat | Working with Ramon to spec and implement:
• Jan Attachments
• Jan DeepResearch
• Jan Voice Mode | -| **Engineering** | Akarshan, Louis, Dinh | Building the core of Jan | -| **Design** | Faisal | Making Jan look gooood, function well, and be accessible to everyone | -| **Product & DevRel** | Yuuki and Ramon | Thinking about the future of Jan and how to best communicate it to users | -| **Management** | Yuuki | Keeping people in check by threatening everyone with corporate speak | -| **Marketing** | Emre | Spreading the word about Jan and its capabilities | -| **Infra** | Minh and Hien| They make things run smoothly | - - -## Infrastructure & Operations - -### Technical Infrastructure -- **Jan Cloud**: Cloud infrastructure for optional services -- **Hardware**: Optimizing for local hardware performance -- **CI/CD**: Continuous integration and deployment - -### Business Operations - -| Team | Focus | Members | -|------|-------|---------| -| **Marketing** | Brand, growth, and community | Emre | -| **Product** | Product strategy and roadmap | Ramon | -| **Finance & HR** | Operations and people | Nicole, finance team | - -## Working Groups - -Cross-functional teams that form around specific initiatives: - -- **Model Evaluation**: Jan Exam benchmarking and quality assurance -- **Community Features**: Building based on user feedback -- **Open Standards**: MCP and other protocol development - -## Our Approach - -- **Open Development**: Build in public with community input -- **Quality First**: Jan Exam as our north star for model quality -- **User Focused**: Every role ultimately serves our users' need for private, local AI -- **Flexible Structure**: Teams adapt based on current priorities and user needs - -## Join Us - -Interested in joining our mission to build open superintelligence? Check out our [careers page](https://jan.ai/careers) or contribute to our [open source projects](https://github.com/janhq). 
diff --git a/website/src/content/docs/handbook/who/curiosity.mdx b/website/src/content/docs/handbook/who/curiosity.mdx deleted file mode 100644 index 9a0e8378d6..0000000000 --- a/website/src/content/docs/handbook/who/curiosity.mdx +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: "The Fast and the Curious" -description: "We hire people who move quickly and never stop learning" ---- - -> "In the AI age, the ability to learn faster than the rate of change is the only sustainable advantage." — Adapted from Eric Ries - -We hire people who are both fast and curious—those who ship today while learning for tomorrow. - -## Ship Fast, Ship Often - -Speed is our advantage in the rapidly evolving AI landscape: - -- **Weekly Releases**: We ship updates to Jan every week, not every quarter -- **Rapid Experimentation**: Test ideas with real users, not focus groups -- **Fail Fast, Learn Faster**: Kill features that don't work, double down on what does -- **User Feedback Loop**: From idea to user's hands in days, not months - -### What Fast Means at Jan - -- **Bias for Action**: See a problem? Fix it. Don't wait for permission. 
-- **MVP Mindset**: Launch at 80% perfect, iterate to 100% -- **Quick Decisions**: Make reversible decisions quickly, deliberate only on irreversible ones -- **Async by Default**: Don't let time zones slow us down - -We've shipped: -- Major features in days that others debate for months -- Model support hours after release, not weeks -- Bug fixes while users are still typing the report - -## Stay Endlessly Curious - -In AI, yesterday's breakthrough is today's baseline: - -### Learning is Non-Negotiable - -- **New Models Weekly**: Understand and integrate the latest AI advances -- **Cross-Domain Knowledge**: From quantization techniques to UI design -- **Community Learning**: Our users teach us as much as we teach them -- **Open Source Study**: Learn from the best codebases in the world - -### Curiosity in Practice - -- **Why Over What**: Don't just implement—understand the reasoning -- **Question Everything**: "Why do we collect user data?" led to our privacy-first approach -- **Learn in Public**: Share discoveries with the community -- **Teach to Learn**: Explaining concepts deepens understanding - -## The Jan Learning Culture - -### Everyone is a Student - -- **No Experts**: In a field moving this fast, everyone is learning -- **Share Knowledge**: Daily discoveries in our Discord channels -- **Document Learning**: Today's experiment is tomorrow's documentation -- **Celebrate Questions**: The "stupid" question often reveals the biggest insight - -### Everyone is a Teacher - -- **Onboarding**: New hires teach us fresh perspectives -- **Community Education**: Blog posts, tutorials, and demos -- **Code as Teaching**: Well-commented code educates future contributors -- **Failure Stories**: Share what didn't work and why - -## What We Look For - -### Signs of Speed - -- **GitHub Velocity**: Frequent commits, quick iterations -- **Project Completion**: Finished projects, not just started ones -- **Response Time**: Quick to engage, quick to deliver -- **Adaptation 
Speed**: How fast do you integrate feedback? - -### Signs of Curiosity - -- **Side Projects**: What do you build for fun? -- **Learning Artifacts**: Blogs, notes, or projects showing learning -- **Question Quality**: Do you ask insightful questions? -- **Knowledge Breadth**: Interests beyond your specialty - -## Why This Matters for Jan - -### AI Moves Too Fast for Slow - -- Models improve monthly -- User expectations evolve weekly -- Competition ships daily -- Standards change quarterly - -If we're not fast and curious, we're obsolete. - -### Local-First Demands Both - -- **Fast**: Users expect immediate responses, not cloud latency -- **Curious**: Supporting every model requires understanding each one -- **Fast**: Privacy bugs need instant fixes -- **Curious**: New quantization methods need quick adoption - -## The Compound Effect - -Fast + Curious creates exponential growth: - -``` -Ship Fast → User Feedback → Learn → -Ship Smarter → More Users → More Learning → -Ship Even Faster → Compound Growth -``` - -Each cycle makes us: -- Faster at shipping -- Better at learning -- More valuable to users -- More attractive to talent - -## Join Us If... - -- You've shipped something this week (not this year) -- You've learned something new today (not last month) -- You see a Jan issue and think "I could fix that" -- You read our codebase and think "I could improve that" -- You use Jan and think "It could also do this" - -## The Promise - -If you join Jan as someone fast and curious, in a year you'll be: -- **Faster**: Shipping features you can't imagine today -- **Smarter**: Understanding AI at a level that surprises you -- **Connected**: Part of a global community of builders -- **Impactful**: Your code running on millions of devices - -## The Bottom Line - -We don't hire for what you know today. We hire for how fast you'll know what matters tomorrow. - -In the race to build open superintelligence, the fast and curious don't just keep up—they set the pace. 
- ---- - -*"At Jan, we measure progress in iterations per week, not years of experience."* diff --git a/website/src/content/docs/handbook/who/dydx.mdx b/website/src/content/docs/handbook/who/dydx.mdx deleted file mode 100644 index 93c39d2746..0000000000 --- a/website/src/content/docs/handbook/who/dydx.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "dy/dx > y-intercept" -description: "The rate of a function is more important than its starting point." ---- - -> "A little bit of slope makes up for a lot of Y-intercept." — John Ousterhout - -The rate of a function is more important than its starting point. We value growth -rate over initial advantage. - -## Why slope matters - -- Knowledge fades, but the ability to learn compounds -- A fast learner will outpace someone with more initial experience over time -- Slow, steady improvement outperforms quick starts with no growth - -## What it means day-to-day - -- Don't hesitate to try something new—even if you start clueless. Learning speed matters -- Mistakes are expected. Learning from them is required -- We'll invest in helping you grow, but the drive has to come from you -- Your trajectory is more important than where you begin - -## Why we believe in this - -- Building something lasting requires patience and commitment to improvement -- We're not interested in shortcuts. We value the work that compounds quietly until it becomes obvious -- If Jan's greatest impact is helping people and ideas grow steadily over time—that's the kind of success we stand for diff --git a/website/src/content/docs/handbook/who/obsession.mdx b/website/src/content/docs/handbook/who/obsession.mdx deleted file mode 100644 index 3b14c2da2d..0000000000 --- a/website/src/content/docs/handbook/who/obsession.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Obsession" -description: "We seek obsession." ---- - -> "If you want to build a ship, don't drum up the men to gather wood, divide the work, and give orders. 
Instead, teach them to yearn for the vast and endless sea." — Antoine de Saint-Exupéry - -We don't hire for skills alone. We hire for obsession. - -## Find the obsessed - -- Breakthroughs require long-term focus, not fleeting interest -- Failure isn't the end—just another step in relentless experimentation -- People who obsess push beyond limits others accept - -## We seek those already consumed - -- You're already working on the problem—because you can't not -- We don't assign you a job; we support your life's work -- Obsession isn't just personal—it becomes more powerful in a team -- Together, a group of obsessives becomes greater than the sum of its parts - -## The best hires share common traits - -- Desperation: Solving the problem feels like a need, not a choice -- Relentlessness: You try, fail, adapt—again and again -- Defiance: Others call it crazy; we call it genius - -## We're looking for the dreamers - -- That deep, persistent song you've been trying to sing? We hear it -- Maybe you've been mocked, dismissed, or misunderstood -- We seek those lonely, weird dreamers who refuse to give up - -Wherever you are in the world, if this feels like you—apply here diff --git a/website/src/content/docs/handbook/who/team.mdx b/website/src/content/docs/handbook/who/team.mdx deleted file mode 100644 index 952780265f..0000000000 --- a/website/src/content/docs/handbook/who/team.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Jan is a Sports Team" -description: "Our approach is super simple: Retain. Build trust. Win together." ---- - -> "First, you need to learn how to play. You need 2-3 years together to learn how to play with each other." — [Nikola Jokić](https://www.reddit.com/r/NBASpurs/comments/1cdscti/wise_words_from_jokic_first_you_need_to_learn_how/) - -Jan is a sports team, not unlike a NBA team or a NFL team. We focus on putting the best players on the court, focusing on their 1% strengths, and building a team that can win together. - -## Team vs. 
Family - -We learn the best principles from [Netflix Culture Deck](https://www.slideshare.net/slideshow/culture-1798664/1798664): - -- We operate like a pro sports team -- There are limited spots on every team — every roster spot counts -- We hire, develop and cut smartly - -## Continuity - -However, we balance this with our belief in Continuity and Stability: - -- Teams with psychological safety take better, long-term risks -- Teams need 2–3 years to truly gel -- Continuity matters; balance change carefully -- Cohesive teams outperform constant reshuffles -- Time builds trust, speed, and better decisions diff --git a/website/src/content/docs/handbook/who/underdogs.mdx b/website/src/content/docs/handbook/who/underdogs.mdx deleted file mode 100644 index f2db21c51d..0000000000 --- a/website/src/content/docs/handbook/who/underdogs.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Underdogs" -description: "We hire from unconventional backgrounds." ---- - -> "I am, somehow, less interested in the weight and convolutions of Einstein's brain than in the near certainty that people of equal talent have lived and died in cotton fields and sweatshops." — Stephen Jay Gould - -We hire from unconventional backgrounds. - -## Opportunity isn't equal - -- Where you're born shapes your chances—but it shouldn't define them -- Talent is everywhere. Opportunity isn't -- We believe the internet has changed how we work and live together. Our team—spread across 10 cities worldwide—couldn't have existed just five years ago -- Fast connections (5G and beyond) mean geography no longer decides your future. We'll find you—wherever you are - -## We seek the overlooked - -- Lonely geniuses, relentless tinkerers, people with potential others missed -- We don't care where you went to school—or if you went at all -- We bet on people like startups: high risk, high potential -- Skills can be taught; drive cannot. 
We'll teach you to communicate and collaborate -- We trust you'll prove yourself through your work and integrity - -## Meaningful work, close to home - -- We create jobs that matter, in the places people call home -- If Jan's greatest achievement is enabling people to build meaningful careers in their hometowns—that would be success worth celebrating - -## This is your shot - -- Underdogs, dreamers, the overlooked—this place is for you -- We don't just welcome you. We're looking for you -- Wherever you are in the world—apply here diff --git a/website/src/content/docs/handbook/why/icp.mdx b/website/src/content/docs/handbook/why/icp.mdx deleted file mode 100644 index 0393dfc412..0000000000 --- a/website/src/content/docs/handbook/why/icp.mdx +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: "Who we are building for" -description: "Building for people who want to own their AI and choose how to run it" ---- - -Jan is built for people who want control over their AI experience. Our users span from individual developers to global enterprises, united by a desire to own their AI infrastructure and choose how they deploy it. 
- -## Primary Users - -### Developers Who Want Flexibility -Engineers and creators who need: -- The freedom to run models locally or in the cloud -- Complete control over their AI stack -- The ability to switch providers without losing their work -- Integration options that fit their existing workflows - -### Individuals Who Value Ownership -People who understand that AI should be like any other software: -- Download it once, use it forever -- Your conversations and data belong to you -- No vendor lock-in or subscription traps -- The freedom to modify and extend as needed - -### Teams That Need Options -Organizations that require: -- The flexibility to run on-premises or in the cloud -- The ability to scale from laptop to server cluster -- Control over costs and infrastructure -- Options that adapt to changing requirements - -### Global Users Who Need Access -Millions of potential AI users who face: -- Expensive or unreliable cloud services -- The need for AI that works offline -- Hardware limitations that require optimization -- Different deployment needs for different situations - -## Our North Star: Best Experience, Your Way - -We use **Jan Exam** as our benchmark to ensure quality across all deployment options. 
Whether you're running locally on a laptop or scaled across a server farm, you get: - -- The same powerful AI capabilities -- Consistent performance for your hardware -- The flexibility to change your setup anytime -- No compromise between local and cloud - -## The Flexibility Spectrum - -Our users choose their own balance: - -### Full Local -- Everything runs on your hardware -- Complete offline capability -- Total data ownership -- Zero recurring costs - -### Hybrid Approach -- Local for sensitive work -- Cloud for heavy compute -- Seamless switching between modes -- Optimize cost vs performance - -### Full Cloud -- Jan.ai for zero setup -- Team collaboration features -- Managed infrastructure -- Scale without limits - -## What Sets Our Users Apart - -They understand that: -- **Ownership matters**: Your AI tools should be assets, not rentals -- **Flexibility is power**: Different tasks need different approaches -- **Control creates value**: The ability to customize and extend is crucial -- **Choice prevents lock-in**: Multiple options keep you free - -## Real User Scenarios - -### The Solo Developer -Runs Jan locally during development, uses cloud for production deployments. Owns their entire stack. - -### The Research Team -Uses local models for sensitive data, cloud models for general research. Switches seamlessly based on needs. - -### The Enterprise -Deploys Jan Server on-premises for compliance, uses Jan.ai for non-critical workloads. One platform, multiple deployment options. - -### The Student -Runs lightweight models locally on their laptop, accesses more powerful models via cloud when needed. Learns AI without breaking the bank. 
- -## Growing With Our Users - -We build for users at every stage: - -**Day 1**: Download Jan, start chatting locally -**Week 1**: Connect to cloud providers for more power -**Month 1**: Deploy your own server for team access -**Year 1**: Scale across infrastructure you control - -## The Future User - -We're building for the next billion AI users who will expect: -- Software they can own, not just rent -- The freedom to run AI anywhere -- Tools that adapt to their needs -- No artificial limitations - -They'll start with Jan because it gives them choices, and they'll stay because those choices grow with them. - ---- - -*"The best AI is the one that works where you need it, how you need it, when you need it."* diff --git a/website/src/content/docs/handbook/why/ownership.mdx b/website/src/content/docs/handbook/why/ownership.mdx deleted file mode 100644 index 67e90b34c0..0000000000 --- a/website/src/content/docs/handbook/why/ownership.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: "Our Vision for Open Superintelligence" -description: "Building superintelligence that you can own, modify, and deploy however you want" ---- - -Jan believes the path to superintelligence isn't through one massive model controlled by a few companies. It's through an ecosystem of specialized models, tools, and applications that anyone can own and deploy. - -## What is Open Superintelligence? 
- -Open superintelligence is AI that: - -- **Works Everywhere**: From your laptop to your data center to the edge -- **Belongs to You**: Download it, modify it, deploy it - it's yours -- **Scales Infinitely**: Same AI whether you're one person or ten thousand -- **Evolves with Community**: Thousands of contributors, not hundreds of employees - -## The Ecosystem Approach - -Superintelligence isn't one thing - it's many things working together: - -### Specialized Models -Instead of one model trying to do everything: -- **Jan-Search** excels at finding and synthesizing information -- **Jan-Write** understands creativity and structure -- **Jan-Analyze** specializes in reasoning and data -- **Jan-Code** focuses on programming tasks - -Each model does one thing brilliantly, and they hand off tasks to each other. - -### Extensible Tools -Models alone aren't enough. Our tools make them useful: -- Search the web and get answers, not links -- Automate browser tasks naturally -- Parse documents and understand context -- Connect to your data sources via MCP - -### Flexible Deployment -One codebase, infinite configurations: -- **Local Mode**: Everything on your hardware -- **Server Mode**: Self-host for your team -- **Cloud Mode**: We host it for you -- **Hybrid Mode**: Mix and match as needed - -## Why Ownership Matters - -When you own your AI infrastructure: - -### You Control the Experience -- Customize models for your specific needs -- Build workflows that match how you work -- Integrate with your existing tools -- Scale based on your requirements - -### You Keep What You Build -- Your prompts and conversations -- Your fine-tuned models -- Your custom workflows -- Your accumulated knowledge - -### You Choose How to Run -- Offline when you need privacy -- Cloud when you need power -- Local when you need speed -- Distributed when you need scale - -## Building in Public - -We're not building this in secret. 
Everything is open: - -### Watch Us Train -See our models being trained in real-time at [train.jan.ai](https://train.jan.ai): -- Live loss curves -- Training datasets -- Failed experiments -- Actual progress - -### Test With Us -Help evaluate models at [eval.jan.ai](https://eval.jan.ai): -- Compare outputs side-by-side -- Vote on what actually works -- Suggest improvements -- See all the data - -### Build With Us -Everything on [GitHub](https://github.com/janhq): -- Contribute code -- Report issues -- Suggest features -- Fork and modify - -## The Path to Superintelligence - -### Today: Foundation (✅) -- Jan Desktop works with local and cloud models -- Basic tools via MCP -- Community growing rapidly - -### Next 12 Months: Ecosystem -- Jan v1 models optimized for specific tasks -- Jan Server for self-hosted deployments -- Advanced tools (browser automation, deep research) -- Cross-device synchronization - -### Future: True Superintelligence -- Models that understand your context -- Tools that act on your behalf -- Applications that work everywhere -- An ecosystem that belongs to everyone - -## Our Commitment - -We commit to building superintelligence that: - -### Stays Open -- Core will always be open source -- Models will always be downloadable -- Standards will always be public -- Development will always be transparent - -### Stays Flexible -- Run it anywhere you have compute -- Mix local and cloud as needed -- Scale up or down instantly -- Switch providers without friction - -### Stays Yours -- No vendor lock-in -- No forced updates -- No subscription traps -- No data exploitation - -## Why This Approach Wins - -### Network Effects -Every user makes Jan better: -- Bug reports improve stability -- Feature requests guide development -- Model feedback enhances quality -- Community support helps everyone - -### Compound Innovation -Open development accelerates progress: -- Best ideas come from anywhere -- Solutions shared instantly -- Problems solved 
collectively -- Innovation happens 24/7 - -### Aligned Incentives -We succeed when you succeed: -- Your productivity is our metric -- Your ownership is our philosophy -- Your freedom is our product -- Your success is our business model - -## Join the Movement - -This isn't just about building better AI. It's about ensuring the most powerful technology in human history belongs to humanity, not corporations. - -Whether you: -- Use Jan for daily work -- Contribute code or ideas -- Share it with others -- Build on top of it -- Deploy it in your organization - -You're part of building superintelligence that everyone can own. - -## The Choice is Yours - -In five years, AI will be everywhere. The question is: - -**Will you rent intelligence from a monopoly, or will you own your own superintelligence?** - -With Jan, you don't have to choose between powerful and private, between cloud and local, between easy and extensible. - -You get it all. And it's yours. - ---- - -*"The best superintelligence is the one you can run anywhere, modify for anything, and own forever."* diff --git a/website/src/content/docs/handbook/why/problem.mdx b/website/src/content/docs/handbook/why/problem.mdx deleted file mode 100644 index 26a7b08d12..0000000000 --- a/website/src/content/docs/handbook/why/problem.mdx +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: "What problem are we solving?" 
-description: "Building superintelligence that you can own and run anywhere" ---- - -Current AI platforms force unnecessary trade-offs: - -- **All or Nothing**: Either use their cloud or build everything yourself from scratch -- **Vendor Lock-in**: Your prompts, workflows, and knowledge trapped in one platform -- **One-Size-Fits-All**: Same solution whether you're a student or an enterprise -- **Rent Forever**: Monthly subscriptions for software that should be yours -- **Limited Deployment**: Can't run where you need it - offline, on-premises, or at the edge - -## Mission - -Jan is building a complete AI ecosystem that adapts to how you want to work. We believe AI should be: - -- **Truly Owned**: Like any software, download it and it's yours forever -- **Infinitely Flexible**: Run locally, self-host, or use our cloud - same experience -- **Professionally Complete**: Not just models, but the full stack of tools and applications -- **Universally Deployable**: From your laptop to your data center to the edge - -## The Full Stack Approach - -We're not building another chatbot. We're building superintelligence you can own: - -### Models That Specialize -- **Jan Models**: Purpose-built for specific tasks (search, analysis, writing) -- **Community Models**: Any GGUF model from Hugging Face -- **Cloud Models**: Connect to OpenAI, Anthropic, or others when needed -- **Your Models**: Fine-tune and deploy your own - -### Tools That Extend -- **Search**: Get answers, not just links -- **DeepResearch**: Multi-step autonomous research -- **BrowserUse**: Let AI interact with the web for you -- **MCP Integration**: Connect to any data source or service - -### Applications That Scale -- **Jan Desktop**: Your local AI workstation -- **Jan.ai**: Web access with no setup -- **Jan Mobile**: AI that follows you (coming soon) -- **Jan Server**: Your own private AI cloud (coming soon) - -## Why This Matters - -In 5 years, AI will be embedded in everything you do. 
The question isn't whether you'll use AI, but how: - -**Option A**: Rent access from tech giants, accept their limitations, lose your data when you stop paying - -**Option B**: Own your AI infrastructure, deploy it anywhere, keep everything you build - -We're making Option B not just possible, but better than Option A. - -## The Problem With Status Quo - -### Cloud-Only Platforms -- Can't work offline -- Can't handle sensitive data -- Can't customize deeply -- Can't control costs - -### DIY Open Source -- Requires deep technical knowledge -- Fragmented tools and models -- No unified experience -- Massive integration overhead - -### Jan's Solution -- Download and run in minutes -- Complete ecosystem out of the box -- Scale from laptop to cluster -- Switch between local and cloud seamlessly - -## Real Problems We Solve Today - -### For Developers -"I want to use AI in my app but don't want vendor lock-in" -→ OpenAI-compatible API that runs anywhere - -### For Teams -"We need AI but can't send data to the cloud" -→ Self-host Jan Server on your infrastructure - -### For Individuals -"I want ChatGPT capabilities without the subscription" -→ Download Jan Desktop and own your AI - -### For Enterprises -"We need to scale AI across different deployment scenarios" -→ One platform that works from edge to cloud - -## Vision - -We envision a future where: - -- AI runs wherever you have compute - laptop, phone, server, edge device -- You own your AI infrastructure like you own your other software -- Models, tools, and applications work together seamlessly -- The best AI isn't gated behind corporate APIs - -Jan is the platform that makes this future real, today. - ---- - -*"The future of AI isn't about choosing between local or cloud. 
It's about having both, and everything in between, working perfectly together."* diff --git a/website/src/content/docs/index.mdx b/website/src/content/docs/index.mdx new file mode 100644 index 0000000000..4acbec801a --- /dev/null +++ b/website/src/content/docs/index.mdx @@ -0,0 +1,199 @@ +--- +title: Jan +description: Build, run, and own your AI. From laptop to superintelligence. +keywords: + [ + Jan, + open superintelligence, + AI ecosystem, + self-hosted AI, + local AI, + llama.cpp, + GGUF models, + MCP tools, + Model Context Protocol + ] +--- + +import { Aside } from '@astrojs/starlight/components'; + +![Jan Desktop](../../assets/jan-app-new.png) + +## Jan's Goal + +> Jan's goal is to build superintelligence that you can self-host and use locally. + +## What is Jan? + +Jan is an open-source AI ecosystem that runs on your hardware. We're building towards open superintelligence - a complete AI platform you actually own. + +### The Ecosystem + +**Models**: We build specialized models for real tasks, not general-purpose assistants: +- **Jan-Nano (32k/128k)**: 4B parameters designed for deep research with MCP. The 128k version processes entire papers, codebases, or legal documents in one go +- **Lucy**: 1.7B model that runs agentic web search on your phone. Small enough for CPU, smart enough for complex searches +- **Jan-v1**: 4B model for agentic reasoning and tool use, achieving 91.1% on SimpleQA + +We also integrate the best open-source models - from OpenAI's gpt-oss to community GGUF models on Hugging Face. The goal: make powerful AI accessible to everyone, not just those with server farms. + +**Applications**: Jan Desktop runs on your computer today. Web, mobile, and server versions coming in late 2025. Everything syncs, everything works together. + +**Tools**: Connect to the real world through [Model Context Protocol (MCP)](https://modelcontextprotocol.io). Design with Canva, analyze data in Jupyter notebooks, control browsers, execute code in E2B sandboxes. 
Your AI can actually do things, not just talk about them. + + + +## Core Features + +### Run Models Locally +- Download any GGUF model from Hugging Face +- Use OpenAI's gpt-oss models (120b and 20b) +- Automatic GPU acceleration (NVIDIA/AMD/Intel/Apple Silicon) +- OpenAI-compatible API at `localhost:1337` + +### Connect to Cloud (Optional) +- Your API keys for OpenAI, Anthropic, etc. +- Jan.ai cloud models (coming late 2025) +- Self-hosted Jan Server (soon) + +### Extend with MCP Tools +Growing ecosystem of real-world integrations: +- **Creative Work**: Generate designs with Canva +- **Data Analysis**: Execute Python in Jupyter notebooks +- **Web Automation**: Control browsers with Browserbase and Browser Use +- **Code Execution**: Run code safely in E2B sandboxes +- **Search & Research**: Access current information via Exa, Perplexity, and Octagon +- **More coming**: The MCP ecosystem is expanding rapidly + +## Architecture + +Jan is built on: +- [Llama.cpp](https://github.com/ggerganov/llama.cpp) for inference +- [Model Context Protocol](https://modelcontextprotocol.io) for tool integration +- Local-first data storage in `~/jan` + +## Why Jan? + +| Feature | Other AI Platforms | Jan | +|:--------|:-------------------|:----| +| **Deployment** | Their servers only | Your device, your servers, or our cloud | +| **Models** | One-size-fits-all | Specialized models for specific tasks | +| **Data** | Stored on their servers | Stays on your hardware | +| **Cost** | Monthly subscription | Free locally, pay for cloud | +| **Extensibility** | Limited APIs | Full ecosystem with MCP tools | +| **Ownership** | You rent access | You own everything | + +## Development Philosophy + +1. **Local First**: Everything works offline. Cloud is optional. +2. **User Owned**: Your data, your models, your compute. +3. **Built in Public**: Watch our models train. See our code. Track our progress. 
+ + + +## System Requirements + +**Minimum**: 8GB RAM, 10GB storage +**Recommended**: 16GB RAM, GPU (NVIDIA/AMD/Intel), 50GB storage +**Supported**: Windows 10+, macOS 12+, Linux (Ubuntu 20.04+) + +## What's Next? + +
+When will mobile/web versions launch? + +- **Jan Web**: Beta late 2025 +- **Jan Mobile**: Late 2025 +- **Jan Server**: Late 2025 + +All versions will sync seamlessly. +
+ +
+What models are available? + +**Jan Models:** +- **Jan-Nano (32k/128k)**: Deep research with MCP integration +- **Lucy**: Mobile-optimized agentic search (1.7B) +- **Jan-v1**: Agentic reasoning and tool use (4B) + +**Open Source:** +- OpenAI's gpt-oss models (120b and 20b) +- Any GGUF model from Hugging Face + +**Cloud (with your API keys):** +- OpenAI, Anthropic, Mistral, Groq, and more + +**Coming late 2025:** +- More specialized models for specific tasks + +[Watch live training progress →](https://train.jan.ai) +
+ +
+What are MCP tools? + +MCP (Model Context Protocol) lets AI interact with real applications. Instead of just generating text, your AI can: +- Create designs in Canva +- Analyze data in Jupyter notebooks +- Browse and interact with websites +- Execute code in sandboxes +- Search the web for current information + +All through natural language conversation. +
+ +
+How does Jan make money? + +- **Local use**: Always free +- **Cloud features**: Optional paid services (coming late 2025) +- **Enterprise**: Self-hosted deployment and support + +We don't sell your data. We sell software and services. +
+ +
+Can I contribute? + +Yes. Everything is open: +- [GitHub](https://github.com/janhq/jan) - Code contributions +- [Model Training](https://jan.ai/docs/models) - See how we train +- [Discord](https://discord.gg/FTk2MvZwJH) - Join discussions +- [Model Testing](https://eval.jan.ai) - Help evaluate models +
+ +
+Is this just another AI wrapper? + +No. We're building: +- Our own models trained for specific tasks +- Complete local AI infrastructure +- Tools that extend model capabilities via MCP +- An ecosystem that works offline + +Other platforms are models behind APIs you rent. Jan is a complete AI platform you own. +
+ +
+What about privacy? + +**Local mode**: Your data never leaves your device. Period. +**Cloud mode**: You choose when to use cloud features. Clear separation. + +See our [Privacy Policy](./privacy). +
+ +## Get Started + +1. [Install Jan Desktop](./jan/installation) - Your AI workstation +2. [Download Models](./jan/models) - Choose from gpt-oss, community models, or cloud +3. [Explore MCP Tools](./mcp) - Connect to real applications +4. [Build with our API](./api-reference) - OpenAI-compatible at localhost:1337 + +--- + +**Questions?** Join our [Discord](https://discord.gg/FTk2MvZwJH) or check [GitHub](https://github.com/janhq/jan/). \ No newline at end of file diff --git a/website/src/content/docs/jan/index.mdx b/website/src/content/docs/jan/index.mdx deleted file mode 100644 index fce7a8db05..0000000000 --- a/website/src/content/docs/jan/index.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: Jan -description: Build, run, and own your AI. From laptop to superintelligence. -keywords: - [ - Jan, - self-hosted AI, - local AI, - open superintelligence, - AI ecosystem, - llama.cpp, - GGUF models, - MCP tools - ] ---- - -import { Aside } from '@astrojs/starlight/components'; - -![Jan Desktop](../../../assets/jan-app-new.png) - -## Jan's Goal - -> Jan's goal is to build superintelligence that you can self-host and use locally. - -## What is Jan? - -Jan is an open-source AI ecosystem that runs on your hardware. - -- **Available Today**: Jan Desktop - run AI models on your computer with zero setup -- **Coming Soon**: Complete ecosystem with specialized models, tools, and cross-device sync - -### The Stack - -**Models**: Specialized AI models trained for real tasks (search, analysis, writing) -**Tools**: Browser automation, web search, memory systems via MCP -**Applications**: Desktop (now), Web/Mobile/Server (coming 2025) - -Everything works together. Everything runs where you want it. 
- - - -## Core Features - -### Run Models Locally -- Download any GGUF model from Hugging Face -- Use our Jan models optimized for specific tasks -- Automatic GPU acceleration (NVIDIA/AMD/Intel) -- OpenAI-compatible API at `localhost:1337` - -### Connect to Cloud (Optional) -- Your API keys for OpenAI, Anthropic, etc. -- Jan.ai cloud models (coming soon) -- Self-hosted Jan Server - -### Extend with Tools -- Web search via MCP -- Browser automation -- File parsing and analysis -- Custom tool development - -## Architecture - -Jan is built on: -- [Llama.cpp](https://github.com/ggerganov/llama.cpp) for inference -- [Model Context Protocol](https://modelcontextprotocol.io) for tools -- Local-first data storage in `~/jan` - -## Why Jan? - -| Feature | Other AI Platforms | Jan | -|:--------|:-------------------|:----| -| **Deployment** | Their servers only | Your device, your servers, or our cloud | -| **Models** | One-size-fits-all | Specialized models for specific tasks | -| **Data** | Stored on their servers | Stays on your hardware | -| **Cost** | Monthly subscription | Free locally, pay for cloud | -| **Extensibility** | Limited APIs | Full ecosystem with MCP tools | - -## Development Philosophy - -1. **Local First**: Everything works offline. Cloud is optional. -2. **User Owned**: Your data, your models, your compute. -3. **Built in Public**: Watch our models train. See our code. Track our progress. - - - -## System Requirements - -**Minimum**: 8GB RAM, 10GB storage -**Recommended**: 16GB RAM, NVIDIA GPU, 50GB storage -**Supported**: Windows 10+, macOS 12+, Linux (Ubuntu 20.04+) - -## What's Next? - -
-When will mobile/web versions launch? - -- **Jan Web**: Beta Q1 2025 -- **Jan Mobile**: Q4 2025 -- **Jan Server**: Q3 2025 - -All versions will sync seamlessly with your desktop. -
- -
-What models are available? - -**Now**: -- Any GGUF model from Hugging Face -- Cloud models via API keys (OpenAI, Anthropic, etc.) - -**Coming Q1 2025**: -- Jan-Search: Optimized for web search and synthesis -- Jan-Write: Creative and technical writing -- Jan-Analyze: Data analysis and reasoning - -[Watch live training progress →](https://train.jan.ai) -
- -
-How does Jan make money? - -- **Local use**: Always free -- **Cloud features**: Optional paid services (coming 2025) -- **Enterprise**: Self-hosted deployment and support - -We don't sell your data. We sell software and services. -
- -
-Can I contribute? - -Yes. Everything is open: -- [GitHub](https://github.com/janhq/jan) - Code contributions -- [Model Training](https://jan.ai/docs/models) - See how we train -- [Discord](https://discord.gg/FTk2MvZwJH) - Join discussions -- [Model Testing](https://eval.jan.ai) - Help evaluate models -
- -
-Is this just another ChatGPT wrapper? - -No. We're building: -- Our own models trained for specific tasks -- Complete local AI infrastructure -- Tools that extend model capabilities -- An ecosystem that works offline - -ChatGPT is one model behind an API. Jan is a complete AI platform you own. -
- -
-What about privacy? - -**Local mode**: Your data never leaves your device. Period. -**Cloud mode**: You choose when to use cloud features. Clear separation. - -See our [Privacy Policy](./privacy). -
- -## Get Started - -1. [Install Jan Desktop](./jan/installation) - Your AI workstation -2. [Explore Models](./jan/models) - Download and configure -3. [Learn the API](./api-reference) - Build with Jan - ---- - -**Questions?** Join our [Discord](https://discord.gg/FTk2MvZwJH) or check [GitHub roadmap](https://github.com/janhq/jan/). diff --git a/website/src/content/docs/jan/jan-models/jan-v1.mdx b/website/src/content/docs/jan/jan-models/jan-v1.mdx new file mode 100644 index 0000000000..e6f4668421 --- /dev/null +++ b/website/src/content/docs/jan/jan-models/jan-v1.mdx @@ -0,0 +1,116 @@ +--- +title: Jan-v1 +description: 4B parameter model with strong performance on reasoning benchmarks +--- + +import { Aside } from '@astrojs/starlight/components'; + +## Overview + +Jan-v1 is a 4B parameter model based on Qwen3-4B-thinking, designed for reasoning and problem-solving tasks. The model achieves 91.1% accuracy on SimpleQA through model scaling and fine-tuning approaches. + +## Performance + +### SimpleQA Benchmark + +Jan-v1 demonstrates strong factual question-answering capabilities: + +![Jan-v1 SimpleQA Performance](../../../../assets/simpleqa_jan_v1.png) + +At 91.1% accuracy, Jan-v1 outperforms several larger models on SimpleQA, including Perplexity's 70B model. This performance represents effective scaling and fine-tuning for a 4B parameter model. + +### Chat and Creativity Benchmarks + +Jan-v1 has been evaluated on conversational and creative tasks: + +![Jan-v1 Creativity Benchmarks](../../../../assets/creative_bench_jan_v1.png) + +These benchmarks (EQBench, CreativeWriting, and IFBench) measure the model's ability to handle conversational nuance, creative expression, and instruction following. + +## Requirements + +- **Memory**: + - Minimum: 8GB RAM (with Q4 quantization) + - Recommended: 16GB RAM (with Q8 quantization) +- **Hardware**: CPU or GPU +- **API Support**: OpenAI-compatible at localhost:1337 + +## Using Jan-v1 + +### Quick Start + +1. 
Download Jan Desktop +2. Select Jan-v1 from the model list +3. Start chatting - no additional configuration needed + +### Demo + +![Jan-v1 Demo](/gifs/jan_v1_demo.gif) + +### Deployment Options + +**Using vLLM:** +```bash +vllm serve janhq/Jan-v1-4B \ + --host 0.0.0.0 \ + --port 1234 \ + --enable-auto-tool-choice \ + --tool-call-parser hermes +``` + +**Using llama.cpp:** +```bash +llama-server --model jan-v1.gguf \ + --host 0.0.0.0 \ + --port 1234 \ + --jinja \ + --no-context-shift +``` + +### Recommended Parameters + +```yaml +temperature: 0.6 +top_p: 0.95 +top_k: 20 +min_p: 0.0 +max_tokens: 2048 +``` + +## What Jan-v1 Does Well + +- **Question Answering**: 91.1% accuracy on SimpleQA +- **Reasoning Tasks**: Built on thinking-optimized base model +- **Tool Calling**: Supports function calling through hermes parser +- **Instruction Following**: Reliable response to user instructions + +## Limitations + +- **Model Size**: 4B parameters limits complex reasoning compared to larger models +- **Specialized Tasks**: Optimized for Q&A and reasoning, not specialized domains +- **Context Window**: Standard context limitations apply + +## Available Formats + +### GGUF Quantizations + +- **Q4_K_M**: 2.5 GB - Good balance of size and quality +- **Q5_K_M**: 2.89 GB - Better quality, slightly larger +- **Q6_K**: 3.31 GB - Near-full quality +- **Q8_0**: 4.28 GB - Highest quality quantization + +## Models Available + +- [Jan-v1 on Hugging Face](https://huggingface.co/janhq/Jan-v1-4B) +- [Jan-v1 GGUF on Hugging Face](https://huggingface.co/janhq/Jan-v1-4B-GGUF) + +## Technical Notes + + + +## Community + +- **Discussions**: [HuggingFace Community](https://huggingface.co/janhq/Jan-v1-4B/discussions) +- **Support**: Available through Jan App at [jan.ai](https://jan.ai) diff --git a/website/src/content/docs/jan/jan-models/lucy.mdx b/website/src/content/docs/jan/jan-models/lucy.mdx new file mode 100644 index 0000000000..371e7f8392 --- /dev/null +++ 
b/website/src/content/docs/jan/jan-models/lucy.mdx @@ -0,0 +1,111 @@ +--- +title: Lucy +description: Compact 1.7B model optimized for web search with tool calling +--- + +import { Aside } from '@astrojs/starlight/components'; + +![Lucy](../../../../assets/lucy.jpeg) + +## Overview + +Lucy is a 1.7B parameter model built on Qwen3-1.7B, optimized for web search through tool calling. The model has been trained to work effectively with search APIs like Serper, enabling web search capabilities in resource-constrained environments. + +## Performance + +### SimpleQA Benchmark + +Lucy achieves competitive performance on SimpleQA despite its small size: + +![Lucy SimpleQA Performance](../../../../assets/simpleqa_lucy.png) + +The benchmark shows Lucy (1.7B) compared against models ranging from 4B to 600B+ parameters. While larger models generally perform better, Lucy demonstrates that effective web search integration can partially compensate for smaller model size. + +## Requirements + +- **Memory**: + - Minimum: 4GB RAM (with Q4 quantization) + - Recommended: 8GB RAM (with Q8 quantization) +- **Search API**: Serper API key required for web search functionality +- **Hardware**: Runs on CPU or GPU + + + +## Using Lucy + +### Quick Start + +1. Download Jan Desktop +2. Download Lucy from the Hub +3. Configure Serper MCP with your API key +4. 
Start using web search through natural language + +### Demo + +![Lucy Demo](/gifs/lucy_demo.gif) + +### Deployment Options + +**Using vLLM:** +```bash +vllm serve Menlo/Lucy-128k \ + --host 0.0.0.0 \ + --port 1234 \ + --enable-auto-tool-choice \ + --tool-call-parser hermes \ + --rope-scaling '{"rope_type":"yarn","factor":3.2,"original_max_position_embeddings":40960}' \ + --max-model-len 131072 +``` + +**Using llama.cpp:** +```bash +llama-server -m model.gguf \ + --host 0.0.0.0 \ + --port 1234 \ + --rope-scaling yarn \ + --rope-scale 3.2 \ + --yarn-orig-ctx 40960 +``` + +### Recommended Parameters + +```yaml +Temperature: 0.7 +Top-p: 0.9 +Top-k: 20 +Min-p: 0.0 +``` + +## What Lucy Does Well + +- **Web Search Integration**: Optimized to call search tools and process results +- **Small Footprint**: 1.7B parameters means lower memory requirements +- **Tool Calling**: Reliable function calling for search APIs + +## Limitations + +- **Requires Internet**: Web search functionality needs active connection +- **API Costs**: Serper API has usage limits and costs +- **Context Processing**: While supporting 128k context, performance may vary with very long inputs +- **General Knowledge**: Limited by 1.7B parameter size for tasks beyond search + +## Models Available + +- [Lucy on Hugging Face](https://huggingface.co/Menlo/Lucy-128k) +- [Lucy GGUF on Hugging Face](https://huggingface.co/Menlo/Lucy-128k-gguf) + +## Citation + +```bibtex +@misc{dao2025lucyedgerunningagenticweb, + title={Lucy: edgerunning agentic web search on mobile with machine generated task vectors}, + author={Alan Dao and Dinh Bach Vu and Alex Nguyen and Norapat Buppodom}, + year={2025}, + eprint={2508.00360}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2508.00360}, +} +``` diff --git a/website/src/content/docs/jan/mcp-examples/search/serper.mdx b/website/src/content/docs/jan/mcp-examples/search/serper.mdx new file mode 100644 index 0000000000..213e113507 --- /dev/null +++ 
b/website/src/content/docs/jan/mcp-examples/search/serper.mdx @@ -0,0 +1,165 @@ +--- +title: Serper Search MCP +description: Connect Jan to real-time web search with Google results through Serper API. +--- + +import { Aside } from '@astrojs/starlight/components'; + +# Serper Search MCP + +[Serper](https://serper.dev) provides Google search results through a simple API, making it perfect for giving AI models access to current web information. The Serper MCP integration enables Jan models to search the web and retrieve real-time information. + +## Available Tools + +- `google_search`: Search Google and retrieve results with snippets +- `scrape`: Extract content from specific web pages + +## Prerequisites + +- Jan with experimental features enabled +- Serper API key from [serper.dev](https://serper.dev) +- Model with tool calling support (recommended: Jan v1) + + + +## Setup + +### Enable Experimental Features + +1. Go to **Settings** > **General** +2. Toggle **Experimental Features** ON + +![Enable experimental features](../../../../../assets/enable_mcp.png) + +### Enable MCP + +1. Go to **Settings** > **MCP Servers** +2. Toggle **Allow All MCP Tool Permission** ON + +![Turn on MCP](../../../../../assets/turn_on_mcp.png) + +### Get Serper API Key + +1. Visit [serper.dev](https://serper.dev) +2. Sign up for a free account +3. Copy your API key from the playground + +![Serper homepage](../../../../../assets/serper_page.png) + +![Serper playground with API key](../../../../../assets/serper_playground.png) + +### Configure MCP Server + +Click `+` in MCP Servers section: + +**Configuration:** +- **Server Name**: `serper` +- **Command**: `npx` +- **Arguments**: `-y serper-search-scrape-mcp-server` +- **Environment Variables**: + - Key: `SERPER_API_KEY`, Value: `your-api-key` + +![Serper MCP configuration in Jan](../../../../../assets/serper_janparams.png) + +### Download Jan v1 + +Jan v1 is optimized for tool calling and works excellently with Serper: + +1. 
Go to the **Hub** tab +2. Search for **Jan v1** +3. Choose your preferred quantization +4. Click **Download** + +![Download Jan v1 from Hub](../../../../../assets/download_janv1.png) + +### Enable Tool Calling + +1. Go to **Settings** > **Model Providers** > **Llama.cpp** +2. Find Jan v1 in your models list +3. Click the edit icon +4. Toggle **Tools** ON + +![Enable tools for Jan v1](../../../../../assets/toggle_tools.png) + +## Usage + +### Start a New Chat + +With Jan v1 selected, you'll see the available Serper tools: + +![Chat view with Serper tools](../../../../../assets/chat_jan_v1.png) + +### Example Queries + +**Current Information:** +``` +What are the latest developments in quantum computing this week? +``` + +**Comparative Analysis:** +``` +What are the main differences between the Rust programming language and C++? Be spicy, hot takes are encouraged. 😌 +``` + + +**Research Tasks:** +``` +Find the current stock price of NVIDIA and recent news about their AI chips. +``` + +**Fact-Checking:** +``` +Is it true that the James Webb telescope found signs of life on an exoplanet? What's the latest? +``` + +**Local Information:** +``` +What restaurants opened in San Francisco this month? Focus on Japanese cuisine. +``` + +## How It Works + +1. **Query Processing**: Jan v1 analyzes your question and determines what to search +2. **Web Search**: Calls Serper API to get Google search results +3. **Content Extraction**: Can scrape specific pages for detailed information +4. 
**Synthesis**: Combines search results into a comprehensive answer + +## Tips for Best Results + +- **Be specific**: "Tesla Model 3 2024 price Australia" works better than "Tesla price" +- **Request recent info**: Add "latest", "current", or "2024/2025" to get recent results +- **Ask follow-ups**: Jan v1 maintains context for deeper research +- **Combine with analysis**: Ask for comparisons, summaries, or insights + +## Troubleshooting + +**No search results:** +- Verify API key is correct +- Check remaining credits at serper.dev +- Ensure MCP server shows as active + +**Tools not appearing:** +- Confirm experimental features are enabled +- Verify tool calling is enabled for your model +- Restart Jan after configuration changes + +**Poor search quality:** +- Use more specific search terms +- Try rephrasing your question +- Check if Serper service is operational + + + +## API Limits + +- **Free tier**: 2,500 searches +- **Paid plans**: Starting at $50/month for 50,000 searches +- **Rate limits**: 100 requests per second + +## Next Steps + +Serper MCP enables Jan v1 to access current web information, making it a powerful research assistant. Combine with other MCP tools for even more capabilities - use Serper for search, then E2B for data analysis, or Jupyter for visualization. diff --git a/website/src/content/docs/jan/threads.mdx b/website/src/content/docs/jan/quickstart.mdx similarity index 73% rename from website/src/content/docs/jan/threads.mdx rename to website/src/content/docs/jan/quickstart.mdx index d7e2a2bd53..a1bd32078a 100644 --- a/website/src/content/docs/jan/threads.mdx +++ b/website/src/content/docs/jan/quickstart.mdx @@ -1,6 +1,6 @@ --- -title: Start Chatting -description: Download models and manage your conversations with AI models locally. +title: QuickStart +description: Get started with Jan and start chatting with AI in minutes. 
keywords: [ Jan, @@ -17,6 +17,10 @@ keywords: import { Aside } from '@astrojs/starlight/components'; +# QuickStart + +Get up and running with Jan in minutes. This guide will help you install Jan, download a model, and start chatting immediately. +
    ### Step 1: Install Jan @@ -25,19 +29,21 @@ import { Aside } from '@astrojs/starlight/components'; 2. Install the app ([Mac](/docs/desktop/mac), [Windows](/docs/desktop/windows), [Linux](/docs/desktop/linux)) 3. Launch Jan -### Step 2: Download a Model +### Step 2: Download Jan v1 -Jan requires a model to chat. Download one from the Hub: +We recommend starting with **Jan v1**, our 4B parameter model optimized for reasoning and tool calling: 1. Go to the **Hub Tab** -2. Browse available models (must be GGUF format) -3. Select one matching your hardware specs +2. Search for **Jan v1** +3. Choose a quantization that fits your hardware: + - **Q4_K_M** (2.5 GB) - Good balance for most users + - **Q8_0** (4.28 GB) - Best quality if you have the RAM 4. Click **Download** -![Download a Model](../../../assets/model-management-01.png) +![Download Jan v1](../../../assets/download_janv1.png) -
## Managing Conversations diff --git a/website/src/content/products/index.mdx b/website/src/content/products/index.mdx deleted file mode 100644 index fdc2dfdaab..0000000000 --- a/website/src/content/products/index.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Vision 🚀 -description: AI that runs where you need it, how you need it. -sidebar: - order: 0 ---- -import { Aside, Card, CardGrid } from '@astrojs/starlight/components'; - - -> **to build a superintelligence that you can self-host and use locally on your own devices.** - -We know it's hard but we believe this will be possible in the next decade through a combination of -models, applications and tools. For this we are... - -> **building Jan as the ecosystem that ties all of these seamlessly so that users, regardless of their technical -background, add intelligence to their day-to-day lives like they would, but better, with similar tools.** - -![Jan Vision](../../assets/jan-vision.png) - - -## Core Principles - -### 1) Build the Full Stack - -Models alone aren't enough. Neither are applications. Superintelligence requires models that -understand your needs, tools that extend capabilities, and applications that tie it all -together. We're building all three, openly. - -### 2) You Choose Who Runs It -Run Jan on your laptop. Self-host it on your servers. Use our cloud. The same superintelligence -works everywhere. Your data, your compute, your choice. - -### 3) Start Simple, Scale Infinitely -Open Jan and start chatting. No setup required. When you need more - better models, advanced tools, -team deployment - everything's there. The complexity scales with your ambition, not our architecture. 
- -## The Path to Superintelligence - -### Today we have the Foundation - -- **A desktop app** that works both with local and cloud-based models -- **Jan models** small enough to run on any laptop and powerful enough to scale on any server -- **Basic tools** enabled through MCP Search, file parsing, simple workflows - -### Next 12 Months: Ecosystem -- **The Jan v1 models** are a specialized series of models with general capabilities but optimsed -for specific tasks like search, analysis, creative writing and more -- **The Jan server** works as a self-hosted AI infrastructure for teams -- **Advanced tools** like browser use, deep research, and long-term memory works across devices, excels -across different day-to-day use cases, and scales with the needs of large teams -- **Cross-device sync** allows you to take your AI everywhere - -### End State: Open Superintelligence -Not one massive model, but an ecosystem of specialized models, tools, and applications working -together. Built in public. Owned by whoever runs it. - -## Why This Matters: The Status Quo - -Every other AI company wants to be your AI provider. We want you to own your AI. - -- **OpenAI/Anthropic**: Their models, their servers, their rules -- **Open Source Models**: Powerful but fragmented - no cohesive experience -- **Jan**: Complete ecosystem you can own, modify, and deploy however you want - -## Watch Us Build - -### Live Model Training -We train our models in public. Check the [models page](./models/jan-v1) to see: -- Real-time training progress -- Failed runs and what went wrong -- Models in testing before release - -No "trust us, it's good." Watch the entire process from dataset to deployment. - -### Help Evaluate Our Models -Every model needs real-world testing. 
Join our open evaluation platform where you can: -- Compare model outputs side-by-side -- Test specific capabilities you care about -- Vote on which responses actually help -- Suggest improvements based on your use cases - -Think LMArena, but you can see all the data, run your own evals, and directly influence what we train next. - -[Test/evaluate our models here](link) - -### Our Models Training Right Now - -We don't just talk about open development. Here's what's actually happening: - -| Model | Progress | Status | Details | -|:------|:---------|:-------|:--------| -| **Jan-Search-7B** | ████████░░ 82% | Testing | [View run](/) • 2.1M steps • ETA 3 days | -| **Jan-Write-13B** | ████░░░░░░ 41% | Training | [View run](/) • 980K steps • On track | -| **Jan-Analyze-13B** | ████████░░ ~~67%~~ | Failed | [View logs](/) • OOM at step 1.5M • Restarting | - -These are our actual models training on our hardware in our Singapore office. Click any run to see: -- Live loss curves -- Training datasets -- Evaluation metrics -- Even our failures - -[Watch live training →](/train) - - -## Get Involved - -We build in public. Everything from our model training to our product roadmap is open. - -- [GitHub](link) - Contribute code -- [Handbook](link) - See how we train models -- [Discord](link) - Join the discussion diff --git a/website/src/content/products/models/jan-nano.mdx b/website/src/content/products/models/jan-nano.mdx deleted file mode 100644 index 4b321aeda3..0000000000 --- a/website/src/content/products/models/jan-nano.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Jan Nano -description: Compact research model optimized for finding answers through tool use. -sidebar: - order: 2 ---- -import { Aside } from '@astrojs/starlight/components'; - -Jan Nano is a 4-billion parameter model designed for research and information retrieval. Instead of trying to know everything, it excels at finding anything through deep integration with Model Context Protocol (MCP) tools. 
- -## Two Variants - -| Model | Context Window | Size | Use Case | -|:---|:---|:---|:---| -| Jan Nano 32k | 32,768 tokens | 4-8GB | Quick research, general queries | -| Jan Nano 128k | 131,072 tokens | 8-12GB | Deep research, document analysis | - - - -## What Makes Nano Different -- **Research-First Design:** Trained to find relevant information, synthesize findings, and provide accurate citations. -- **MCP Integration:** Works seamlessly with tools like web search, document analysis, and code repositories. -- **Extended Context:** The 128k variant can process entire codebases, book-length documents, or 50+ research papers simultaneously. - -## Technical Details -- **Base:** 4B parameter transformer -- **Training:** Optimized for tool use and retrieval -- **Quantization:** Q4, Q8, FP16 variants available - -## Philosophy -Most models try to be encyclopedias. Jan Nano is a research assistant. It doesn't memorize the internet—it knows how to navigate it. \ No newline at end of file diff --git a/website/src/content/products/models/jan-v1.mdx b/website/src/content/products/models/jan-v1.mdx deleted file mode 100644 index bde5817b2c..0000000000 --- a/website/src/content/products/models/jan-v1.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Jan Models -description: Specialized AI models trained in public for real tasks. -sidebar: - order: 1 -banner: - content: 'Watch live training progress below. First models releasing Q1 2025.' ---- -import { Aside } from '@astrojs/starlight/components'; - -## Not Just Another Model Family - -Jan Models aren't general-purpose chatbots. Each model is trained for specific tasks that matter in daily work: search, analysis, creative writing, coding, research. They work together, each handling what it does best. 
- -### Current Training Status - -| Model | Specialization | Size | Training Progress | Status | -|:------|:--------------|:-----|:------------------|:-------| -| Jan-Search | Web search + synthesis | 7B | ████████░░ 82% | Testing phase | -| Jan-Write | Creative + technical writing | 13B | ████░░░░░░ 41% | Active training | -| Jan-Analyze | Data analysis + reasoning | 13B | ██░░░░░░░░ 23% | Dataset prep | -| Jan-Code | Code generation + debugging | 7B | ░░░░░░░░░░ 0% | Starting Jan 2025 | - - - -### Why Specialized Models? - -One model can't excel at everything. GPT-4o, o3, o4, or Claude 4 Sonnet writing poetry -use the same weights to do math as well, which can be inefficient and expensive. - -Our approach: -- **Jan-Search** knows how to query, crawl, and synthesize -- **Jan-Write** understands tone, structure, and creativity -- **Jan-Analyze** excels at reasoning and data interpretation -- Models work together through the Jan orchestration layer - -### Built for the Ecosystem - -These aren't standalone models. They're designed to: -- Run efficiently on local hardware (quantized to 4-8GB) -- Work with Jan Tools (browser automation, file parsing, memory) -- Scale from laptop to server without code changes -- Share context and hand off tasks to each other - -### Help Us Improve - -Models are only as good as their real-world performance. [Test our models](link) against your actual use cases and vote on what works. - -We train on your feedback, not just benchmarks. diff --git a/website/src/content/products/platforms/desktop.mdx b/website/src/content/products/platforms/desktop.mdx deleted file mode 100644 index 31e7b059fb..0000000000 --- a/website/src/content/products/platforms/desktop.mdx +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Jan Desktop -description: The foundation of your AI ecosystem. Runs on your hardware. 
-sidebar: - order: 2 ---- -import { Aside, Card, CardGrid, Tabs, TabItem } from '@astrojs/starlight/components'; - -Jan Desktop is your local AI workstation. Download it, run your own models, or connect to -cloud providers. Your computer, your choice. - -## How It Works - -### Default: Local Mode -Open Jan. Start chatting with Jan Nano. No internet, no account, no API keys. Your conversations -never leave your machine. - -### Optional: Cloud Mode -Need more power? Connect to: -- Your own Jan Server -- jan.ai (coming soon) -- Any OpenAI-compatible API - - - -## Why Desktop First - -Your desktop has the GPU, storage, and memory to run real AI models. Not toy versions. Not -demos. The same models that power ChatGPT-scale applications. - -More importantly: it becomes the hub for your other devices. Your phone connects to your -desktop. Your team connects to your desktop. Everything stays in your control. - -## Specifications - - - - Everything in `~/.local/share/jan`. Your data, your models, your configuration. Back it up, move it, delete - it - it's just files. - - - OpenAI-compatible API at `localhost:1337`. Any tool that works with OpenAI works with Jan. No - code changes. - - - NVIDIA CUDA acceleration out of the box. Automatically detects and uses available GPUs. CPU fallback always works. - - - Run any GGUF model from Hugging Face. Or our models. Or your fine-tuned models. If it's GGUF, it runs. 
- - - -## System Requirements - -**Minimum**: 8GB RAM, 10GB storage, any 64-bit OS from the last 5 years - -**Recommended**: 16GB RAM, NVIDIA GPU, 50GB storage for multiple models - -**Runs on**: Windows 10+, macOS 12+, Ubuntu 20.04+ - -## Installation - -```bash -# macOS/Linux -curl -sSL https://jan.ai/install.sh | bash - -# Windows -# Download from jan.ai/download -``` - - - -## For Developers - -### Use Jan as an OpenAI Drop-in - -```javascript -// Your existing OpenAI code -const openai = new OpenAI({ - apiKey: "not-needed", - baseURL: "http://localhost:1337/v1" -}); - -// Works exactly the same -const completion = await openai.chat.completions.create({ - model: "jan-nano", - messages: [{ role: "user", content: "Hello" }] -}); -``` - -### Available Endpoints -- `/v1/chat/completions` - Chat with any loaded model -- `/v1/models` - List available models -- `/v1/embeddings` - Generate embeddings -- `/routes` - See all available routes - -## The Foundation - -Jan Desktop isn't just an app. It's the foundation of your personal AI infrastructure: - -1. **Today**: Run models locally, connect to cloud APIs -2. **Soon**: Your phone connects to your desktop -3. **Next**: Your desktop serves your team -4. **Future**: Your personal AI that knows you, runs everywhere - -No subscriptions. No lock-in. Just software that's yours. - ---- - -**Next steps**: -- [Download Jan Desktop](https://jan.ai/download) -- [Try Jan Models →](../models/jan-v1) -- [Explore Tools →](../tools/search) diff --git a/website/src/content/products/platforms/jan-ai.mdx b/website/src/content/products/platforms/jan-ai.mdx deleted file mode 100644 index 2c570c7ebd..0000000000 --- a/website/src/content/products/platforms/jan-ai.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Jan.ai -description: Cloud AI that respects your privacy. Web-based access to Jan with no setup required. 
-sidebar: - order: 1 ---- -import { Aside, Card, CardGrid } from '@astrojs/starlight/components'; - -![Jan Web](../../../assets/jan_web.png) - -**Status:** Beta Launch Soon 🚀 - -Web-based version of Jan with no setup required. Same default cloud mode for (soon) mobile and desktop users. - -## What is Jan Web? - -Jan Web is the cloud-hosted version of Jan that runs in your browser. No installation needed, instant access from any device, with the same AI experience you get locally. - - - -## How It Works - - - - You can sync conversations from Jan desktop to Jan Web. - - - Jan Web uses the same models you have access to on the go. - - - Visit (soon) ask.jan.ai directly in your browser for instant access to AI without downloading anything. - - - Share prompts, workflows, and collaborate on threads with your team members. - - - -## Pricing - -| Tier | Features | Price | -| :--- | :--- | :--- | -| **Free** | Free for everyone | $0 | -| **Pro** | Access our latest models
Access other cloud providers without API keys | Coming Soon | -| **Enterprise** | Self-host or we host it for you
Active support and SLAs
SSO integration
Team features | Contact Sales | \ No newline at end of file diff --git a/website/src/content/products/platforms/mobile.mdx b/website/src/content/products/platforms/mobile.mdx deleted file mode 100644 index 5729a23fd4..0000000000 --- a/website/src/content/products/platforms/mobile.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Jan Mobile -description: Your AI assistant on the go. Seamlessly connect to local, desktop, or server models. -sidebar: - order: 3 ---- -import { Aside, Card, CardGrid } from '@astrojs/starlight/components'; - -![Jan Mobile](../../../assets/jan_mobile.png) - -**Status:** Coming Q4 2025 - -Jan Mobile brings the same AI experience to your phone. Connect to your desktop, your server, or run models locally. - -## How It Works - -Jan Mobile adapts to your situation: -- **At Home:** Connect to your Jan Desktop over WiFi. -- **At Work:** Connect to your company Jan Server. -- **On the Go:** Run Jan Nano on your phone or use a cloud model. - -## Three Modes, One Experience - -### Desktop Mode -Access larger, more powerful models running on your home computer. No phone battery drain. - -### Server Mode -Connect to your organization's private AI cloud for team collaboration and access to shared knowledge. - -### Local Mode -No connection? No problem. Run models like 'Jan Nano' directly on your phone for complete privacy and offline access. - -## Key Features - - - - - - - - - - -## Development Status -- Core architecture in progress -- Desktop/Server connection protocols next -- Jan Nano mobile optimization in progress -- Beta launch planned for Q4 2025 \ No newline at end of file diff --git a/website/src/content/products/platforms/server.mdx b/website/src/content/products/platforms/server.mdx deleted file mode 100644 index fe3c4629d1..0000000000 --- a/website/src/content/products/platforms/server.mdx +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Jan Server -description: Your own private AI cloud. Self-hosted AI for teams and enterprises. 
-sidebar: - order: 4 ---- - -import { Aside, Card, CardGrid } from '@astrojs/starlight/components'; - -![Jan Server](../../../assets/jan-server.png) - -**Status:** Coming Q2 2025 - -Jan Server is a powerful AI API platform with multi-user support that you can self-host. Deploy it on your hardware to create your own private AI cloud for your team or organization, or run it at scale with Jan's managed service. - -## Why Organizations Need This - -Jan Server gives you complete control over your AI infrastructure, ensuring total privacy, predictable costs, and compliance readiness. - -## Key Features - - - - - - - - - - -## Deployment Options -- **Docker:** Single command setup -- **Kubernetes:** Scale with your needs -- **Bare Metal:** Maximum control and performance - -## Scaling Guidelines -- **Small Teams (5-10 users):** Small GPU cluster -- **Departments (10-50 users):** 4-8 GPU cluster nodes -- **Enterprise (50+ users):** Multi-cluster setup with custom configurations \ No newline at end of file diff --git a/website/src/content/products/tools/browseruse.mdx b/website/src/content/products/tools/browseruse.mdx deleted file mode 100644 index 0363df1b97..0000000000 --- a/website/src/content/products/tools/browseruse.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: BrowserUse -description: Native browser automation for Jan, enabling AI to interact with the web on your behalf. -sidebar: - order: 3 ---- - -## Let Jan Use Your Browser - -`BrowserUse` is a native tool being developed for the Jan ecosystem that will allow the AI to securely control a browser to perform tasks, automate workflows, and interact with websites just like a human would. - -Think of it as an integrated, privacy-first automation layer that turns Jan from a conversational AI into a powerful agent for action. - -:::note -**This tool is not yet available.** We are designing `BrowserUse` to be a core component of Jan's agentic capabilities. 
-::: - -### Inspired by the Best - -While tools like [Browser Use](https://docs.browser-use.com/introduction) exist for developers, Jan's `BrowserUse` will be a built-in, user-friendly feature. No complex setup required—just grant permission and let Jan handle the rest. - -### Built on MCP - -The tool will be implemented as a native **Model Context Protocol (MCP)** server within Jan, ensuring secure and standardized communication between the AI model and the browser. This allows for powerful, auditable, and secure web interactions. - -### Planned Core Features: -- **Secure Sessions**: All browsing activity happens in an isolated, sandboxed environment. -- **Natural Language Control**: Simply tell Jan what you want to do (e.g., "Book a flight," "Research this topic and summarize the top 5 findings," "Apply for this job for me"). -- **Visual Understanding**: Jan will be able to see and interpret the content on pages, not just the underlying code. -- **User in the Loop**: Always ask for permission before taking critical actions like submitting forms or making purchases. - ---- diff --git a/website/src/content/products/tools/deepresearch.mdx b/website/src/content/products/tools/deepresearch.mdx deleted file mode 100644 index f518a91857..0000000000 --- a/website/src/content/products/tools/deepresearch.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: DeepResearch -description: An AI agent that performs comprehensive, multi-step research for you. -sidebar: - order: 1 ---- -import { Aside } from '@astrojs/starlight/components'; - - - -## Your Personal Research Analyst - -`DeepResearch` is a planned native tool for Jan that transforms it into a powerful research agent. Give it a complex question, and it will autonomously browse, analyze, and synthesize information from numerous sources to deliver a comprehensive, structured report. 
- -Think of it as Jan's answer to the advanced research capabilities seen in **OpenAI's ChatGPT** and **Google's Gemini**, but built in the open and with user control at its core. - -### How It Will Work -Unlike a simple web search that returns a list of links, `DeepResearch` will understand your goal, create a research plan that you can edit, execute it, and deliver a final, synthesized document with citations. - -### Planned Core Features: -- **Autonomous Multi-Step Research** -- **Comprehensive Source Analysis** -- **Structured Report Generation** -- **Full Transparency with Citations** -- **Local-First Privacy** \ No newline at end of file diff --git a/website/src/content/products/tools/search.mdx b/website/src/content/products/tools/search.mdx deleted file mode 100644 index 3b8bb56b1c..0000000000 --- a/website/src/content/products/tools/search.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Search -description: A native search tool that gives you answers, not just links, with complete privacy. -sidebar: - order: 2 ---- -import { Aside } from '@astrojs/starlight/components'; - - - -## Answers, Not Just Links - -'Search' is a planned native tool for Jan that rethinks web search. Instead of just giving you a list of links to sift through, it understands your question, scours the web, and provides a direct, synthesized answer with sources cited. - -Think of it as a private, self-hosted alternative to services like **Perplexity.ai**, integrated directly into your AI assistant. - -### How It's Different -- **Privacy-First:** Your search queries are processed locally and anonymized. -- **Direct Answers:** Get a concise, accurate answer compiled from the best sources. -- **Cited Sources:** Every piece of information is backed by a verifiable source. -- **Conversational Follow-up:** Ask follow-up questions in a natural way. 
- -### Planned Core Features: -- **Real-Time Information** -- **Source Verification** -- **Customizable Focus** -- **Seamless Integration** with other tools \ No newline at end of file diff --git a/website/src/layouts/BaseLayout.astro b/website/src/layouts/BaseLayout.astro deleted file mode 100644 index 70b6691f5b..0000000000 --- a/website/src/layouts/BaseLayout.astro +++ /dev/null @@ -1,238 +0,0 @@ ---- -export interface Props { - title?: string; - description?: string; -} - -const { title = 'Jan', description = 'AI that runs where you need it, how you need it' } = Astro.props; ---- - - - - - - - - {title} - - - - -
- -
- - diff --git a/website/src/layouts/Layout.astro b/website/src/layouts/Layout.astro deleted file mode 100644 index f13a41ebff..0000000000 --- a/website/src/layouts/Layout.astro +++ /dev/null @@ -1,170 +0,0 @@ ---- -export interface Props { - title: string; -} - -const { title } = Astro.props; ---- - - - - - - - - - {title} - - - - - - - - - - - - - - - - - - diff --git a/website/src/pages/api-reference.astro b/website/src/pages/api-reference.astro deleted file mode 100644 index ac3d9b4f5b..0000000000 --- a/website/src/pages/api-reference.astro +++ /dev/null @@ -1,58 +0,0 @@ ---- -const title = '👋 Jan API Reference'; -const description = 'Interactive Jan API documentation powered by Scalar'; ---- - - - - - - - - {title} - - - -
- - - - - - \ No newline at end of file diff --git a/website/src/pages/blog.astro b/website/src/pages/blog.astro deleted file mode 100644 index b4b846b463..0000000000 --- a/website/src/pages/blog.astro +++ /dev/null @@ -1,363 +0,0 @@ ---- -import { getCollection } from 'astro:content'; -import Layout from '../layouts/Layout.astro'; -import CustomNav from '../components/CustomNav.astro'; - -// Get all blog entries and sort by date (newest first) -const blogEntries = await getCollection('blog'); -const sortedEntries = blogEntries.sort((a, b) => - new Date(b.data.date).getTime() - new Date(a.data.date).getTime() -); - -// Extract unique categories -// const allCategories = [...new Set(sortedEntries.flatMap(entry => -// entry.data.categories ? entry.data.categories.split(',').map(cat => cat.trim()) : [] -// ))]; - -const title = 'Blog'; -const description = 'The latest updates from Jan. See Changelog for more product updates.'; - -// Define gradient colors for cards -const gradients = [ - 'from-purple-500 to-pink-500', - 'from-blue-500 to-cyan-400', - 'from-purple-600 to-blue-500', - 'from-cyan-400 to-blue-500', - 'from-pink-500 to-purple-600', - 'from-blue-600 to-purple-600' -]; ---- - - - -
-
- -
-

Blog

-

The latest updates from Jan. See Changelog for more product updates.

-
- - -
- - - - -
- - -
- {sortedEntries.map((entry, index) => { - const date = new Date(entry.data.date); - const formattedDate = date.toLocaleDateString('en-US', { - year: 'numeric', - month: 'long', - day: 'numeric' - }); - const gradientClass = gradients[index % gradients.length]; - const category = entry.data.categories || 'guides'; - - return ( - - ); - })} -
-
-
-
- - - - - diff --git a/website/src/pages/blog/[slug].astro b/website/src/pages/blog/[slug].astro deleted file mode 100644 index ac5abf4cee..0000000000 --- a/website/src/pages/blog/[slug].astro +++ /dev/null @@ -1,656 +0,0 @@ ---- -import { getCollection } from 'astro:content'; -import Layout from '../../layouts/Layout.astro'; -import CustomNav from '../../components/CustomNav.astro'; -import '../../styles/blog.css'; - -export async function getStaticPaths() { - const blogEntries = await getCollection('blog'); - return blogEntries.map(entry => ({ - params: { slug: entry.slug }, - props: { entry }, - })); -} - -const { entry } = Astro.props; -const { Content } = await entry.render(); - -const formattedDate = new Date(entry.data.date).toLocaleDateString('en-US', { - year: 'numeric', - month: 'long', - day: 'numeric' -}); - -const tags = entry.data.tags ? entry.data.tags.split(',').map(tag => tag.trim()) : []; ---- - - - -
-
- - - - -
- - -

{entry.data.title}

- -

{entry.data.description}

- - {tags.length > 0 && ( - - )} -
- - -
- -
- - - -
-
-
- - - - - - - diff --git a/website/src/pages/changelog.astro b/website/src/pages/changelog.astro deleted file mode 100644 index d4824afeee..0000000000 --- a/website/src/pages/changelog.astro +++ /dev/null @@ -1,469 +0,0 @@ ---- -import { getCollection } from 'astro:content'; -import Layout from '../layouts/Layout.astro'; -import CustomNav from '../components/CustomNav.astro'; - -// Get all changelog entries and sort by date (newest first) -const changelogEntries = await getCollection('changelog'); -const sortedEntries = changelogEntries.sort((a, b) => - new Date(b.data.date).getTime() - new Date(a.data.date).getTime() -); - -const title = 'Changelog'; -const description = 'Latest release updates from the Jan team. Check out our Roadmap to see what\'s next.'; ---- - - - -
-
- -
-

Changelog

-

Latest release updates from the Jan team. Check out our Roadmap to see what's next.

- - - -
- - -
- {sortedEntries.map((entry, index) => { - const date = new Date(entry.data.date); - const formattedDate = date.toLocaleDateString('en-US', { - year: 'numeric', - month: 'long', - day: 'numeric' - }); - - return ( -
- -
-
- -
- - -
- {entry.data.image && ( -
- {entry.data.title} -
- )} - -
-

- {entry.data.title} -

- -

{entry.data.description}

- - {entry.data.version && ( - New release Jan App v{entry.data.version} - )} -
-
-
- ); - })} -
-
-
-
- - - - - diff --git a/website/src/pages/changelog/[slug].astro b/website/src/pages/changelog/[slug].astro deleted file mode 100644 index 48dc1ab9d5..0000000000 --- a/website/src/pages/changelog/[slug].astro +++ /dev/null @@ -1,306 +0,0 @@ ---- -import { getCollection, type CollectionEntry } from 'astro:content'; -import Layout from '../../layouts/Layout.astro'; -import CustomNav from '../../components/CustomNav.astro'; - -export async function getStaticPaths() { - const changelogEntries = await getCollection('changelog'); - return changelogEntries.map((entry) => ({ - params: { slug: entry.slug }, - props: { entry }, - })); -} - -interface Props { - entry: CollectionEntry<'changelog'>; -} - -const { entry } = Astro.props; -const { Content } = await entry.render(); - -const date = new Date(entry.data.date); -const formattedDate = date.toLocaleDateString('en-US', { - year: 'numeric', - month: 'long', - day: 'numeric' -}); ---- - - - -
-
- - - - -
- -

{entry.data.title}

- - {entry.data.version && ( -
- v{entry.data.version} -
- )} - - {entry.data.image && ( -
- {entry.data.title} -
- )} -
- - -
- -
- - - -
-
-
- - \ No newline at end of file diff --git a/website/src/pages/index.astro b/website/src/pages/index.astro deleted file mode 100644 index ff150ee2b6..0000000000 --- a/website/src/pages/index.astro +++ /dev/null @@ -1,198 +0,0 @@ ---- -import Layout from '@/layouts/Layout.astro'; -import CustomNav from '@/components/CustomNav.astro'; -import DownloadButton from '@/components/DownloadButton.astro'; - -// Placeholder data - to be fetched from GitHub API later -const latestVersion = 'v0.6.7'; -const downloadCount = '3.8M+'; ---- - - - -
-
-
- - - - - -
-
- -

- Chat with AI
- without privacy concerns -

-
- -

- Jan is an open source ChatGPT-alternative that runs 100% offline. -

- -
- -
-
- -
-
-
-
- - diff --git a/website/src/pages/prods.mdx b/website/src/pages/prods.mdx deleted file mode 100644 index 0c8fc7ba3b..0000000000 --- a/website/src/pages/prods.mdx +++ /dev/null @@ -1,534 +0,0 @@ ---- -layout: ../layouts/BaseLayout.astro -title: Product -description: AI that runs where you need it, how you need it ---- - -import SimpleFloatingNav from '../components/SimpleFloatingNav.astro'; -import SimpleTOC from '../components/SimpleTOC.astro'; -import ReleaseDatabase from '../components/ReleaseDatabase.astro'; - - - - - -
- -# Products - -

-Jan is moving from a local AI application to a complete full-stack AI solution that you can self-host. This includes models, -applications, and tools that delights users and help them solve their problems. -

- -## What We're Building - -**Jan Factory (or Agent)** = Jan Models + Jan Application + Jan Tools - -Unlike other AI assistants that do specific tasks with one model or have many models with a myriad of solutions, Jan provides: -- Its own specialised models that are optimised at specific tasks like web-search, creative writing, and translation -- Applications that work across all of your devices in an integrated way -- Tools that actually get things done - -## Two Modes, One Experience - -### Local (Incognito) Mode - -Run AI models entirely on your device, giving you complete privacy with no internet required. - -![Jan Desktop](../assets/jan_desktop.png) - -### Cloud Mode - -Connect to more powerful models when needed - either self-hosted or via jan.ai. - -![Jan Everywhere](../assets/jan_everywhere.png) - -Users shouldn't need to understand models, APIs, or technical details. Just choose Local for privacy or Cloud for power. - -## Our Product Principles - -### 1) It Just Works - -1. Open Jan, start chatting -2. Onboarding is fully available but optional -3. Setting up an API key is optional -4. Selecting a local model is optional -5. Become a power user at your own pace, if you want to - -We handle the complexity. - -### 2) Cloud When Needed - -Start completely locally and own your AI models. Add cloud capabilities only when you choose to. - -### 3) Solve Problems, Not Settings - -We help users get to answers quickly answers, not configuration options. Power users can dig deeper, but it's never required. - -## Available on Every Device - -### Jan Desktop - -This is how Jan started and it has been available since day 1. Jan Desktop stives to be: - -> Your personal AI workstation that helps with our use cases and powers other devices. Run models locally right away -or bring an API key to connect to your favorite cloud-based models. 
- -**Key Features:** -- Runs models locally on your hardware -- GPU acceleration support -- Powers other devices via network connection -- Complete privacy and control -- Windows, macOS, and Linux support - -**Requirements:** -- Minimum 8GB RAM -- 10GB+ storage space -- Optional: NVIDIA GPU for acceleration - -### Jan Web - -**Status:** Beta Launch Soon - -![Jan Web](../assets/jan_web.png) - -Web-based version of 👋 Jan with no setup required. Same default cloud mode for mobile and desktop users. - -**Key Features:** -- No installation needed -- Instant access from any browser -- Automatic updates and maintenance -- Default cloud backend for mobile apps -- Team collaboration features - - Share prompts - - Share workflows - - Collaborate on threads - -**Pricing:** -- Free for everyone -- Pro: - - Access our latest models - - Access other cloud providers, no need to bring their API keys -- Enterprise: - - Self-host or we host it for you - - Active support and SLAs - - SSO - - Team featues - -### Jan Mobile - -**Status:** Coming Q4 2025 - -Connect to Desktop/Server, run local mode with Jan Nano or Lucy, same experience everywhere. - -Jan Mobile adapts to your situation: - -At Home, you can connect to your Jan Desktop over WiFi - -![Jan Mobile Home](../assets/jan_mobile2.png) - -At Work, you can connect to your company Jan Server - -![Jan Mobile Home](../assets/jan_mobile3.png) - -On the Go, you can run Jan Nano on your phone or talk to your favourite cloud-based model - -![Jan Mobile Home](../assets/jan_mobile4.png) - -**Key Features:** -- iOS and Android support -- Three adaptive modes (Desktop, Server, Local) -- Voice-first interface -- Seamless device switching -- Jan Nano for on-device AI - -### Jan Server - -**Status:** Coming Q2 2025 - -Self-hosted solution for teams and enterprises. Your own private AI cloud. 
- -**Key Features:** -- Support for 5-500+ concurrent users -- Enterprise authentication (SSO, LDAP) -- Docker and Kubernetes deployment -- Admin dashboard -- Team knowledge sharing - -**Deployment Options:** -- Docker: Single command setup -- Kubernetes: Enterprise scale -- Bare metal: Maximum control - -## Jan Mobile: Three Modes, One Experience - -Jan Mobile brings the same AI experience to your phone. Connect to your desktop, your server, or run models locally. - -### How It Works - -Jan Mobile adapts to your situation: - -**At Home** - Connect to your Jan Desktop over WiFi -Your Phone → WiFi → Your Desktop → Response - -**At Work** - Connect to your company Jan Server -Your Phone → Internet → Company Server → Response - -**On the Go** - Run Jan Nano on your phone or talk to your favorite cloud-based model -Your Phone → Jan Nano (6GB) → Response - -No configuration needed. It just works. - -### Key Features - -- **Seamless Switching**: Move from home to office to airplane. One-click and Jan adapts immediately. -- **Voice First**: Talk to Jan naturally. Responses can be spoken too. -- **Sync Everything**: Conversations, settings, and preferences follow you across devices. - -### Privacy & Security - -**Your Data, Your Control** -- Local Mode: Everything stays on your phone -- Desktop Mode: Direct encrypted connection -- Server Mode: Your organization's policies apply - -**No Compromises** -- Biometric app lock -- Encrypted storage -- No cloud backups without permission -- Clear data anytime - -## What Makes Jan Different - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FeatureOther AI AssistantsJan
ModelsWrapper around Claude/GPTOur own models + You can own them
Dual modeYour data on their serversYour data stays yours
DeploymentCloud onlyLocal, self-hosted, or cloud
CostSubscription foreverFree locally, pay for cloud
- -## Development Timeline - - - -
- - - - diff --git a/website/src/pages/products/index.astro b/website/src/pages/products/index.astro deleted file mode 100644 index 4d6ecb8ae7..0000000000 --- a/website/src/pages/products/index.astro +++ /dev/null @@ -1,47 +0,0 @@ ---- -// Products index page with custom sidebar for root deployment -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../content/products/index.mdx'; ---- - - - - diff --git a/website/src/pages/products/models/jan-nano.astro b/website/src/pages/products/models/jan-nano.astro deleted file mode 100644 index 4c1e8381bc..0000000000 --- a/website/src/pages/products/models/jan-nano.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/models/jan-nano.mdx'; ---- - - - - diff --git a/website/src/pages/products/models/jan-v1.astro b/website/src/pages/products/models/jan-v1.astro deleted file mode 100644 index 2c41301aad..0000000000 --- a/website/src/pages/products/models/jan-v1.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/models/jan-v1.mdx'; ---- - - - - diff --git a/website/src/pages/products/platforms/desktop.astro b/website/src/pages/products/platforms/desktop.astro deleted file mode 100644 index 1f8b7ccc2c..0000000000 --- a/website/src/pages/products/platforms/desktop.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/platforms/desktop.mdx'; ---- - - - - diff --git a/website/src/pages/products/platforms/jan-ai.astro b/website/src/pages/products/platforms/jan-ai.astro deleted file mode 100644 index 98ffadb77a..0000000000 --- a/website/src/pages/products/platforms/jan-ai.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import 
StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/platforms/jan-ai.mdx'; ---- - - - - diff --git a/website/src/pages/products/platforms/mobile.astro b/website/src/pages/products/platforms/mobile.astro deleted file mode 100644 index b8741938c2..0000000000 --- a/website/src/pages/products/platforms/mobile.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/platforms/mobile.mdx'; ---- - - - - diff --git a/website/src/pages/products/platforms/server.astro b/website/src/pages/products/platforms/server.astro deleted file mode 100644 index 390cd690d6..0000000000 --- a/website/src/pages/products/platforms/server.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/platforms/server.mdx'; ---- - - - - diff --git a/website/src/pages/products/tools/browseruse.astro b/website/src/pages/products/tools/browseruse.astro deleted file mode 100644 index 48df73f29c..0000000000 --- a/website/src/pages/products/tools/browseruse.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/tools/browseruse.mdx'; ---- - - - - diff --git a/website/src/pages/products/tools/deepresearch.astro b/website/src/pages/products/tools/deepresearch.astro deleted file mode 100644 index 5bf97d7e6c..0000000000 --- a/website/src/pages/products/tools/deepresearch.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/tools/deepresearch.mdx'; ---- - - - - diff --git a/website/src/pages/products/tools/search.astro b/website/src/pages/products/tools/search.astro deleted 
file mode 100644 index 4f373ca242..0000000000 --- a/website/src/pages/products/tools/search.astro +++ /dev/null @@ -1,46 +0,0 @@ ---- -import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; -import { Content } from '../../../content/products/tools/search.mdx'; ---- - - - - diff --git a/website/src/scripts/robot-interactions.js b/website/src/scripts/robot-interactions.js deleted file mode 100644 index 143cb26975..0000000000 --- a/website/src/scripts/robot-interactions.js +++ /dev/null @@ -1,697 +0,0 @@ -// Robot Interactions - Make robots come alive with personality! - -class RobotInteractions { - constructor() { - this.robots = []; - this.mousePosition = { x: 0, y: 0 }; - this.isInitialized = false; - - // Robot moods and states - this.moods = ['happy', 'curious', 'excited', 'sleepy', 'thinking']; - this.currentMood = 'happy'; - - // Easter egg: secret robot dance - this.konami = []; - this.konamiCode = ['ArrowUp', 'ArrowUp', 'ArrowDown', 'ArrowDown', 'ArrowLeft', 'ArrowRight', 'ArrowLeft', 'ArrowRight', 'b', 'a']; - } - - init() { - if (this.isInitialized) return; - - this.findRobots(); - this.setupEventListeners(); - this.startIdleAnimations(); - this.setupEasterEggs(); - - this.isInitialized = true; - } - - findRobots() { - // Find all robot elements - const robotElements = document.querySelectorAll('.robot-head, .robot-container, .model-robot, .platform-robot, .tool-robot'); - - robotElements.forEach(robot => { - const eyes = robot.querySelectorAll('.robot-eye'); - if (eyes.length > 0) { - this.robots.push({ - element: robot, - eyes: Array.from(eyes), - originalPositions: Array.from(eyes).map(eye => { - const rect = eye.getBoundingClientRect(); - return { - x: rect.left + rect.width / 2, - y: rect.top + rect.height / 2 - }; - }), - isBlinking: false, - mood: 'happy' - }); - } - }); - } - - setupEventListeners() { - // Track mouse movement - document.addEventListener('mousemove', (e) => { - this.mousePosition = { x: e.clientX, y: 
e.clientY }; - this.updateEyes(); - }); - - // Robot hover interactions - this.robots.forEach(robot => { - robot.element.addEventListener('mouseenter', () => this.onRobotHover(robot)); - robot.element.addEventListener('mouseleave', () => this.onRobotLeave(robot)); - robot.element.addEventListener('click', () => this.onRobotClick(robot)); - }); - - // Keyboard interactions - document.addEventListener('keydown', (e) => this.handleKeyPress(e)); - } - - updateEyes() { - this.robots.forEach(robot => { - if (robot.isBlinking) return; - - robot.eyes.forEach((eye, index) => { - // Skip sleeping or closed eyes - if (eye.classList.contains('sleeping') || eye.classList.contains('closed')) return; - - const eyeRect = eye.getBoundingClientRect(); - const eyeCenter = { - x: eyeRect.left + eyeRect.width / 2, - y: eyeRect.top + eyeRect.height / 2 - }; - - // Calculate angle between eye and mouse - const angle = Math.atan2( - this.mousePosition.y - eyeCenter.y, - this.mousePosition.x - eyeCenter.x - ); - - // Calculate distance (capped for natural movement) - const distance = Math.min( - Math.hypot( - this.mousePosition.x - eyeCenter.x, - this.mousePosition.y - eyeCenter.y - ) / 10, - 3 // Maximum pupil movement - ); - - // Move the pupil (eye shine) - const pupil = eye.querySelector('::after') || eye; - const offsetX = Math.cos(angle) * distance; - const offsetY = Math.sin(angle) * distance; - - // Apply transform to eye or create inner pupil - if (!eye.querySelector('.pupil')) { - const pupilElement = document.createElement('div'); - pupilElement.className = 'pupil'; - eye.appendChild(pupilElement); - } - - const pupilElement = eye.querySelector('.pupil'); - if (pupilElement) { - pupilElement.style.transform = `translate(${offsetX}px, ${offsetY}px)`; - } - }); - }); - } - - onRobotHover(robot) { - // Add excited animation - robot.element.classList.add('hover-active'); - - // Make eyes bigger - robot.eyes.forEach(eye => { - eye.style.transform = 'scale(1.2)'; - 
eye.classList.add('excited'); - }); - - // Add blush effect - this.addBlush(robot); - - // Random reaction - const reactions = ['happy', 'surprised', 'love']; - const reaction = reactions[Math.floor(Math.random() * reactions.length)]; - - if (reaction === 'love') { - robot.eyes.forEach(eye => eye.classList.add('love')); - } - } - - onRobotLeave(robot) { - robot.element.classList.remove('hover-active'); - - // Reset eyes - robot.eyes.forEach(eye => { - eye.style.transform = ''; - eye.classList.remove('excited', 'love'); - }); - - // Remove blush - this.removeBlush(robot); - } - - onRobotClick(robot) { - // Trigger a fun animation - this.makeRobotJump(robot); - this.makeRobotSpeak(robot); - - // Easter egg: clicking 5 times makes robot do a dance - if (!robot.clickCount) robot.clickCount = 0; - robot.clickCount++; - - if (robot.clickCount >= 5) { - this.robotDance(robot); - robot.clickCount = 0; - } - } - - makeRobotJump(robot) { - robot.element.style.animation = 'robotJump 0.6s ease-out'; - setTimeout(() => { - robot.element.style.animation = ''; - }, 600); - } - - makeRobotSpeak(robot) { - const messages = [ - "Hello there! 👋", - "Beep boop! 🤖", - "AI at your service!", - "Let's build something cool!", - "Privacy first! 🔒", - "Running locally! 💪", - "*happy robot noises*", - "01001000 01101001! (Hi in binary!)" - ]; - - const message = messages[Math.floor(Math.random() * messages.length)]; - this.showSpeechBubble(robot, message); - } - - showSpeechBubble(robot, message) { - // Remove existing bubble - const existingBubble = robot.element.querySelector('.robot-speech-bubble'); - if (existingBubble) existingBubble.remove(); - - // Create new bubble - const bubble = document.createElement('div'); - bubble.className = 'robot-speech-bubble'; - bubble.innerHTML = ` - ${message} -
- `; - - robot.element.appendChild(bubble); - - // Remove after 3 seconds - setTimeout(() => { - bubble.classList.add('fade-out'); - setTimeout(() => bubble.remove(), 300); - }, 3000); - } - - startIdleAnimations() { - // Random blinking - setInterval(() => { - this.robots.forEach(robot => { - if (Math.random() < 0.1 && !robot.isBlinking) { - this.makeRobotBlink(robot); - } - }); - }, 2000); - - // Random mood changes - setInterval(() => { - const robot = this.robots[Math.floor(Math.random() * this.robots.length)]; - if (robot && Math.random() < 0.3) { - this.changeRobotMood(robot); - } - }, 5000); - } - - makeRobotBlink(robot) { - robot.isBlinking = true; - - robot.eyes.forEach(eye => { - eye.style.transition = 'height 0.1s ease'; - eye.style.height = '2px'; - - setTimeout(() => { - eye.style.height = ''; - robot.isBlinking = false; - }, 150); - }); - } - - changeRobotMood(robot) { - const moods = ['thinking', 'happy', 'curious']; - const mood = moods[Math.floor(Math.random() * moods.length)]; - - switch(mood) { - case 'thinking': - this.showThinkingBubbles(robot); - break; - case 'happy': - this.makeRobotSmile(robot); - break; - case 'curious': - this.makeRobotLookAround(robot); - break; - } - } - - showThinkingBubbles(robot) { - const bubbles = document.createElement('div'); - bubbles.className = 'thinking-bubbles'; - bubbles.innerHTML = ` -
-
-
💭
- `; - - robot.element.appendChild(bubbles); - - setTimeout(() => { - bubbles.classList.add('fade-out'); - setTimeout(() => bubbles.remove(), 300); - }, 3000); - } - - makeRobotSmile(robot) { - const smile = robot.element.querySelector('.robot-smile'); - if (smile) { - smile.classList.add('big-smile'); - setTimeout(() => smile.classList.remove('big-smile'), 2000); - } - } - - makeRobotLookAround(robot) { - let direction = -1; - const lookInterval = setInterval(() => { - robot.eyes.forEach(eye => { - eye.style.transform = `translateX(${direction * 3}px)`; - }); - direction *= -1; - }, 500); - - setTimeout(() => { - clearInterval(lookInterval); - robot.eyes.forEach(eye => { - eye.style.transform = ''; - }); - }, 2000); - } - - addBlush(robot) { - const head = robot.element.querySelector('.robot-head') || robot.element; - - ['left', 'right'].forEach(side => { - const blush = document.createElement('div'); - blush.className = `robot-blush ${side}`; - head.appendChild(blush); - }); - } - - removeBlush(robot) { - const blushes = robot.element.querySelectorAll('.robot-blush'); - blushes.forEach(blush => blush.remove()); - } - - setupEasterEggs() { - // Konami code for robot dance party - document.addEventListener('keydown', (e) => { - this.konami.push(e.key); - this.konami = this.konami.slice(-10); - - if (this.konami.join(',') === this.konamiCode.join(',')) { - this.robotDanceParty(); - } - }); - - // Secret robot activation phrase - let secretPhrase = ''; - document.addEventListener('keypress', (e) => { - secretPhrase += e.key; - secretPhrase = secretPhrase.slice(-10); - - if (secretPhrase.includes('robot')) { - this.activateSecretRobotMode(); - } - }); - } - - robotDanceParty() { - document.body.classList.add('robot-dance-party'); - - // Make all robots dance - this.robots.forEach((robot, index) => { - setTimeout(() => { - this.robotDance(robot); - }, index * 200); - }); - - // Add disco lights - this.addDiscoLights(); - - // Stop after 10 seconds - setTimeout(() => { 
- document.body.classList.remove('robot-dance-party'); - this.removeDiscoLights(); - }, 10000); - } - - robotDance(robot) { - robot.element.classList.add('dancing'); - - // Random dance moves - const dances = ['wiggle', 'spin', 'bounce', 'shake']; - const dance = dances[Math.floor(Math.random() * dances.length)]; - - robot.element.style.animation = `robot-${dance} 1s ease-in-out infinite`; - - setTimeout(() => { - robot.element.style.animation = ''; - robot.element.classList.remove('dancing'); - }, 5000); - } - - addDiscoLights() { - const disco = document.createElement('div'); - disco.className = 'disco-lights'; - disco.innerHTML = ` -
🪩
-
-
-
-
- `; - document.body.appendChild(disco); - } - - removeDiscoLights() { - const disco = document.querySelector('.disco-lights'); - if (disco) disco.remove(); - } - - activateSecretRobotMode() { - // Make all robots super happy - this.robots.forEach(robot => { - robot.eyes.forEach(eye => { - eye.classList.add('sparkle', 'rainbow'); - }); - - this.showSpeechBubble(robot, "Secret mode activated! 🎉"); - }); - - // Remove after 5 seconds - setTimeout(() => { - this.robots.forEach(robot => { - robot.eyes.forEach(eye => { - eye.classList.remove('sparkle', 'rainbow'); - }); - }); - }, 5000); - } - - handleKeyPress(e) { - // Number keys change robot expressions - if (e.key >= '1' && e.key <= '9') { - const expressions = ['happy', 'sad', 'surprised', 'angry', 'love', 'sleepy', 'wink', 'dizzy', 'cool']; - const expression = expressions[parseInt(e.key) - 1]; - - if (expression) { - this.robots.forEach(robot => { - this.setRobotExpression(robot, expression); - }); - } - } - } - - setRobotExpression(robot, expression) { - // Clear previous expressions - const allExpressions = ['happy', 'sad', 'surprised', 'angry', 'love', 'sleepy', 'wink', 'dizzy', 'cool']; - robot.eyes.forEach(eye => { - allExpressions.forEach(exp => eye.classList.remove(exp)); - eye.classList.add(expression); - }); - - // Remove after 3 seconds - setTimeout(() => { - robot.eyes.forEach(eye => { - eye.classList.remove(expression); - }); - }, 3000); - } -} - -// Initialize when DOM is ready -if (document.readyState === 'loading') { - document.addEventListener('DOMContentLoaded', () => { - window.robotInteractions = new RobotInteractions(); - window.robotInteractions.init(); - }); -} else { - window.robotInteractions = new RobotInteractions(); - window.robotInteractions.init(); -} - -// Add required styles dynamically -const style = document.createElement('style'); -style.textContent = ` - /* Pupil for eye tracking */ - .robot-eye .pupil { - position: absolute; - top: 50%; - left: 50%; - width: 40%; - height: 40%; 
- background: white; - border-radius: 50%; - transform: translate(-50%, -50%); - transition: transform 0.1s ease-out; - pointer-events: none; - } - - /* Speech bubble */ - .robot-speech-bubble { - position: absolute; - bottom: 110%; - left: 50%; - transform: translateX(-50%); - background: white; - color: #1A1A2E; - padding: 0.75rem 1rem; - border-radius: 20px; - border: 3px solid #1A1A2E; - font-size: 0.9rem; - font-weight: 600; - white-space: nowrap; - box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2); - animation: bubbleIn 0.3s ease-out; - z-index: 100; - } - - .robot-speech-bubble.fade-out { - animation: bubbleOut 0.3s ease-in forwards; - } - - .bubble-tail { - position: absolute; - bottom: -8px; - left: 50%; - transform: translateX(-50%); - width: 0; - height: 0; - border-left: 8px solid transparent; - border-right: 8px solid transparent; - border-top: 10px solid white; - } - - .bubble-tail::before { - content: ''; - position: absolute; - bottom: 2px; - left: -10px; - width: 0; - height: 0; - border-left: 10px solid transparent; - border-right: 10px solid transparent; - border-top: 12px solid #1A1A2E; - } - - /* Thinking bubbles */ - .thinking-bubbles { - position: absolute; - top: -40px; - right: -20px; - animation: bubbleFloat 3s ease-in-out; - } - - .thinking-bubbles.fade-out { - animation: fadeOut 0.3s ease-in forwards; - } - - .think-bubble { - position: absolute; - background: white; - border: 2px solid #1A1A2E; - border-radius: 50%; - } - - .think-bubble.small { - width: 8px; - height: 8px; - bottom: 0; - left: 0; - } - - .think-bubble.medium { - width: 12px; - height: 12px; - bottom: 10px; - left: -5px; - } - - .think-bubble.large { - width: 30px; - height: 30px; - bottom: 25px; - left: -15px; - display: flex; - align-items: center; - justify-content: center; - font-size: 16px; - } - - /* Big smile animation */ - .robot-smile.big-smile { - width: 30px !important; - height: 15px !important; - border-width: 4px !important; - } - - /* Dance animations */ - 
@keyframes robot-wiggle { - 0%, 100% { transform: rotate(-5deg); } - 50% { transform: rotate(5deg); } - } - - @keyframes robot-spin { - 0% { transform: rotate(0deg); } - 100% { transform: rotate(360deg); } - } - - @keyframes robot-bounce { - 0%, 100% { transform: translateY(0); } - 50% { transform: translateY(-20px); } - } - - @keyframes robot-shake { - 0%, 100% { transform: translateX(0); } - 25% { transform: translateX(-5px); } - 75% { transform: translateX(5px); } - } - - @keyframes robotJump { - 0%, 100% { transform: translateY(0) scale(1); } - 50% { transform: translateY(-30px) scale(1.1); } - } - - @keyframes bubbleIn { - 0% { opacity: 0; transform: translateX(-50%) scale(0.8); } - 100% { opacity: 1; transform: translateX(-50%) scale(1); } - } - - @keyframes bubbleOut { - 0% { opacity: 1; transform: translateX(-50%) scale(1); } - 100% { opacity: 0; transform: translateX(-50%) scale(0.8); } - } - - @keyframes fadeOut { - 0% { opacity: 1; } - 100% { opacity: 0; } - } - - @keyframes bubbleFloat { - 0%, 100% { transform: translateY(0); } - 50% { transform: translateY(-5px); } - } - - /* Disco mode */ - .disco-lights { - position: fixed; - top: 0; - left: 0; - right: 0; - bottom: 0; - pointer-events: none; - z-index: 9999; - } - - .disco-ball { - position: absolute; - top: 50px; - left: 50%; - transform: translateX(-50%); - font-size: 60px; - animation: discoSpin 2s linear infinite; - } - - .light-beam { - position: absolute; - top: 100px; - left: 50%; - width: 100px; - height: 500px; - opacity: 0.3; - transform-origin: top center; - animation: lightSweep 4s ease-in-out infinite; - } - - .light-beam.red { background: linear-gradient(transparent, #FF006E); } - .light-beam.blue { background: linear-gradient(transparent, #3A86FF); animation-delay: 1s; } - .light-beam.green { background: linear-gradient(transparent, #06FFA5); animation-delay: 2s; } - .light-beam.yellow { background: linear-gradient(transparent, #FFB700); animation-delay: 3s; } - - @keyframes discoSpin 
{ - 0% { transform: translateX(-50%) rotate(0deg); } - 100% { transform: translateX(-50%) rotate(360deg); } - } - - @keyframes lightSweep { - 0%, 100% { transform: translateX(-50%) rotate(-30deg); } - 50% { transform: translateX(-50%) rotate(30deg); } - } - - /* Rainbow eyes */ - .robot-eye.rainbow { - animation: rainbowEyes 2s linear infinite; - } - - @keyframes rainbowEyes { - 0% { background: #FF006E; } - 25% { background: #3A86FF; } - 50% { background: #06FFA5; } - 75% { background: #FFB700; } - 100% { background: #FF006E; } - } - - /* Sparkle effect */ - .robot-eye.sparkle::before { - content: '✨'; - position: absolute; - top: -10px; - right: -10px; - font-size: 12px; - animation: sparkleFloat 1s ease-in-out infinite; - } - - @keyframes sparkleFloat { - 0%, 100% { transform: translateY(0) rotate(0deg); } - 50% { transform: translateY(-5px) rotate(180deg); } - } -`; - -document.head.appendChild(style); diff --git a/website/src/styles/blog.css b/website/src/styles/blog.css deleted file mode 100644 index f7c57aecc1..0000000000 --- a/website/src/styles/blog.css +++ /dev/null @@ -1,372 +0,0 @@ -/* Blog Post Content Styles */ - -/* Image Handling in MDX */ -.blog-article img, -.article-content img { - max-width: 100%; - height: auto; - display: block; - margin: 2rem auto; - border-radius: 0.75rem; - box-shadow: - 0 4px 6px -1px rgba(0, 0, 0, 0.1), - 0 2px 4px -1px rgba(0, 0, 0, 0.06); - border: 1px solid var(--sl-color-gray-5); - object-fit: contain; -} - -/* Prevent images from overflowing */ -.blog-article, -.article-content { - overflow-x: hidden; - width: 100%; -} - -/* Inline images and icons */ -.blog-article p img, -.article-content p img { - display: inline-block; - margin: 0 0.25rem; - vertical-align: middle; - max-height: 1.5em; - width: auto; - border: none; - box-shadow: none; - border-radius: 0; -} - -/* Figure elements */ -.blog-article figure, -.article-content figure { - margin: 2rem 0; - text-align: center; - max-width: 100%; -} - -.blog-article 
figure img, -.article-content figure img { - margin-bottom: 0.5rem; - max-width: 100%; -} - -.blog-article figcaption, -.article-content figcaption { - font-size: 0.9rem; - color: var(--sl-color-text-muted); - font-style: italic; - margin-top: 0.5rem; - padding: 0 1rem; -} - -/* Code blocks - prevent horizontal overflow */ -.blog-article pre, -.article-content pre { - overflow-x: auto; - max-width: 100%; - white-space: pre-wrap; - word-wrap: break-word; -} - -/* Tables - make responsive */ -.blog-article .table-wrapper, -.article-content .table-wrapper, -.blog-article .overflow-x-auto, -.article-content .overflow-x-auto { - overflow-x: auto; - max-width: 100%; - margin: 1.5rem 0; -} - -.blog-article table, -.article-content table { - min-width: 100%; - width: max-content; -} - -/* MDX Components */ -.blog-article .callout, -.article-content .callout { - margin: 1.5rem 0; - max-width: 100%; - overflow: hidden; -} - -/* Video embeds */ -.blog-article video, -.article-content video, -.blog-article iframe, -.article-content iframe { - max-width: 100%; - height: auto; - border-radius: 0.75rem; - margin: 2rem auto; - display: block; -} - -/* YouTube/Video embeds responsive wrapper */ -.blog-article .video-wrapper, -.article-content .video-wrapper, -.blog-article .embed-wrapper, -.article-content .embed-wrapper { - position: relative; - padding-bottom: 56.25%; /* 16:9 aspect ratio */ - height: 0; - overflow: hidden; - max-width: 100%; - margin: 2rem 0; - border-radius: 0.75rem; -} - -.blog-article .video-wrapper iframe, -.article-content .video-wrapper iframe, -.blog-article .embed-wrapper iframe, -.article-content .embed-wrapper iframe { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - border: 0; - margin: 0; -} - -/* Fix emoji and icon display */ -.blog-article .emoji, -.article-content .emoji, -.blog-article span[role='img'], -.article-content span[role='img'] { - display: inline-block; - vertical-align: middle; - font-size: 1.2em; - 
line-height: 1; -} - -/* Lists with icons */ -.blog-article ul li::before, -.article-content ul li::before { - content: none; /* Remove default bullets if using custom icons */ -} - -.blog-article ul.icon-list li, -.article-content ul.icon-list li { - list-style: none; - position: relative; - padding-left: 1.5rem; -} - -.blog-article ul.icon-list li::before, -.article-content ul.icon-list li::before { - position: absolute; - left: 0; - top: 0.25em; -} - -/* Dark mode adjustments */ -:global(.dark) .blog-article img, -:global(.dark) .article-content img { - box-shadow: - 0 4px 6px -1px rgba(0, 0, 0, 0.3), - 0 2px 4px -1px rgba(0, 0, 0, 0.2); - border-color: var(--sl-color-gray-6); -} - -/* Mobile responsive adjustments */ -@media (max-width: 768px) { - .blog-article img, - .article-content img { - border-radius: 0.5rem; - margin: 1.5rem auto; - } - - .blog-article figure, - .article-content figure { - margin: 1.5rem 0; - } - - .blog-article figcaption, - .article-content figcaption { - font-size: 0.85rem; - padding: 0 0.5rem; - } - - /* Ensure content doesn't overflow on mobile */ - .blog-article, - .article-content { - padding-left: 0.5rem; - padding-right: 0.5rem; - } - - .blog-article pre, - .article-content pre { - border-radius: 0.5rem; - font-size: 0.85rem; - } -} - -@media (max-width: 480px) { - .blog-article img, - .article-content img { - border-radius: 0.375rem; - margin: 1rem auto; - } - - .blog-article figure, - .article-content figure { - margin: 1rem 0; - } -} - -/* Image loading states */ -.blog-article img[loading='lazy'], -.article-content img[loading='lazy'] { - background: linear-gradient( - 90deg, - var(--sl-color-gray-5) 0%, - var(--sl-color-gray-4) 50%, - var(--sl-color-gray-5) 100% - ); - background-size: 200% 100%; - animation: shimmer 1.5s infinite; -} - -@keyframes shimmer { - 0% { - background-position: 200% 0; - } - 100% { - background-position: -200% 0; - } -} - -/* Gallery styles for multiple images */ -.blog-article .image-gallery, 
-.article-content .image-gallery { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); - gap: 1rem; - margin: 2rem 0; -} - -.blog-article .image-gallery img, -.article-content .image-gallery img { - margin: 0; - width: 100%; - height: 100%; - object-fit: cover; -} - -/* Side-by-side images */ -.blog-article .image-row, -.article-content .image-row { - display: flex; - gap: 1rem; - margin: 2rem 0; - flex-wrap: wrap; -} - -.blog-article .image-row img, -.article-content .image-row img { - flex: 1; - min-width: 0; - margin: 0; -} - -/* Full-width images */ -.blog-article .full-width-image, -.article-content .full-width-image { - margin-left: calc(-50vw + 50%); - margin-right: calc(-50vw + 50%); - max-width: 100vw; - width: 100vw; -} - -.blog-article .full-width-image img, -.article-content .full-width-image img { - width: 100%; - max-width: 1400px; - margin-left: auto; - margin-right: auto; - border-radius: 0; -} - -/* Screenshot specific styles */ -.blog-article .screenshot, -.article-content .screenshot { - box-shadow: - 0 20px 25px -5px rgba(0, 0, 0, 0.1), - 0 10px 10px -5px rgba(0, 0, 0, 0.04); - border: 1px solid var(--sl-color-gray-4); -} - -/* Terminal/code screenshots */ -.blog-article .terminal-screenshot, -.article-content .terminal-screenshot { - border-radius: 0.5rem; - overflow: hidden; - box-shadow: - 0 10px 15px -3px rgba(0, 0, 0, 0.1), - 0 4px 6px -2px rgba(0, 0, 0, 0.05); -} - -/* Comparison images */ -.blog-article .image-comparison, -.article-content .image-comparison { - display: grid; - grid-template-columns: 1fr 1fr; - gap: 1rem; - margin: 2rem 0; -} - -.blog-article .image-comparison img, -.article-content .image-comparison img { - margin: 0; -} - -.blog-article .image-comparison figcaption, -.article-content .image-comparison figcaption { - grid-column: 1 / -1; - text-align: center; - margin-top: 1rem; -} - -@media (max-width: 640px) { - .blog-article .image-comparison, - .article-content .image-comparison { - 
grid-template-columns: 1fr; - } - - .blog-article .image-row, - .article-content .image-row { - flex-direction: column; - } -} - -/* Broken image fallback */ -.blog-article img.error, -.article-content img.error { - display: flex; - align-items: center; - justify-content: center; - min-height: 200px; - background: var(--sl-color-gray-5); - position: relative; -} - -.blog-article img.error::after, -.article-content img.error::after { - content: '🖼️ Image not found'; - position: absolute; - color: var(--sl-color-text-muted); - font-size: 0.875rem; -} - -/* Print styles */ -@media print { - .blog-article img, - .article-content img { - max-width: 100%; - page-break-inside: avoid; - border: 1px solid #ddd; - box-shadow: none; - } -} diff --git a/website/src/styles/hand-drawn.css b/website/src/styles/hand-drawn.css deleted file mode 100644 index fe64e358ea..0000000000 --- a/website/src/styles/hand-drawn.css +++ /dev/null @@ -1,667 +0,0 @@ -/* Hand-Drawn UI Styles - PostHog-inspired playful design */ - -/* ===== HAND-DRAWN BORDERS ===== */ -.hand-drawn-border { - position: relative; - border: none !important; -} - -.hand-drawn-border::before { - content: ''; - position: absolute; - inset: -2px; - background: transparent; - border: 3px solid currentColor; - border-radius: 255px 15px 225px 15px/15px 225px 15px 255px; - pointer-events: none; -} - -.hand-drawn-border.thick::before { - border-width: 4px; - inset: -3px; -} - -.hand-drawn-border.animated::before { - animation: borderWobble 8s ease-in-out infinite; -} - -@keyframes borderWobble { - 0%, 100% { - border-radius: 255px 15px 225px 15px/15px 225px 15px 255px; - } - 25% { - border-radius: 15px 255px 15px 225px/225px 15px 255px 15px; - } - 50% { - border-radius: 225px 15px 255px 15px/15px 255px 15px 225px; - } - 75% { - border-radius: 15px 225px 15px 255px/255px 15px 225px 15px; - } -} - -/* ===== SKETCHY BUTTONS ===== */ -.btn-sketchy { - position: relative; - background: var(--robot-yellow); - color: 
var(--robot-dark); - padding: 1rem 2rem; - border: none; - font-weight: 700; - font-size: 1rem; - cursor: pointer; - transition: all 0.3s ease; - overflow: visible; -} - -.btn-sketchy::before { - content: ''; - position: absolute; - inset: -3px; - background: var(--robot-yellow); - transform: rotate(-1deg); - border-radius: 255px 15px 225px 15px/15px 225px 15px 255px; - z-index: -1; - transition: all 0.3s ease; -} - -.btn-sketchy::after { - content: ''; - position: absolute; - inset: 0; - border: 3px solid var(--robot-dark); - border-radius: 225px 15px 255px 15px/15px 255px 15px 225px; - transform: rotate(0.5deg); -} - -.btn-sketchy:hover { - transform: translateY(-2px) rotate(-0.5deg); -} - -.btn-sketchy:hover::before { - transform: rotate(-2deg) scale(1.05); - background: var(--robot-orange); -} - -.btn-sketchy:hover::after { - transform: rotate(1deg); -} - -.btn-sketchy:active { - transform: translateY(0) rotate(0deg); -} - -/* ===== HAND-DRAWN ARROWS ===== */ -.arrow-doodle { - position: relative; - width: 100px; - height: 50px; -} - -.arrow-doodle svg { - width: 100%; - height: 100%; - overflow: visible; -} - -.arrow-doodle path { - fill: none; - stroke: var(--robot-dark); - stroke-width: 3; - stroke-linecap: round; - stroke-linejoin: round; -} - -.arrow-doodle.animated path { - stroke-dasharray: 100; - stroke-dashoffset: 100; - animation: drawArrow 2s ease-in-out infinite; -} - -@keyframes drawArrow { - 0% { stroke-dashoffset: 100; } - 50% { stroke-dashoffset: 0; } - 100% { stroke-dashoffset: -100; } -} - -/* ===== WOBBLY UNDERLINES ===== */ -.underline-wobble { - position: relative; - text-decoration: none; - display: inline-block; -} - -.underline-wobble::after { - content: ''; - position: absolute; - bottom: -4px; - left: 0; - right: 0; - height: 3px; - background: currentColor; - clip-path: polygon( - 0% 50%, - 2% 35%, - 4% 65%, - 6% 45%, - 8% 55%, - 10% 40%, - 12% 60%, - 14% 50%, - 16% 35%, - 18% 65%, - 20% 45%, - 22% 55%, - 24% 40%, - 26% 60%, - 28% 
50%, - 30% 35%, - 32% 65%, - 34% 45%, - 36% 55%, - 38% 40%, - 40% 60%, - 42% 50%, - 44% 35%, - 46% 65%, - 48% 45%, - 50% 55%, - 52% 40%, - 54% 60%, - 56% 50%, - 58% 35%, - 60% 65%, - 62% 45%, - 64% 55%, - 66% 40%, - 68% 60%, - 70% 50%, - 72% 35%, - 74% 65%, - 76% 45%, - 78% 55%, - 80% 40%, - 82% 60%, - 84% 50%, - 86% 35%, - 88% 65%, - 90% 45%, - 92% 55%, - 94% 40%, - 96% 60%, - 98% 50%, - 100% 50% - ); -} - -.underline-wobble:hover::after { - animation: wobbleUnderline 0.5s ease-in-out; -} - -@keyframes wobbleUnderline { - 0%, 100% { transform: translateY(0); } - 25% { transform: translateY(-2px); } - 75% { transform: translateY(2px); } -} - -/* ===== SKETCH CIRCLES ===== */ -.circle-sketch { - position: relative; - width: 100px; - height: 100px; - background: var(--robot-blue); -} - -.circle-sketch::before { - content: ''; - position: absolute; - inset: -5px; - background: transparent; - border: 3px solid var(--robot-dark); - border-radius: 50%; - clip-path: polygon( - 50% 0%, - 52% 2%, - 54% 1%, - 56% 3%, - 58% 2%, - 60% 0%, - 62% 1%, - 64% 3%, - 66% 2%, - 68% 1%, - 70% 0%, - 72% 2%, - 74% 3%, - 76% 1%, - 78% 2%, - 80% 0%, - 82% 1%, - 84% 3%, - 86% 2%, - 88% 1%, - 90% 0%, - 92% 2%, - 94% 1%, - 96% 3%, - 98% 2%, - 100% 0%, - 100% 50%, - 98% 52%, - 99% 54%, - 97% 56%, - 98% 58%, - 100% 60%, - 99% 62%, - 97% 64%, - 98% 66%, - 99% 68%, - 100% 70%, - 98% 72%, - 97% 74%, - 99% 76%, - 98% 78%, - 100% 80%, - 99% 82%, - 97% 84%, - 98% 86%, - 99% 88%, - 100% 90%, - 98% 92%, - 99% 94%, - 97% 96%, - 98% 98%, - 100% 100%, - 50% 100%, - 48% 98%, - 46% 99%, - 44% 97%, - 42% 98%, - 40% 100%, - 38% 99%, - 36% 97%, - 34% 98%, - 32% 99%, - 30% 100%, - 28% 98%, - 26% 97%, - 24% 99%, - 22% 98%, - 20% 100%, - 18% 99%, - 16% 97%, - 14% 98%, - 12% 99%, - 10% 100%, - 8% 98%, - 6% 99%, - 4% 97%, - 2% 98%, - 0% 100%, - 0% 50%, - 2% 48%, - 1% 46%, - 3% 44%, - 2% 42%, - 0% 40%, - 1% 38%, - 3% 36%, - 2% 34%, - 1% 32%, - 0% 30%, - 2% 28%, - 3% 26%, - 1% 24%, - 2% 22%, - 0% 20%, - 1% 18%, - 3% 
16%, - 2% 14%, - 1% 12%, - 0% 10%, - 2% 8%, - 1% 6%, - 3% 4%, - 2% 2%, - 0% 0% - ); -} - -/* ===== ROUGH PAPER TEXTURE ===== */ -.paper-texture { - position: relative; - background: - repeating-linear-gradient( - 90deg, - transparent, - transparent 2px, - rgba(0, 0, 0, 0.03) 2px, - rgba(0, 0, 0, 0.03) 4px - ), - repeating-linear-gradient( - 0deg, - transparent, - transparent 2px, - rgba(0, 0, 0, 0.03) 2px, - rgba(0, 0, 0, 0.03) 4px - ), - linear-gradient( - 135deg, - #ffffff 0%, - #f8f8f8 100% - ); -} - -.paper-texture::before { - content: ''; - position: absolute; - inset: 0; - background: url('data:image/svg+xml;utf8,'); - pointer-events: none; -} - -/* ===== HANDWRITING FONTS ===== */ -@import url('https://fonts.googleapis.com/css2?family=Kalam:wght@400;700&family=Caveat:wght@400;700&display=swap'); - -.handwriting { - font-family: 'Kalam', cursive; - letter-spacing: 0.02em; -} - -.handwriting-alt { - font-family: 'Caveat', cursive; - font-size: 1.2em; -} - -/* ===== DOODLE CHECKBOX ===== */ -.checkbox-doodle { - position: relative; - width: 24px; - height: 24px; - cursor: pointer; -} - -.checkbox-doodle input { - position: absolute; - opacity: 0; - width: 100%; - height: 100%; - cursor: pointer; -} - -.checkbox-doodle .checkbox-box { - position: absolute; - inset: 0; - background: white; - border: 3px solid var(--robot-dark); - border-radius: 4px; - transform: rotate(-2deg); - transition: all 0.3s ease; -} - -.checkbox-doodle .checkbox-check { - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%) scale(0) rotate(-45deg); - width: 12px; - height: 6px; - border-left: 3px solid var(--robot-green); - border-bottom: 3px solid var(--robot-green); - transition: all 0.3s cubic-bezier(0.68, -0.55, 0.265, 1.55); -} - -.checkbox-doodle input:checked ~ .checkbox-box { - background: var(--robot-yellow); - transform: rotate(2deg); -} - -.checkbox-doodle input:checked ~ .checkbox-check { - transform: translate(-50%, -50%) scale(1) rotate(-45deg); 
-} - -.checkbox-doodle:hover .checkbox-box { - transform: rotate(-4deg) scale(1.1); - border-color: var(--robot-blue); -} - -/* ===== TOOLTIP BUBBLE ===== */ -.tooltip-bubble { - position: relative; - display: inline-block; -} - -.tooltip-bubble .tooltip-content { - position: absolute; - bottom: 125%; - left: 50%; - transform: translateX(-50%) scale(0); - background: var(--robot-dark); - color: white; - padding: 0.75rem 1rem; - border-radius: 15px 15px 15px 2px; - font-size: 0.9rem; - white-space: nowrap; - box-shadow: 4px 4px 0 rgba(0, 0, 0, 0.2); - transform-origin: bottom center; - transition: all 0.3s cubic-bezier(0.68, -0.55, 0.265, 1.55); -} - -.tooltip-bubble .tooltip-content::after { - content: ''; - position: absolute; - top: 100%; - left: 50%; - transform: translateX(-50%); - width: 0; - height: 0; - border-left: 8px solid transparent; - border-right: 8px solid transparent; - border-top: 10px solid var(--robot-dark); - margin-left: -8px; -} - -.tooltip-bubble:hover .tooltip-content { - transform: translateX(-50%) scale(1); -} - -/* ===== WIGGLE TEXT ===== */ -.text-wiggle { - display: inline-block; - animation: textWiggle 2s ease-in-out infinite; -} - -.text-wiggle.subtle { - animation-duration: 4s; -} - -@keyframes textWiggle { - 0%, 100% { transform: rotate(-1deg); } - 50% { transform: rotate(1deg); } -} - -/* ===== HAND-DRAWN DIVIDER ===== */ -.divider-sketch { - width: 100%; - height: 20px; - margin: 2rem 0; - background: url('data:image/svg+xml;utf8,') center/contain no-repeat; -} - -/* ===== SCRIBBLE BACKGROUND ===== */ -.scribble-bg { - position: relative; - overflow: hidden; -} - -.scribble-bg::before { - content: ''; - position: absolute; - top: -50%; - left: -50%; - width: 200%; - height: 200%; - background: url('data:image/svg+xml;utf8,') 0 0/50px 50px; - transform: rotate(-5deg); - opacity: 0.1; - pointer-events: none; -} - -/* ===== STICKY NOTE ===== */ -.sticky-note { - position: relative; - background: #fff740; - padding: 1.5rem; - 
box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1); - transform: rotate(-2deg); - transition: all 0.3s ease; -} - -.sticky-note::before { - content: ''; - position: absolute; - top: -8px; - left: 50%; - transform: translateX(-50%); - width: 60px; - height: 16px; - background: rgba(0, 0, 0, 0.1); - border-radius: 8px; -} - -.sticky-note:hover { - transform: rotate(0deg) scale(1.02); - box-shadow: 4px 4px 20px rgba(0, 0, 0, 0.15); -} - -.sticky-note.pink { background: #ff9ff3; } -.sticky-note.blue { background: #54a0ff; } -.sticky-note.green { background: #1dd1a1; } - -/* ===== MARKER HIGHLIGHT ===== */ -.highlight-marker { - position: relative; - display: inline-block; - color: var(--robot-dark); - padding: 0 0.2em; -} - -.highlight-marker::before { - content: ''; - position: absolute; - bottom: 0; - left: -0.1em; - right: -0.1em; - height: 0.8em; - background: var(--robot-yellow); - opacity: 0.4; - transform: skew(-5deg); - z-index: -1; - transition: all 0.3s ease; -} - -.highlight-marker:hover::before { - height: 1.2em; - opacity: 0.6; -} - -.highlight-marker.pink::before { background: var(--robot-pink); } -.highlight-marker.blue::before { background: var(--robot-blue); } -.highlight-marker.green::before { background: var(--robot-green); } - -/* ===== ROUGH SHADOW ===== */ -.shadow-rough { - position: relative; -} - -.shadow-rough::after { - content: ''; - position: absolute; - inset: 4px -4px -4px 4px; - background: rgba(0, 0, 0, 0.2); - border-radius: inherit; - transform: rotate(-1deg); - z-index: -1; - filter: blur(2px); -} - -/* ===== HAND-DRAWN PROGRESS BAR ===== */ -.progress-sketch { - position: relative; - width: 100%; - height: 30px; - background: white; - border: 3px solid var(--robot-dark); - border-radius: 15px; - overflow: hidden; - transform: rotate(-1deg); -} - -.progress-sketch .progress-fill { - height: 100%; - background: repeating-linear-gradient( - 45deg, - var(--robot-green), - var(--robot-green) 10px, - transparent 10px, - transparent 20px - ); - 
border-right: 3px solid var(--robot-dark); - transition: width 0.5s ease; - animation: progressSlide 1s linear infinite; -} - -@keyframes progressSlide { - 0% { background-position: 0 0; } - 100% { background-position: 20px 0; } -} - -/* ===== UTILITY CLASSES ===== */ -.wiggle { animation: wiggle 2s ease-in-out infinite; } -.bounce { animation: bounce 3s ease-in-out infinite; } -.float { animation: float 4s ease-in-out infinite; } -.shake { animation: shake 0.5s ease-in-out infinite; } - -@keyframes wiggle { - 0%, 100% { transform: rotate(-2deg); } - 50% { transform: rotate(2deg); } -} - -@keyframes bounce { - 0%, 100% { transform: translateY(0); } - 50% { transform: translateY(-10px); } -} - -@keyframes float { - 0%, 100% { transform: translateY(0); } - 50% { transform: translateY(-20px); } -} - -@keyframes shake { - 0%, 100% { transform: translateX(0); } - 25% { transform: translateX(-5px); } - 75% { transform: translateX(5px); } -} - -/* ===== ACCESSIBILITY ===== */ -@media (prefers-reduced-motion: reduce) { - *, - *::before, - *::after { - animation-duration: 0.01ms !important; - animation-iteration-count: 1 !important; - transition-duration: 0.01ms !important; - } -} - -/* ===== RESPONSIVE ADJUSTMENTS ===== */ -@media (max-width: 768px) { - .btn-sketchy { - padding: 0.75rem 1.5rem; - font-size: 0.9rem; - } - - .sticky-note { - padding: 1rem; - } -} diff --git a/website/src/styles/products-architecture.css b/website/src/styles/products-architecture.css deleted file mode 100644 index 3ff9d0a3e0..0000000000 --- a/website/src/styles/products-architecture.css +++ /dev/null @@ -1,577 +0,0 @@ -/* ProductsArchitecture Component Styles */ - -.architecture-section { - background: var(--light-gray); - border-top: 1px solid var(--border-color); -} - -.architecture-section .badge-robot-head { - background: var(--pastel-blue); -} - -/* Architecture Stack */ -.architecture-stack { - margin: 4rem 0; - max-width: 900px; - margin-left: auto; - margin-right: auto; - position: 
relative; -} - -.stack-layer { - background: white; - border-radius: 20px; - border: 3px solid var(--border-color); - padding: 2rem; - margin-bottom: 2rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); - transition: all 0.3s ease; - position: relative; - display: flex; - align-items: center; - gap: 2rem; -} - -.stack-layer:hover { - transform: translateX(8px); - box-shadow: 0 12px 40px rgba(0, 0, 0, 0.12); -} - -.tools-layer { - border-color: var(--gentle-orange); - border-left-width: 8px; -} - -.models-layer { - border-color: var(--soft-green); - border-left-width: 8px; -} - -.platforms-layer { - border-color: var(--primary-blue); - border-left-width: 8px; - margin-bottom: 0; -} - -.layer-number { - position: absolute; - top: -15px; - left: 2rem; - width: 30px; - height: 30px; - background: var(--primary-blue); - color: white; - border-radius: 50%; - display: flex; - align-items: center; - justify-content: center; - font-weight: 700; - font-size: 1.1rem; - border: 3px solid white; - box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1); -} - -.layer-robot { - flex-shrink: 0; -} - -/* Robot Designs */ -.tools-robot, -.models-robot, -.platforms-robot { - position: relative; -} - -.tools-robot .robot-head, -.models-robot .robot-head, -.platforms-robot .robot-head { - width: 60px; - height: 60px; - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto 10px; - position: relative; -} - -.tools-robot .robot-body, -.models-robot .robot-body, -.platforms-robot .robot-body { - width: 70px; - height: 80px; - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; -} - -.tools-robot .robot-head, -.tools-robot .robot-body { - background: var(--gentle-orange); -} - -.models-robot .robot-head, -.models-robot .robot-body { - background: var(--soft-green); -} - -.platforms-robot .robot-head, -.platforms-robot .robot-body { - background: var(--pastel-blue); -} - 
-.robot-eyes { - display: flex; - justify-content: space-between; - padding: 12px 8px; -} - -.robot-eye { - width: 12px; - height: 12px; - background: var(--dark-text); - border-radius: 50%; - position: relative; -} - -.robot-eye::after { - content: ''; - position: absolute; - top: 2px; - right: 2px; - width: 3px; - height: 3px; - background: white; - border-radius: 50%; -} - -.robot-eye.thinking { - animation: thinking 3s infinite ease-in-out; -} - -.robot-smile { - position: absolute; - bottom: 8px; - left: 50%; - transform: translateX(-50%); - width: 15px; - height: 8px; - border: 3px solid var(--dark-text); - border-top: none; - border-radius: 0 0 12px 12px; -} - -.robot-smile { - position: absolute; - bottom: 8px; - left: 50%; - transform: translateX(-50%); - width: 15px; - height: 8px; - border: 3px solid var(--dark-text); - border-top: none; - border-radius: 0 0 12px 12px; -} - -/* Robot-specific details */ -.robot-base { - margin-top: 5px; -} - -.base-foundation { - width: 50px; - height: 8px; - background: var(--medium-gray); - border-radius: 4px; - border: 2px solid var(--dark-text); - margin: 0 auto; -} - -.layer-content { - flex: 1; -} - -.layer-content h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 0.75rem; -} - -.layer-content p { - color: var(--medium-gray); - margin-bottom: 1.5rem; - line-height: 1.6; -} - -.layer-features { - display: flex; - flex-wrap: wrap; - gap: 0.75rem; -} - -.feature-item { - display: flex; - align-items: center; - gap: 0.5rem; - font-size: 0.9rem; - font-weight: 500; -} - -.feature-status { - width: 8px; - height: 8px; - border-radius: 50%; - border: 1px solid var(--dark-text); -} - -.feature-item.active .feature-status { - background: var(--success-green); - animation: pulse 2s infinite; -} - -.feature-item.beta .feature-status { - background: var(--warning-amber); - animation: pulse 2s infinite; -} - -.feature-item.coming .feature-status { - background: var(--medium-gray); -} - 
-/* Connection Lines */ -.layer-connection { - position: absolute; - bottom: -2rem; - left: 50%; - width: 4px; - height: 2rem; - background-color: var(--border-color); - z-index: -1; -} - -.platforms-layer .layer-connection { - display: none; -} - -.data-flow { - position: absolute; - top: 0; - left: -4px; - right: -4px; - height: 100%; -} - -.flow-packet { - position: absolute; - width: 6px; - height: 6px; - background: var(--primary-blue); - border-radius: 50%; - animation: flowData 3s infinite linear; - left: 50%; - transform: translateX(-50%); -} - -.flow-packet:nth-child(2) { - animation-delay: 1s; -} -.flow-packet:nth-child(3) { - animation-delay: 2s; -} - -/* Architecture Benefits */ -.architecture-benefits { - margin: 4rem 0; -} - -.architecture-benefits h3 { - text-align: center; - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.benefits-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - gap: 2rem; -} - -.benefit-card { - background: white; - border-radius: 16px; - border: 2px solid var(--border-color); - padding: 2rem; - text-align: center; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); - transition: all 0.3s ease; -} - -.benefit-card:hover { - transform: translateY(-4px); - box-shadow: 0 12px 40px rgba(0, 0, 0, 0.12); - border-color: var(--primary-blue); -} - -.benefit-robot { - margin-bottom: 1rem; -} - -.benefit-robot .robot-head { - width: 40px; - height: 40px; - background: var(--warm-yellow); - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 auto 6px; - position: relative; -} - -.benefit-robot .robot-body { - width: 50px; - height: 60px; - background: var(--gentle-orange); - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 auto; - display: flex; - align-items: center; - justify-content: center; -} - -.benefit-card h4 { - font-size: 1.3rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1rem; -} - 
-.benefit-card p { - color: var(--medium-gray); - line-height: 1.6; -} - -/* Interactive Demo */ -.architecture-demo { - margin: 4rem 0; - background: white; - border-radius: 20px; - border: 2px solid var(--border-color); - padding: 3rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); -} - -.architecture-demo h3 { - text-align: center; - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.demo-container { - max-width: 800px; - margin: 0 auto; -} - -.scenario-title { - text-align: center; - font-size: 1.2rem; - font-weight: 600; - color: var(--primary-blue); - margin-bottom: 2rem; - padding: 1rem; - background: var(--light-blue); - border-radius: 10px; - border: 2px solid var(--primary-blue); -} - -.demo-flow { - display: flex; - align-items: center; - justify-content: center; - gap: 1rem; - flex-wrap: wrap; -} - -.flow-step { - background: var(--cream-white); - border-radius: 12px; - border: 2px solid var(--border-color); - padding: 1.5rem; - text-align: center; - max-width: 200px; - transition: all 0.3s ease; -} - -.flow-step:hover { - border-color: var(--primary-blue); - transform: scale(1.05); -} - -.step-number { - width: 30px; - height: 30px; - background: var(--primary-blue); - color: white; - border-radius: 50%; - display: flex; - align-items: center; - justify-content: center; - font-weight: 700; - margin: 0 auto 1rem; -} - -.step-content { - font-size: 0.9rem; - line-height: 1.4; -} - -.step-content strong { - color: var(--primary-blue); - display: block; - margin-bottom: 0.5rem; -} - -.flow-arrow { - width: 30px; - height: 2px; - background: var(--primary-blue); - position: relative; -} - -.flow-arrow::after { - content: ''; - position: absolute; - right: -5px; - top: -3px; - width: 0; - height: 0; - border-top: 4px solid transparent; - border-bottom: 4px solid transparent; - border-left: 8px solid var(--primary-blue); -} - -/* Animations */ -@keyframes thinking { - 0%, - 100% { - transform: scaleY(1); - } - 50% { 
- transform: scaleY(0.8); - } -} - -@keyframes flowData { - 0% { - top: 0; - opacity: 0; - } - 10% { - opacity: 1; - } - 90% { - opacity: 1; - } - 100% { - top: 100%; - opacity: 0; - } -} - -@keyframes pulse { - 0%, - 100% { - transform: scale(1); - opacity: 1; - } - 50% { - transform: scale(1.2); - opacity: 0.7; - } -} - -/* Responsive Design */ -@media (max-width: 1024px) { - .stack-layer { - flex-direction: column; - text-align: center; - gap: 1.5rem; - } - - .layer-connection { - display: none; - } - - .benefits-grid { - grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); - } -} - -@media (max-width: 768px) { - .architecture-stack { - margin: 3rem 0; - } - - .stack-layer { - padding: 1.5rem; - margin-bottom: 1.5rem; - } - - .layer-content h3 { - font-size: 1.3rem; - } - - .demo-flow { - flex-direction: column; - } - - .flow-arrow { - transform: rotate(90deg); - } - - .architecture-demo { - padding: 2rem; - } -} - -@media (max-width: 480px) { - .stack-layer { - padding: 1.25rem; - } - - .benefits-grid { - grid-template-columns: 1fr; - } - - .benefit-card { - padding: 1.5rem; - } -} - -/* Accessibility */ -@media (prefers-reduced-motion: reduce) { - .robot-eye, - .flow-packet, - .feature-status, - .stack-layer, - .benefit-card, - .flow-step { - animation: none !important; - transition: none !important; - } -} - -/* High contrast mode */ -@media (prefers-contrast: high) { - .stack-layer, - .benefit-card, - .architecture-demo { - border-width: 3px; - } -} diff --git a/website/src/styles/products-models.css b/website/src/styles/products-models.css deleted file mode 100644 index d691759f73..0000000000 --- a/website/src/styles/products-models.css +++ /dev/null @@ -1,758 +0,0 @@ -/* ProductsModels Component Styles */ - -.models-section { - background: var(--cream-white); - border-top: 1px solid var(--border-color); -} - -.models-section .badge-robot-head { - background: var(--soft-pink); -} - -/* Models Grid */ -.models-grid { - display: grid; - 
grid-template-columns: repeat(auto-fit, minmax(400px, 1fr)); - gap: 2rem; - margin: 4rem 0; -} - -.model-card { - position: relative; - overflow: visible; -} - -.model-robot-section { - position: relative; - text-align: center; - margin-bottom: 2rem; -} - -.model-robot { - display: inline-block; - position: relative; - margin-bottom: 1rem; -} - -.model-robot .robot-head { - width: 70px; - height: 70px; - border-radius: 16px; - border: 3px solid var(--dark-text); - margin: 0 auto 12px; - position: relative; -} - -.model-robot .robot-body { - width: 80px; - height: 90px; - border-radius: 16px; - border: 3px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; - flex-direction: column; -} - -/* Robot variations */ -.v1-robot .robot-head { background: var(--success-green); } -.v1-robot .robot-body { background: var(--success-green); } - -.nano-robot .robot-head { background: var(--primary-blue); } -.nano-robot .robot-body { background: var(--primary-blue); } - -.lucy-robot .robot-head { background: var(--warning-amber); } -.lucy-robot .robot-body { background: var(--warning-amber); } - -.robot-eyes { - display: flex; - justify-content: space-between; - padding: 12px 10px; -} - -.robot-eye { - width: 14px; - height: 14px; - background: var(--dark-text); - border-radius: 50%; - position: relative; -} - -.robot-eye::after { - content: ''; - position: absolute; - top: 3px; - right: 3px; - width: 4px; - height: 4px; - background: white; - border-radius: 50%; -} - -.robot-eye.research { - background: var(--primary-blue); - animation: researchBlink 3s infinite ease-in-out; -} - -.robot-eye.sleeping { - height: 4px; - border-radius: 2px; - background: var(--dark-text); -} - -.robot-eye.sleeping::after { - display: none; -} - -.robot-antenna { - position: absolute; - top: -12px; - left: 50%; - transform: translateX(-50%); - width: 3px; - height: 15px; - background: var(--dark-text); - border-radius: 
2px; -} - -.antenna-tip { - position: absolute; - top: -6px; - left: 50%; - transform: translateX(-50%); - width: 8px; - height: 8px; - border-radius: 50%; - border: 2px solid var(--dark-text); -} - -.antenna-tip.active { - background: var(--success-green); - animation: pulse 2s infinite; -} - -.robot-chest { - margin-bottom: 8px; -} - -.chest-panel { - display: flex; - flex-direction: column; - gap: 2px; -} - -.panel-line { - width: 20px; - height: 2px; - background: var(--dark-text); - border-radius: 1px; -} - -.panel-line.active { - background: white; - animation: dataFlow 2s infinite ease-in-out; -} - -.panel-line:nth-child(2) { animation-delay: 0.5s; } -.panel-line:nth-child(3) { animation-delay: 1s; } - -.robot-badge { - background: white; - color: var(--dark-text); - padding: 2px 8px; - border-radius: 8px; - font-size: 0.7rem; - font-weight: 700; - border: 2px solid var(--dark-text); -} - -.robot-scanner { - position: absolute; - top: 8px; - right: 8px; - width: 12px; - height: 12px; -} - -.scanner-beam { - width: 100%; - height: 2px; - background: var(--primary-blue); - border-radius: 1px; - animation: scan 3s infinite ease-in-out; -} - -.research-display { - display: flex; - gap: 3px; -} - -.display-dot { - width: 4px; - height: 4px; - background: white; - border-radius: 50%; - animation: pulse 2s infinite; -} - -.display-dot:nth-child(2) { animation-delay: 0.5s; } -.display-dot:nth-child(3) { animation-delay: 1s; } - -.robot-vision-sensor { - position: absolute; - top: 5px; - left: 50%; - transform: translateX(-50%); - width: 16px; - height: 16px; -} - -.vision-ring { - width: 100%; - height: 100%; - border: 2px solid var(--dark-text); - border-radius: 50%; - position: relative; -} - -.vision-center { - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - width: 6px; - height: 6px; - background: var(--error-red); - border-radius: 50%; -} - -.multimodal-display { - display: flex; - gap: 2px; -} - -.modal-icon { - width: 6px; - 
height: 6px; - border-radius: 2px; - border: 1px solid var(--dark-text); -} - -.modal-icon.vision { background: var(--primary-blue); } -.modal-icon.text { background: var(--success-green); } -.modal-icon.audio { background: var(--medium-gray); } - -.robot-progress { - margin-top: 8px; - text-align: center; -} - -.progress-bar { - width: 40px; - height: 4px; - background: var(--border-color); - border-radius: 2px; - margin: 0 auto 4px; - overflow: hidden; -} - -.progress-fill { - height: 100%; - background: var(--warning-amber); - border-radius: 2px; - width: 60%; - animation: training 3s infinite ease-in-out; -} - -.progress-text { - font-size: 0.7rem; - color: var(--medium-gray); - font-weight: 500; -} - -.robot-arms { - display: flex; - justify-content: space-between; - margin-top: -30px; - padding: 0 15px; - position: relative; - z-index: -1; -} - -.arm { - width: 10px; - height: 30px; - background: var(--gentle-orange); - border-radius: 5px; - border: 2px solid var(--dark-text); - transform-origin: top center; -} - -.robot-tools { - display: flex; - gap: 8px; - margin-top: 8px; -} - -.tool { - width: 8px; - height: 8px; - border-radius: 2px; - border: 1px solid var(--dark-text); -} - -.search-tool { background: var(--primary-blue); } -.analysis-tool { background: var(--success-green); } - -.model-status-bubble { - position: absolute; - top: -10px; - right: 1rem; -} - -/* Model Content */ -.model-content { - padding: 0; -} - -.model-header { - display: flex; - justify-content: space-between; - align-items: center; - margin-bottom: 1rem; -} - -.model-header h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); -} - -.model-version { - background: var(--light-gray); - color: var(--medium-gray); - padding: 0.25rem 0.5rem; - border-radius: 8px; - font-size: 0.7rem; - font-weight: 600; - border: 1px solid var(--border-color); -} - -.model-description { - color: var(--medium-gray); - margin-bottom: 1.5rem; - line-height: 1.6; -} - -.model-specs { - 
margin-bottom: 1.5rem; -} - -.spec-section { - margin-bottom: 1rem; -} - -.spec-section h4 { - font-size: 0.9rem; - font-weight: 600; - color: var(--dark-text); - margin-bottom: 0.75rem; - text-transform: uppercase; - letter-spacing: 0.05em; -} - -.spec-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(100px, 1fr)); - gap: 0.75rem; -} - -.spec-item { - background: var(--light-gray); - border-radius: 8px; - padding: 0.75rem; - text-align: center; - border: 1px solid var(--border-color); -} - -.spec-label { - font-size: 0.8rem; - font-weight: 600; - color: var(--primary-blue); - margin-bottom: 0.25rem; -} - -.spec-value { - font-size: 0.9rem; - color: var(--dark-text); - font-weight: 500; -} - -.spec-highlight { - background: var(--light-blue); - border: 2px solid var(--primary-blue); - border-radius: 10px; - padding: 1rem; - text-align: center; -} - -.highlight-label { - font-size: 0.8rem; - font-weight: 600; - color: var(--primary-blue); - margin-bottom: 0.25rem; - text-transform: uppercase; -} - -.highlight-value { - font-size: 1.2rem; - font-weight: 700; - color: var(--dark-text); -} - -.model-capabilities { - margin-bottom: 1.5rem; -} - -.model-capabilities h4 { - font-size: 0.9rem; - font-weight: 600; - color: var(--dark-text); - margin-bottom: 0.75rem; - text-transform: uppercase; - letter-spacing: 0.05em; -} - -.capabilities-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(120px, 1fr)); - gap: 0.75rem; -} - -.capability { - display: flex; - flex-direction: column; - align-items: center; - gap: 0.5rem; - padding: 0.75rem; - border-radius: 10px; - border: 2px solid var(--border-color); - text-align: center; - font-size: 0.8rem; - font-weight: 500; - transition: all 0.3s ease; -} - -.capability:hover { - transform: translateY(-2px); - box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1); -} - -.capability.active { - border-color: var(--success-green); - background: var(--soft-green); -} - -.capability.coming { - border-color: 
var(--medium-gray); - background: var(--light-gray); - color: var(--medium-gray); - border-style: dashed; -} - -.capability-icon { - width: 20px; - height: 20px; - border-radius: 4px; - border: 2px solid var(--dark-text); - position: relative; -} - -/* Capability icon colors */ -.capability-icon.reasoning { background: var(--primary-blue); } -.capability-icon.coding { background: var(--success-green); } -.capability-icon.multilingual { background: var(--warning-amber); } -.capability-icon.tools { background: var(--light-purple); } -.capability-icon.search { background: var(--primary-blue); } -.capability-icon.document { background: var(--warning-amber); } -.capability-icon.mobile { background: var(--success-green); } -.capability-icon.mcp { background: var(--light-purple); } -.capability-icon.image { background: var(--medium-gray); } -.capability-icon.ocr { background: var(--medium-gray); } -.capability-icon.visual { background: var(--medium-gray); } -.capability-icon.processing { background: var(--medium-gray); } - -/* Add small symbols to capability icons */ -.capability-icon::after { - content: ''; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - width: 6px; - height: 6px; - background: white; - border-radius: 2px; -} - -.capability-icon.reasoning::after { border-radius: 50%; } -.capability-icon.coding::after { width: 8px; height: 4px; } -.capability-icon.search::after { border-radius: 50%; width: 4px; height: 4px; } - -.model-metrics { - display: flex; - justify-content: space-around; - padding-top: 1rem; - border-top: 1px solid var(--border-color); -} - -.metric { - text-align: center; -} - -.metric-value { - font-size: 1.1rem; - font-weight: 700; - color: var(--primary-blue); - margin-bottom: 0.25rem; -} - -.metric-label { - font-size: 0.8rem; - color: var(--medium-gray); - text-transform: uppercase; - letter-spacing: 0.05em; -} - -/* Models Comparison */ -.models-comparison { - margin: 4rem 0; - background: white; - 
border-radius: 20px; - border: 2px solid var(--border-color); - padding: 3rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); -} - -.models-comparison h3 { - text-align: center; - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.comparison-table { - display: grid; - grid-template-columns: 1fr 1fr 1fr 1fr; - gap: 1px; - background: var(--border-color); - border-radius: 12px; - overflow: hidden; -} - -.table-header { - display: contents; -} - -.header-cell { - background: var(--primary-blue); - color: white; - padding: 1rem; - font-weight: 700; - text-align: center; -} - -.table-row { - display: contents; -} - -.table-cell { - background: white; - padding: 1rem; - text-align: center; - font-size: 0.9rem; -} - -.feature-col { - font-weight: 600; - color: var(--dark-text); - text-align: left; -} - -.model-col { - color: var(--medium-gray); -} - -.status-indicator { - display: inline-flex; - align-items: center; - gap: 0.25rem; - padding: 0.25rem 0.5rem; - border-radius: 12px; - font-size: 0.7rem; - font-weight: 600; - text-transform: uppercase; -} - -.status-indicator::before { - content: ''; - width: 6px; - height: 6px; - border-radius: 50%; -} - -.status-indicator.active { - background: var(--soft-green); - color: var(--success-green); -} - -.status-indicator.active::before { - background: var(--success-green); -} - -.status-indicator.beta { - background: var(--warm-yellow); - color: var(--warning-amber); -} - -.status-indicator.beta::before { - background: var(--warning-amber); -} - -.status-indicator.coming { - background: var(--light-gray); - color: var(--medium-gray); -} - -.status-indicator.coming::before { - background: var(--medium-gray); -} - -/* Animations */ -@keyframes researchBlink { - 0%, 100% { opacity: 1; } - 50% { opacity: 0.3; } -} - -@keyframes scan { - 0% { transform: translateY(0) scaleX(1); } - 50% { transform: translateY(4px) scaleX(1.2); } - 100% { transform: translateY(8px) scaleX(1); } -} - 
-@keyframes training { - 0% { width: 0%; } - 50% { width: 80%; } - 100% { width: 60%; } -} - -@keyframes dataFlow { - 0%, 100% { opacity: 0.5; transform: scaleX(1); } - 50% { opacity: 1; transform: scaleX(1.2); } -} - -@keyframes pulse { - 0%, 100% { transform: scale(1); opacity: 1; } - 50% { transform: scale(1.1); opacity: 0.7; } -} - -/* Responsive Design */ -@media (max-width: 1024px) { - .models-grid { - grid-template-columns: repeat(auto-fit, minmax(350px, 1fr)); - } - - .models-comparison { - padding: 2rem; - } - - .comparison-table { - font-size: 0.8rem; - } -} - -@media (max-width: 768px) { - .models-grid { - grid-template-columns: 1fr; - gap: 1.5rem; - } - - .model-robot .robot-head { - width: 60px; - height: 60px; - } - - .model-robot .robot-body { - width: 70px; - height: 80px; - } - - .spec-grid { - grid-template-columns: repeat(auto-fit, minmax(80px, 1fr)); - } - - .capabilities-grid { - grid-template-columns: repeat(auto-fit, minmax(100px, 1fr)); - } - - .comparison-table { - grid-template-columns: 1fr; - gap: 0; - } - - .table-row { - display: grid; - grid-template-columns: 1fr 1fr 1fr 1fr; - gap: 1px; - margin-bottom: 1px; - } - - .models-comparison { - padding: 1.5rem; - } -} - -@media (max-width: 480px) { - .model-content { - padding: 1rem; - } - - .model-specs { - margin-bottom: 1rem; - } - - .spec-grid { - grid-template-columns: 1fr 1fr; - } - - .capabilities-grid { - grid-template-columns: 1fr 1fr; - } - - .model-metrics { - flex-direction: column; - gap: 1rem; - } -} - -/* Accessibility */ -@media (prefers-reduced-motion: reduce) { - .robot-eye, .scanner-beam, .display-dot, .panel-line, .progress-fill, .antenna-tip, .capability { - animation: none !important; - transition: none !important; - } -} - -/* High contrast mode */ -@media (prefers-contrast: high) { - .model-card, .comparison-table { - border-width: 3px; - } - - .capability, .spec-item { - border-width: 2px; - } -} diff --git a/website/src/styles/products-platforms.css 
b/website/src/styles/products-platforms.css deleted file mode 100644 index a5eaf7c235..0000000000 --- a/website/src/styles/products-platforms.css +++ /dev/null @@ -1,1009 +0,0 @@ -/* Products Platforms Section Styles */ - -.platforms-section { - background: var(--light-gray); - position: relative; - overflow: hidden; -} - -/* Platforms Timeline */ -.platforms-timeline { - position: relative; - max-width: 1000px; - margin: 4rem auto; - padding: 0 2rem; -} - -.timeline-path { - position: absolute; - left: 50px; - top: 0; - bottom: 0; - width: 4px; - background: linear-gradient( - 180deg, - var(--primary-blue) 0%, - var(--soft-pink) 25%, - var(--warm-yellow) 50%, - var(--soft-green) 75%, - var(--primary-blue) 100% - ); - border-radius: 2px; -} - -.timeline-path::before { - content: ''; - position: absolute; - top: -20px; - left: 50%; - transform: translateX(-50%); - width: 12px; - height: 12px; - background: var(--primary-blue); - border-radius: 50%; - border: 3px solid white; -} - -/* Platform Items */ -.platform-item { - position: relative; - display: flex; - align-items: center; - gap: 3rem; - margin-bottom: 4rem; - opacity: 0; - transform: translateX(-50px); - animation: slideInFromLeft 0.6s ease-out forwards; -} - -.platform-item:nth-child(2) { animation-delay: 0.2s; } -.platform-item:nth-child(3) { animation-delay: 0.4s; } -.platform-item:nth-child(4) { animation-delay: 0.6s; } - -@keyframes slideInFromLeft { - to { - opacity: 1; - transform: translateX(0); - } -} - -@keyframes slideInFromRight { - from { - opacity: 0; - transform: translateX(50px); - } - to { - opacity: 1; - transform: translateX(0); - } -} - -/* Platform Robots Container */ -.platform-robot { - position: relative; - width: 150px; - flex-shrink: 0; - z-index: 2; -} - -.platform-robot .robot-head { - width: 60px; - height: 60px; - margin: 0 auto 10px; - border-radius: 15px; - border: 3px solid var(--dark-text); - position: relative; - transition: all 0.3s ease; -} - -.platform-robot .robot-body 
{ - width: 80px; - height: 100px; - margin: 0 auto; - border-radius: 15px; - border: 3px solid var(--dark-text); - position: relative; - transition: all 0.3s ease; -} - -/* Desktop Robot Styles */ -.desktop-robot .robot-head { - background: linear-gradient(135deg, var(--primary-blue), var(--pastel-blue)); -} - -.desktop-robot .robot-body { - background: linear-gradient(135deg, var(--pastel-blue), var(--primary-blue)); -} - -.desktop-robot .robot-screen { - position: absolute; - top: 15px; - left: 50%; - transform: translateX(-50%); - width: 50px; - height: 30px; - background: #000; - border: 2px solid var(--primary-blue); - border-radius: 4px; - overflow: hidden; -} - -.screen-content { - padding: 4px; -} - -.screen-bar { - height: 2px; - background: var(--soft-green); - margin: 2px 0; - border-radius: 1px; - animation: codeFlow 2s ease-in-out infinite; -} - -.screen-bar:nth-child(2) { animation-delay: 0.5s; width: 80%; } -.screen-bar:nth-child(3) { animation-delay: 1s; width: 60%; } - -@keyframes codeFlow { - 0%, 100% { opacity: 0.3; transform: translateX(-10px); } - 50% { opacity: 1; transform: translateX(0); } -} - -.robot-keyboard { - position: absolute; - bottom: 20px; - left: 50%; - transform: translateX(-50%); - display: flex; - gap: 2px; -} - -.key { - width: 6px; - height: 6px; - background: var(--dark-text); - border-radius: 1px; -} - -.robot-base { - margin-top: 10px; -} - -.base-stand { - width: 40px; - height: 8px; - background: var(--dark-text); - margin: 0 auto; - border-radius: 4px; -} - -/* Web Robot Styles */ -.web-robot .robot-head { - background: linear-gradient(135deg, var(--light-blue), var(--soft-green)); -} - -.web-robot .robot-body { - background: linear-gradient(135deg, var(--soft-green), var(--light-blue)); -} - -.robot-browser { - position: absolute; - top: 10px; - left: 50%; - transform: translateX(-50%); - width: 45px; - height: 35px; - background: white; - border: 2px solid var(--dark-text); - border-radius: 4px; - overflow: hidden; 
-} - -.browser-bar { - display: flex; - gap: 2px; - padding: 2px; - background: var(--light-gray); - border-bottom: 1px solid var(--border-color); -} - -.browser-dot { - width: 4px; - height: 4px; - border-radius: 50%; -} - -.browser-dot.red { background: #ff5f56; } -.browser-dot.yellow { background: #ffbd2e; } -.browser-dot.green { background: #27ca3f; } - -.browser-content { - padding: 4px; -} - -.content-line { - height: 2px; - background: var(--primary-blue); - margin: 2px 0; - border-radius: 1px; - width: 100%; -} - -.content-line.short { width: 70%; } - -.robot-cloud { - position: absolute; - top: 20px; - left: 50%; - transform: translateX(-50%); - width: 40px; - height: 40px; - display: flex; - flex-direction: column; - align-items: center; - justify-content: center; -} - -.cloud-icon { - font-size: 24px; -} - -.cloud-data { - display: flex; - gap: 2px; - margin-top: 4px; -} - -.data-bit { - width: 4px; - height: 4px; - background: var(--primary-blue); - border-radius: 50%; - animation: dataFlow 1.5s ease-in-out infinite; -} - -.data-bit:nth-child(2) { animation-delay: 0.5s; } - -@keyframes dataFlow { - 0%, 100% { opacity: 0; transform: translateY(0); } - 50% { opacity: 1; transform: translateY(-5px); } -} - -/* Mobile Robot Styles */ -.mobile-robot .robot-head { - background: linear-gradient(135deg, var(--warm-yellow), var(--gentle-orange)); -} - -.mobile-robot .robot-body { - background: linear-gradient(135deg, var(--gentle-orange), var(--warm-yellow)); -} - -.robot-phone { - position: absolute; - top: 10px; - left: 50%; - transform: translateX(-50%); - width: 30px; - height: 40px; - background: #000; - border: 2px solid var(--dark-text); - border-radius: 6px; - overflow: hidden; -} - -.phone-screen { - margin: 2px; - height: calc(100% - 4px); - background: var(--primary-blue); - border-radius: 4px; - display: flex; - align-items: center; - justify-content: center; -} - -.phone-ui { - display: flex; - flex-direction: column; - gap: 4px; -} - -.ui-element { 
- width: 20px; - height: 4px; - background: white; - border-radius: 2px; - opacity: 0.8; -} - -.robot-signal { - position: absolute; - top: 20px; - right: 10px; - display: flex; - gap: 2px; -} - -.signal-bar { - width: 3px; - height: 8px; - background: var(--dark-text); - border-radius: 1px; - opacity: 0.3; -} - -.signal-bar:nth-child(2) { height: 12px; opacity: 0.6; } -.signal-bar:nth-child(3) { height: 16px; opacity: 1; } - -.development-indicator { - position: absolute; - bottom: -30px; - left: 50%; - transform: translateX(-50%); - text-align: center; - width: 100%; -} - -.dev-progress { - width: 80px; - height: 4px; - background: var(--light-gray); - border-radius: 2px; - margin: 0 auto 8px; - overflow: hidden; -} - -.progress-fill { - height: 100%; - width: 60%; - background: var(--warm-yellow); - border-radius: 2px; - animation: progressAnimation 2s ease-in-out infinite; -} - -@keyframes progressAnimation { - 0%, 100% { width: 60%; } - 50% { width: 80%; } -} - -.dev-text { - font-size: 0.8rem; - color: var(--medium-gray); - font-weight: 600; -} - -/* Server Robot Styles */ -.server-robot .robot-head { - background: linear-gradient(135deg, var(--dark-text), var(--medium-gray)); -} - -.server-robot .robot-body { - background: linear-gradient(135deg, var(--medium-gray), var(--dark-text)); -} - -.robot-server-display { - position: absolute; - top: 15px; - left: 50%; - transform: translateX(-50%); -} - -.server-lights { - display: flex; - gap: 4px; -} - -.light { - width: 6px; - height: 6px; - border-radius: 50%; - background: var(--success-green); - animation: serverBlink 2s ease-in-out infinite; -} - -.light:nth-child(2) { animation-delay: 0.5s; background: var(--warning-amber); } -.light:nth-child(3) { animation-delay: 1s; background: var(--error-red); } - -@keyframes serverBlink { - 0%, 100% { opacity: 0.3; } - 50% { opacity: 1; } -} - -.robot-rack { - padding: 20px 10px; -} - -.rack-unit { - width: 100%; - height: 8px; - background: var(--light-gray); - 
margin: 4px 0; - border-radius: 2px; - position: relative; - overflow: hidden; -} - -.rack-unit::after { - content: ''; - position: absolute; - top: 0; - left: -20px; - width: 20px; - height: 100%; - background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.4), transparent); - animation: rackScan 3s linear infinite; -} - -@keyframes rackScan { - to { left: 100%; } -} - -.robot-connections { - margin-top: 10px; - display: flex; - justify-content: center; - gap: 8px; -} - -.connection-port { - width: 12px; - height: 6px; - background: var(--dark-text); - border-radius: 2px; -} - -/* Platform Cards */ -.platform-card { - flex: 1; - background: white; - border-radius: 20px; - border: 3px solid var(--border-color); - padding: 2rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); - transition: all 0.3s ease; - position: relative; -} - -.platform-card:hover { - transform: translateY(-4px); - box-shadow: 0 12px 40px rgba(0, 0, 0, 0.12); - border-color: var(--primary-blue); -} - -.platform-status { - display: inline-flex; - align-items: center; - gap: 0.5rem; - padding: 0.5rem 1rem; - border-radius: 20px; - font-size: 0.8rem; - font-weight: 600; - margin-bottom: 1rem; -} - -.platform-status.available { - background: var(--success-green); - color: white; -} - -.platform-status.beta { - background: var(--warning-amber); - color: white; -} - -.platform-status.coming { - background: var(--medium-gray); - color: white; - border: 2px dashed white; -} - -.platform-card h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 0.5rem; -} - -.platform-card p { - color: var(--medium-gray); - margin-bottom: 1.5rem; - line-height: 1.6; -} - -/* Platform Metrics */ -.platform-metrics { - display: flex; - gap: 2rem; - margin-bottom: 1.5rem; -} - -.metric { - display: flex; - align-items: center; - gap: 0.75rem; -} - -.metric-icon { - width: 32px; - height: 32px; - background: var(--light-blue); - border-radius: 8px; - display: flex; - 
align-items: center; - justify-content: center; -} - -.metric-text { - display: flex; - flex-direction: column; -} - -.metric-value { - font-size: 1.2rem; - font-weight: 700; - color: var(--dark-text); -} - -.metric-label { - font-size: 0.8rem; - color: var(--medium-gray); -} - -/* Platform Features */ -.platform-features { - display: grid; - grid-template-columns: repeat(2, 1fr); - gap: 1rem; - margin-bottom: 1.5rem; -} - -.feature-item { - display: flex; - align-items: center; - gap: 0.5rem; - font-size: 0.9rem; -} - -.feature-icon { - width: 20px; - height: 20px; - background: var(--light-purple); - border-radius: 4px; - flex-shrink: 0; -} - -/* Platform Specs */ -.platform-specs { - display: flex; - gap: 1rem; - padding: 1rem; - background: var(--light-gray); - border-radius: 12px; -} - -.spec-item { - flex: 1; - text-align: center; -} - -.spec-label { - font-size: 0.8rem; - color: var(--medium-gray); - margin-bottom: 0.25rem; -} - -.spec-value { - font-size: 1rem; - font-weight: 700; - color: var(--dark-text); -} - -/* Mobile Modes */ -.mobile-modes { - margin-top: 2rem; - padding: 2rem; - background: var(--light-blue); - border-radius: 16px; -} - -.mobile-modes h4 { - text-align: center; - font-size: 1.3rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1.5rem; -} - -.modes-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); - gap: 1.5rem; -} - -.mode-card { - background: white; - border-radius: 12px; - padding: 1.5rem; - border: 2px solid transparent; - transition: all 0.3s ease; - cursor: pointer; -} - -.mode-card:hover { - border-color: var(--primary-blue); - transform: translateY(-2px); - box-shadow: 0 8px 20px rgba(0, 0, 0, 0.1); -} - -.mode-card.active { - border-color: var(--primary-blue); - background: var(--light-blue); -} - -.mode-icon { - display: flex; - justify-content: center; - margin-bottom: 1rem; -} - -.mode-robot-mini { - width: 40px; - height: 50px; - position: relative; -} - -.mini-head 
{ - width: 20px; - height: 20px; - margin: 0 auto 5px; - border-radius: 6px; - border: 2px solid var(--dark-text); -} - -.mini-body { - width: 30px; - height: 25px; - margin: 0 auto; - border-radius: 6px; - border: 2px solid var(--dark-text); - display: flex; - align-items: center; - justify-content: center; -} - -.desktop-color { background: var(--primary-blue); } -.server-color { background: var(--medium-gray); } -.local-color { background: var(--soft-green); } - -.mode-content h5 { - font-size: 1.1rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 0.5rem; -} - -.mode-content p { - font-size: 0.9rem; - color: var(--medium-gray); - margin-bottom: 1rem; -} - -.mode-flow { - display: flex; - align-items: center; - gap: 0.5rem; - font-size: 0.8rem; - color: var(--primary-blue); - font-weight: 600; - margin-bottom: 1rem; -} - -.mode-benefits { - display: flex; - flex-direction: column; - gap: 0.5rem; -} - -.benefit { - font-size: 0.85rem; - color: var(--dark-text); - padding-left: 1rem; - position: relative; -} - -.benefit::before { - content: '✓'; - position: absolute; - left: 0; - color: var(--success-green); - font-weight: bold; -} - -/* Mobile Features */ -.mobile-features { - margin-top: 2rem; -} - -.mobile-features h4 { - font-size: 1.3rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1.5rem; -} - -.feature-highlight-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); - gap: 1.5rem; -} - -.feature-highlight { - display: flex; - gap: 1rem; -} - -.highlight-icon { - width: 48px; - height: 48px; - background: var(--light-purple); - border-radius: 12px; - flex-shrink: 0; -} - -.highlight-content h5 { - font-size: 1rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 0.25rem; -} - -.highlight-content p { - font-size: 0.85rem; - color: var(--medium-gray); - line-height: 1.5; -} - -/* Deployment Options */ -.deployment-options { - display: flex; - gap: 1rem; - margin-top: 1rem; -} - 
-.deploy-option { - flex: 1; - display: flex; - align-items: center; - gap: 0.75rem; - padding: 1rem; - background: var(--light-gray); - border-radius: 8px; -} - -.deploy-icon { - width: 32px; - height: 32px; - background: var(--primary-blue); - border-radius: 8px; - display: flex; - align-items: center; - justify-content: center; - color: white; - font-weight: bold; -} - -.deploy-content { - display: flex; - flex-direction: column; -} - -.deploy-name { - font-size: 0.9rem; - font-weight: 600; - color: var(--dark-text); -} - -.deploy-desc { - font-size: 0.8rem; - color: var(--medium-gray); -} - -/* Platform Comparison */ -.platform-comparison { - margin: 4rem 0; -} - -.platform-comparison h3 { - text-align: center; - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.comparison-grid { - background: white; - border-radius: 16px; - overflow: hidden; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); -} - -.comparison-header { - display: grid; - grid-template-columns: 2fr 2fr 1fr 2fr; - background: var(--light-gray); - font-weight: 700; - color: var(--dark-text); -} - -.header-item { - padding: 1rem; - border-right: 1px solid var(--border-color); -} - -.header-item:last-child { - border-right: none; -} - -.comparison-row { - display: grid; - grid-template-columns: 2fr 2fr 1fr 2fr; - border-bottom: 1px solid var(--border-color); - transition: all 0.3s ease; -} - -.comparison-row:last-child { - border-bottom: none; -} - -.comparison-row:hover { - background: var(--light-blue); -} - -.row-item { - padding: 1rem; - border-right: 1px solid var(--border-color); - display: flex; - align-items: center; -} - -.row-item:last-child { - border-right: none; -} - -.platform-name { - display: flex; - align-items: center; - gap: 0.75rem; - font-weight: 600; -} - -.platform-mini-icon { - width: 24px; - height: 24px; - border-radius: 6px; - border: 2px solid var(--dark-text); -} - -.desktop-mini { background: var(--primary-blue); } -.web-mini { 
background: var(--soft-green); } -.mobile-mini { background: var(--warm-yellow); } -.server-mini { background: var(--medium-gray); } - -.status-indicator { - display: inline-flex; - align-items: center; - gap: 0.5rem; - padding: 0.25rem 0.75rem; - border-radius: 20px; - font-size: 0.8rem; - font-weight: 600; -} - -.status-indicator.active { - background: var(--success-green); - color: white; -} - -.status-indicator.beta { - background: var(--warning-amber); - color: white; -} - -.status-indicator.coming { - background: var(--light-gray); - color: var(--medium-gray); - border: 2px dashed var(--medium-gray); -} - -/* Hover Animation for Platform Robots */ -.platform-robot.hover-animation { - animation: robotHoverBounce 0.8s ease-in-out; -} - -@keyframes robotHoverBounce { - 0%, 100% { transform: translateY(0) scale(1); } - 50% { transform: translateY(-10px) scale(1.05); } -} - -/* Responsive Design */ -@media (max-width: 1024px) { - .platforms-timeline { - padding: 0 1rem; - } - - .platform-item { - flex-direction: column; - text-align: center; - } - - .timeline-path { - display: none; - } - - .modes-grid { - grid-template-columns: 1fr; - } - - .comparison-grid { - overflow-x: auto; - } - - .comparison-header, - .comparison-row { - min-width: 600px; - } -} - -@media (max-width: 768px) { - .platform-robot { - transform: scale(0.8); - margin-bottom: 1rem; - } - - .platform-features { - grid-template-columns: 1fr; - } - - .platform-specs { - flex-direction: column; - gap: 0.5rem; - } - - .spec-item { - display: flex; - justify-content: space-between; - text-align: left; - } - - .mobile-features { - padding: 1rem; - } - - .feature-highlight-grid { - grid-template-columns: 1fr; - } -} - -@media (max-width: 480px) { - .platform-card { - padding: 1.5rem; - } - - .platform-metrics { - flex-direction: column; - gap: 1rem; - } - - .deployment-options { - flex-direction: column; - } -} - -/* Accessibility */ -@media (prefers-reduced-motion: reduce) { - *, - *::before, - 
*::after { - animation: none !important; - transition: none !important; - } -} diff --git a/website/src/styles/products-principles.css b/website/src/styles/products-principles.css deleted file mode 100644 index 221fdbbfd5..0000000000 --- a/website/src/styles/products-principles.css +++ /dev/null @@ -1,1303 +0,0 @@ -/* ProductsPrinciples Component Styles */ - -.principles-section { - background: var(--cream-white); - border-top: 1px solid var(--border-color); -} - -.principles-section .badge-robot-head { - background: var(--error-red); -} - -/* Main Comparison */ -.main-comparison { - margin: 4rem 0; -} - -.comparison-sides { - display: grid; - grid-template-columns: 1fr auto 1fr; - gap: 2rem; - align-items: start; - max-width: 1000px; - margin: 0 auto; -} - -.comparison-side { - background: white; - border-radius: 20px; - border: 3px solid var(--border-color); - padding: 2rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); - transition: all 0.3s ease; -} - -.other-ai { - border-color: var(--error-red); - border-left-width: 6px; -} - -.jan-side { - border-color: var(--success-green); - border-left-width: 6px; -} - -.comparison-side:hover { - transform: translateY(-4px); - box-shadow: 0 12px 40px rgba(0, 0, 0, 0.12); -} - -.side-header { - text-align: center; - margin-bottom: 2rem; -} - -.side-robot { - display: inline-block; - position: relative; - margin-bottom: 1rem; -} - -/* Other AI Robot (Corporate) */ -.other-robot .robot-head { - width: 60px; - height: 60px; - background: var(--error-red); - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto 10px; - position: relative; -} - -.other-robot .robot-body { - width: 70px; - height: 80px; - background: var(--error-red); - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - flex-direction: column; - align-items: center; - justify-content: center; -} - -.robot-eyes { - display: flex; - justify-content: space-between; - padding: 
12px 8px; -} - -.robot-eye { - width: 12px; - height: 12px; - background: var(--dark-text); - border-radius: 50%; - position: relative; -} - -.robot-eye::after { - content: ''; - position: absolute; - top: 2px; - right: 2px; - width: 3px; - height: 3px; - background: white; - border-radius: 50%; -} - -.robot-eye.corporate { - background: var(--dark-text); - animation: corporateGlare 3s infinite ease-in-out; -} - -.robot-corporate-hat { - position: absolute; - top: -8px; - left: 50%; - transform: translateX(-50%); - width: 25px; - height: 15px; - background: var(--dark-text); - border-radius: 8px 8px 0 0; - display: flex; - align-items: center; - justify-content: center; -} - -.hat-logo { - color: var(--warning-amber); - font-size: 0.7rem; - font-weight: 700; -} - -.robot-tie { - width: 8px; - height: 25px; - background: var(--dark-text); - margin-bottom: 5px; - position: relative; -} - -.tie-pattern { - position: absolute; - top: 3px; - left: 50%; - transform: translateX(-50%); - width: 4px; - height: 4px; - background: var(--warning-amber); - border-radius: 50%; -} - -.robot-briefcase { - width: 20px; - height: 12px; - background: var(--dark-text); - border-radius: 2px; - display: flex; - align-items: center; - justify-content: center; -} - -.briefcase-lock { - width: 6px; - height: 6px; - background: var(--warning-amber); - border-radius: 2px; -} - -.robot-chains { - display: flex; - gap: 2px; - margin-top: 8px; -} - -.chain-link { - width: 8px; - height: 4px; - border: 1px solid var(--dark-text); - border-radius: 2px; - background: var(--medium-gray); -} - -/* Jan Robot (Friendly) */ -.jan-robot .robot-head { - width: 60px; - height: 60px; - background: var(--success-green); - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto 10px; - position: relative; -} - -.jan-robot .robot-body { - width: 70px; - height: 80px; - background: var(--success-green); - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto; - 
position: relative; - display: flex; - flex-direction: column; - align-items: center; - justify-content: center; -} - -.robot-eye.friendly { - background: var(--dark-text); - animation: friendlyBlink 4s infinite ease-in-out; -} - -.robot-smile { - position: absolute; - bottom: 12px; - left: 50%; - transform: translateX(-50%); - width: 20px; - height: 10px; - border: 3px solid var(--dark-text); - border-top: none; - border-radius: 0 0 20px 20px; -} - -.robot-heart { - margin-bottom: 8px; - position: relative; -} - -.heart-beat { - width: 15px; - height: 15px; - background: var(--error-red); - border-radius: 50%; - position: relative; - animation: heartbeat 2s infinite; -} - -.heart-beat::before, -.heart-beat::after { - content: ''; - position: absolute; - width: 15px; - height: 15px; - background: var(--error-red); - border-radius: 50%; -} - -.heart-beat::before { - top: -7px; - left: 0; -} - -.heart-beat::after { - top: 0; - left: -7px; -} - -.robot-shield { - width: 18px; - height: 20px; - background: var(--primary-blue); - border-radius: 50% 50% 50% 50% / 60% 60% 40% 40%; - border: 2px solid var(--dark-text); - display: flex; - align-items: center; - justify-content: center; -} - -.shield-lock { - width: 6px; - height: 6px; - background: white; - border-radius: 2px; -} - -.robot-freedom { - margin-top: 8px; -} - -.freedom-wings { - display: flex; - gap: 4px; - align-items: center; -} - -.wing { - width: 12px; - height: 8px; - background: white; - border: 2px solid var(--dark-text); - border-radius: 50%; - animation: wingFlap 3s infinite ease-in-out; -} - -.left-wing { - animation-delay: 0s; -} - -.right-wing { - animation-delay: 0.5s; -} - -/* VS Divider */ -.vs-divider { - display: flex; - align-items: center; - justify-content: center; - padding: 2rem 0; -} - -.vs-robot { - position: relative; -} - -.vs-robot .robot-head { - width: 40px; - height: 40px; - background: var(--warning-amber); - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 
auto 8px; - position: relative; -} - -.vs-robot .robot-body { - width: 50px; - height: 60px; - background: var(--warning-amber); - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 auto; - display: flex; - align-items: center; - justify-content: center; -} - -.robot-eye.vs-eye { - animation: vsFlicker 1s infinite ease-in-out; -} - -.vs-text { - font-size: 1.2rem; - font-weight: 800; - color: var(--dark-text); -} - -.robot-lightning { - position: absolute; - top: -15px; - left: 50%; - transform: translateX(-50%); -} - -.lightning-bolt { - width: 0; - height: 0; - border-left: 6px solid transparent; - border-right: 6px solid transparent; - border-top: 12px solid var(--warning-amber); - position: relative; -} - -.lightning-bolt::after { - content: ''; - position: absolute; - top: 8px; - left: -3px; - width: 0; - height: 0; - border-left: 3px solid transparent; - border-right: 3px solid transparent; - border-top: 6px solid var(--warning-amber); -} - -.side-header h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 0.5rem; -} - -.side-header p { - color: var(--medium-gray); - font-size: 0.9rem; -} - -.side-features { - display: flex; - flex-direction: column; - gap: 1rem; -} - -.feature-item { - display: flex; - align-items: flex-start; - gap: 1rem; - padding: 1rem; - border-radius: 12px; - transition: all 0.3s ease; -} - -.feature-item.limitation { - background: var(--warm-yellow); - border: 2px solid var(--error-red); -} - -.feature-item.advantage { - background: var(--soft-green); - border: 2px solid var(--success-green); -} - -.feature-item:hover { - transform: translateX(4px); -} - -.feature-icon { - width: 30px; - height: 30px; - border-radius: 8px; - border: 2px solid var(--dark-text); - flex-shrink: 0; -} - -.feature-icon.wrapper { - background: var(--error-red); -} -.feature-icon.cloud-only { - background: var(--medium-gray); -} -.feature-icon.subscription { - background: var(--warning-amber); -} 
-.feature-icon.control { - background: var(--error-red); -} -.feature-icon.own-models { - background: var(--success-green); -} -.feature-icon.privacy-first { - background: var(--primary-blue); -} -.feature-icon.free-local { - background: var(--success-green); -} -.feature-icon.your-rules { - background: var(--primary-blue); -} - -.feature-content h4 { - font-size: 1rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 0.25rem; -} - -.feature-content p { - font-size: 0.9rem; - color: var(--medium-gray); - line-height: 1.4; -} - -/* Detailed Comparison */ -.detailed-comparison { - margin: 4rem 0; - background: white; - border-radius: 20px; - border: 2px solid var(--border-color); - padding: 3rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); -} - -.detailed-comparison h3 { - text-align: center; - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.comparison-table { - display: grid; - grid-template-columns: 1fr 1fr 1fr; - gap: 1px; - background: var(--border-color); - border-radius: 12px; - overflow: hidden; -} - -.table-header { - display: contents; -} - -.header-cell { - background: var(--primary-blue); - color: white; - padding: 1rem; - font-weight: 700; - text-align: center; -} - -.table-row { - display: contents; -} - -.table-cell { - background: white; - padding: 1rem; - text-align: center; - transition: all 0.3s ease; -} - -.feature-col { - display: flex; - align-items: center; - justify-content: center; - gap: 0.75rem; - font-weight: 600; - color: var(--dark-text); -} - -.feature-robot { - position: relative; -} - -.robot-mini-head { - width: 20px; - height: 20px; - border-radius: 6px; - border: 2px solid var(--dark-text); - margin: 0 auto 4px; -} - -.robot-mini-body { - width: 25px; - height: 30px; - border-radius: 6px; - border: 2px solid var(--dark-text); - margin: 0 auto; -} - -.models-feature .robot-mini-head { - background: var(--soft-pink); -} -.models-feature .robot-mini-body { - background: 
var(--soft-pink); -} - -.privacy-feature .robot-mini-head { - background: var(--primary-blue); -} -.privacy-feature .robot-mini-body { - background: var(--primary-blue); -} - -.deployment-feature .robot-mini-head { - background: var(--success-green); -} -.deployment-feature .robot-mini-body { - background: var(--success-green); -} - -.cost-feature .robot-mini-head { - background: var(--warning-amber); -} -.cost-feature .robot-mini-body { - background: var(--warning-amber); -} - -.control-feature .robot-mini-head { - background: var(--light-purple); -} -.control-feature .robot-mini-body { - background: var(--light-purple); -} - -.limitation-indicator { - background: var(--warm-yellow); - color: var(--error-red); - padding: 0.5rem 1rem; - border-radius: 8px; - font-weight: 600; - border: 2px solid var(--error-red); -} - -.advantage-indicator { - background: var(--soft-green); - color: var(--success-green); - padding: 0.5rem 1rem; - border-radius: 8px; - font-weight: 600; - border: 2px solid var(--success-green); -} - -/* Philosophy Cards */ -.philosophy-cards { - margin: 4rem 0; -} - -.philosophy-cards h3 { - text-align: center; - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.cards-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - gap: 2rem; -} - -.philosophy-card { - background: white; - border-radius: 16px; - border: 2px solid var(--border-color); - padding: 2rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); - transition: all 0.3s ease; - text-align: center; -} - -.philosophy-card:hover { - border-color: var(--primary-blue); -} - -.card-robot { - margin-bottom: 1.5rem; - display: inline-block; -} - -.card-robot .robot-head { - width: 50px; - height: 50px; - border-radius: 12px; - border: 3px solid var(--dark-text); - margin: 0 auto 8px; - position: relative; -} - -.card-robot .robot-body { - width: 60px; - height: 70px; - border-radius: 12px; - border: 3px solid 
var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; -} - -.models-card .card-robot .robot-head { - background: var(--soft-pink); -} -.models-card .card-robot .robot-body { - background: var(--soft-pink); -} - -.infrastructure-card .card-robot .robot-head { - background: var(--success-green); -} -.infrastructure-card .card-robot .robot-body { - background: var(--success-green); -} - -.tools-card .card-robot .robot-head { - background: var(--warning-amber); -} -.tools-card .card-robot .robot-body { - background: var(--warning-amber); -} - -.robot-brain { - position: absolute; - top: 8px; - left: 50%; - transform: translateX(-50%); -} - -.brain-core { - width: 15px; - height: 10px; - background: var(--primary-blue); - border-radius: 8px; - border: 2px solid var(--dark-text); -} - -.model-display { - display: flex; - flex-direction: column; - gap: 3px; -} - -.model-layer { - width: 25px; - height: 3px; - background: var(--dark-text); - border-radius: 2px; -} - -.robot-home { - position: absolute; - top: 5px; - left: 50%; - transform: translateX(-50%); -} - -.home-roof { - width: 20px; - height: 10px; - background: var(--error-red); - border: 2px solid var(--dark-text); - border-radius: 2px 2px 0 0; -} - -.home-door { - width: 8px; - height: 8px; - background: var(--dark-text); - border-radius: 2px; - margin: 0 auto; -} - -.infrastructure-display { - display: flex; - flex-direction: column; - align-items: center; - gap: 4px; -} - -.server-rack { - width: 20px; - height: 15px; - background: var(--dark-text); - border-radius: 2px; -} - -.connection-lines { - display: flex; - gap: 2px; -} - -.line { - width: 8px; - height: 1px; - background: var(--dark-text); -} - -.robot-tools-display { - position: absolute; - top: 8px; - left: 50%; - transform: translateX(-50%); - display: flex; - gap: 4px; -} - -.tool-wrench { - width: 8px; - height: 8px; - background: var(--dark-text); - border-radius: 2px; - 
transform: rotate(45deg); -} - -.tool-gear { - width: 8px; - height: 8px; - background: var(--dark-text); - border-radius: 50%; -} - -.automation-display { - width: 30px; - height: 20px; -} - -.automation-flow { - display: flex; - justify-content: space-between; - align-items: center; -} - -.flow-dot { - width: 4px; - height: 4px; - background: var(--dark-text); - border-radius: 50%; - animation: flowPulse 2s infinite ease-in-out; -} - -.flow-dot:nth-child(2) { - animation-delay: 0.5s; -} -.flow-dot:nth-child(3) { - animation-delay: 1s; -} - -.card-content h4 { - font-size: 1.3rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1rem; -} - -.card-content p { - color: var(--medium-gray); - line-height: 1.6; -} - -.card-icon { - width: 40px; - height: 40px; - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 auto 1rem; -} - -.card-icon.our-models { - background: var(--soft-pink); -} -.card-icon.your-infrastructure { - background: var(--success-green); -} -.card-icon.real-tools { - background: var(--warning-amber); -} - -/* Principles CTA */ -.principles-cta { - margin: 4rem 0; - background: var(--light-blue); - border-radius: 20px; - border: 3px solid var(--primary-blue); - padding: 3rem; - display: flex; - align-items: center; - gap: 2rem; - box-shadow: 0 8px 30px rgba(59, 130, 246, 0.2); -} - -.cta-robot { - flex-shrink: 0; - position: relative; -} - -.cta-robot .robot-head { - width: 80px; - height: 80px; - background: var(--primary-blue); - border-radius: 20px; - border: 4px solid var(--dark-text); - margin: 0 auto 12px; - position: relative; -} - -.cta-robot .robot-body { - width: 90px; - height: 100px; - background: var(--primary-blue); - border-radius: 20px; - border: 4px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; -} - -.robot-eye.confident { - background: var(--dark-text); - animation: confidentGlow 3s infinite ease-in-out; -} - 
-.robot-confident-smile { - position: absolute; - bottom: 15px; - left: 50%; - transform: translateX(-50%); - width: 30px; - height: 15px; - border: 4px solid var(--dark-text); - border-bottom: none; - border-radius: 30px 30px 0 0; -} - -.robot-badge { - background: white; - color: var(--primary-blue); - padding: 0.5rem 1rem; - border-radius: 12px; - font-size: 0.8rem; - font-weight: 700; - border: 2px solid var(--dark-text); - text-transform: uppercase; -} - -.robot-cape { - position: absolute; - top: 70px; - left: -10px; - right: -10px; - height: 40px; - background: var(--error-red); - border-radius: 0 0 15px 15px; - border: 3px solid var(--dark-text); - border-top: none; - z-index: -1; -} - -.cape-flow { - width: 100%; - height: 100%; - background: linear-gradient( - 45deg, - transparent 30%, - rgba(255, 255, 255, 0.2) 50%, - transparent 70% - ); - animation: capeFlow 3s infinite ease-in-out; -} - -.cta-content { - flex: 1; -} - -.cta-content h3 { - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1rem; -} - -.cta-content p { - font-size: 1.1rem; - color: var(--medium-gray); - margin-bottom: 2rem; - line-height: 1.6; -} - -.cta-buttons { - display: flex; - gap: 1rem; - flex-wrap: wrap; -} - -/* Animations */ -@keyframes corporateGlare { - 0%, - 100% { - background: var(--dark-text); - } - 50% { - background: var(--error-red); - } -} - -@keyframes friendlyBlink { - 0%, - 90%, - 100% { - transform: scaleY(1); - } - 95% { - transform: scaleY(0.1); - } -} - -@keyframes heartbeat { - 0%, - 100% { - transform: scale(1); - } - 50% { - transform: scale(1.2); - } -} - -@keyframes wingFlap { - 0%, - 100% { - transform: translateY(0); - } - 50% { - transform: translateY(-3px); - } -} - -@keyframes vsFlicker { - 0%, - 100% { - opacity: 1; - } - 50% { - opacity: 0.5; - } -} - -@keyframes flowPulse { - 0%, - 100% { - transform: scale(1); - opacity: 1; - } - 50% { - transform: scale(1.3); - opacity: 0.7; - } -} - -@keyframes confidentGlow { - 
0%, - 100% { - box-shadow: 0 0 5px var(--primary-blue); - } - 50% { - box-shadow: 0 0 15px var(--primary-blue); - } -} - -@keyframes capeFlow { - 0% { - transform: translateX(-100%); - } - 100% { - transform: translateX(100%); - } -} - -@keyframes corporateShake { - 0%, - 100% { - transform: translateX(0); - } - 25% { - transform: translateX(-2px) rotate(-1deg); - } - 75% { - transform: translateX(2px) rotate(1deg); - } -} - -@keyframes friendlyBounce { - 0%, - 100% { - transform: translateY(0); - } - 50% { - transform: translateY(-8px); - } -} - -@keyframes vsFlash { - 0%, - 100% { - transform: scale(1); - } - 50% { - transform: scale(1.1); - box-shadow: 0 0 15px var(--warning-amber); - } -} - -@keyframes featureBounce { - 0%, - 100% { - transform: translateY(0); - } - 50% { - transform: translateY(-4px); - } -} - -@keyframes cardRobotFloat { - 0%, - 100% { - transform: translateY(0); - } - 50% { - transform: translateY(-6px); - } -} - -@keyframes slideInComparison { - 0% { - transform: translateX(-30px); - opacity: 0; - } - 100% { - transform: translateX(0); - opacity: 1; - } -} - -@keyframes fadeInUp { - 0% { - transform: translateY(30px); - opacity: 0; - } - 100% { - transform: translateY(0); - opacity: 1; - } -} - -@keyframes ctaRobotCelebrate { - 0%, - 100% { - transform: translateY(0) rotate(0deg); - } - 25% { - transform: translateY(-10px) rotate(-5deg); - } - 75% { - transform: translateY(-5px) rotate(5deg); - } -} - -@keyframes advantagePulse { - 0%, - 100% { - transform: scale(1); - } - 50% { - transform: scale(1.05); - } -} - -@keyframes limitationShake { - 0%, - 100% { - transform: translateX(0); - } - 25% { - transform: translateX(-2px); - } - 75% { - transform: translateX(2px); - } -} - -@keyframes iconPulse { - 0%, - 100% { - transform: scale(1); - } - 50% { - transform: scale(1.2); - } -} - -/* Responsive Design */ -@media (max-width: 1024px) { - .comparison-sides { - grid-template-columns: 1fr; - gap: 1.5rem; - } - - .vs-divider { - order: 2; - 
transform: rotate(90deg); - padding: 1rem 0; - } - - .other-ai { - order: 1; - } - - .jan-side { - order: 3; - } - - .principles-cta { - flex-direction: column; - text-align: center; - gap: 2rem; - } -} - -@media (max-width: 768px) { - .detailed-comparison { - padding: 2rem; - } - - .comparison-table { - grid-template-columns: 1fr; - gap: 0; - } - - .table-row { - display: grid; - grid-template-columns: 1fr 1fr 1fr; - gap: 1px; - margin-bottom: 1px; - } - - .cards-grid { - grid-template-columns: 1fr; - gap: 1.5rem; - } - - .philosophy-card { - padding: 1.5rem; - } - - .feature-item { - flex-direction: column; - text-align: center; - gap: 1rem; - } - - .side-robot .robot-head { - width: 50px; - height: 50px; - } - - .side-robot .robot-body { - width: 60px; - height: 70px; - } -} - -@media (max-width: 480px) { - .comparison-side { - padding: 1.5rem; - } - - .detailed-comparison { - padding: 1.5rem; - } - - .philosophy-cards h3, - .detailed-comparison h3 { - font-size: 1.8rem; - } - - .cta-content h3 { - font-size: 1.8rem; - } - - .cta-buttons { - flex-direction: column; - align-items: center; - } - - .table-row { - grid-template-columns: 1fr; - text-align: left; - } - - .header-cell, - .table-cell { - padding: 0.75rem; - font-size: 0.8rem; - } - - .feature-col { - justify-content: flex-start; - } - - .advantage-indicator, - .limitation-indicator { - padding: 0.5rem; - font-size: 0.8rem; - } -} - -/* Accessibility */ -@media (prefers-reduced-motion: reduce) { - .other-robot, - .jan-robot, - .vs-robot, - .feature-robot, - .card-robot, - .cta-robot, - .comparison-side, - .feature-item, - .philosophy-card, - .advantage-indicator, - .limitation-indicator { - animation: none !important; - transition: none !important; - } -} - -/* High contrast mode */ -@media (prefers-contrast: high) { - .comparison-side, - .detailed-comparison, - .philosophy-card, - .principles-cta { - border-width: 3px; - } - - .feature-item { - border-width: 2px; - } -} diff --git 
a/website/src/styles/products-roadmap.css b/website/src/styles/products-roadmap.css deleted file mode 100644 index 1cc480daec..0000000000 --- a/website/src/styles/products-roadmap.css +++ /dev/null @@ -1,837 +0,0 @@ -/* ProductsRoadmap Component Styles */ - -.roadmap-section { - background: var(--light-gray); - border-top: 1px solid var(--border-color); -} - -.roadmap-section .badge-robot-head { - background: var(--light-purple); -} - -/* Roadmap Overview */ -.roadmap-overview { - margin: 4rem 0; - max-width: 900px; - margin-left: auto; - margin-right: auto; -} - -.timeline-stats { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); - gap: 2rem; -} - -.stat-card { - background: white; - border-radius: 16px; - border: 2px solid var(--border-color); - padding: 1.5rem; - display: flex; - align-items: center; - gap: 1rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); - transition: all 0.3s ease; -} - -.stat-card:hover { - transform: translateY(-4px); - box-shadow: 0 12px 40px rgba(0, 0, 0, 0.12); -} - -.stat-robot { - flex-shrink: 0; -} - -.stat-robot .robot-head { - width: 40px; - height: 40px; - border-radius: 10px; - border: 3px solid var(--dark-text); - margin: 0 auto 8px; - position: relative; -} - -.stat-robot .robot-body { - width: 50px; - height: 60px; - border-radius: 10px; - border: 3px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; -} - -.released-robot .robot-head { - background: var(--success-green); -} -.released-robot .robot-body { - background: var(--success-green); -} - -.development-robot .robot-head { - background: var(--primary-blue); -} -.development-robot .robot-body { - background: var(--primary-blue); -} - -.planned-robot .robot-head { - background: var(--medium-gray); -} -.planned-robot .robot-body { - background: var(--medium-gray); -} - -.success-check { - width: 20px; - height: 10px; - border-left: 4px solid white; - border-bottom: 
4px solid white; - transform: rotate(-45deg); -} - -.work-indicator { - display: flex; - gap: 3px; -} - -.work-dot { - width: 6px; - height: 6px; - background: white; - border-radius: 50%; - animation: workPulse 2s infinite ease-in-out; -} - -.work-dot:nth-child(2) { - animation-delay: 0.3s; -} -.work-dot:nth-child(3) { - animation-delay: 0.6s; -} - -.planning-board { - display: flex; - flex-direction: column; - gap: 3px; -} - -.plan-item { - width: 20px; - height: 3px; - background: white; - border-radius: 2px; -} - -.stat-content { - text-align: left; -} - -.stat-number { - font-size: 2rem; - font-weight: 800; - color: var(--dark-text); - line-height: 1; -} - -.stat-label { - font-size: 1rem; - color: var(--medium-gray); - font-weight: 500; -} - -/* Roadmap Database */ -.roadmap-database { - background: white; - border-radius: 20px; - border: 2px solid var(--border-color); - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); -} - -.database-header { - display: flex; - justify-content: space-between; - align-items: center; - padding: 1.5rem; - border-bottom: 1px solid var(--border-color); -} - -.database-title { - display: flex; - align-items: center; - gap: 1rem; -} - -.title-robot .robot-head { - width: 30px; - height: 30px; - background: var(--light-purple); - border-radius: 8px; - border: 2px solid var(--dark-text); - margin-bottom: 4px; -} - -.title-robot .robot-body { - width: 35px; - height: 40px; - background: var(--light-purple); - border-radius: 8px; - border: 2px solid var(--dark-text); - display: flex; - align-items: center; - justify-content: center; - font-weight: 700; - font-size: 0.9rem; -} - -.database-title h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); -} - -.database-controls { - display: flex; - gap: 0.75rem; -} - -.control-btn { - background: var(--light-gray); - border: 1px solid var(--border-color); - border-radius: 8px; - padding: 0.5rem 1rem; - font-size: 0.8rem; - font-weight: 600; - color: var(--dark-text); - cursor: 
pointer; - transition: all 0.3s ease; -} - -.control-btn:hover { - background: var(--border-color); - transform: translateY(-2px); -} - -.database-table { - padding: 1.5rem; -} - -.table-header { - display: grid; - grid-template-columns: 2fr 1fr 1fr 80px; - gap: 1rem; - padding: 0 1rem 1rem; - border-bottom: 1px solid var(--border-color); - font-size: 0.8rem; - font-weight: 600; - color: var(--medium-gray); - text-transform: uppercase; - letter-spacing: 0.05em; -} - -.expand-col { - text-align: right; -} - -/* Roadmap Row */ -.roadmap-row { - border-bottom: 1px solid var(--border-color); - transition: background-color 0.3s ease; -} - -.roadmap-row:last-child { - border-bottom: none; -} - -.roadmap-row.expanded { - background: var(--light-blue); -} - -.row-main { - display: grid; - grid-template-columns: 2fr 1fr 1fr 80px; - gap: 1rem; - align-items: center; - padding: 1rem; - cursor: pointer; -} - -.version-col { - display: flex; - align-items: center; - gap: 1rem; -} - -.version-robot { - flex-shrink: 0; -} - -.robot-mini-head { - width: 25px; - height: 25px; - border-radius: 6px; - border: 2px solid var(--dark-text); - margin: 0 auto 4px; -} - -.robot-mini-body { - width: 30px; - height: 35px; - border-radius: 6px; - border: 2px solid var(--dark-text); - margin: 0 auto; - display: flex; - align-items: center; - justify-content: center; - font-weight: 700; -} - -.released-version .robot-mini-head, -.released-version .robot-mini-body { - background: var(--success-green); -} - -.development-version .robot-mini-head, -.development-version .robot-mini-body { - background: var(--primary-blue); -} - -.planned-version .robot-mini-head, -.planned-version .robot-mini-body { - background: var(--medium-gray); -} - -.version-info { - display: flex; - flex-direction: column; -} - -.version-number { - font-size: 1rem; - font-weight: 700; - color: var(--dark-text); -} - -.version-name { - font-size: 0.9rem; - color: var(--medium-gray); -} - -.date-badge { - display: inline-block; 
- padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; -} - -.date-badge.released { - background: var(--soft-green); - color: var(--success-green); -} -.date-badge.development { - background: var(--light-blue); - color: var(--primary-blue); -} -.date-badge.planned { - background: var(--light-gray); - color: var(--medium-gray); -} - -.status-indicator { - display: flex; - align-items: center; - gap: 0.5rem; - font-size: 0.9rem; - font-weight: 600; -} - -.status-dot { - width: 8px; - height: 8px; - border-radius: 50%; -} - -.status-indicator.released { - color: var(--success-green); -} -.status-indicator.released .status-dot { - background: var(--success-green); -} - -.status-indicator.development { - color: var(--primary-blue); -} -.status-indicator.development .status-dot { - background: var(--primary-blue); - animation: pulse 2s infinite; -} - -.status-indicator.planned { - color: var(--medium-gray); -} -.status-indicator.planned .status-dot { - background: var(--medium-gray); -} - -.expand-btn { - width: 30px; - height: 30px; - background: var(--light-gray); - border: 1px solid var(--border-color); - border-radius: 50%; - cursor: pointer; - display: flex; - align-items: center; - justify-content: center; - transition: all 0.3s ease; -} - -.expand-btn:hover { - background: var(--border-color); - transform: scale(1.1); -} - -.expand-icon { - font-size: 1.2rem; - font-weight: 700; - color: var(--medium-gray); - transition: transform 0.3s ease; -} - -/* Row Details */ -.row-details { - max-height: 0; - opacity: 0; - overflow: hidden; - transition: max-height 0.5s ease-out, opacity 0.5s ease-out; -} - -.details-content { - padding: 1rem 2rem 2rem; - display: grid; - grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); - gap: 2rem; - border-top: 1px solid var(--border-color); -} - -.detail-section h4 { - font-size: 1rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1rem; - border-bottom: 2px solid 
var(--primary-blue); - padding-bottom: 0.5rem; - display: inline-block; -} - -.detail-section ul { - list-style: none; - padding: 0; - margin: 0; - display: flex; - flex-direction: column; - gap: 0.75rem; -} - -.detail-section li { - padding-left: 1.5rem; - position: relative; - font-size: 0.9rem; - color: var(--medium-gray); -} - -.detail-section li::before { - content: "→"; - position: absolute; - left: 0; - color: var(--primary-blue); - font-weight: 700; -} - -/* Roadmap CTA */ -.roadmap-cta { - margin: 4rem 0 0; - background: var(--light-blue); - border-radius: 20px; - border: 3px solid var(--primary-blue); - padding: 3rem; - display: flex; - align-items: center; - gap: 2rem; - box-shadow: 0 8px 30px rgba(59, 130, 246, 0.2); -} - -.cta-robot { - flex-shrink: 0; -} - -.cta-robot .robot-head { - width: 80px; - height: 80px; - background: var(--primary-blue); - border-radius: 20px; - border: 4px solid var(--dark-text); - margin: 0 auto 12px; - position: relative; -} - -.cta-robot .robot-body { - width: 90px; - height: 100px; - background: var(--primary-blue); - border-radius: 20px; - border: 4px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; -} - -.robot-eye.excited { - animation: excitedWiggle 2s infinite ease-in-out; -} - -.robot-smile-big { - position: absolute; - bottom: 15px; - left: 50%; - transform: translateX(-50%); - width: 35px; - height: 20px; - border: 4px solid var(--dark-text); - border-bottom: none; - border-radius: 35px 35px 0 0; -} - -.robot-progress-wheel { - width: 40px; - height: 40px; - border-radius: 50%; - border: 3px solid var(--dark-text); - position: relative; - animation: spin 4s infinite linear; -} - -.progress-segment { - position: absolute; - width: 50%; - height: 50%; -} - -.progress-segment:nth-child(1) { - top: 0; - left: 0; - background: var(--soft-green); - border-radius: 100% 0 0 0; -} -.progress-segment:nth-child(2) { - top: 0; - right: 0; - 
background: var(--warm-yellow); - border-radius: 0 100% 0 0; -} -.progress-segment:nth-child(3) { - bottom: 0; - left: 0; - background: var(--light-purple); - border-radius: 0 0 0 100%; -} -.progress-segment:nth-child(4) { - bottom: 0; - right: 0; - background: var(--soft-pink); - border-radius: 0 0 100% 0; -} - -.robot-arms.celebrating { - display: flex; - justify-content: space-between; - margin-top: -80px; - padding: 0 10px; -} - -.arm.left-celebrate, -.arm.right-celebrate { - width: 15px; - height: 40px; - background: var(--gentle-orange); - border-radius: 8px; - border: 3px solid var(--dark-text); - transform-origin: bottom center; - animation: celebrate 1.5s infinite ease-in-out; -} - -.arm.right-celebrate { - animation-delay: 0.3s; -} - -.cta-content h3 { - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1rem; -} - -.cta-content p { - font-size: 1.1rem; - color: var(--medium-gray); - margin-bottom: 2rem; - line-height: 1.6; -} - -.cta-buttons { - display: flex; - gap: 1rem; - flex-wrap: wrap; -} - -/* Animations */ -@keyframes workPulse { - 0%, - 100% { - transform: scale(1); - opacity: 0.7; - } - 50% { - transform: scale(1.2); - opacity: 1; - } -} - -@keyframes pulse { - 0%, - 100% { - transform: scale(1); - opacity: 1; - } - 50% { - transform: scale(1.1); - opacity: 0.7; - } -} - -@keyframes versionCelebrate { - 0%, - 100% { - transform: translateY(0); - } - 25% { - transform: translateY(-8px) rotate(-5deg); - } - 75% { - transform: translateY(-4px) rotate(5deg); - } -} - -@keyframes versionHover { - 0%, - 100% { - transform: scale(1); - } - 50% { - transform: scale(1.05); - } -} - -@keyframes slideInFromLeft { - 0% { - transform: translateX(-30px); - opacity: 0; - } - 100% { - transform: translateX(0); - opacity: 1; - } -} - -@keyframes fadeInUp { - 0% { - transform: translateY(20px); - opacity: 0; - } - 100% { - transform: translateY(0); - opacity: 1; - } -} - -@keyframes excitedWiggle { - 0%, - 100% { - transform: 
rotate(0); - } - 25% { - transform: rotate(-3deg); - } - 75% { - transform: rotate(3deg); - } -} - -@keyframes spin { - 0% { - transform: rotate(0deg); - } - 100% { - transform: rotate(360deg); - } -} - -@keyframes celebrate { - 0%, - 100% { - transform: rotate(0deg); - } - 50% { - transform: rotate(15deg); - } -} - -@keyframes ctaRobotCelebration { - 0%, - 100% { - transform: translateY(0) rotate(0); - } - 25% { - transform: translateY(-10px) rotate(-5deg); - } - 75% { - transform: translateY(-5px) rotate(5deg); - } -} - -/* Responsive Design */ -@media (max-width: 1024px) { - .database-table { - font-size: 0.9rem; - } - - .table-header { - grid-template-columns: 2fr 1.5fr 1fr 60px; - } - - .row-main { - grid-template-columns: 2fr 1.5fr 1fr 60px; - } -} - -@media (max-width: 768px) { - .timeline-stats { - grid-template-columns: 1fr; - gap: 1.5rem; - } - - .database-header { - flex-direction: column; - gap: 1rem; - align-items: flex-start; - } - - .table-header { - display: none; - } - - .row-main { - grid-template-columns: 1fr; - gap: 1rem; - padding: 1.5rem; - } - - .row-cell { - display: flex; - justify-content: space-between; - align-items: center; - border-bottom: 1px dashed var(--border-color); - padding-bottom: 0.75rem; - } - - .row-cell:last-child { - border-bottom: none; - } - - .row-cell::before { - content: attr(data-label); - font-weight: 600; - color: var(--medium-gray); - font-size: 0.8rem; - text-transform: uppercase; - } - - .expand-col { - justify-content: flex-end; - } - - .details-content { - grid-template-columns: 1fr; - padding: 1rem; - } - - .roadmap-cta { - flex-direction: column; - text-align: center; - } -} - -@media (max-width: 480px) { - .roadmap-database { - font-size: 0.8rem; - } - - .row-main { - padding: 1rem; - } - - .version-col { - gap: 0.75rem; - } - - .version-number { - font-size: 0.9rem; - } - - .cta-content h3 { - font-size: 1.8rem; - } - - .cta-buttons { - flex-direction: column; - align-items: center; - } -} - -/* 
Accessibility */ -@media (prefers-reduced-motion: reduce) { - .work-dot, - .status-indicator.development .status-dot, - .robot-eye, - .progress-wheel, - .arm, - .stat-card, - .roadmap-row, - .control-btn, - .expand-btn, - .row-details, - .version-robot { - animation: none !important; - transition: none !important; - } -} - -/* High contrast mode */ -@media (prefers-contrast: high) { - .stat-card, - .roadmap-database, - .roadmap-cta { - border-width: 3px; - } - - .roadmap-row { - border-width: 2px; - } -} diff --git a/website/src/styles/products-tools.css b/website/src/styles/products-tools.css deleted file mode 100644 index 8603b491e0..0000000000 --- a/website/src/styles/products-tools.css +++ /dev/null @@ -1,597 +0,0 @@ -/* ProductsTools Component Styles */ - -.tools-section { - background: var(--cream-white); - border-top: 1px solid var(--border-color); -} - -.tools-section .badge-robot-head { - background: var(--warning-amber); -} - -/* Tools Grid */ -.tools-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(350px, 1fr)); - gap: 2rem; - margin: 4rem 0; -} - -.tool-card { - position: relative; - overflow: visible; -} - -.tool-robot-section { - position: relative; - text-align: center; - margin-bottom: 2rem; -} - -.tool-robot { - display: inline-block; - position: relative; - margin-bottom: 1rem; -} - -.tool-robot .robot-head { - width: 60px; - height: 60px; - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto 10px; - position: relative; -} - -.tool-robot .robot-body { - width: 70px; - height: 80px; - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; -} - -/* Robot Variations */ -.search-robot .robot-head, -.search-robot .robot-body { - background: var(--success-green); -} - -.research-robot .robot-head, -.research-robot .robot-body { - background: var(--primary-blue); -} - -.browser-robot .robot-head, 
-.browser-robot .robot-body { - background: var(--warning-amber); -} - -.agents-robot .robot-head, -.agents-robot .robot-body { - background: var(--light-purple); -} - -.agents-robot.sleeping { - opacity: 0.7; - animation: sleepBob 4s infinite ease-in-out; -} - -.tool-status-bubble { - position: absolute; - top: -10px; - right: 1rem; -} - -/* Tool Content */ -.tool-content { - padding: 0; -} - -.tool-header { - display: flex; - justify-content: space-between; - align-items: center; - margin-bottom: 1rem; -} - -.tool-header h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); -} - -.tool-version { - background: var(--light-gray); - color: var(--medium-gray); - padding: 0.25rem 0.5rem; - border-radius: 8px; - font-size: 0.7rem; - font-weight: 600; - border: 1px solid var(--border-color); -} - -.tool-description { - color: var(--medium-gray); - margin-bottom: 1.5rem; - line-height: 1.6; -} - -.tool-features { - margin-bottom: 1.5rem; -} - -.tool-features h4 { - font-size: 0.9rem; - font-weight: 600; - color: var(--dark-text); - margin-bottom: 0.75rem; - text-transform: uppercase; - letter-spacing: 0.05em; -} - -.features-list { - display: flex; - flex-direction: column; - gap: 0.75rem; -} - -.feature-item { - display: flex; - align-items: center; - gap: 0.75rem; - font-size: 0.9rem; - color: var(--dark-text); - padding: 0.5rem; - border-radius: 8px; - transition: all 0.3s ease; -} - -.feature-item:hover { - background: var(--light-gray); -} - -.feature-item.coming { - color: var(--medium-gray); - font-style: italic; -} - -.feature-icon { - width: 20px; - height: 20px; - border-radius: 4px; - border: 2px solid var(--dark-text); - flex-shrink: 0; -} - -.tool-metrics { - display: flex; - justify-content: space-around; - padding-top: 1rem; - border-top: 1px solid var(--border-color); -} - -.metric { - text-align: center; -} - -.metric-value { - font-size: 1.1rem; - font-weight: 700; - color: var(--primary-blue); - margin-bottom: 0.25rem; -} - 
-.metric-label { - font-size: 0.8rem; - color: var(--medium-gray); - text-transform: uppercase; - letter-spacing: 0.05em; -} - -/* Tools Integration Flow */ -.tools-integration { - margin: 4rem 0; - background: white; - border-radius: 20px; - border: 2px solid var(--border-color); - padding: 3rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); -} - -.tools-integration h3 { - text-align: center; - font-size: 2rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.integration-flow { - display: flex; - align-items: center; - justify-content: center; - gap: 1rem; - flex-wrap: wrap; -} - -.flow-step { - text-align: center; -} - -.step-robot { - margin-bottom: 0.5rem; -} - -.robot-mini { - width: 40px; - height: 50px; - border-radius: 8px; - border: 2px solid var(--dark-text); - margin: 0 auto; -} - -.search-mini { background: var(--success-green); } -.research-mini { background: var(--primary-blue); } -.browser-mini { background: var(--warning-amber); } -.agents-mini { background: var(--light-purple); } - -.step-content h4 { - font-size: 1rem; - font-weight: 600; - color: var(--dark-text); -} - -.step-content p { - font-size: 0.8rem; - color: var(--medium-gray); -} - -.flow-arrow { - width: 40px; - height: 2px; - background: var(--primary-blue); - position: relative; -} - -.flow-arrow.coming { - background: var(--medium-gray); - border-style: dashed; -} - -.flow-arrow .arrow-head { - position: absolute; - right: -5px; - top: -3px; - width: 0; - height: 0; - border-top: 4px solid transparent; - border-bottom: 4px solid transparent; - border-left: 8px solid var(--primary-blue); -} - -.flow-arrow.coming .arrow-head { - border-left-color: var(--medium-gray); -} - -/* Tools Comparison Table */ -.tools-comparison { - margin: 4rem 0; - background: white; - border-radius: 20px; - border: 2px solid var(--border-color); - padding: 3rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.08); -} - -.tools-comparison h3 { - text-align: center; - font-size: 2rem; - 
font-weight: 700; - color: var(--dark-text); - margin-bottom: 2rem; -} - -.comparison-table { - display: grid; - grid-template-columns: 1fr 1fr 1fr 1fr; - gap: 1px; - background: var(--border-color); - border-radius: 12px; - overflow: hidden; -} - -.table-header { display: contents; } - -.header-cell { - background: var(--primary-blue); - color: white; - padding: 1rem; - font-weight: 700; - text-align: center; -} - -.table-row { display: contents; } - -.table-cell { - background: white; - padding: 1rem; - text-align: center; - font-size: 0.9rem; -} - -.tool-col { - display: flex; - align-items: center; - justify-content: center; - gap: 0.5rem; - font-weight: 600; -} - -.tool-mini-icon { - width: 20px; - height: 20px; - border-radius: 4px; - border: 2px solid var(--dark-text); -} - -.search-icon { background: var(--success-green); } -.research-icon { background: var(--primary-blue); } -.browser-icon { background: var(--warning-amber); } -.agents-icon { background: var(--light-purple); } - -/* Animations */ -@keyframes toolBounce { - 0%, 100% { transform: translateY(0); } - 50% { transform: translateY(-8px); } -} - -@keyframes sleepShift { - 0%, 100% { transform: translateX(0) rotate(0); } - 50% { transform: translateX(4px) rotate(2deg); } -} - -@keyframes slideInFromBottom { - 0% { transform: translateY(50px); opacity: 0; } - 100% { transform: translateY(0); opacity: 1; } -} - -@keyframes miniRobotSpin { - 0% { transform: rotate(0); } - 100% { transform: rotate(360deg); } -} - -@keyframes iconPulse { - 0%, 100% { transform: scale(1); } - 50% { transform: scale(1.1); } -} - -@keyframes featureIconBounce { - 0%, 100% { transform: scale(1); } - 50% { transform: scale(1.2) rotate(5deg); } -} - -@keyframes fadeInUp { - 0% { transform: translateY(20px); opacity: 0; } - 100% { transform: translateY(0); opacity: 1; } -} - -/* Responsive Design */ -@media (max-width: 1024px) { - .tools-grid { - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - } -} - -@media 
(max-width: 768px) { - .tools-grid { - grid-template-columns: 1fr; - gap: 1.5rem; - } - - .tools-comparison { - padding: 2rem; - } - - .comparison-table { - grid-template-columns: 1fr; - gap: 0; - } - - .table-row { - display: grid; - grid-template-columns: 1fr 1fr 1fr 1fr; - gap: 1px; - margin-bottom: 1px; - } -} - -@media (max-width: 480px) { - .flow-arrow { - transform: rotate(90deg); - } - - .table-row { - display: flex; - flex-direction: column; - gap: 0; - margin-bottom: 1px; - } - - .table-cell { - border-bottom: 1px solid var(--border-color); - } -} - -/* Accessibility */ -@media (prefers-reduced-motion: reduce) { - .tool-robot, - .tool-card, - .flow-step, - .robot-mini, - .tool-mini-icon, - .feature-icon { - animation: none !important; - transition: none !important; - } -} - -/* High contrast mode */ -@media (prefers-contrast: high) { - .tool-card, - .tools-integration, - .tools-comparison { - border-width: 3px; - } -} - -/* Robot-specific details and animations that were missing */ -.robot-eyes { - display: flex; - justify-content: space-between; - padding: 12px 10px; -} - -.robot-eye { - width: 12px; - height: 12px; - background: var(--dark-text); - border-radius: 50%; -} - -.robot-eye.active { - animation: eyeBlink 4s infinite ease-in-out; -} - -.robot-eye.thinking { - height: 3px; - border-radius: 2px; - animation: thinkingBlink 3s infinite ease-in-out; -} - -.robot-eye.beta { - background: var(--dark-text); - animation: betaGlow 2s infinite ease-in-out; -} - -.robot-eye.sleeping { - height: 3px; - border-radius: 2px; -} - -@keyframes eyeBlink { - 0%, 90%, 100% { transform: scaleY(1); } - 95% { transform: scaleY(0.1); } -} - -@keyframes thinkingBlink { - 0%, 100% { width: 12px; } - 50% { width: 4px; } -} - -@keyframes betaGlow { - 0%, 100% { box-shadow: none; } - 50% { box-shadow: 0 0 5px var(--warning-amber); } -} - -.robot-search-beam { - position: absolute; - bottom: 10px; - left: 50%; - transform: translateX(-50%); - width: 40px; - height: 20px; - 
perspective: 50px; -} - -.search-wave { - position: absolute; - width: 100%; - height: 1px; - background: white; - opacity: 0.7; - animation: searchWave 3s infinite ease-out; -} - -.search-wave:nth-child(2) { animation-delay: 1s; } -.search-wave:nth-child(3) { animation-delay: 2s; } - -@keyframes searchWave { - 0% { transform: translateY(0) scaleY(1); opacity: 0; } - 50% { opacity: 0.7; } - 100% { transform: translateY(20px) scaleY(3); opacity: 0; } -} - -.robot-screen { - width: 40px; - height: 30px; - background: var(--dark-text); - border-radius: 4px; - padding: 5px; -} - -.search-results .result-line { - height: 2px; - background: white; - border-radius: 1px; - margin-bottom: 3px; - animation: searchResults 4s infinite; -} - -.search-results .result-line.short { - width: 60%; -} - -@keyframes searchResults { - 0%, 100% { width: 100%; } - 50% { width: 40%; } -} - -.robot-magnifier { - position: absolute; - bottom: -10px; - right: -10px; -} - -.magnifier-glass { - width: 20px; - height: 20px; - border: 3px solid var(--dark-text); - border-radius: 50%; - background: var(--light-blue); -} - -.magnifier-handle { - width: 15px; - height: 4px; - background: var(--dark-text); - transform: rotate(45deg) translate(12px, -3px); -} - -.sleep-indicator { - position: absolute; - top: -5px; - right: -20px; - display: flex; - flex-direction: column; -} - -.sleep-z { - font-size: 0.8rem; - font-weight: 600; - color: var(--medium-gray); - animation: sleepZ 3s infinite ease-in-out; -} - -.sleep-z:nth-child(2) { animation-delay: 1s; } -.sleep-z:nth-child(3) { animation-delay: 2s; } - -@keyframes sleepZ { - 0% { transform: translate(0, 0); opacity: 0; } - 50% { transform: translate(5px, -5px); opacity: 1; } - 100% { transform: translate(10px, -10px); opacity: 0; } -} diff --git a/website/src/styles/products.css b/website/src/styles/products.css deleted file mode 100644 index 05b0fd54da..0000000000 --- a/website/src/styles/products.css +++ /dev/null @@ -1,1644 +0,0 @@ -/* Products 
Page Styles - Blue Dominant Theme */ - -/* Color Palette - Blue Dominant */ -:root { - --primary-blue: #3b82f6; - --light-blue: #93c5fd; - --pale-blue: #dbeafe; - --sky-blue: #e0f2fe; - --soft-green: #bbf7d0; - --warm-yellow: #fef08a; - --gentle-orange: #fed7aa; - --light-purple: #e9d5ff; - --soft-pink: #fce7f3; - --cream-white: #fefce8; - --light-gray: #f8fafc; - --medium-gray: #64748b; - --dark-text: #1e293b; - --success-green: #22c55e; - --warning-amber: #f59e0b; - --error-red: #ef4444; - --white: #ffffff; -} - -/* Base Styles */ -* { - box-sizing: border-box; -} - -.products-page { - background: linear-gradient( - 135deg, - var(--pale-blue) 0%, - var(--light-gray) 100% - ); - min-height: 100vh; - font-family: - 'Inter', - -apple-system, - BlinkMacSystemFont, - sans-serif; - line-height: 1.6; - color: var(--dark-text); -} - -.container { - max-width: 1200px; - margin: 0 auto; - padding: 0 2rem; -} - -/* Hero Section */ -.hero-section { - position: relative; - padding: 4rem 0 6rem; - background: linear-gradient( - 135deg, - var(--primary-blue) 0%, - var(--light-blue) 100% - ); - overflow: hidden; -} - -.hero-content { - position: relative; - text-align: center; - z-index: 2; - max-width: 800px; - margin: 0 auto; - padding: 0 2rem; -} - -.hero-badge { - display: inline-flex; - align-items: center; - gap: 0.75rem; - background: rgba(255, 255, 255, 0.9); - padding: 0.75rem 1.5rem; - border-radius: 50px; - font-size: 0.9rem; - font-weight: 600; - color: var(--dark-text); - margin-bottom: 2rem; - box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1); - border: 2px solid var(--dark-text); -} - -.badge-robot { - display: flex; - flex-direction: column; - align-items: center; - gap: 2px; -} - -.badge-robot-head { - width: 16px; - height: 16px; - background: var(--primary-blue); - border-radius: 4px; - border: 2px solid var(--dark-text); -} - -.badge-robot-body { - width: 12px; - height: 12px; - background: var(--light-blue); - border-radius: 2px; - border: 1px solid 
var(--dark-text); -} - -.hero-title { - margin-bottom: 1.5rem; -} - -.title-main { - display: block; - font-size: clamp(2.5rem, 8vw, 3.5rem); - font-weight: 800; - color: var(--white); - margin-bottom: 0.5rem; - line-height: 1.1; - text-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); -} - -.title-sub { - display: block; - font-size: 1.2rem; - font-weight: 500; - color: rgba(255, 255, 255, 0.9); -} - -.hero-description { - font-size: 1.1rem; - color: rgba(255, 255, 255, 0.9); - margin-bottom: 3rem; - line-height: 1.6; -} - -/* Hero Robot */ -.hero-robot { - margin: 3rem 0; -} - -.robot-container { - position: relative; - display: inline-block; - animation: bounce 3s infinite ease-in-out; -} - -.robot-head { - width: 80px; - height: 80px; - background: var(--warm-yellow); - border-radius: 20px; - position: relative; - margin: 0 auto 10px; - border: 4px solid var(--dark-text); - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); -} - -.robot-eyes { - display: flex; - justify-content: space-between; - padding: 15px 12px; -} - -.eye { - width: 16px; - height: 16px; - background: var(--dark-text); - border-radius: 50%; - position: relative; -} - -.eye::after { - content: ''; - position: absolute; - top: 3px; - right: 3px; - width: 4px; - height: 4px; - background: white; - border-radius: 50%; -} - -.robot-mouth.happy { - position: absolute; - bottom: 15px; - left: 50%; - transform: translateX(-50%); - width: 20px; - height: 10px; - border: 3px solid var(--dark-text); - border-bottom: none; - border-radius: 20px 20px 0 0; -} - -.robot-body { - width: 100px; - height: 100px; - background: var(--soft-green); - border-radius: 15px; - margin: 0 auto; - border: 4px solid var(--dark-text); - position: relative; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); -} - -.robot-panel { - display: flex; - justify-content: center; - gap: 8px; - padding: 20px; -} - -.panel-light { - width: 12px; - height: 12px; - border-radius: 50%; - border: 2px solid var(--dark-text); -} - -.panel-light.blue { - 
background: var(--primary-blue); -} -.panel-light.green { - background: var(--success-green); -} -.panel-light.amber { - background: var(--warning-amber); - animation: blink 1.5s infinite; -} - -.speech-bubble { - position: absolute; - top: -40px; - left: 120%; - background: white; - padding: 0.5rem 1rem; - border-radius: 15px; - border: 2px solid var(--dark-text); - font-size: 0.9rem; - font-weight: 600; - color: var(--dark-text); - white-space: nowrap; - box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1); -} - -.speech-bubble::before { - content: ''; - position: absolute; - left: -10px; - top: 50%; - transform: translateY(-50%); - width: 0; - height: 0; - border-top: 8px solid transparent; - border-bottom: 8px solid transparent; - border-right: 10px solid white; -} - -.speech-bubble::after { - content: ''; - position: absolute; - left: -12px; - top: 50%; - transform: translateY(-50%); - width: 0; - height: 0; - border-top: 10px solid transparent; - border-bottom: 10px solid transparent; - border-right: 12px solid var(--dark-text); -} - -/* Table of Contents */ -.toc-section { - padding: 4rem 0; - background: var(--white); -} - -.toc-card { - background: var(--white); - border-radius: 20px; - padding: 2rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); - border: 3px solid var(--dark-text); -} - -.toc-card h2 { - text-align: center; - margin-bottom: 2rem; - font-size: 1.8rem; - font-weight: 700; - color: var(--dark-text); -} - -.toc-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); - gap: 1rem; -} - -.toc-item { - display: flex; - align-items: center; - gap: 1rem; - padding: 1rem; - border-radius: 15px; - background: var(--pale-blue); - border: 2px solid var(--primary-blue); - text-decoration: none; - color: var(--dark-text); - font-weight: 600; - transition: all 0.3s ease; -} - -.toc-item:hover { - transform: translateY(-2px); - box-shadow: 0 4px 15px rgba(59, 130, 246, 0.3); - background: var(--light-blue); -} - -.toc-icon { - width: 
40px; - height: 40px; - border-radius: 10px; - border: 2px solid var(--dark-text); - display: flex; - align-items: center; - justify-content: center; - font-size: 1.2rem; - flex-shrink: 0; -} - -.toc-icon.vision { - background: var(--soft-pink); -} -.toc-icon.work { - background: var(--light-blue); -} -.toc-icon.platform { - background: var(--gentle-orange); -} -.toc-icon.model { - background: var(--soft-green); -} -.toc-icon.tool { - background: var(--light-purple); -} -.toc-icon.compare { - background: var(--warm-yellow); -} -.toc-icon.roadmap { - background: var(--primary-blue); -} - -/* Section Styles */ -.section-header { - text-align: center; - margin-bottom: 4rem; -} - -.section-badge { - display: inline-flex; - align-items: center; - gap: 0.75rem; - background: rgba(255, 255, 255, 0.9); - padding: 0.75rem 1.5rem; - border-radius: 50px; - font-size: 0.9rem; - font-weight: 600; - color: var(--dark-text); - margin-bottom: 1rem; - box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1); - border: 2px solid var(--dark-text); -} - -.section-title { - font-size: 2.5rem; - font-weight: 800; - color: var(--dark-text); - margin-bottom: 1rem; -} - -.section-description { - font-size: 1.2rem; - color: var(--medium-gray); - max-width: 600px; - margin: 0 auto; -} - -/* Vision Section */ -.vision-section { - padding: 6rem 0; - background: var(--sky-blue); -} - -.vision-formula { - display: flex; - align-items: center; - justify-content: center; - gap: 1rem; - flex-wrap: wrap; - margin: 3rem 0; -} - -.formula-item { - display: flex; - flex-direction: column; - align-items: center; - gap: 0.5rem; -} - -.formula-robot { - width: 60px; - height: 80px; - position: relative; -} - -.formula-robot .robot-head { - width: 40px; - height: 40px; - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 auto 5px; - background: var(--primary-blue); -} - -.formula-robot .robot-body { - width: 50px; - height: 40px; - border-radius: 8px; - border: 2px solid var(--dark-text); - margin: 0 
auto; - background: var(--light-blue); -} - -.formula-plus, -.formula-equals { - font-size: 1.5rem; - font-weight: 800; - color: var(--dark-text); -} - -.vision-principles { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - gap: 2rem; - margin-top: 4rem; -} - -.principle-card { - background: var(--white); - border-radius: 20px; - padding: 2rem; - text-align: center; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); - border: 3px solid var(--dark-text); -} - -.principle-robot { - margin: 0 auto 1rem; - width: 50px; - height: 70px; -} - -.principle-robot .robot-head { - width: 30px; - height: 30px; - border-radius: 8px; - border: 2px solid var(--dark-text); - margin: 0 auto 5px; - background: var(--warm-yellow); -} - -.principle-robot .robot-body { - width: 40px; - height: 35px; - border-radius: 6px; - border: 2px solid var(--dark-text); - margin: 0 auto; - background: var(--soft-green); -} - -.principle-card h3 { - font-size: 1.3rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1rem; -} - -.principle-card p { - color: var(--medium-gray); - line-height: 1.6; -} - -/* Works Section */ -.works-section { - padding: 6rem 0; - background: var(--light-gray); -} - -.modes-comparison { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(400px, 1fr)); - gap: 2rem; - margin-top: 4rem; -} - -.mode-card { - background: var(--white); - border-radius: 20px; - padding: 2rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); - border: 3px solid var(--dark-text); - text-align: center; -} - -.mode-robot { - margin: 0 auto 1.5rem; - width: 60px; - height: 80px; -} - -.mode-robot .robot-head { - width: 40px; - height: 40px; - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 auto 5px; -} - -.mode-robot .robot-body { - width: 50px; - height: 40px; - border-radius: 8px; - border: 2px solid var(--dark-text); - margin: 0 auto; -} - -.local-mode .mode-robot .robot-head { - background: var(--success-green); -} 
-.local-mode .mode-robot .robot-body { - background: var(--soft-green); -} - -.cloud-mode .mode-robot .robot-head { - background: var(--primary-blue); -} -.cloud-mode .mode-robot .robot-body { - background: var(--light-blue); -} - -.mode-card h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 1rem; -} - -.mode-card p { - color: var(--medium-gray); - margin-bottom: 1.5rem; -} - -.mode-features { - display: flex; - flex-direction: column; - gap: 0.5rem; -} - -.feature-item { - padding: 0.5rem 1rem; - background: var(--pale-blue); - border-radius: 15px; - border: 2px solid var(--primary-blue); - font-size: 0.9rem; - font-weight: 600; -} - -/* Platforms Section */ -.platforms-section { - padding: 6rem 0; - background: var(--cream-white); -} - -.platforms-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - gap: 2rem; - margin-top: 4rem; -} - -.platform-card { - background: var(--white); - border-radius: 20px; - padding: 2rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); - border: 3px solid var(--dark-text); - transition: all 0.3s ease; - text-align: center; -} - -.platform-card:hover { - transform: translateY(-5px); - box-shadow: 0 15px 40px rgba(0, 0, 0, 0.15); -} - -.platform-status { - display: inline-block; - padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; - margin-bottom: 1rem; - border: 2px solid var(--dark-text); -} - -.platform-status:contains('Available') { - background: var(--success-green); - color: white; -} -.platform-status:contains('Beta') { - background: var(--warning-amber); - color: white; -} -.platform-status:contains('Q4') { - background: var(--light-gray); - color: var(--medium-gray); - border-style: dashed; -} -.platform-status:contains('Coming') { - background: var(--light-gray); - color: var(--medium-gray); - border-style: dashed; -} - -.platform-robot { - margin: 0 auto 1.5rem; - width: 60px; - height: 80px; -} - -.platform-robot 
.robot-head { - width: 40px; - height: 40px; - border-radius: 10px; - border: 2px solid var(--dark-text); - margin: 0 auto 5px; -} - -.platform-robot .robot-body { - width: 50px; - height: 40px; - border-radius: 8px; - border: 2px solid var(--dark-text); - margin: 0 auto; -} - -.desktop-robot .robot-head { - background: var(--success-green); -} -.desktop-robot .robot-body { - background: var(--soft-green); -} - -.web-robot .robot-head { - background: var(--primary-blue); -} -.web-robot .robot-body { - background: var(--light-blue); -} - -.mobile-robot .robot-head { - background: var(--warning-amber); -} -.mobile-robot .robot-body { - background: var(--warm-yellow); -} - -.server-robot .robot-head { - background: var(--light-purple); -} -.server-robot .robot-body { - background: var(--soft-pink); -} - -.platform-card h3 { - font-size: 1.5rem; - font-weight: 700; - color: var(--dark-text); - margin-bottom: 0.5rem; -} - -.platform-card p { - color: var(--medium-gray); - margin-bottom: 1rem; -} - -.platform-metrics { - margin-bottom: 1rem; -} - -.metric { - font-size: 0.9rem; - color: var(--primary-blue); - font-weight: 600; -} - -.platform-features { - display: flex; - flex-wrap: wrap; - gap: 0.5rem; - justify-content: center; -} - -.feature { - padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; - background: var(--pale-blue); - color: var(--dark-text); - border: 2px solid var(--primary-blue); -} - -.feature.coming { - border-style: dashed; - color: var(--medium-gray); - background: var(--light-gray); -} - -/* Models Section */ -.models-section { - padding: 6rem 0; - background: var(--sky-blue); -} - -.models-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(350px, 1fr)); - gap: 2rem; - margin-top: 4rem; -} - -.model-card { - background: var(--white); - border-radius: 20px; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); - border: 3px solid var(--dark-text); - overflow: hidden; - transition: all 0.3s ease; - 
position: relative; -} - -.model-card:hover { - transform: translateY(-5px); - box-shadow: 0 15px 40px rgba(0, 0, 0, 0.15); -} - -.model-robot { - display: inline-block; - position: relative; - margin: 2rem auto 1rem; - text-align: center; -} - -.model-robot .robot-head { - width: 60px; - height: 60px; - border-radius: 15px; - border: 3px solid var(--dark-text); - margin: 0 auto 8px; - position: relative; -} - -.model-robot .robot-body { - width: 70px; - height: 60px; - border-radius: 12px; - border: 3px solid var(--dark-text); - margin: 0 auto; - position: relative; - display: flex; - align-items: center; - justify-content: center; -} - -.v1-robot .robot-head { - background: var(--success-green); -} -.v1-robot .robot-body { - background: var(--soft-green); -} - -.nano-robot .robot-head { - background: var(--primary-blue); -} -.nano-robot .robot-body { - background: var(--light-blue); -} - -.lucy-robot .robot-head { - background: var(--warning-amber); -} -.lucy-robot .robot-body { - background: var(--warm-yellow); -} - -.status-light { - width: 12px; - height: 12px; - border-radius: 50%; - border: 2px solid var(--dark-text); - position: absolute; - top: 5px; - right: 5px; -} - -.status-light.active { - background: var(--success-green); - animation: pulse 2s infinite; -} -.status-light.beta { - background: var(--warning-amber); - animation: pulse 1.5s infinite; -} -.status-light.coming { - background: var(--medium-gray); -} - -.robot-speech { - position: absolute; - top: -25px; - left: 50%; - transform: translateX(-50%); - background: white; - padding: 0.25rem 0.75rem; - border-radius: 15px; - border: 2px solid var(--dark-text); - font-size: 0.8rem; - font-weight: 600; - white-space: nowrap; - box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1); -} - -.model-content { - padding: 0 2rem 2rem; -} - -.model-header { - display: flex; - justify-content: space-between; - align-items: center; - margin-bottom: 1.5rem; - text-align: left; -} - -.model-header h3 { - font-size: 1.5rem; 
- font-weight: 700; - color: var(--dark-text); -} - -.model-status { - padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; - border: 2px solid var(--dark-text); -} - -.model-status.active { - background: var(--success-green); - color: white; -} -.model-status.beta { - background: var(--warning-amber); - color: white; -} -.model-status.coming { - background: var(--light-gray); - color: var(--medium-gray); -} - -.model-specs { - display: grid; - grid-template-columns: 1fr 1fr; - gap: 1.5rem; - margin-bottom: 1.5rem; -} - -.spec-group h4 { - font-size: 0.9rem; - color: var(--medium-gray); - margin-bottom: 0.5rem; - font-weight: 600; - text-transform: uppercase; - letter-spacing: 0.05em; -} - -.spec-list { - display: flex; - flex-direction: column; - gap: 0.25rem; -} - -.spec-item { - font-size: 0.9rem; - color: var(--dark-text); - font-weight: 500; -} - -.spec-highlight { - font-size: 1.1rem; - font-weight: 700; - color: var(--primary-blue); -} - -.model-description { - color: var(--medium-gray); - margin-bottom: 1.5rem; - line-height: 1.6; -} - -.model-capabilities { - display: flex; - flex-wrap: wrap; - gap: 0.5rem; -} - -.capability { - padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; - background: var(--success-green); - color: white; - border: 2px solid var(--dark-text); -} - -.capability.coming { - background: var(--light-gray); - color: var(--medium-gray); - border-style: dashed; -} - -/* Tools Section */ -.tools-section { - padding: 6rem 0; - background: var(--light-gray); -} - -.tools-grid { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - gap: 2rem; - margin-top: 4rem; -} - -.tool-card { - background: var(--white); - border-radius: 20px; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); - border: 3px solid var(--dark-text); - overflow: hidden; - transition: all 0.3s ease; - position: relative; -} - -.tool-card:hover { - transform: translateY(-5px); - 
box-shadow: 0 15px 40px rgba(0, 0, 0, 0.15); -} - -.tool-robot { - padding: 2rem 2rem 1rem; - text-align: center; - position: relative; -} - -.tool-robot .robot-head { - width: 50px; - height: 50px; - border-radius: 12px; - border: 3px solid var(--dark-text); - margin: 0 auto 8px; - position: relative; -} - -.tool-robot .robot-body { - width: 60px; - height: 50px; - border-radius: 12px; - border: 3px solid var(--dark-text); - margin: 0 auto; - display: flex; - align-items: center; - justify-content: center; - font-size: 1.2rem; -} - -.search-tool .tool-robot .robot-head { - background: var(--success-green); -} -.search-tool .tool-robot .robot-body { - background: var(--soft-green); -} - -.research-tool .tool-robot .robot-head { - background: var(--primary-blue); -} -.research-tool .tool-robot .robot-body { - background: var(--light-blue); -} - -.browser-tool .tool-robot .robot-head { - background: var(--warning-amber); -} -.browser-tool .tool-robot .robot-body { - background: var(--warm-yellow); -} - -.agents-tool .tool-robot .robot-head { - background: var(--light-purple); -} -.agents-tool .tool-robot .robot-body { - background: var(--soft-pink); -} - -.tool-robot.sleeping { - opacity: 0.6; -} - -.sleep-indicator { - position: absolute; - top: -10px; - right: -10px; - font-size: 0.8rem; - color: var(--medium-gray); - animation: float 2s infinite ease-in-out; -} - -.tool-content { - padding: 0 2rem 2rem; -} - -.tool-status { - display: inline-block; - padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; - margin-bottom: 1rem; - border: 2px solid var(--dark-text); -} - -.tool-status.active { - background: var(--success-green); - color: white; -} -.tool-status.beta { - background: var(--warning-amber); - color: white; -} -.tool-status.coming { - background: var(--light-gray); - color: var(--medium-gray); - border-style: dashed; -} - -.tool-content h3 { - font-size: 1.3rem; - font-weight: 700; - color: var(--dark-text); - 
margin-bottom: 0.5rem; -} - -.tool-content p { - color: var(--medium-gray); - margin-bottom: 1rem; -} - -.tool-features { - display: flex; - flex-wrap: wrap; - gap: 0.5rem; -} - -.tool-features .feature { - padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; - background: var(--pale-blue); - color: var(--dark-text); - border: 2px solid var(--primary-blue); -} - -.tool-features .feature.coming { - border-style: dashed; - color: var(--medium-gray); - background: var(--light-gray); -} - -/* Comparison Section */ -.comparison-section { - padding: 6rem 0; - background: var(--cream-white); -} - -.comparison-table { - background: var(--white); - border-radius: 20px; - border: 3px solid var(--dark-text); - overflow: hidden; - margin-top: 4rem; - box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1); -} - -.table-header { - display: grid; - grid-template-columns: 1fr 1fr 1fr; - background: var(--primary-blue); - color: white; -} - -.header-cell { - padding: 1rem; - font-weight: 700; - text-align: center; - border-right: 2px solid var(--dark-text); -} - -.header-cell:last-child { - border-right: none; -} - -.header-cell.jan-col { - background: var(--success-green); -} - -.table-row { - display: grid; - grid-template-columns: 1fr 1fr 1fr; - border-bottom: 2px solid var(--light-gray); -} - -.table-row:last-child { - border-bottom: none; -} - -.table-cell { - padding: 1rem; - text-align: center; - border-right: 2px solid var(--light-gray); -} - -.table-cell:last-child { - border-right: none; -} - -.table-cell.feature-col { - font-weight: 600; - background: var(--pale-blue); -} - -.table-cell.jan-col { - background: var(--soft-green); - font-weight: 600; - color: var(--dark-text); -} - -/* Roadmap Section */ -.roadmap-section { - padding: 6rem 0; - background: var(--sky-blue); -} - -.roadmap-database { - background: var(--white); - border-radius: 20px; - border: 3px solid var(--dark-text); - overflow: hidden; - margin-top: 4rem; - box-shadow: 0 8px 30px 
rgba(0, 0, 0, 0.1); -} - -.database-header { - display: grid; - grid-template-columns: 2fr 2fr 1fr 0.5fr; - background: var(--primary-blue); - color: white; -} - -.database-header .header-cell { - padding: 1rem; - font-weight: 700; - text-align: center; - border-right: 2px solid var(--dark-text); -} - -.roadmap-row { - border-bottom: 2px solid var(--light-gray); - position: relative; -} - -.roadmap-row:last-child { - border-bottom: none; -} - -.roadmap-row { - display: grid; - grid-template-columns: 2fr 2fr 1fr 0.5fr; -} - -.row-cell { - padding: 1rem; - text-align: center; - border-right: 2px solid var(--light-gray); - display: flex; - align-items: center; - justify-content: center; -} - -.row-cell:last-child { - border-right: none; -} - -.expand-btn { - background: var(--primary-blue); - color: white; - border: 2px solid var(--dark-text); - border-radius: 50%; - width: 30px; - height: 30px; - cursor: pointer; - font-weight: bold; - font-size: 1.2rem; - transition: all 0.3s ease; -} - -.expand-btn:hover { - background: var(--success-green); - transform: scale(1.1); -} - -.status-badge { - padding: 0.25rem 0.75rem; - border-radius: 15px; - font-size: 0.8rem; - font-weight: 600; - border: 2px solid var(--dark-text); -} - -.status-badge.released { - background: var(--success-green); - color: white; -} - -.status-badge.in-progress { - background: var(--warning-amber); - color: white; -} - -.status-badge.planned { - background: var(--light-gray); - color: var(--medium-gray); - border-style: dashed; -} - -.row-details { - grid-column: 1 / -1; - background: var(--pale-blue); - border-top: 2px solid var(--light-gray); - max-height: 0; - overflow: hidden; - transition: max-height 0.3s ease; -} - -.roadmap-row.expanded .row-details { - max-height: 500px; -} - -.details-content { - padding: 2rem; -} - -.detail-section { - margin-bottom: 2rem; -} - -.detail-section:last-child { - margin-bottom: 0; -} - -.detail-section h4 { - font-size: 1.1rem; - font-weight: 700; - color: 
var(--dark-text); - margin-bottom: 1rem; -} - -.detail-section ul { - list-style: none; - padding: 0; - margin: 0; -} - -.detail-section li { - padding: 0.5rem 0; - border-bottom: 1px solid var(--light-gray); - color: var(--medium-gray); -} - -.detail-section li:last-child { - border-bottom: none; -} - -/* CTA Section */ -.cta-section { - padding: 6rem 0; - background: var(--primary-blue); - text-align: center; -} - -.cta-content { - max-width: 600px; - margin: 0 auto; -} - -.cta-robot { - display: inline-block; - margin-bottom: 2rem; - animation: bounce 3s infinite ease-in-out; -} - -.cta-robot .robot-head { - width: 80px; - height: 80px; - background: var(--warm-yellow); - border-radius: 20px; - border: 4px solid var(--dark-text); - margin: 0 auto 10px; - position: relative; -} - -.cta-robot .robot-eyes { - display: flex; - justify-content: space-between; - padding: 15px 12px; -} - -.eye.winking { - height: 4px; - border-radius: 2px; - animation: wink 3s infinite; -} - -.cta-robot .robot-body { - width: 100px; - height: 80px; - background: var(--soft-green); - border-radius: 15px; - border: 4px solid var(--dark-text); - margin: 0 auto; - display: flex; - align-items: center; - justify-content: center; -} - -.heart { - font-size: 1.5rem; - animation: heartbeat 2s infinite; -} - -.cta-content h2 { - font-size: 2.5rem; - font-weight: 800; - color: white; - margin-bottom: 1rem; -} - -.cta-content p { - font-size: 1.2rem; - color: rgba(255, 255, 255, 0.9); - margin-bottom: 2rem; -} - -.cta-buttons { - display: flex; - justify-content: center; - gap: 1rem; - flex-wrap: wrap; -} - -.btn-primary, -.btn-secondary { - padding: 1rem 2rem; - border-radius: 50px; - font-size: 1rem; - font-weight: 600; - text-decoration: none; - transition: all 0.3s ease; - border: 3px solid var(--dark-text); - display: inline-block; -} - -.btn-primary { - background: var(--success-green); - color: white; -} - -.btn-primary:hover { - transform: translateY(-2px); - box-shadow: 0 8px 25px 
rgba(34, 197, 94, 0.3); -} - -.btn-secondary { - background: white; - color: var(--dark-text); -} - -.btn-secondary:hover { - transform: translateY(-2px); - box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1); -} - -/* Animations */ -@keyframes bounce { - 0%, - 100% { - transform: translateY(0px); - } - 50% { - transform: translateY(-10px); - } -} - -@keyframes blink { - 0%, - 90%, - 100% { - opacity: 1; - } - 95% { - opacity: 0.3; - } -} - -@keyframes pulse { - 0%, - 100% { - transform: scale(1); - opacity: 1; - } - 50% { - transform: scale(1.1); - opacity: 0.8; - } -} - -@keyframes wink { - 0%, - 90%, - 100% { - height: 16px; - border-radius: 50%; - } - 95% { - height: 4px; - border-radius: 2px; - } -} - -@keyframes heartbeat { - 0%, - 100% { - transform: scale(1); - } - 50% { - transform: scale(1.2); - } -} - -@keyframes float { - 0%, - 100% { - transform: translateY(0px); - } - 50% { - transform: translateY(-5px); - } -} - -/* Responsive Design */ -@media (max-width: 1024px) { - .vision-formula { - flex-direction: column; - align-items: center; - } - - .modes-comparison { - grid-template-columns: 1fr; - } - - .models-grid { - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - } - - .comparison-table, - .roadmap-database { - font-size: 0.9rem; - } - - .database-header, - .roadmap-row { - grid-template-columns: 1.5fr 1.5fr 1fr 0.5fr; - } -} - -@media (max-width: 768px) { - .container { - padding: 0 1rem; - } - - .title-main { - font-size: 2.5rem; - } - - .section-title { - font-size: 2rem; - } - - .toc-grid { - grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); - } - - .vision-principles { - grid-template-columns: 1fr; - } - - .platforms-grid { - grid-template-columns: 1fr; - } - - .model-specs { - grid-template-columns: 1fr; - } - - .tools-grid { - grid-template-columns: 1fr; - } - - .cta-buttons { - flex-direction: column; - align-items: center; - } - - .speech-bubble { - display: none; - } - - .comparison-table, - .roadmap-database { - font-size: 
0.8rem; - } - - .table-header, - .table-row { - grid-template-columns: 1fr; - } - - .table-cell, - .header-cell { - text-align: left; - border-right: none; - border-bottom: 1px solid var(--light-gray); - } - - .database-header, - .roadmap-row { - grid-template-columns: 1fr; - } - - .row-cell { - text-align: left; - border-right: none; - border-bottom: 1px solid var(--light-gray); - } -} - -@media (max-width: 480px) { - .hero-content { - padding: 0 1rem; - } - - .section-title { - font-size: 1.8rem; - } - - .cta-content h2 { - font-size: 2rem; - } - - .platform-card, - .model-card, - .tool-card { - padding: 1.5rem; - } -} - -/* Accessibility */ -@media (prefers-reduced-motion: reduce) { - * { - animation-duration: 0.01ms !important; - animation-iteration-count: 1 !important; - transition-duration: 0.01ms !important; - } -} - -/* High contrast mode */ -@media (prefers-contrast: high) { - .model-card, - .platform-card, - .tool-card, - .toc-card { - border-width: 4px; - } - - .comparison-table, - .roadmap-database { - border-width: 4px; - } -} - -/* Focus states for accessibility */ -.toc-item:focus, -.expand-btn:focus, -.btn-primary:focus, -.btn-secondary:focus { - outline: 2px solid var(--primary-blue); - outline-offset: 2px; -} diff --git a/website/src/styles/retro-effects.css b/website/src/styles/retro-effects.css deleted file mode 100644 index 1e6f422942..0000000000 --- a/website/src/styles/retro-effects.css +++ /dev/null @@ -1,822 +0,0 @@ -/* Retro-Tech Effects CSS */ -/* RadioShack meets PostHog aesthetic */ - -:root { - /* Retro Color Palette */ - --retro-green: #00ff41; - --retro-amber: #ffb000; - --retro-blue: #00bfff; - --retro-red: #ff0040; - --retro-cyan: #00ffff; - --retro-magenta: #ff00ff; - - /* Terminal Colors */ - --terminal-bg: #0a0a0a; - --terminal-fg: #00ff41; - --terminal-cursor: #ffffff; - - /* Glow Effects */ - --glow-green: rgba(0, 255, 65, 0.5); - --glow-amber: rgba(255, 176, 0, 0.5); - --glow-blue: rgba(0, 191, 255, 0.5); - --glow-red: 
rgba(255, 0, 64, 0.5); -} - -/* CRT Monitor Effect */ -.crt-effect { - position: relative; - background: radial-gradient(ellipse at center, #0a0a0a 0%, #000000 100%); -} - -.crt-effect::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: repeating-linear-gradient( - 0deg, - rgba(0, 255, 65, 0.05) 0px, - rgba(0, 255, 65, 0.05) 1px, - transparent 1px, - transparent 3px - ); - pointer-events: none; - z-index: 1000; -} - -.crt-effect::after { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: radial-gradient( - ellipse at center, - transparent 40%, - rgba(0, 0, 0, 0.8) 100% - ); - pointer-events: none; - z-index: 1001; -} - -/* Matrix Rain Effect */ -.matrix-rain { - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - pointer-events: none; - z-index: -1; - overflow: hidden; -} - -.matrix-char { - position: absolute; - color: var(--retro-green); - font-family: 'JetBrains Mono', monospace; - font-size: 14px; - animation: matrixFall 3s linear infinite; - opacity: 0.7; -} - -@keyframes matrixFall { - 0% { - transform: translateY(-100vh); - opacity: 1; - } - 100% { - transform: translateY(100vh); - opacity: 0; - } -} - -/* Glitch Text Effects */ -.glitch-text { - position: relative; - color: var(--retro-green); - font-weight: bold; -} - -.glitch-text::before, -.glitch-text::after { - content: attr(data-text); - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; -} - -.glitch-text::before { - animation: glitch-anim-1 0.3s infinite; - color: var(--retro-red); - z-index: -1; -} - -.glitch-text::after { - animation: glitch-anim-2 0.3s infinite; - color: var(--retro-blue); - z-index: -2; -} - -@keyframes glitch-anim-1 { - 0% { - clip-path: inset(40% 0 61% 0); - transform: translate(-2px, -2px); - } - 20% { - clip-path: inset(92% 0 1% 0); - transform: translate(2px, 2px); - } - 40% { - clip-path: inset(43% 0 1% 0); - transform: 
translate(-2px, 2px); - } - 60% { - clip-path: inset(25% 0 58% 0); - transform: translate(2px, -2px); - } - 80% { - clip-path: inset(54% 0 7% 0); - transform: translate(-2px, -2px); - } - 100% { - clip-path: inset(58% 0 43% 0); - transform: translate(2px, 2px); - } -} - -@keyframes glitch-anim-2 { - 0% { - clip-path: inset(25% 0 58% 0); - transform: translate(2px, 1px); - } - 20% { - clip-path: inset(54% 0 7% 0); - transform: translate(-1px, 2px); - } - 40% { - clip-path: inset(58% 0 43% 0); - transform: translate(1px, -1px); - } - 60% { - clip-path: inset(40% 0 61% 0); - transform: translate(-1px, -2px); - } - 80% { - clip-path: inset(92% 0 1% 0); - transform: translate(2px, 1px); - } - 100% { - clip-path: inset(43% 0 1% 0); - transform: translate(-2px, -1px); - } -} - -/* Neon Glow Effects */ -.neon-glow { - text-shadow: - 0 0 5px currentColor, - 0 0 10px currentColor, - 0 0 15px currentColor, - 0 0 20px currentColor; - animation: neonFlicker 2s infinite alternate; -} - -@keyframes neonFlicker { - 0%, - 18%, - 22%, - 25%, - 53%, - 57%, - 100% { - text-shadow: - 0 0 5px currentColor, - 0 0 10px currentColor, - 0 0 15px currentColor, - 0 0 20px currentColor; - } - 20%, - 24%, - 55% { - text-shadow: none; - } -} - -/* Terminal Cursor */ -.terminal-cursor { - display: inline-block; - background-color: var(--terminal-cursor); - color: var(--terminal-bg); - animation: cursorBlink 1s infinite; - margin-left: 2px; - width: 8px; - height: 1em; -} - -@keyframes cursorBlink { - 0%, - 50% { - opacity: 1; - } - 51%, - 100% { - opacity: 0; - } -} - -/* Data Stream Animation */ -.data-stream { - position: relative; - overflow: hidden; -} - -.data-stream::before { - content: ''; - position: absolute; - top: 0; - left: -100%; - width: 100%; - height: 100%; - background: linear-gradient( - 90deg, - transparent, - rgba(0, 255, 65, 0.1) 10%, - rgba(0, 255, 65, 0.3) 50%, - rgba(0, 255, 65, 0.1) 90%, - transparent - ); - animation: dataStreamFlow 2s infinite; -} - -@keyframes 
dataStreamFlow { - 0% { - left: -100%; - opacity: 0; - } - 50% { - opacity: 1; - } - 100% { - left: 100%; - opacity: 0; - } -} - -/* Circuit Board Traces */ -.circuit-trace { - position: relative; - overflow: hidden; -} - -.circuit-trace::after { - content: ''; - position: absolute; - top: 50%; - left: 0; - right: 0; - height: 2px; - background: linear-gradient( - 90deg, - transparent, - var(--retro-green) 20%, - var(--retro-green) 80%, - transparent - ); - transform: translateY(-50%); - animation: circuitPulse 3s infinite; -} - -@keyframes circuitPulse { - 0%, - 100% { - opacity: 0.3; - box-shadow: 0 0 5px var(--retro-green); - } - 50% { - opacity: 1; - box-shadow: 0 0 15px var(--retro-green); - } -} - -/* Electronic Component Styles */ -.component-resistor { - display: inline-block; - width: 20px; - height: 6px; - background: linear-gradient( - 90deg, - transparent 0%, - var(--retro-amber) 20%, - var(--retro-red) 40%, - var(--retro-amber) 60%, - var(--retro-red) 80%, - transparent 100% - ); - border-radius: 1px; - position: relative; -} - -.component-resistor::before, -.component-resistor::after { - content: ''; - position: absolute; - top: 50%; - width: 5px; - height: 1px; - background: var(--retro-green); - transform: translateY(-50%); -} - -.component-resistor::before { - left: -5px; -} -.component-resistor::after { - right: -5px; -} - -.component-led { - display: inline-block; - width: 8px; - height: 8px; - border-radius: 50%; - background: radial-gradient(circle, var(--retro-red), transparent); - box-shadow: 0 0 10px var(--retro-red); - animation: ledBlink 2s infinite; -} - -@keyframes ledBlink { - 0%, - 80%, - 100% { - opacity: 0.3; - box-shadow: 0 0 5px var(--retro-red); - } - 85%, - 95% { - opacity: 1; - box-shadow: 0 0 15px var(--retro-red); - } -} - -/* Retro Button Styles */ -.retro-button { - background: linear-gradient(145deg, #2a2a2a, #1a1a1a); - border: 2px solid var(--retro-green); - color: var(--retro-green); - padding: 0.75rem 1.5rem; - 
font-family: 'JetBrains Mono', monospace; - font-weight: 600; - text-transform: uppercase; - letter-spacing: 0.1em; - cursor: pointer; - position: relative; - overflow: hidden; - transition: all 0.3s ease; -} - -.retro-button::before { - content: ''; - position: absolute; - top: 0; - left: -100%; - width: 100%; - height: 100%; - background: linear-gradient( - 90deg, - transparent, - rgba(0, 255, 65, 0.2), - transparent - ); - transition: left 0.5s; -} - -.retro-button:hover::before { - left: 100%; -} - -.retro-button:hover { - box-shadow: - 0 0 20px var(--glow-green), - inset 0 0 20px rgba(0, 255, 65, 0.1); - transform: translateY(-2px); -} - -.retro-button:active { - transform: translateY(0); - box-shadow: - 0 0 10px var(--glow-green), - inset 0 0 10px rgba(0, 255, 65, 0.2); -} - -/* Holographic Effect */ -.holographic { - background: linear-gradient( - 45deg, - var(--retro-cyan), - var(--retro-magenta), - var(--retro-amber), - var(--retro-green) - ); - background-size: 400% 400%; - animation: holographicShift 3s ease infinite; - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; -} - -@keyframes holographicShift { - 0% { - background-position: 0% 50%; - } - 50% { - background-position: 100% 50%; - } - 100% { - background-position: 0% 50%; - } -} - -/* System Status Indicators */ -.system-status { - display: flex; - align-items: center; - gap: 0.5rem; - font-family: 'JetBrains Mono', monospace; - font-size: 0.8rem; - text-transform: uppercase; -} - -.status-bar { - width: 60px; - height: 4px; - background: #333; - border-radius: 2px; - overflow: hidden; - position: relative; -} - -.status-bar::after { - content: ''; - position: absolute; - top: 0; - left: 0; - height: 100%; - background: var(--retro-green); - border-radius: 2px; - animation: statusProgress 2s ease-in-out infinite; -} - -@keyframes statusProgress { - 0% { - width: 0%; - } - 50% { - width: 100%; - } - 100% { - width: 0%; - } -} - -/* Vintage Monitor Bezel 
*/ -.monitor-bezel { - background: linear-gradient(135deg, #2a2a2a 0%, #1a1a1a 50%, #0a0a0a 100%); - border: 3px solid #333; - border-radius: 15px; - padding: 20px; - position: relative; - box-shadow: - inset 0 0 20px rgba(0, 0, 0, 0.5), - 0 0 30px rgba(0, 0, 0, 0.8); -} - -.monitor-bezel::before { - content: ''; - position: absolute; - top: 10px; - right: 10px; - width: 8px; - height: 8px; - border-radius: 50%; - background: var(--retro-red); - box-shadow: 0 0 8px var(--retro-red); - animation: powerIndicator 3s infinite; -} - -@keyframes powerIndicator { - 0%, - 90%, - 100% { - opacity: 1; - } - 95% { - opacity: 0.3; - } -} - -/* ASCII Art Enhancement */ -.ascii-art { - font-family: 'JetBrains Mono', monospace; - white-space: pre; - color: var(--retro-green); - text-shadow: 0 0 10px var(--glow-green); - animation: asciiGlow 4s ease-in-out infinite alternate; -} - -@keyframes asciiGlow { - 0% { - text-shadow: 0 0 5px var(--glow-green); - opacity: 0.8; - } - 100% { - text-shadow: 0 0 20px var(--glow-green); - opacity: 1; - } -} - -/* Retro Loading Animation */ -.retro-loader { - display: inline-block; - position: relative; - width: 40px; - height: 40px; -} - -.retro-loader::after { - content: ''; - position: absolute; - border: 3px solid #333; - border-top: 3px solid var(--retro-green); - border-radius: 50%; - width: 30px; - height: 30px; - animation: retroSpin 1s linear infinite; -} - -@keyframes retroSpin { - 0% { - transform: rotate(0deg); - } - 100% { - transform: rotate(360deg); - } -} - -/* Scanline Effect */ -.scanlines { - position: relative; -} - -.scanlines::after { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: repeating-linear-gradient( - 0deg, - transparent, - transparent 2px, - rgba(0, 255, 65, 0.03) 2px, - rgba(0, 255, 65, 0.03) 4px - ); - pointer-events: none; - animation: scanlineMove 8s linear infinite; -} - -@keyframes scanlineMove { - 0% { - transform: translateY(0); - } - 100% { - transform: 
translateY(4px); - } -} - -/* Phosphor Screen Effect */ -.phosphor-screen { - background: radial-gradient(ellipse at center, #001100 0%, #000000 100%); - color: var(--retro-green); - text-shadow: 0 0 2px var(--retro-green); -} - -/* Terminal Window */ -.terminal-window { - background: #000000; - border: 2px solid var(--retro-green); - border-radius: 4px; - padding: 1rem; - font-family: 'JetBrains Mono', monospace; - color: var(--retro-green); - box-shadow: - 0 0 20px rgba(0, 255, 65, 0.3), - inset 0 0 20px rgba(0, 255, 65, 0.1); -} - -.terminal-header { - display: flex; - align-items: center; - margin-bottom: 1rem; - padding-bottom: 0.5rem; - border-bottom: 1px solid var(--retro-green); -} - -.terminal-title { - color: var(--retro-amber); - font-weight: bold; - text-transform: uppercase; -} - -/* Retro Grid */ -.retro-grid { - background-image: - linear-gradient(rgba(0, 255, 65, 0.1) 1px, transparent 1px), - linear-gradient(90deg, rgba(0, 255, 65, 0.1) 1px, transparent 1px); - background-size: 20px 20px; -} - -/* Digital Clock Display */ -.digital-clock { - font-family: 'JetBrains Mono', monospace; - color: var(--retro-green); - background: #000000; - padding: 0.5rem 1rem; - border: 1px solid var(--retro-green); - font-weight: bold; - text-shadow: 0 0 10px var(--retro-green); - letter-spacing: 0.1em; -} - -/* Radar Sweep Effect */ -.radar-sweep { - position: relative; - border-radius: 50%; - background: radial-gradient( - circle, - transparent 30%, - rgba(0, 255, 65, 0.1) 100% - ); - overflow: hidden; -} - -.radar-sweep::before { - content: ''; - position: absolute; - top: 50%; - left: 50%; - width: 50%; - height: 2px; - background: linear-gradient(90deg, var(--retro-green), transparent); - transform-origin: left center; - animation: radarSweep 3s linear infinite; -} - -@keyframes radarSweep { - 0% { - transform: translate(-50%, -50%) rotate(0deg); - } - 100% { - transform: translate(-50%, -50%) rotate(360deg); - } -} - -/* VHS Static Effect */ -.vhs-static { - 
position: relative; - overflow: hidden; -} - -.vhs-static::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: - repeating-linear-gradient( - 90deg, - transparent, - transparent 2px, - rgba(255, 255, 255, 0.03) 2px, - rgba(255, 255, 255, 0.03) 4px - ), - repeating-linear-gradient( - 0deg, - transparent, - transparent 2px, - rgba(255, 255, 255, 0.03) 2px, - rgba(255, 255, 255, 0.03) 4px - ); - animation: vhsStatic 0.1s infinite; - pointer-events: none; -} - -@keyframes vhsStatic { - 0% { - opacity: 0.03; - } - 50% { - opacity: 0.05; - } - 100% { - opacity: 0.03; - } -} - -/* Accessibility and Reduced Motion */ -@media (prefers-reduced-motion: reduce) { - *, - *::before, - *::after { - animation-duration: 0.01ms !important; - animation-iteration-count: 1 !important; - transition-duration: 0.01ms !important; - } -} - -/* High Contrast Mode */ -@media (prefers-contrast: high) { - :root { - --retro-green: #00ff00; - --retro-amber: #ffff00; - --retro-red: #ff0000; - --retro-blue: #0080ff; - } - - .crt-effect::before { - display: none; - } - - .neon-glow { - text-shadow: none; - font-weight: bold; - } - - .scanlines::after { - display: none; - } -} - -/* Dark Mode Enhancements */ -@media (prefers-color-scheme: dark) { - .retro-button { - background: linear-gradient(145deg, #1a1a1a, #0a0a0a); - } - - .monitor-bezel { - background: linear-gradient(135deg, #1a1a1a 0%, #0a0a0a 50%, #000000 100%); - } -} - -/* Print Styles */ -@media print { - .matrix-rain, - .crt-effect::before, - .crt-effect::after, - .neon-glow, - .data-stream::before, - .circuit-trace::after, - .scanlines::after { - display: none !important; - } - - .retro-button, - .terminal-window { - border: 2px solid #000 !important; - background: #fff !important; - color: #000 !important; - } -} - -/* Mobile Optimizations */ -@media (max-width: 768px) { - .retro-button { - padding: 0.5rem 1rem; - font-size: 0.9rem; - } - - .terminal-window { - padding: 0.75rem; - 
font-size: 0.9rem; - } - - .digital-clock { - font-size: 0.8rem; - padding: 0.25rem 0.5rem; - } -} - -/* Focus States for Accessibility */ -.retro-button:focus, -.terminal-window:focus { - outline: 2px solid var(--retro-green); - outline-offset: 2px; -} diff --git a/website/src/styles/robot-personalities.css b/website/src/styles/robot-personalities.css deleted file mode 100644 index 71a1cac663..0000000000 --- a/website/src/styles/robot-personalities.css +++ /dev/null @@ -1,393 +0,0 @@ -/* Robot Personalities - Fun, Playful, PostHog-inspired Style */ - -/* ===== VIBRANT COLOR PALETTE ===== */ -:root { - /* Primary Colors - Bold & Playful */ - --robot-pink: #FF006E; - --robot-blue: #3A86FF; - --robot-green: #06FFA5; - --robot-yellow: #FFB700; - --robot-purple: #8338EC; - --robot-orange: #FB5607; - --robot-teal: #00F5FF; - --robot-red: #FF4365; - - /* Background & UI Colors */ - --robot-dark: #1A1A2E; - --robot-light: #FFFFFF; - --robot-gray: #4A4E69; - --robot-bg-purple: #F3E8FF; - --robot-bg-blue: #E8F4FF; - --robot-bg-pink: #FFE8F0; - --robot-bg-yellow: #FFF8E8; - --robot-bg-green: #E8FFEA; - - /* Special Effects */ - --robot-glow-pink: rgba(255, 0, 110, 0.4); - --robot-glow-blue: rgba(58, 134, 255, 0.4); - --robot-glow-green: rgba(6, 255, 165, 0.4); - --robot-shadow: rgba(26, 26, 46, 0.15); -} - -/* ===== HAPPY ROBOT SMILES ===== */ -.robot-smile { - position: absolute; - bottom: 12px; - left: 50%; - transform: translateX(-50%); - width: 20px; - height: 10px; - border: 3px solid var(--robot-dark); - border-top: none !important; - border-radius: 0 0 20px 20px !important; - background: transparent; -} - -/* Big smile variant */ -.robot-smile-big { - width: 30px; - height: 15px; - border-radius: 0 0 30px 30px !important; -} - -/* Cheeky smile */ -.robot-smile.cheeky { - transform: translateX(-50%) rotate(-5deg); -} - -/* ===== ROBOT EYES WITH PERSONALITY ===== */ -.robot-eye { - width: 12px; - height: 12px; - background: var(--robot-dark); - border-radius: 50%; - 
position: relative; - transition: all 0.3s ease; -} - -/* Eye shine */ -.robot-eye::after { - content: ''; - position: absolute; - top: 2px; - right: 2px; - width: 4px; - height: 4px; - background: white; - border-radius: 50%; - animation: eyeShine 3s infinite; -} - -/* Winking eye */ -.robot-eye.wink { - height: 2px; - border-radius: 0; - animation: wink 4s infinite; -} - -/* Excited eyes */ -.robot-eye.excited { - transform: scale(1.2); -} - -.robot-eye.excited::before { - content: ''; - position: absolute; - top: -5px; - left: 50%; - transform: translateX(-50%); - width: 2px; - height: 4px; - background: var(--robot-dark); - border-radius: 1px; -} - -/* Heart eyes */ -.robot-eye.love { - background: var(--robot-pink); - clip-path: polygon(50% 25%, 15% 0%, 0% 15%, 0% 40%, 50% 90%, 100% 40%, 100% 15%, 85% 0%); - border-radius: 0; - width: 14px; - height: 14px; -} - -/* Sleepy eyes */ -.robot-eye.sleeping { - height: 2px; - border-radius: 0; - background: var(--robot-gray); -} - -/* ===== ROBOT BLUSH ===== */ -.robot-blush { - position: absolute; - width: 12px; - height: 8px; - background: var(--robot-pink); - opacity: 0.3; - border-radius: 50%; - top: 50%; -} - -.robot-blush.left { - left: -5px; -} - -.robot-blush.right { - right: -5px; -} - -/* ===== ROBOT HEAD STYLES ===== */ -.robot-head { - position: relative; - animation: robotBob 4s ease-in-out infinite; -} - -/* Add antenna with animation */ -.robot-antenna { - position: absolute; - top: -20px; - left: 50%; - transform: translateX(-50%); - width: 3px; - height: 15px; - background: var(--robot-dark); - border-radius: 2px; -} - -.robot-antenna::after { - content: ''; - position: absolute; - top: -8px; - left: 50%; - transform: translateX(-50%); - width: 12px; - height: 12px; - background: var(--robot-pink); - border: 2px solid var(--robot-dark); - border-radius: 50%; - animation: antennaGlow 2s ease-in-out infinite; -} - -/* ===== ROBOT BODY DECORATIONS ===== */ -.robot-chest-screen { - position: absolute; - 
top: 20px; - left: 50%; - transform: translateX(-50%); - width: 40px; - height: 30px; - background: var(--robot-dark); - border-radius: 8px; - overflow: hidden; -} - -.robot-chest-screen::after { - content: '♥'; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - color: var(--robot-pink); - font-size: 20px; - animation: heartBeat 1.5s ease-in-out infinite; -} - -/* Robot buttons */ -.robot-button { - position: absolute; - width: 8px; - height: 8px; - background: var(--robot-yellow); - border: 2px solid var(--robot-dark); - border-radius: 50%; - cursor: pointer; - transition: all 0.2s ease; -} - -.robot-button:hover { - transform: scale(1.2); - background: var(--robot-green); -} - -/* ===== SPECIAL ROBOT VARIANTS ===== */ - -/* Hero Robot - Extra Playful */ -.hero-robot .robot-head { - background: linear-gradient(135deg, var(--robot-yellow) 0%, var(--robot-orange) 100%); - box-shadow: 0 8px 32px var(--robot-shadow); -} - -.hero-robot .robot-body { - background: linear-gradient(135deg, var(--robot-green) 0%, var(--robot-teal) 100%); - box-shadow: 0 8px 32px var(--robot-shadow); -} - -/* Models Robot - Smart & Analytical */ -.models-robot .robot-head { - background: linear-gradient(135deg, var(--robot-purple) 0%, var(--robot-pink) 100%); -} - -.models-robot::before { - content: ''; - position: absolute; - top: -30px; - left: 50%; - transform: translateX(-50%); - width: 60px; - height: 60px; - border: 2px dashed var(--robot-purple); - border-radius: 50%; - animation: thinkingCircle 6s linear infinite; -} - -/* Platform Robot - Connected */ -.platform-robot .robot-body::before, -.platform-robot .robot-body::after { - content: ''; - position: absolute; - width: 40px; - height: 2px; - background: var(--robot-blue); - top: 50%; - animation: connectionPulse 2s ease-in-out infinite; -} - -.platform-robot .robot-body::before { - left: -40px; -} - -.platform-robot .robot-body::after { - right: -40px; -} - -/* Tools Robot - Action-Oriented */ 
-.tools-robot { - animation: toolsWorking 3s ease-in-out infinite; -} - -.tools-robot .robot-arms { - animation: armSwing 2s ease-in-out infinite; -} - -/* ===== FUN ANIMATIONS ===== */ -@keyframes robotBob { - 0%, 100% { transform: translateY(0); } - 50% { transform: translateY(-5px); } -} - -@keyframes antennaGlow { - 0%, 100% { - background: var(--robot-pink); - box-shadow: 0 0 10px var(--robot-glow-pink); - } - 50% { - background: var(--robot-green); - box-shadow: 0 0 20px var(--robot-glow-green); - } -} - -@keyframes heartBeat { - 0%, 100% { transform: translate(-50%, -50%) scale(1); } - 25% { transform: translate(-50%, -50%) scale(1.1); } - 50% { transform: translate(-50%, -50%) scale(0.9); } - 75% { transform: translate(-50%, -50%) scale(1.05); } -} - -@keyframes eyeShine { - 0%, 90%, 100% { opacity: 1; } - 95% { opacity: 0; } -} - -@keyframes wink { - 0%, 40%, 50%, 100% { height: 12px; border-radius: 50%; } - 45% { height: 2px; border-radius: 0; } -} - -@keyframes thinkingCircle { - 0% { transform: translateX(-50%) rotate(0deg); } - 100% { transform: translateX(-50%) rotate(360deg); } -} - -@keyframes connectionPulse { - 0%, 100% { opacity: 0.3; width: 40px; } - 50% { opacity: 1; width: 60px; } -} - -@keyframes toolsWorking { - 0%, 100% { transform: rotate(0deg); } - 25% { transform: rotate(-2deg); } - 75% { transform: rotate(2deg); } -} - -@keyframes armSwing { - 0%, 100% { transform: rotate(0deg); } - 25% { transform: rotate(-10deg); } - 75% { transform: rotate(10deg); } -} - -/* ===== SPARKLE EFFECTS ===== */ -.robot-sparkles { - position: absolute; - top: -20px; - left: -20px; - right: -20px; - bottom: -20px; - pointer-events: none; -} - -.sparkle { - position: absolute; - width: 4px; - height: 4px; - background: var(--robot-yellow); - animation: sparkle 3s ease-in-out infinite; -} - -.sparkle:nth-child(1) { top: 10%; left: 20%; animation-delay: 0s; } -.sparkle:nth-child(2) { top: 30%; right: 15%; animation-delay: 0.5s; } -.sparkle:nth-child(3) { 
bottom: 20%; left: 10%; animation-delay: 1s; } -.sparkle:nth-child(4) { bottom: 10%; right: 25%; animation-delay: 1.5s; } - -@keyframes sparkle { - 0%, 100% { - opacity: 0; - transform: scale(0) rotate(0deg); - } - 50% { - opacity: 1; - transform: scale(1) rotate(180deg); - } -} - -/* ===== HOVER INTERACTIONS ===== */ -.robot-head:hover { - animation-play-state: paused; - transform: scale(1.05); - cursor: pointer; -} - -.robot-head:hover .robot-eye { - transform: scale(1.2); -} - -.robot-head:hover .robot-smile { - border-color: var(--robot-pink); - width: 24px; -} - -/* ===== ACCESSIBILITY ===== */ -@media (prefers-reduced-motion: reduce) { - * { - animation-duration: 0.01ms !important; - animation-iteration-count: 1 !important; - transition-duration: 0.01ms !important; - } -} - -/* ===== RESPONSIVE ADJUSTMENTS ===== */ -@media (max-width: 768px) { - .robot-head, .robot-body { - transform: scale(0.9); - } -}