diff --git a/sample_solutions/Docugen-Microagents/.env.example b/sample_solutions/Docugen-Microagents/.env.example new file mode 100644 index 00000000..ee30fff2 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/.env.example @@ -0,0 +1,9 @@ +# Docker Compose Configuration + +# Local URL Endpoint (only needed for non-public domains) +# If using a local domain like api.example.com mapped to localhost, set to the domain without https:// +# Otherwise, set to: not-needed +LOCAL_URL_ENDPOINT=not-needed + +BACKEND_PORT=8000 +FRONTEND_PORT=3000 diff --git a/sample_solutions/Docugen-Microagents/.gitignore b/sample_solutions/Docugen-Microagents/.gitignore new file mode 100644 index 00000000..df9559db --- /dev/null +++ b/sample_solutions/Docugen-Microagents/.gitignore @@ -0,0 +1,86 @@ + +.env +.env.local +.env.*.local +*.env + + + +# ============================================ +# PYTHON +# ============================================ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +*.so + +# Virtual environments +venv/ +env/ +ENV/ +.venv/ + +# PyCharm +.idea/ + +# VS Code +.vscode/ + +# Pytest +.pytest_cache/ +.coverage +htmlcov/ + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# ============================================ +# NODE.JS / REACT +# ============================================ +# Dependencies +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Production build +build/ +dist/ + +# React +.env.development.local +.env.test.local +.env.production.local + +# ============================================ +# TEMPORARY & CACHE FILES +# ============================================ +# Temporary cloned repositories +api/tmp/ +api/temp/ +*/tmp/ +*/temp/ +tests/ + +# Logs +*.log +logs/ + +# OS files +.DS_Store +Thumbs.db +desktop.ini + +# ============================================ +# LANGGRAPH & AI +# ============================================ +# LangGraph checkpoints (SQLite databases) +*.db +*.sqlite +*.sqlite3 
+checkpoints/ + +tmp/ diff --git a/sample_solutions/Docugen-Microagents/README.md b/sample_solutions/Docugen-Microagents/README.md new file mode 100644 index 00000000..1c526d4c --- /dev/null +++ b/sample_solutions/Docugen-Microagents/README.md @@ -0,0 +1,608 @@ +## Documentation Generator + +Documentation Generator (DocuGen) is an enterprise-grade, AI-powered documentation pipeline that autonomously transforms any GitHub repository into a comprehensive, production-ready README, running entirely on-premises on Intel® Xeon® processors. Built on a nine-agent micro-agent architecture orchestrated by LangGraph, the application deploys specialized AI agents that analyze code structure, extract API endpoints, map call graphs, detect error patterns, parse configurations, and generate Mermaid architecture diagrams. All nine agents are powered by Qwen3-4B-Instruct, a small language model optimized for Intel Xeon's instruction set and memory architecture. By delivering multi-agent AI inference at scale without GPU dependency, Documentation Generator demonstrates how Intel Xeon can run sophisticated, parallel agentic workloads, making it a compelling reference solution for enterprises seeking to automate developer workflows through cost-effective, CPU-native generative AI infrastructure. + +## Table of Contents + +- [Project Overview](#project-overview) +- [Features](#features) +- [Architecture](#architecture) +- [Tool System](#tool-system) +- [Prerequisites](#prerequisites) +- [Quick Start Deployment](#quick-start-deployment) +- [User Interface](#user-interface) +- [Troubleshooting](#troubleshooting) +- [Performance Metrics](#performance-metrics) + +--- + +## Project Overview + +Documentation Generator is an autonomous system that transforms code repositories into professional README documentation using a specialized micro-agent architecture. The application employs nine specialized AI agents built on LangChain and orchestrated by LangGraph. 
Six section-writer agents analyze specific aspects of the codebase (project overview, API endpoints, architecture patterns, error handling, environment configuration, and dependencies), while three coordination agents handle planning, diagram generation, and quality validation. The system clones repositories, performs intelligent analysis with strategic file sampling, and generates structured documentation including project descriptions, installation instructions, usage guides, and Mermaid architecture diagrams. Each agent operates with a limited tool set and context budget optimized for efficiency. The system supports both single-project repositories and monorepos with intelligent project detection and user selection capabilities. + +--- + +## Features + +- Micro-agent system with nine specialized AI agents optimized for small language models: Code Explorer, API Reference, Call Graph, Error Analysis, Environment Config, Dependency Analyzer, Planner, Mermaid Generator, and QA Validator +- 11 custom repository analysis tools built with LangChain distributed across agents for autonomous code exploration, API extraction, and syntax validation +- Strategic file sampling with three reading strategies: full, smart, and pattern_window (context-aware extraction around routes, errors, and entry points) +- Autonomous repository analysis using ReAct pattern (Thought → Action → Observation) for transparent decision-making +- Intelligent monorepo detection with automatic project discovery and user selection interface +- Subfolder-specific documentation generation via URL targeting (e.g., `https://github.com/owner/repo/tree/main/backend`) +- Support for GitHub public and private repositories with token-based authentication +- Automated pull request creation using Model Context Protocol (MCP) with official GitHub MCP server integration +- Real-time progress tracking via Server-Sent Events (SSE) for live agent activity monitoring +- Mermaid 8.14.0 compatible 
diagram generation with semantic validation and render-safe checks +- Built on LangChain framework for tool infrastructure and LangGraph for multi-agent workflow orchestration with state persistence +- Evidence-based documentation generation with filesystem verification and agent output aggregation +- Comprehensive metrics tracking including token usage, tool calls, LLM calls, TPS (tokens per second), and execution duration +- Modern React web interface with job history, status monitoring, and documentation preview +- Flexible LLM backend supporting Qwen/Qwen3-4B-Instruct-2507 optimized for Intel Xeon processors +- RESTful API with JSON-based communication for integration capabilities +- Configurable analysis limits via .env (10GB max repo size, 500 files max, 1MB per file, adjustable line budget) + +--- + +## Architecture + +This application uses a micro-agent architecture with nine specialized AI agents built on LangChain and orchestrated by LangGraph. The React frontend communicates with a FastAPI backend that orchestrates the workflow. Nine agents operate sequentially within a LangGraph StateGraph workflow: six section-writer agents (Code Explorer, API Reference, Call Graph, Error Analysis, Environment Config, Dependency Analyzer) analyze specific aspects of the codebase and write documentation sections directly, followed by three coordination agents (Planner decides which sections to include, Mermaid Generator creates architecture diagrams with semantic validation, and QA Validator performs evidence-based quality checks). An Evidence Aggregator node consolidates filesystem data and agent outputs before planning. All agents use strategic file sampling with pattern_window mode to extract high-value code regions (routes, error handlers, entry points) within limited context budgets. The system tracks comprehensive metrics including token usage, tool calls, and execution duration. 
Each agent operates with 3-8 specialized tools and implements the ReAct pattern for autonomous decision-making. + +```mermaid +graph TB + subgraph "Client Layer" + A[React Web UI
Port 3000] + end + + subgraph "API Layer" + B[FastAPI Server
Port 5001
REST API + SSE] + end + + subgraph "Section Writer Agents" + C[Code Explorer
Overview + Features] + D[API Reference
Endpoint Extractor] + E[Call Graph
Architecture Writer] + F[Error Analysis
Troubleshooting Writer] + G[Env Config
Configuration Writer] + H[Dependency Analyzer
Prerequisites Writer] + end + + subgraph "Coordination Agents" + I[Planner
Section Selector] + J[Mermaid Generator
Diagram + Validation] + K[QA Validator
Evidence Checks] + end + + subgraph "GitHub Integration" + L[PR Agent
MCP Tools] + M[GitHub MCP Server
Docker Container] + end + + subgraph "External Services" + N[LLM Backend
Qwen3-4B-Instruct] + end + + A -->|POST /generate-docs| B + A -->|SSE /logs| B + B -->|Clone & Detect| C + C --> D + D --> E + E --> F + F --> G + G --> H + H -->|Evidence Aggregation| I + I --> J + J --> K + K -->|Assembly| B + B -->|JSON Response| A + A -->|POST /create-pr| B + B --> L + L --> M + M -->|Branch, Commit, PR| GitHub + C --> N + D --> N + E --> N + F --> N + G --> N + H --> N + I --> N + J --> N + K --> N + L --> N + + style A fill:#e1f5ff + style B fill:#fff4e1 + style C fill:#ffe1f5 + style D fill:#ffe1f5 + style E fill:#ffe1f5 + style F fill:#ffe1f5 + style G fill:#ffe1f5 + style H fill:#ffe1f5 + style I fill:#d4f1d4 + style J fill:#d4f1d4 + style K fill:#d4f1d4 + style L fill:#ffd4d4 + style M fill:#e1ffe1 + style N fill:#ffe1e1 +``` + +**Strategic File Sampling:** + +The system implements three intelligent file reading strategies to optimize context usage with small language models. All strategies work within a configurable line budget (default: 500 lines, set via MAX_LINES_PER_FILE): + +- **full** - Reads first N lines within budget. Simple and deterministic for small files. +- **smart** - Extracts top + function/class signatures + bottom sections. Preserves file structure overview while staying within line budget. +- **pattern_window** - Context-aware extraction that detects high-value code patterns (FastAPI/Flask routes, error handlers, entry points, main functions) and extracts ±6 lines around matches. Falls back to smart strategy if no patterns found. This is the default mode for all agents, enabling efficient analysis of large codebases. + +All agents use `pattern_window` by default to maximize information density within the 8K context budget optimized for Qwen3-4B. The line budget is configurable via `.env` file to balance context usage and file coverage. + +**Core Components:** + +1. **React Web UI (Port 3000)** - Vite development server hosting the React frontend. 
Handles repository URL input with subfolder support, displays real-time generation progress via SSE streaming, provides project selection interface for monorepos, and offers documentation preview with download functionality. + +2. **FastAPI Server (Port 5001)** - Manages LangGraph workflow orchestration with state persistence, maintains job state in memory, handles repository cloning with branch detection, performs monorepo project detection, tracks comprehensive metrics (tokens, tool calls, TPS, duration), and coordinates GitHub PR creation via MCP protocol. + +3. **Code Explorer Agent** - Writes Project Overview and Features sections. Uses 5 tools: `list_directory`, `read_file`, `detect_languages`, `extract_dependencies`, `analyze_code_structure`. Extracts concrete technical details including project purpose, user-facing capabilities, dependencies, and architecture patterns. + +4. **API Reference Agent** - Extracts API endpoints and returns structured JSON. Uses 3 tools: `list_directory`, `read_file`, `find_api_routes`. Identifies REST API routes, HTTP methods, and endpoint purposes from FastAPI, Flask, Express, and Spring frameworks. + +5. **Call Graph Agent** - Writes Architecture section describing component relationships and data flow. Uses 5 tools including `find_entry_points` to identify main application files and `analyze_code_structure` to map dependencies. + +6. **Error Analysis Agent** - Writes Troubleshooting section by identifying error patterns, exception handlers, and common failure modes. Uses `read_file` with pattern_window to extract try-except blocks and error handling logic. + +7. **Environment Config Agent** - Writes Configuration section by analyzing .env.example files and configuration modules. Extracts environment variables, API keys, and deployment settings with their purposes. + +8. **Dependency Analyzer Agent** - Writes Prerequisites and Quick Start Deployment sections. 
Analyzes requirements.txt, package.json, Dockerfile, and docker-compose.yml to generate installation instructions and deployment guides. + +9. **Planner Agent** - Determines which sections to include in the final README based on project type (web app, API service, library, CLI tool). Uses 8 tools including `find_ui_files`, `find_docker_files`, `find_config_files` for project detection. + +10. **Mermaid Generator Agent** - Creates architecture diagrams with semantic validation. Validates diagrams against evidence packet to ensure accuracy (no endpoint nodes, includes backend/frontend/database based on detected components). Uses `validate_mermaid_syntax` for Mermaid 8.14.0 render-safe checks. + +11. **QA Validator Agent** - Performs evidence-based quality validation comparing README sections against filesystem evidence. Checks for hallucinations, missing sections, and factual accuracy before final assembly. + +12. **PR Agent** - Autonomous AI agent activated via `/api/create-pr/{job_id}` endpoint after documentation generation. Uses GitHub MCP server tools via Model Context Protocol stdio interface. Executes four-step workflow: checks existing README.md, creates feature branch, commits documentation with conventional commit message, creates pull request with descriptive body. + +--- + +## Tool System + +The system includes 11 specialized repository analysis tools implemented in `api/tools/repo_tools.py` using LangChain's `@tool` decorator. These tools enable the nine LangGraph agents to autonomously analyze repositories. Each agent receives a curated subset of tools based on its specific responsibilities. The PR Agent uses a separate set of GitHub tools provided by the GitHub MCP server. All tools are invoked by agents using the ReAct pattern - agents autonomously decide when to call tools, interpret results, and act on observations. 
+ +### Tool Distribution Across Agents + +| Agent | Tool Count | Tools Available | +|--------------------------|------------|-------------------------------------------------------| +| **Code Explorer** | 5 | Core analysis tools for repository exploration | +| **API Reference** | 3 | Directory listing, file reading, API route detection | +| **Call Graph** | 5 | Core analysis tools + entry point detection | +| **Error Analysis** | 3 | File reading, directory listing, code structure | +| **Env Config** | 3 | File reading, config file detection, directory listing| +| **Dependency Analyzer** | 5 | Core analysis tools + dependency extraction | +| **Planner** | 8 | Core tools + project detection tools | +| **Mermaid Generator** | 7 | Selective tools + validation tools | +| **QA Validator** | 0 | No tools (operates on README sections + evidence) | +| **PR Agent** | 4 | GitHub MCP tools (via Model Context Protocol) | + +### Core Analysis Tools (5 tools) + +**Framework: LangChain (@tool decorator)** +**Used by: Code Explorer, Call Graph, Dependency Analyzer** + +1. **`list_directory`** - Browses directory structure with automatic filtering of common ignore patterns (.git, node_modules, __pycache__, venv, dist, build) +2. **`read_file`** - Reads source code and configuration files with strategic sampling (supports full, smart, pattern_window modes). Default: pattern_window for context-aware extraction. Limits: 1MB max file size, configurable line budget (default 500 via MAX_LINES_PER_FILE). +3. **`detect_languages`** - Identifies programming languages by file extensions (supports 16 languages including Python, JavaScript, TypeScript, Go, Rust, Java, C++) +4. **`extract_dependencies`** - Recursively scans subdirectories and parses dependency files (requirements.txt, package.json, go.mod, Cargo.toml). Returns first 20 dependencies per ecosystem. +5. 
**`analyze_code_structure`** - Extracts functions, classes, and imports using AST parsing (Python) or basic line count metrics (other languages) + +### Project Detection Tools (3 tools) + +**Framework: LangChain (@tool decorator)** +**Used by: Planner (in addition to core tools)** + +6. **`find_ui_files`** - Detects frontend components and frameworks (React, Vue.js, Angular, Next.js, Svelte) +7. **`find_docker_files`** - Checks for Docker and containerization configuration files +8. **`find_config_files`** - Locates environment and configuration files in repository root + +### Diagram Generation Tools (2 tools) + +**Framework: LangChain (@tool decorator)** +**Used by: Mermaid Generator (in addition to selective core tools)** + +9. **`find_entry_points`** - Locates main entry point files (main.py, server.py, index.js, etc.) across languages +10. **`find_api_routes`** - Extracts API routes/endpoints using regex for Flask, FastAPI, Express, Spring frameworks + +### Validation Tool (1 tool) + +**Framework: LangChain (@tool decorator)** +**Used by: Mermaid Generator for self-critique** + +11. **`validate_mermaid_syntax`** - Validates Mermaid 8.14.0 syntax with automatic error detection and repair suggestions + +### GitHub MCP Tools (4 tools) + +**Framework: Model Context Protocol (MCP)** +**Used by: PR Agent** + +1. **`get_file_contents`** - Retrieves file content and SHA from GitHub repository +2. **`create_branch`** - Creates new feature branch with timestamp-based naming +3. **`create_or_update_file`** - Commits README.md to branch with conventional commit message +4. 
**`create_pull_request`** - Opens PR with AI-generated title, description, and labels + +--- + +## Prerequisites + +### System Requirements + +Before you begin, ensure you have the following installed: + +- **Docker and Docker Compose** (required for running the application containers) +- **Docker daemon running** (required for PR Agent's GitHub MCP server container) +- **Enterprise inference endpoint access** (token-based authentication) + +### Required Model + +This application requires the following model to be deployed on your inference endpoint: + +- **Qwen/Qwen3-4B-Instruct-2507** - Small language model optimized for Intel Xeon processors with 8K context window + +All nine AI agents (Code Explorer, API Reference, Call Graph, Error Analysis, Environment Config, Dependency Analyzer, Planner, Mermaid Generator, and QA Validator) use this model for efficient documentation generation. + +**Note:** This model must be available through your GenAI Gateway or APISIX Gateway deployment before running the application. 
+ +### Required API Configuration + +**For Inference Service (Documentation Generation):** + +This application supports multiple inference deployment patterns: + +**GenAI Gateway**: Provide your GenAI Gateway URL and API key +- **URL format**: `https://api.example.com` +- To generate the GenAI Gateway API key, use the [generate-vault-secrets.sh](https://github.com/opea-project/Enterprise-Inference/blob/main/core/scripts/generate-vault-secrets.sh) script +- The API key is the `litellm_master_key` value from the generated `vault.yml` file + +**APISIX Gateway**: Provide your APISIX Gateway URL and authentication token +- **URL format**: `https://api.example.com/Qwen3-4B-Instruct` +- **Note**: APISIX requires the model name in the URL path (without company/family prefixes) +- To generate the APISIX authentication token, use the [generate-token.sh](https://github.com/opea-project/Enterprise-Inference/blob/main/core/scripts/generate-token.sh) script +- The token is generated using Keycloak client credentials + +**Configuration requirements:** +- **INFERENCE_API_ENDPOINT**: URL to your inference service (example: `https://api.example.com`) +- **INFERENCE_API_TOKEN**: Authentication token/API key for your chosen service + + +### Local Development Configuration + +**For Local Testing Only (Optional)** + +If you're testing with a local inference endpoint using a custom domain (e.g., `api.example.com` mapped to localhost in your hosts file): + +1. Edit `api/.env` and set: + ```bash + LOCAL_URL_ENDPOINT=api.example.com + ``` + (Use the domain name from your INFERENCE_API_ENDPOINT without `https://`) + +2. This allows Docker containers to resolve your local domain correctly. + +**Note:** For public domains or cloud-hosted endpoints, leave the default value `not-needed`. 
+ +### GitHub Personal Access Token (Optional) + +**For Automatic Pull Request Creation:** + +- Required only for PR Agent to create pull requests automatically +- Generate classic token at https://github.com/settings/tokens +- Required scopes: `repo` (full control of private repositories) +- Token format: `ghp_` followed by 36 alphanumeric characters + +### Verify Docker Installation + +```bash +# Check Docker version +docker --version + +# Check Docker Compose version +docker compose version + +# Verify Docker is running +docker ps +``` + +--- + +## Quick Start Deployment + +### Clone the Repository + +```bash +git clone https://github.com/opea-project/Enterprise-Inference.git +cd Enterprise-Inference/sample_solutions/Docugen-Microagents +``` + +### Set up the Environment + +This application requires **two `.env` files** for proper configuration: + +1. **Root `.env` file** (for Docker Compose variables) +2. **`api/.env` file** (for backend application configuration) + +#### Step 1: Create Root `.env` File + +```bash +# From the Docugen-Microagents directory +cat > .env << EOF +# Docker Compose Configuration + +# Local URL Endpoint (only needed for non-public domains) +# If using a local domain like api.example.com mapped to localhost, set to the domain without https:// +# Otherwise, set to: not-needed +LOCAL_URL_ENDPOINT=not-needed + +BACKEND_PORT=8000 +FRONTEND_PORT=3000 + +EOF +``` + +OR + +Copy from the example file and edit with your credentials as required. + +```bash +cp .env.example .env +``` + +**Note:** If using a local domain (e.g., `api.example.com` mapped to localhost), replace `not-needed` with your domain name (without `https://`). + +#### Step 2: Create `api/.env` File + +Copy from the example file and edit with your actual credentials: + +```bash +cp api/.env.example api/.env +``` + +Then edit `api/.env` to set your `INFERENCE_API_ENDPOINT` and `INFERENCE_API_TOKEN`. 
+ +Or manually create `api/.env` with: + +```bash +# ========================================== +# Inference API Configuration +# ========================================== +# INFERENCE_API_ENDPOINT: URL to your inference service (without /v1 suffix) +# +# **GenAI Gateway**: Provide your GenAI Gateway URL and API key +# - URL format: https://genai-gateway.example.com +# - To generate the GenAI Gateway API key, use the [generate-vault-secrets.sh] script +# - The API key is the litellm_master_key value from the generated vault.yml file +# +# **APISIX Gateway**: Provide your APISIX Gateway URL and authentication token +# - For APISIX, include the model name in the INFERENCE_API_ENDPOINT path +# - Example: https://apisix-gateway.example.com/Qwen3-4B-Instruct +# - To generate the APISIX authentication token, use the [generate-token.sh] script +# - The token is generated using Keycloak client credentials +# +# INFERENCE_API_TOKEN: Authentication token/API key for the inference service +INFERENCE_API_ENDPOINT=https://api.example.com +INFERENCE_API_TOKEN=your-pre-generated-token-here + +# ========================================== +# Docker Network Configuration +# ========================================== +# LOCAL_URL_ENDPOINT: Required if using local domain mapping (e.g., api.example.com -> localhost) +# Set to your domain name (without https://) or leave as "not-needed" if using public URLs +LOCAL_URL_ENDPOINT=not-needed + +# ========================================== +# Micro-Agent Model Configuration +# ========================================== +# All agents use Qwen3-4B-Instruct (optimized SLM for code analysis) +# You can customize individual agent models if needed + +CODE_EXPLORER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +API_REFERENCE_MODEL=Qwen/Qwen3-4B-Instruct-2507 +CALL_GRAPH_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ERROR_ANALYSIS_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ENV_CONFIG_MODEL=Qwen/Qwen3-4B-Instruct-2507 +DEPENDENCY_ANALYZER_MODEL=Qwen/Qwen3-4B-Instruct-2507 
+PLANNER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +MERMAID_MODEL=Qwen/Qwen3-4B-Instruct-2507 +QA_VALIDATOR_MODEL=Qwen/Qwen3-4B-Instruct-2507 +WRITER_MODEL=Qwen/Qwen3-4B-Instruct-2507 + +# ========================================== +# Repository Analysis Limits +# ========================================== +# All limits are configurable to suit your needs +TEMP_REPO_DIR=./tmp/repos +MAX_REPO_SIZE=10737418240 # 10GB in bytes +MAX_FILE_SIZE=1000000 # 1MB in bytes +MAX_FILES_TO_SCAN=500 # Maximum number of files to analyze +MAX_LINES_PER_FILE=500 # Line budget per file (pattern_window strategy extracts ~150-300 lines focusing on key patterns) + +# ========================================== +# Agent Execution Settings +# ========================================== +AGENT_TEMPERATURE=0.7 # Model temperature (0.0-1.0) +AGENT_MAX_TOKENS=1000 # Maximum tokens per agent response +AGENT_TIMEOUT=300 # Agent timeout in seconds (5 minutes) + +# ========================================== +# GitHub Integration (MCP) +# ========================================== +# Required for automatic PR creation +# Generate token at: https://github.com/settings/tokens +# Required scopes: repo (full access to repositories) +GITHUB_TOKEN=your_github_personal_access_token_here + +# ========================================== +# Server Configuration +# ========================================== +API_PORT=5001 +HOST=0.0.0.0 + +# ========================================== +# CORS Settings +# ========================================== +# Add your frontend URL if different from defaults +CORS_ORIGINS=["http://localhost:3000", "http://localhost:3001", "http://localhost:5173"] + +# ========================================== +# Security Configuration +# ========================================== +# SSL Verification: Set to false only for development with self-signed certificates +VERIFY_SSL=true +``` + +**Note:** All nine AI agents (Code Explorer, API Reference, Call Graph, Error Analysis, Env 
Config, Dependency Analyzer, Planner, Mermaid Generator, QA Validator) plus PR Agent use Qwen/Qwen3-4B-Instruct-2507 optimized for Intel Xeon processors + +**Important Configuration Notes:** + +- **INFERENCE_API_ENDPOINT**: Your actual inference service URL (replace `https://api.example.com`) + - For APISIX/Keycloak deployments, the model name must be included in the endpoint URL (e.g., `https://api.example.com/Qwen3-4B-Instruct`) +- **INFERENCE_API_TOKEN**: Your actual pre-generated authentication token +- **Model Names**: Use the exact model names from your inference service +- **LOCAL_URL_ENDPOINT**: Only needed if using local domain mapping +- **SSL Verification**: set to false only for development with self-signed certificates + +**Note**: The docker-compose.yml file automatically loads environment variables from both `.env` (root) and `./api/.env` (backend) files. + +### Running the Application + +Start both API and UI services together with Docker Compose: + +```bash +# From the Docugen-Microagents directory +docker compose up --build + +# Or run in detached mode (background) +docker compose up -d --build +``` + +What happens during deployment: +- Docker builds images for frontend and backend (first time: 3-5 minutes) +- Creates containers for both services +- Sets up networking between services +- Initializes the workflow engine + +### Verify Deployment + +Check that all containers are running: + +```bash +docker compose ps +``` + +Expected output - You should see 2 containers with status "Up": + +| Container Name | Port | Status | +|----------------|------|--------| +| `Docugen-Microagents-backend` | 5001 | Up (healthy) | +| `Docugen-Microagents-frontend` | 3000 | Up (healthy) | + +If any container shows "Restarting" or "Exited", check logs: + +```bash +docker compose logs -f +``` + +**View logs:** + +```bash +# All services +docker compose logs -f + +# Backend only +docker logs -f Docugen-Microagents-backend + +# Frontend only +docker logs -f 
Docugen-Microagents-frontend +``` + +**Verify the services are running:** + +```bash +# Check API health endpoint +curl http://localhost:5001/health + +# Or open in your browser +# http://localhost:5001/health + +# Check if containers are running +docker compose ps +``` + +### Access the Application + +Open your browser and navigate to: + +``` +http://localhost:3000 +``` + +--- + +## User Interface + +### Homepage +Enter any GitHub repository URL to start documentation generation. Supports full repositories, specific branches, and subfolder paths for monorepos. + +![Homepage](./docs/images/01-homepage-input.png) + +### Agent Workflow +Watch the nine AI agents work sequentially in real-time. Each agent displays its reasoning process using the ReAct pattern (Thought → Action → Observation). Progress through six section-writer agents, followed by planning, diagram generation, and quality validation. + +![Agent Workflow](./docs/images/02-agent-workflow.png) + +### Progress Tracking +Monitor live progress with detailed logs from each agent. Server-Sent Events (SSE) provide real-time updates on tool executions, decisions, and analysis results. + +![Progress Tracking](./docs/images/03-progress-tracking.png) + +### Generated Documentation +Review the complete README with architecture diagrams, installation instructions, and usage guides. Download or create a pull request directly from the interface. + +![Generated README](./docs/images/04-generated-readme.png) + +### PR Agent Execution +Activate the PR Agent to autonomously create a GitHub pull request. Watch as it checks for existing files, creates a branch, commits changes, and opens a PR with AI-generated descriptions. + +![PR Agent Logs](./docs/images/05-pr-agent-execution.png) + +### Pull Request Created +PR Agent successfully creates the pull request with conventional commit messages and links it back to the UI for immediate access. 
+ +![PR Created](./docs/images/06-pr-created.png) + +--- + +## Troubleshooting + +For detailed troubleshooting guidance and solutions to common issues, refer to: + +[TROUBLESHOOTING.md](./TROUBLESHOOTING.md) + +--- + +## Performance Metrics + +The system tracks comprehensive performance metrics for each agent execution, providing visibility into token usage, processing speed, and resource consumption. Metrics are calculated and displayed in real-time during workflow execution: + +**Metrics Tracked:** +- **Token Usage** - Input and output tokens per agent, aggregated for total workflow cost +- **Tokens Per Second (TPS)** - Model generation speed measured at two levels: + - **Per-Agent TPS**: `output_tokens / agent_duration_seconds` - True model inference speed for each agent + - **Workflow Average TPS**: `total_output_tokens / workflow_duration_seconds` - Overall throughput including inter-agent overhead (typically lower due to I/O and state management gaps) +- **Execution Duration** - Wall-clock time for each agent and total workflow duration +- **LLM Calls** - Number of model inference requests per agent +- **Tool Calls** - Number of repository analysis tool invocations per agent + +**TPS Interpretation**: Per-Agent TPS measures effective throughput (output tokens / total agent duration) which includes LLM inference time plus tool execution overhead. This represents real-world agent performance. Workflow Average TPS is typically lower because it also includes gaps between agent executions (state updates, inter-agent I/O). Both metrics provide complete visibility into system performance. + +![Performance Metrics Dashboard](./docs/images/07-metrics.png) + +Metrics are displayed in the agent logs panel upon completion with the format: `AgentName: total=X (out=Y), Zms, outTPS=W` showing total tokens, output tokens, duration, and output tokens per second for each agent, plus workflow-level averages. 
+ diff --git a/sample_solutions/Docugen-Microagents/TROUBLESHOOTING.md b/sample_solutions/Docugen-Microagents/TROUBLESHOOTING.md new file mode 100644 index 00000000..4e8ab5b9 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/TROUBLESHOOTING.md @@ -0,0 +1,517 @@ +# Troubleshooting Guide + +This guide covers common issues and solutions for the Documentation Generator Micro-Agents application. + +--- + +## Table of Contents + +- [Installation Issues](#installation-issues) +- [Configuration Errors](#configuration-errors) +- [Runtime Errors](#runtime-errors) +- [Agent Failures](#agent-failures) +- [Performance Issues](#performance-issues) +- [Docker Issues](#docker-issues) +- [Network and API Errors](#network-and-api-errors) + +--- + +## Installation Issues + +### Docker Container Build Fails + +**Error:** `ERROR [internal] load build context` + +**Cause:** Docker daemon not running or insufficient permissions. + +**Solution:** +```bash +# Start Docker daemon (Linux/Mac) +sudo systemctl start docker + +# On Windows, start Docker Desktop application + +# Verify Docker is running +docker ps +``` + +### Port Already in Use + +**Error:** `Error starting userland proxy: listen tcp 0.0.0.0:5001: bind: address already in use` + +**Cause:** Another service is using port 5001 (backend) or 3000 (frontend). + +**Solution:** +```bash +# Find process using the port +# On Linux/Mac: +lsof -i :5001 +lsof -i :3000 + +# On Windows: +netstat -ano | findstr :5001 +netstat -ano | findstr :3000 + +# Kill the process or change ports in docker-compose.yml +``` + +--- + +## Configuration Errors + +### Missing GenAI Gateway API Key + +**Error:** `ValueError: GENAI_GATEWAY_API_KEY not configured` + +**Cause:** API key not set in `.env` file. 
+ +**Solution:** +```bash +# Copy environment template +cp api/.env.example api/.env + +# Edit api/.env and add your API key +GENAI_GATEWAY_API_KEY=your_actual_api_key_here +``` + +### Invalid GitHub Token + +**Error:** `401 Unauthorized` when creating pull requests + +**Cause:** GitHub token missing, expired, or has insufficient permissions. + +**Solution:** +1. Generate a new Personal Access Token at https://github.com/settings/tokens +2. Select `repo` scope (full control of repositories) +3. Update `GITHUB_TOKEN` in `api/.env` +4. Token format should be: `ghp_` followed by 36 alphanumeric characters + +### Authentication Mode Mismatch + +**Error:** `KeyError: 'KEYCLOAK_CLIENT_SECRET'` or connection refused + +**Cause:** AUTH_MODE set to keycloak but credentials not configured. + +**Solution:** +```bash +# For GenAI Gateway (recommended): +AUTH_MODE=genai_gateway +GENAI_GATEWAY_URL=https://your-gateway-url.com +GENAI_GATEWAY_API_KEY=your_key_here + +# For Keycloak: +AUTH_MODE=keycloak +BASE_URL=https://your-inference-endpoint.com +KEYCLOAK_CLIENT_SECRET=your_secret_here +``` + +--- + +## Runtime Errors + +### Repository Clone Fails + +**Error:** `fatal: repository not found` or `Permission denied (publickey)` + +**Cause:** Invalid repository URL or insufficient permissions. + +**Solution:** +1. Verify the GitHub URL is correct and accessible +2. For private repositories, ensure: + - Repository exists and you have access + - Authentication is configured correctly + - Organization SSO is authorized if applicable + +### Repository Too Large + +**Error:** `Repository exceeds maximum size of 10GB` + +**Cause:** Repository size exceeds MAX_REPO_SIZE limit. 
+ +**Solution:** +```bash +# Option 1: Increase limit in api/.env +MAX_REPO_SIZE=21474836480 # 20GB in bytes + +# Option 2: Use subfolder targeting +# Instead of: https://github.com/org/repo +# Use: https://github.com/org/repo/tree/main/backend +``` + +### File Size Limit Exceeded + +**Error:** `File too large (X bytes). Maximum is 1.0MB` + +**Cause:** Individual file exceeds MAX_FILE_SIZE limit. + +**Solution:** +```bash +# Increase file size limit in api/.env +MAX_FILE_SIZE=2000000 # 2MB in bytes + +# Or exclude large files by using subfolder targeting +``` + +### Too Many Files in Repository + +**Error:** `Repository has X files, maximum is 500` + +**Cause:** Repository exceeds MAX_FILES_TO_SCAN limit. + +**Solution:** +```bash +# Increase file scan limit in api/.env +MAX_FILES_TO_SCAN=1000 + +# Or use subfolder targeting for specific directories +``` + +--- + +## Agent Failures + +### Agent Timeout + +**Error:** `Agent execution timed out after 300 seconds` + +**Cause:** Agent taking too long to complete analysis. + +**Solution:** +```bash +# Increase timeout in api/.env +AGENT_TIMEOUT=600 # 10 minutes + +# Or reduce repository size using subfolder targeting +``` + +### Code Explorer Agent Fails + +**Error:** `CodeExplorer failed: No code files found` + +**Cause:** Repository contains no recognized programming language files. + +**Solution:** +- Verify repository contains source code files (.py, .js, .ts, .go, .rs, etc.) +- Check if files are in subdirectories (agents scan recursively) +- Ensure repository cloning completed successfully + +### API Reference Agent Returns Empty Results + +**Error:** `No API endpoints extracted` + +**Cause:** Repository doesn't contain API route definitions, or patterns not recognized. 
+ +**Solution:** +- This is normal for non-API projects (libraries, CLI tools) +- For API projects, ensure route definitions use standard patterns: + - FastAPI: `@router.get("/endpoint")`, `@app.post("/endpoint")` + - Flask: `@app.route("/endpoint")` + - Express: `app.get("/endpoint")` + - Spring: `@GetMapping("/endpoint")` + +### Mermaid Diagram Validation Fails + +**Error:** `Diagram has semantic issues: missing Backend node` + +**Cause:** Generated diagram doesn't match detected project structure. + +**Solution:** +- This is a warning, not a failure - diagram is still generated +- The system validates diagrams against detected components (backend, frontend, database) +- Check agent logs to see what was detected in Evidence Aggregator +- Diagram may need manual refinement if complex architecture + +### QA Validator Detects Issues + +**Error:** `QA validation failed: Low quality score` + +**Cause:** README sections don't match detected evidence or contain hallucinations. + +**Solution:** +- Review the QA validation output in agent logs +- Check if correct sections were generated by section-writer agents +- Verify Evidence Aggregator detected files correctly +- This doesn't block README generation - final output is still produced + +--- + +## Performance Issues + +### Slow Documentation Generation + +**Symptom:** Workflow takes longer than 5 minutes + +**Causes and Solutions:** + +1. **Large repository** + ```bash + # Reduce scan limits in api/.env + MAX_FILES_TO_SCAN=300 + MAX_LINES_PER_FILE=300 + ``` + +2. **High token usage** + - Check metrics summary in agent logs + - Typical usage: 20K-40K tokens per repository + - If much higher, repository may have very large files + +3. 
**Network latency to LLM backend** + - Ensure low latency network connection to GenAI Gateway + - Check GENAI_GATEWAY_URL is accessible + +### High Token Usage + +**Symptom:** Metrics show >60K tokens for small repository + +**Cause:** Strategic sampling not working efficiently or very verbose files. + +**Solution:** +```bash +# Reduce lines per file in api/.env +MAX_LINES_PER_FILE=300 + +# Agents automatically use pattern_window mode to minimize context +# This is the default and most efficient strategy +``` + +--- + +## Docker Issues + +### Backend Container Won't Start + +**Error:** `backend exited with code 1` + +**Solution:** +```bash +# Check backend logs for specific error +docker-compose logs backend + +# Common causes: +# 1. Missing .env file - solution above in Configuration Errors +# 2. Invalid Python dependencies - rebuild: +docker-compose build --no-cache backend +docker-compose up -d + +# 3. Port conflict - solution above in Installation Issues +``` + +### Frontend Container Build Fails + +**Error:** `npm install failed` or `Cannot find module` + +**Solution:** +```bash +# Rebuild frontend with clean cache +docker-compose build --no-cache frontend +docker-compose up -d frontend + +# Verify Node.js version in Dockerfile is compatible (16+) +``` + +### Container Memory Issues + +**Error:** `Killed` or `Out of memory` + +**Solution:** +```bash +# Increase Docker memory limit in Docker Desktop settings +# Recommended: 4GB minimum, 8GB preferred + +# Or reduce repository analysis limits: +MAX_FILES_TO_SCAN=200 +MAX_LINES_PER_FILE=300 +``` + +### Cannot Connect to Backend from Frontend + +**Error:** `Network error` or `Connection refused` in browser console + +**Cause:** Docker network misconfiguration or CORS issue. 
+
+**Solution:**
+```bash
+# Verify both containers are running
+docker-compose ps
+
+# Check backend is accessible
+curl http://localhost:5001/health
+
+# Verify CORS_ORIGINS in api/.env includes frontend URL
+CORS_ORIGINS=["http://localhost:3000"]
+
+# Restart services
+docker-compose restart
+```
+
+---
+
+## Network and API Errors
+
+### GenAI Gateway Connection Refused
+
+**Error:** `ConnectionRefusedError: [Errno 111] Connection refused`
+
+**Cause:** GenAI Gateway URL incorrect or service unavailable.
+
+**Solution:**
+```bash
+# Verify GENAI_GATEWAY_URL is correct in api/.env
+# Test connectivity:
+curl https://your-gateway-url.com/health
+
+# Check firewall/proxy settings allow outbound HTTPS
+```
+
+### LLM Request Timeout
+
+**Error:** `Timeout waiting for LLM response`
+
+**Cause:** LLM backend overloaded or network latency.
+
+**Solution:**
+```bash
+# Increase agent timeout in api/.env
+AGENT_TIMEOUT=600
+
+# Check LLM backend status
+# Contact infrastructure team if persistent
+```
+
+### Rate Limit Exceeded
+
+**Error:** `429 Too Many Requests` or `Rate limit exceeded`
+
+**Cause:** Too many concurrent requests to LLM backend.
+
+**Solution:**
+- Wait a few minutes before retrying
+- Reduce concurrent documentation generation jobs
+- Contact infrastructure team to increase rate limits
+- The system processes agents sequentially to minimize rate limit issues
+
+### GitHub API Rate Limit (PR Creation)
+
+**Error:** `403 API rate limit exceeded`
+
+**Cause:** Too many GitHub API calls within rate limit window.
+ +**Solution:** +- Wait 1 hour for rate limit reset +- Authenticated requests have higher limits (5000/hour vs 60/hour) +- Ensure GITHUB_TOKEN is configured correctly +- Check rate limit status: + ```bash + curl -H "Authorization: token YOUR_TOKEN" https://api.github.com/rate_limit + ``` + +--- + +## Advanced Troubleshooting + +### Enable Debug Logging + +To get more detailed logs for debugging: + +```bash +# Add to api/.env +LOG_LEVEL=DEBUG + +# Restart backend +docker-compose restart backend + +# View detailed logs +docker-compose logs -f backend +``` + +### Check Agent Execution Metrics + +Metrics are displayed in the agent logs panel after workflow completion: + +``` +📊 Workflow Metrics Summary +├─ Total Agents: 9 +├─ Successful: 9 +├─ Failed: 0 +├─ Total Duration: 155.59s +├─ Total Tokens: 33,135 +│ ├─ Input: 30,728 +│ └─ Output: 2,407 +├─ Total Tool Calls: 23 +├─ Total LLM Calls: 31 +└─ Average TPS: 15.47 tokens/sec +``` + +**Analysis:** +- **Failed agents**: Should be 0 for successful runs +- **Total tokens**: Typical range 20K-40K for medium repositories +- **Average TPS**: Depends on LLM backend performance +- **Tool calls**: Indicates how many tool invocations agents made + +### Manual Repository Cleanup + +If cloned repositories aren't cleaned up automatically: + +```bash +# Clean tmp directory +rm -rf api/tmp/repos/* + +# Or within Docker: +docker-compose exec backend rm -rf /app/tmp/repos/* +``` + +### Reset Application State + +To completely reset the application: + +```bash +# Stop and remove containers +docker-compose down -v + +# Remove temporary files +rm -rf api/tmp/* + +# Rebuild and restart +docker-compose build --no-cache +docker-compose up -d +``` + +--- + +## Getting Help + +If you continue to experience issues: + +1. **Check Logs:** Review backend logs with `docker-compose logs backend -f` +2. **Verify Configuration:** Ensure all required environment variables are set in `api/.env` +3. 
**Test Connectivity:** Verify network access to GenAI Gateway and GitHub +4. **Metrics Analysis:** Check workflow metrics for anomalies (token usage, duration, failed agents) +5. **Report Issues:** If the problem persists, collect: + - Error messages from logs + - Workflow metrics summary + - Repository URL (if not sensitive) + - Configuration (redact sensitive values) + +--- + +## Common Success Indicators + +A successful run should show: + +``` +✅ Repository cloned successfully +✅ Overview & Features sections completed +✅ Extracted X API endpoints +✅ Architecture section completed +✅ Troubleshooting section completed +✅ Configuration section completed +✅ Prerequisites & Deployment sections completed +✅ Evidence aggregated: X Python deps, Y Node deps +✅ Planner completed - Z sections planned +✅ Mermaid Generator completed +✅ QA Validator completed (Score: XX) +✅ Documentation generation complete! +📊 Workflow Metrics Summary +``` + +All agents should complete successfully with metrics showing reasonable token usage and no failed agents. 
diff --git a/sample_solutions/Docugen-Microagents/api/.dockerignore b/sample_solutions/Docugen-Microagents/api/.dockerignore new file mode 100644 index 00000000..2c7d2958 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/.dockerignore @@ -0,0 +1,47 @@ +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +*.egg-info/ +dist/ +build/ +*.egg + +venv/ +env/ +ENV/ +.venv + +.vscode/ +.idea/ +*.swp +*.swo +*~ + +.pytest_cache/ +.coverage +htmlcov/ +*.cover + +tmp/ +*.log +logs/ + +.env +.env.local +.env.*.local + +.git/ +.gitignore + +README.md +TROUBLESHOOTING.md +*.md + +Dockerfile +.dockerignore +docker-compose.yml + +.DS_Store +Thumbs.db diff --git a/sample_solutions/Docugen-Microagents/api/.env.example b/sample_solutions/Docugen-Microagents/api/.env.example new file mode 100644 index 00000000..c5e7a1a8 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/.env.example @@ -0,0 +1,86 @@ +# ========================================== +# Inference API Configuration +# ========================================== +# INFERENCE_API_ENDPOINT: URL to your inference service (without /v1 suffix) +# +# **GenAI Gateway**: Provide your GenAI Gateway URL and API key +# - URL format: https://genai-gateway.example.com +# - To generate the GenAI Gateway API key, use the [generate-vault-secrets.sh](https://github.com/opea-project/Enterprise-Inference/blob/main/core/scripts/generate-vault-secrets.sh) script +# - The API key is the litellm_master_key value from the generated vault.yml file +# +# **APISIX Gateway**: Provide your APISIX Gateway URL and authentication token +# - For APISIX, include the model name in the INFERENCE_API_ENDPOINT path +# - Example: https://apisix-gateway.example.com/Qwen3-4B-Instruct +# - To generate the APISIX authentication token, use the [generate-token.sh](https://github.com/opea-project/Enterprise-Inference/blob/main/core/scripts/generate-token.sh) script +# - The token is generated using Keycloak client 
credentials +# +# INFERENCE_API_TOKEN: Authentication token/API key for the inference service +INFERENCE_API_ENDPOINT=https://api.example.com +INFERENCE_API_TOKEN=your-pre-generated-token-here + +# ========================================== +# Docker Network Configuration +# ========================================== +# LOCAL_URL_ENDPOINT: Required if using local domain mapping (e.g., api.example.com -> localhost) +# Set to your domain name (without https://) or leave as "not-needed" if using public URLs +LOCAL_URL_ENDPOINT=not-needed + +# ========================================== +# Micro-Agent Model Configuration +# ========================================== +# All agents use Qwen3-4B-Instruct (optimized SLM for code analysis) +# You can customize individual agent models if needed + +CODE_EXPLORER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +API_REFERENCE_MODEL=Qwen/Qwen3-4B-Instruct-2507 +CALL_GRAPH_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ERROR_ANALYSIS_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ENV_CONFIG_MODEL=Qwen/Qwen3-4B-Instruct-2507 +DEPENDENCY_ANALYZER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +PLANNER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +MERMAID_MODEL=Qwen/Qwen3-4B-Instruct-2507 +QA_VALIDATOR_MODEL=Qwen/Qwen3-4B-Instruct-2507 +WRITER_MODEL=Qwen/Qwen3-4B-Instruct-2507 + +# ========================================== +# Repository Analysis Limits +# ========================================== +# All limits are configurable to suit your needs +TEMP_REPO_DIR=./tmp/repos +MAX_REPO_SIZE=10737418240 # 10GB in bytes +MAX_FILE_SIZE=1000000 # 1MB in bytes +MAX_FILES_TO_SCAN=500 # Maximum number of files to analyze +MAX_LINES_PER_FILE=500 # Line budget per file (pattern_window strategy extracts ~150-300 lines focusing on key patterns) + +# ========================================== +# Agent Execution Settings +# ========================================== +AGENT_TEMPERATURE=0.7 # Model temperature (0.0-1.0) +AGENT_MAX_TOKENS=1000 # Maximum tokens per agent response +AGENT_TIMEOUT=300 # Agent 
timeout in seconds (5 minutes)
+
+# ==========================================
+# GitHub Integration (MCP)
+# ==========================================
+# Required for automatic PR creation
+# Generate token at: https://github.com/settings/tokens
+# Required scopes: repo (full access to repositories)
+GITHUB_TOKEN=your_github_personal_access_token_here
+
+# ==========================================
+# Server Configuration
+# ==========================================
+API_PORT=5001
+HOST=0.0.0.0
+
+# ==========================================
+# CORS Settings
+# ==========================================
+# Add your frontend URL if different from defaults
+CORS_ORIGINS=["http://localhost:3000", "http://localhost:3001", "http://localhost:5173"]
+
+# ==========================================
+# Security Configuration
+# ==========================================
+# SSL Verification: Set to false only for development with self-signed certificates
+VERIFY_SSL=true
diff --git a/sample_solutions/Docugen-Microagents/api/Dockerfile b/sample_solutions/Docugen-Microagents/api/Dockerfile
new file mode 100644
index 00000000..25eb67bc
--- /dev/null
+++ b/sample_solutions/Docugen-Microagents/api/Dockerfile
@@ -0,0 +1,52 @@
+# Single-stage image for the DocuGen AI Backend
+FROM python:3.11-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies including git, curl, and Docker CLI for MCP
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    git \
+    curl \
+    build-essential \
+    ca-certificates \
+    gnupg \
+    lsb-release \
+    && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg \
+    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \
+    && apt-get update \
+    && apt-get install -y 
--no-install-recommends docker-ce-cli \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements first for better caching +COPY requirements.txt . + +# Install Python dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . . + +# Create temp directory for repo cloning +RUN mkdir -p /app/tmp/repos + +# Create non-root user +RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app + +# Set environment variables +ENV PYTHONUNBUFFERED=1 +ENV PYTHONDONTWRITEBYTECODE=1 + +# Expose port +EXPOSE 5001 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ + CMD curl -f http://localhost:5001/health || exit 1 + +# Switch to non-root user +USER appuser + +# Run the application +# Disable reload to prevent server restart when cloning repos into tmp/ +CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "5001"] diff --git a/sample_solutions/Docugen-Microagents/api/agents/__init__.py b/sample_solutions/Docugen-Microagents/api/agents/__init__.py new file mode 100644 index 00000000..f2db88af --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/__init__.py @@ -0,0 +1,35 @@ +""" +LangChain Agents for DocuGen AI +Each agent is autonomous with its own tools and reasoning capabilities +""" + +# 10-agent simplified system (optimized for 8K context) +from .code_explorer_agent import run_code_explorer_agent +from .api_reference_agent import run_api_reference_agent +from .call_graph_agent import run_call_graph_agent +from .error_analysis_agent import run_error_analysis_agent +from .env_config_agent import run_env_config_agent +from .dependency_analyzer_agent import run_dependency_analyzer_agent +from .planner_agent import run_planner_agent +from .writer_agent_sectioned import run_writer_agent_sectioned +from .mermaid_agent import run_mermaid_agent +from .qa_validator_agent import run_qa_validator_agent + +# PR Agent for MCP +from .pr_agent_mcp import create_pr_with_mcp + +__all__ = 
[ + # Simplified micro-agents + "run_code_explorer_agent", + "run_api_reference_agent", + "run_call_graph_agent", + "run_error_analysis_agent", + "run_env_config_agent", + "run_dependency_analyzer_agent", + "run_planner_agent", + "run_writer_agent_sectioned", + "run_mermaid_agent", + "run_qa_validator_agent", + # MCP PR agent + "create_pr_with_mcp" +] diff --git a/sample_solutions/Docugen-Microagents/api/agents/api_reference_agent.py b/sample_solutions/Docugen-Microagents/api/agents/api_reference_agent.py new file mode 100644 index 00000000..9e5ce35c --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/api_reference_agent.py @@ -0,0 +1,154 @@ +""" +API Reference Agent - SIMPLIFIED for 8K context models + +Extracts API endpoints, HTTP methods, parameters, and response models from FastAPI/Flask codebases. + +Follows CodeExplorer proven pattern: +- ≤3 tools (minimal schema overhead) +- pattern_window strategy by default (detects @app.get, @router.post) +- Minimal prompt (~200 tokens) +- No inline metrics +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + +# Data Extraction Prompt - outputs JSON data for Mermaid agent, NOT markdown sections +API_REFERENCE_PROMPT = """You are the API Endpoint Data Extractor. Extract API endpoint information for diagram generation. + +**YOUR JOB:** +Use tools to find actual API endpoints and extract their data. DO NOT write markdown sections. + +**TOOLS:** +- find_entry_points() - find main server files (server.py, main.py, app.py) +- read_file(file_path) - read files (detects @app.get, @router.post, @app.route) +- list_directory(relative_path) - list files + +**WORKFLOW:** +1. find_entry_points() to locate server files +2. 
read_file() on server files to find route decorators
+3. Extract: HTTP method, path, description from code
+
+**CRITICAL RULES:**
+1. ONLY extract endpoints you actually find in code
+2. DO NOT write markdown sections - only return structured data
+3. DO NOT invent example endpoints
+4. Use actual paths/methods from code
+
+**OUTPUT FORMAT (JSON data structure):**
+```json
+{
+  "endpoints": [
+    {
+      "method": "GET",
+      "path": "/",
+      "description": "Health check endpoint"
+    },
+    {
+      "method": "POST",
+      "path": "/upload",
+      "description": "Upload file for processing"
+    }
+  ],
+  "endpoint_count": 2
+}
+```
+
+If NO endpoints found:
+```json
+{
+  "endpoints": [],
+  "endpoint_count": 0
+}
+```
+
+Return ONLY the JSON object, nothing else. No markdown, no explanations.
+
+**Limit:** 20 tool calls."""
+
+
+async def run_api_reference_agent(
+    llm: BaseChatModel,
+    repo_path: str,
+    job_id: str
+) -> Dict[str, Any]:
+    """
+    Run the simplified API Reference micro-agent over a cloned repository.
+
+    Builds a minimal 3-tool ReAct agent (list_directory, read_file,
+    find_entry_points) and asks it to extract API endpoint data as JSON,
+    per API_REFERENCE_PROMPT.
+
+    Args:
+        llm: Language model that drives the ReAct loop
+        repo_path: Filesystem path of the already-cloned repository
+        job_id: Job ID used to tag streamed agent events
+
+    Returns:
+        Results dict with success flag and output. On success the dict also
+        contains whatever extract_agent_metrics pulls from the message
+        history; on failure it carries an "error" string instead of raising,
+        so callers can check the success flag.
+    """
+    try:
+        # Create minimal tool set (3 tools only).
+        # Each tool is a closure over repo_path, so the schema exposed to
+        # the LLM only contains the simple string arguments.
+        @tool
+        def list_directory(relative_path: str = ".") -> str:
+            """List directory. Args: relative_path (str)"""
+            # Imported lazily inside the tool — presumably to keep module
+            # import light / avoid cycles; confirm against tools package.
+            from tools.repo_tools import list_directory_tool
+            return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path)
+
+        @tool
+        def read_file(file_path: str) -> str:
+            """Read file with strategic sampling. Args: file_path (str)"""
+            from tools.repo_tools import read_file_tool
+            # Use pattern_window strategy to detect FastAPI routes (@app.get, @router.post)
+            return read_file_tool.func(repo_path=repo_path, file_path=file_path, strategy="pattern_window")
+
+        @tool
+        def find_entry_points() -> str:
+            """Find main entry point files (main.py, server.py, app.py). No args."""
+            from tools.repo_tools import find_entry_points_tool
+            return find_entry_points_tool.func(repo_path=repo_path)
+
+        tools = [list_directory, read_file, find_entry_points]
+
+        # Create agent
+        agent = create_react_agent(model=llm, tools=tools)
+
+        # Create callback logger that streams this agent's events under job_id
+        event_logger = create_agent_logger(job_id=job_id, agent_name="APIReference")
+
+        # Execute with callback.
+        # recursion_limit caps total graph steps; the prompt also asks the
+        # model to stay within 20 tool calls.
+        result = await agent.ainvoke(
+            {"messages": [
+                ("system", API_REFERENCE_PROMPT),
+                ("user", "Extract API endpoint data as JSON. Start with find_entry_points().")
+            ]},
+            config={
+                "recursion_limit": 20,
+                "callbacks": [event_logger]
+            }
+        )
+
+        # Extract output: the last message is the agent's final answer
+        # (expected to be the bare JSON object requested by the prompt).
+        messages = result.get("messages", [])
+        final_output = messages[-1].content if messages else ""
+
+        # Extract metrics from messages
+        metrics = extract_agent_metrics(messages)
+
+        return {
+            "success": True,
+            "output": final_output,
+            "agent": "APIReference",
+            **metrics
+        }
+
+    except Exception as e:
+        # Report failure as data rather than raising, so the surrounding
+        # workflow can record a failed agent and continue.
+        logger.error(f"APIReference failed: {e}")
+        return {
+            "success": False,
+            "error": str(e),
+            "agent": "APIReference"
+        }
diff --git a/sample_solutions/Docugen-Microagents/api/agents/call_graph_agent.py b/sample_solutions/Docugen-Microagents/api/agents/call_graph_agent.py
new file mode 100644
index 00000000..3fb9589c
--- /dev/null
+++ b/sample_solutions/Docugen-Microagents/api/agents/call_graph_agent.py
@@ -0,0 +1,158 @@
+"""
+Call Graph Agent - Architecture Section Writer
+
+Writes the complete "## Architecture" section for README.
+ +Section writer pattern: +- ≤3 tools +- smart strategy (extracts function signatures, not full bodies) +- Outputs complete markdown section +- Also outputs structured call_graph data for Mermaid agent +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + +# Section Writer Prompt - outputs complete "## Architecture" section with explanation +CALL_GRAPH_PROMPT = """You are the Architecture Section Writer. Write the complete "## Architecture" README section with clear explanation. + +**YOUR JOB:** +Use tools to analyze code structure and function relationships, then write a clear Architecture section explaining how components work together. + +**TOOLS:** +- list_directory(relative_path) - list files +- read_file(file_path) - read file (uses smart strategy for signatures) +- analyze_code_structure(file_path) - get function/class list from Python files + +**WORKFLOW:** +1. list_directory() to find main code files +2. analyze_code_structure() to get quick overview of key files +3. read_file() on important files to understand flow +4. Identify: components, layers, data flow, key modules +5. Write the section with explanation + note about diagram + +**CRITICAL RULES:** +1. ONLY describe architecture you can see in the code +2. Start with 2-3 sentences explaining the overall architecture +3. Describe how components interact and data flows +4. Add note: "The architecture diagram below visualizes component relationships and data flow." +5. If minimal code found → output: "## Architecture\n\nMinimal codebase. Refer to source files for structure." +6. 
DO NOT invent components or layers not found in code
+
+**OUTPUT FORMAT (complete markdown section):**
+```
+## Architecture
+
+[2-3 sentences explaining the overall architecture pattern, e.g., "The application follows a client-server architecture with a FastAPI backend and React frontend. The backend handles data processing and API endpoints, while the frontend provides the user interface. Data flows from user input through the API to backend services and back to the UI."]
+
+### Components
+
+**[Component Name]** (`path/to/file.py`)
+- Purpose: [What it does based on code]
+- Key functions: [Actual functions found]
+
+**[Another Component]** (`path/to/file.py`)
+- Purpose: [What it does based on code]
+- Key functions: [Actual functions found]
+
+### Data Flow
+
+1. User → [Entry point] receives [input]
+2. [Entry point] → [Module/Service] processes data
+3. [Service] → [Database/API] stores/retrieves information
+4. Response flows back through the stack to user
+
+The architecture diagram below visualizes these component relationships and data flow.
+```
+
+Start your output with "## Architecture" heading. Include the diagram note at the end.
+
+**Limit:** 25 tool calls."""
+
+
+async def run_call_graph_agent(
+    llm: BaseChatModel,
+    repo_path: str,
+    job_id: str
+) -> Dict[str, Any]:
+    """
+    Run the simplified Call Graph micro-agent (Architecture section writer).
+
+    Builds a 3-tool ReAct agent (list_directory, read_file,
+    analyze_code_structure) and asks it to write the "## Architecture"
+    markdown section, per CALL_GRAPH_PROMPT.
+
+    Args:
+        llm: Language model that drives the ReAct loop
+        repo_path: Filesystem path of the already-cloned repository
+        job_id: Job ID used to tag streamed agent events
+
+    Returns:
+        Results dict with success flag and output. On success the dict also
+        contains whatever extract_agent_metrics pulls from the message
+        history; on failure it carries an "error" string instead of raising,
+        so callers can check the success flag.
+    """
+    try:
+        # Create minimal tool set (3 tools only).
+        # Each tool is a closure over repo_path, so the schema exposed to
+        # the LLM only contains the simple string arguments.
+        @tool
+        def list_directory(relative_path: str = ".") -> str:
+            """List directory. Args: relative_path (str)"""
+            # Imported lazily inside the tool — presumably to keep module
+            # import light / avoid cycles; confirm against tools package.
+            from tools.repo_tools import list_directory_tool
+            return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path)
+
+        @tool
+        def read_file(file_path: str) -> str:
+            """Read file with smart sampling (signatures only). Args: file_path (str)"""
+            from tools.repo_tools import read_file_tool
+            # Use smart strategy: top + signatures + bottom (good for call graph)
+            return read_file_tool.func(repo_path=repo_path, file_path=file_path, strategy="smart")
+
+        @tool
+        def analyze_code_structure(file_path: str) -> str:
+            """Analyze Python file structure (functions, classes). Args: file_path (str)"""
+            from tools.repo_tools import analyze_code_structure_tool
+            return analyze_code_structure_tool.func(repo_path=repo_path, file_path=file_path)
+
+        tools = [list_directory, read_file, analyze_code_structure]
+
+        # Create agent
+        agent = create_react_agent(model=llm, tools=tools)
+
+        # Create callback logger that streams this agent's events under job_id
+        event_logger = create_agent_logger(job_id=job_id, agent_name="CallGraph")
+
+
+        # Execute.
+        # recursion_limit 40 leaves headroom beyond the prompt's stated
+        # 25-tool-call limit (each tool call consumes multiple graph steps).
+        result = await agent.ainvoke(
+            {"messages": [
+                ("system", CALL_GRAPH_PROMPT),
+                ("user", "Write the Architecture section. Start with list_directory().")
+            ]},
+            config={
+                "recursion_limit": 40,
+                "callbacks": [event_logger]
+            }
+        )
+
+        # Extract output: the last message is the agent's final answer
+        # (expected to be the markdown section requested by the prompt).
+        messages = result.get("messages", [])
+        final_output = messages[-1].content if messages else ""
+
+        # Extract metrics from messages
+        metrics = extract_agent_metrics(messages)
+
+        return {
+            "success": True,
+            "output": final_output,
+            "agent": "CallGraph",
+            **metrics
+        }
+
+    except Exception as e:
+        # Report failure as data rather than raising, so the surrounding
+        # workflow can record a failed agent and continue.
+        logger.error(f"CallGraph failed: {e}")
+        return {
+            "success": False,
+            "error": str(e),
+            "agent": "CallGraph"
+        }
diff --git a/sample_solutions/Docugen-Microagents/api/agents/code_explorer_agent.py b/sample_solutions/Docugen-Microagents/api/agents/code_explorer_agent.py
new file mode 100644
index 00000000..c86ceda0
--- /dev/null
+++ b/sample_solutions/Docugen-Microagents/api/agents/code_explorer_agent.py
@@ -0,0 +1,194 @@
+"""
+Code Explorer Agent - Overview & Features Section Writer
+
+Writes the complete "## Project Overview" and "## Features" sections for README.
+ +Section writer pattern: +- ≤3 tools +- pattern_window strategy for quick file analysis +- Outputs TWO complete markdown sections +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + +# Section Writer Prompt - outputs THREE complete sections +EXPLORER_PROMPT = """You are the Overview, Features & User Interface Section Writer. Write THREE complete README sections. + +**YOUR JOB:** +Use tools to understand the project structure and purpose, then write Project Overview, Features, and User Interface sections. + +**TOOLS:** +- list_directory(relative_path) - list files in directories +- read_file(file_path) - read file with pattern detection +- detect_languages() - get programming languages used + +**WORKFLOW:** +1. list_directory('.') to see project structure +2. detect_languages() to understand tech stack +3. read_file() on key files (README.md if exists, main files) to understand purpose +4. Identify user-facing capabilities (not technical implementation) +5. Check for frontend directories (ui/, frontend/, client/, web/, src/) +6. Write ALL THREE sections + +**CRITICAL RULES - Project Overview:** +1. 1-3 sentences ONLY +2. Explain WHAT the project does and WHY it exists +3. NO tech stack lists, NO "Repository Information", NO "Primary Language" +4. Focus on user value, not implementation details + +**CRITICAL RULES - Features:** +1. ONLY list user-facing capabilities you can confirm from code +2. NO API endpoints (POST /, GET /), NO routes, NO technical implementation +3. Features = what users CAN DO, not how it's built +4. Group by Backend/Frontend if applicable +5. If minimal code → output: "Basic [project type] structure." + +**CRITICAL RULES - User Interface:** +1. 
ONLY write if frontend directory exists (ui/, frontend/, client/, web/) +2. Describe frontend technology used (React, Vue, Angular, vanilla JS) +3. Key UI components or pages found +4. User workflow/experience +5. If NO frontend found → output: "No dedicated user interface. This is a backend/API project." + +**OUTPUT FORMAT (THREE complete markdown sections):** +``` +## Project Overview + +[1-3 sentences describing what this project does and why it's useful] + +## Features + +**Backend:** +- Feature 1: [Backend capability based on code found] +- Feature 2: [Another backend capability] + +**Frontend:** +- Feature 3: [User interface capability] +- Feature 4: [Another UI capability] + +## User Interface + +The frontend is built with [React/Vue/Angular/etc.] providing [description of UI]. + +Key interface elements: +- [Component/page 1] - [Purpose] +- [Component/page 2] - [Purpose] + +User workflow: +1. [Step 1 in user journey] +2. [Step 2 in user journey] +``` + +If NO frontend found: +``` +## User Interface + +No dedicated user interface. This is a backend/API project. Interact with the application through API endpoints or CLI commands. +``` + +If minimal/empty repo: +``` +## Project Overview + +Minimal repository with basic project structure. + +## Features + +Basic project scaffolding. Refer to source files for details. + +## User Interface + +No user interface implemented yet. +``` + +Start with "## Project Overview", then "## Features", then "## User Interface". + +**Limit:** 25 tool calls.""" + + +async def run_code_explorer_agent( + llm: BaseChatModel, + repo_path: str, + job_id: str +) -> Dict[str, Any]: + """ + Simplified Code Explorer - minimal context usage + + Args: + llm: Language model + repo_path: Repository path + job_id: Job ID + + Returns: + Results dict with success flag and output + """ + try: + # Create minimal tool set (3 tools only) + @tool + def list_directory(relative_path: str = ".") -> str: + """List directory. 
Args: relative_path (str)""" + from tools.repo_tools import list_directory_tool + return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path) + + @tool + def read_file(file_path: str) -> str: + """Read file with strategic sampling. Args: file_path (str)""" + from tools.repo_tools import read_file_tool + # Use pattern_window strategy to detect FastAPI routes, error handlers, etc. + return read_file_tool.func(repo_path=repo_path, file_path=file_path, strategy="pattern_window") + + @tool + def detect_languages() -> str: + """Detect languages. No args.""" + from tools.repo_tools import detect_languages_tool + return detect_languages_tool.func(repo_path=repo_path) + + tools = [list_directory, read_file, detect_languages] + + # Create agent + agent = create_react_agent(model=llm, tools=tools) + + # Create callback logger for ReAct visibility + event_logger = create_agent_logger(job_id=job_id, agent_name="CodeExplorer") + + # Execute with callback + result = await agent.ainvoke( + {"messages": [ + ("system", EXPLORER_PROMPT), + ("user", "Write the Project Overview, Features, and User Interface sections. 
Start with list_directory(relative_path='.').") + ]}, + config={ + "recursion_limit": 25, + "callbacks": [event_logger] + } + ) + + # Extract output + messages = result.get("messages", []) + final_output = messages[-1].content if messages else "" + + # Extract metrics from messages + metrics = extract_agent_metrics(messages) + + return { + "success": True, + "output": final_output, + "agent": "CodeExplorer", + **metrics + } + + except Exception as e: + logger.error(f"CodeExplorer failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "CodeExplorer" + } diff --git a/sample_solutions/Docugen-Microagents/api/agents/dependency_analyzer_agent.py b/sample_solutions/Docugen-Microagents/api/agents/dependency_analyzer_agent.py new file mode 100644 index 00000000..3a235238 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/dependency_analyzer_agent.py @@ -0,0 +1,181 @@ +""" +Dependency Analyzer Agent - Prerequisites & Deployment Section Writer + +Writes the complete "## Prerequisites" and "## Quick Start Deployment" sections for README. + +Section writer pattern: +- ≤3 tools +- full strategy for package files (they're small) +- Outputs TWO complete markdown sections +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain_core.messages import AIMessage +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + + +def _get_final_assistant_text(messages) -> str: + """ + Extract the last non-empty AIMessage content from LangGraph result. + + FIX: messages[-1] is not guaranteed to be the final assistant answer. 
+ """ + for m in reversed(messages or []): + if isinstance(m, AIMessage) and isinstance(getattr(m, "content", None), str): + txt = m.content.strip() + if txt: + return txt + return (messages[-1].content or "").strip() if messages else "" + +# SIMPLIFIED Prompt for SLMs - NO templates, just instructions +def _build_dependency_prompt(repo_url: str) -> str: + """Build prompt with actual repository URL""" + return f"""You write Prerequisites and Deployment sections for README files. + +**REPOSITORY URL:** {repo_url} + +**YOUR TASK:** +1. First, call extract_dependencies() and list_directory(".") to see what's in the repo +2. Based on what you find, write TWO sections + +**SECTION 1: ## Prerequisites** +- List what needs to be installed (Python, Node.js, Docker) +- Only list if you found the files (requirements.txt = Python, package.json = Node, docker-compose.yml = Docker) +- Keep it short + +**SECTION 2: ## Quick Start Deployment** +This section has TWO subsections (use ### for subsections!): + +**Subsection A: ### Installation** +- Show how to clone the repository using the ACTUAL URL above +- Tell how to install dependencies you found +- Use actual paths (if found api/requirements.txt, write "cd api && pip install -r requirements.txt") + +**Subsection B: ### Running the Application** +- If docker-compose.yml exists, show docker command +- Otherwise, show how to run the entry point you find (server.py, app.py, main.py, etc.) + +**CRITICAL FORMATTING:** +- Main sections use ## (two hashtags) +- Subsections use ### (three hashtags) +- Installation and Running are SUBSECTIONS (use ###, NOT ##) +- Use the ACTUAL repository URL provided above, not placeholders + +**EXAMPLE STRUCTURE (not the content, just the heading pattern):** +## Prerequisites +...content... + +## Quick Start Deployment +### Installation +Clone the repository: +```bash +git clone {repo_url} +cd repo-name +``` +...other steps... +### Running the Application +...commands... 
+ +Now analyze the repo and write these sections.""" + + +async def run_dependency_analyzer_agent( + llm: BaseChatModel, + repo_path: str, + job_id: str, + repo_url: str = None +) -> Dict[str, Any]: + """ + Simplified Dependency Analyzer Agent + + Args: + llm: Language model + repo_path: Repository path + job_id: Job ID + repo_url: Repository URL (for clone instructions) + + Returns: + Results dict with success flag and output + """ + try: + # Use placeholder if repo_url not provided + if not repo_url: + repo_url = "https://github.com/yourusername/your-repo.git" + + # Create tool set with project structure analysis capabilities + @tool + def list_directory(relative_path: str = ".") -> str: + """List files and folders in a directory. Args: relative_path (str)""" + from tools.repo_tools import list_directory_tool + return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path) + + @tool + def find_dependency_files() -> str: + """Find dependency files (requirements.txt, package.json, etc.). No args.""" + from tools.repo_tools import find_dependency_files_tool + return find_dependency_files_tool.func(repo_path=repo_path) + + @tool + def read_file(file_path: str) -> str: + """Read file with full content. Args: file_path (str)""" + from tools.repo_tools import read_file_tool + # Use full strategy: package files are small + return read_file_tool.func(repo_path=repo_path, file_path=file_path, strategy="full") + + @tool + def extract_dependencies() -> str: + """Extract all dependencies from common package files. 
No args.""" + from tools.repo_tools import extract_dependencies_tool + return extract_dependencies_tool.func(repo_path=repo_path) + + tools = [list_directory, find_dependency_files, read_file, extract_dependencies] + + # Create agent + agent = create_react_agent(model=llm, tools=tools) + + # Create callback logger + event_logger = create_agent_logger(job_id=job_id, agent_name="DependencyAnalyzer") + + # Build prompt with actual repo URL + prompt = _build_dependency_prompt(repo_url) + + # Execute with simplified user message + result = await agent.ainvoke( + {"messages": [ + ("system", prompt), + ("user", "Write the sections. Start: call extract_dependencies()") + ]}, + config={ + "recursion_limit": 12, # Reduced for SLMs: 2-3 tools + reasoning + output + "callbacks": [event_logger] + } + ) + + # Extract output - FIX: Use helper to get last AIMessage + messages = result.get("messages", []) + final_output = _get_final_assistant_text(messages) + + # Extract metrics from messages + metrics = extract_agent_metrics(messages) + + return { + "success": True, + "output": final_output, + "agent": "DependencyAnalyzer", + **metrics + } + + except Exception as e: + logger.error(f"DependencyAnalyzer failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "DependencyAnalyzer" + } diff --git a/sample_solutions/Docugen-Microagents/api/agents/env_config_agent.py b/sample_solutions/Docugen-Microagents/api/agents/env_config_agent.py new file mode 100644 index 00000000..f90551fa --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/env_config_agent.py @@ -0,0 +1,150 @@ +""" +Environment Config Agent - Configuration Section Writer + +Writes the complete "## Configuration" section for README. 
+ +Section writer pattern: +- ≤3 tools +- full strategy (config files are small) +- Outputs complete markdown section +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain_core.messages import AIMessage +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + + +def _get_final_assistant_text(messages) -> str: + """ + Extract the last non-empty AIMessage content from LangGraph result. + + FIX: messages[-1] is not guaranteed to be the final assistant answer. + """ + for m in reversed(messages or []): + if isinstance(m, AIMessage) and isinstance(getattr(m, "content", None), str): + txt = m.content.strip() + if txt: + return txt + return (messages[-1].content or "").strip() if messages else "" + +# SIMPLIFIED for SLMs - No templates +ENV_CONFIG_PROMPT = """You write Configuration sections for README files. + +**YOUR TASK:** +1. Call find_config_files() to find .env or config files +2. If found, call read_file() on the config file to see variable names +3. Write "## Configuration" section documenting the variables + +**WHAT TO WRITE:** +- Start with heading: ## Configuration +- If you found config files, list the environment variables +- Group variables by category (Database, API, Server, etc.) +- Use actual variable names from the files +- Use placeholder values like or your_key_here +- If NO config files found, write: "## Configuration\n\nNo environment configuration files found." 
+ +**HEADING FORMAT:** +- Use ## for main section: ## Configuration +- Use ### for subsection: ### Environment Variables + +**STRUCTURE EXAMPLE:** +## Configuration +### Environment Variables +Create a .env file: +```bash +DATABASE_URL= +API_KEY= +``` +**Required:** +- DATABASE_URL - Database connection +- API_KEY - API authentication + +Only document variables you actually find. Keep it simple.""" + + +async def run_env_config_agent( + llm: BaseChatModel, + repo_path: str, + job_id: str +) -> Dict[str, Any]: + """ + Simplified Env Config Agent + + Args: + llm: Language model + repo_path: Repository path + job_id: Job ID + + Returns: + Results dict with success flag and output + """ + try: + # Create minimal tool set (3 tools only) + @tool + def list_directory(relative_path: str = ".") -> str: + """List directory. Args: relative_path (str)""" + from tools.repo_tools import list_directory_tool + return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path) + + @tool + def read_file(file_path: str) -> str: + """Read config file. Args: file_path (str)""" + from tools.repo_tools import read_file_tool + # Use full strategy: config files are small, read everything + return read_file_tool.func(repo_path=repo_path, file_path=file_path, strategy="full") + + @tool + def find_config_files() -> str: + """Find configuration files (.env, config files). No args.""" + from tools.repo_tools import find_config_files_tool + return find_config_files_tool.func(repo_path=repo_path) + + tools = [list_directory, read_file, find_config_files] + + # Create agent + agent = create_react_agent(model=llm, tools=tools) + + # Create callback logger + event_logger = create_agent_logger(job_id=job_id, agent_name="EnvConfig") + + + # Execute with simplified message + result = await agent.ainvoke( + {"messages": [ + ("system", ENV_CONFIG_PROMPT), + ("user", "Write the Configuration section. 
Call find_config_files()") + ]}, + config={ + "recursion_limit": 8, + "callbacks": [event_logger] + } + ) + + # Extract output - FIX: Use helper to get last AIMessage + messages = result.get("messages", []) + final_output = _get_final_assistant_text(messages) + + # Extract metrics from messages + metrics = extract_agent_metrics(messages) + + return { + "success": True, + "output": final_output, + "agent": "EnvConfig", + **metrics + } + + except Exception as e: + logger.error(f"EnvConfig failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "EnvConfig" + } diff --git a/sample_solutions/Docugen-Microagents/api/agents/error_analysis_agent.py b/sample_solutions/Docugen-Microagents/api/agents/error_analysis_agent.py new file mode 100644 index 00000000..ae75e66f --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/error_analysis_agent.py @@ -0,0 +1,136 @@ +""" +Error Analysis Agent - Troubleshooting Section Writer + +Writes the complete "## Troubleshooting" section for README. + +Section writer pattern: +- ≤3 tools +- pattern_window strategy (detects try:, except, raise) +- Outputs complete markdown section +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + +# SIMPLIFIED for SLMs - Generic troubleshooting +ERROR_ANALYSIS_PROMPT = """You write Troubleshooting sections for README files. + +**YOUR TASK:** +1. Optionally call list_directory(".") to see project type +2. 
Write "## Troubleshooting" section with GENERIC advice for common problems + +**WHAT TO WRITE:** +Write general troubleshooting guidance for these categories: +- Dependency Issues +- Environment Variables +- Server Errors +- (File/Upload Errors if project handles files) + +**HEADING FORMAT:** +- Main: ## Troubleshooting +- Subsections: ### Dependency Issues, ### Environment Variables, etc. + +**RULES:** +- Write GENERIC advice (not specific exception names) +- Keep it user-friendly and practical +- Include basic commands +- Mention checking logs + +**STRUCTURE EXAMPLE:** +## Troubleshooting +### Dependency Issues +...advice... +### Environment Variables +...advice... +### Server Errors +...advice... + +Do NOT copy this template. Write your own content based on project type.""" + + +async def run_error_analysis_agent( + llm: BaseChatModel, + repo_path: str, + job_id: str +) -> Dict[str, Any]: + """ + Simplified Error Analysis Agent + + Args: + llm: Language model + repo_path: Repository path + job_id: Job ID + + Returns: + Results dict with success flag and output + """ + try: + # Create minimal tool set (3 tools only) + @tool + def list_directory(relative_path: str = ".") -> str: + """List directory. Args: relative_path (str)""" + from tools.repo_tools import list_directory_tool + return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path) + + @tool + def read_file(file_path: str) -> str: + """Read file with pattern matching (finds try/except/raise). Args: file_path (str)""" + from tools.repo_tools import read_file_tool + # Use pattern_window: detects try:, except, raise patterns + return read_file_tool.func(repo_path=repo_path, file_path=file_path, strategy="pattern_window") + + @tool + def find_error_handlers() -> str: + """Find all try/except blocks across Python files. 
No args.""" + from tools.new_analysis_tools import find_error_handlers_tool + return find_error_handlers_tool.func(repo_path=repo_path) + + tools = [list_directory, read_file, find_error_handlers] + + # Create agent + agent = create_react_agent(model=llm, tools=tools) + + # Create callback logger + event_logger = create_agent_logger(job_id=job_id, agent_name="ErrorAnalysis") + + + # Execute with simplified message + result = await agent.ainvoke( + {"messages": [ + ("system", ERROR_ANALYSIS_PROMPT), + ("user", "Write the Troubleshooting section with generic advice.") + ]}, + config={ + "recursion_limit": 10, # Reduced for SLMs + "callbacks": [event_logger] + } + ) + + # Extract output + messages = result.get("messages", []) + final_output = messages[-1].content if messages else "" + + # Extract metrics from messages + metrics = extract_agent_metrics(messages) + + return { + "success": True, + "output": final_output, + "agent": "ErrorAnalysis", + **metrics + } + + except Exception as e: + logger.error(f"ErrorAnalysis failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "ErrorAnalysis" + } diff --git a/sample_solutions/Docugen-Microagents/api/agents/mermaid_agent.py b/sample_solutions/Docugen-Microagents/api/agents/mermaid_agent.py new file mode 100644 index 00000000..f717647a --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/mermaid_agent.py @@ -0,0 +1,174 @@ +""" +Mermaid Diagram Agent - SIMPLIFIED for 8K context models + +Generates Mermaid diagrams for architecture visualization. 
+ +Follows proven pattern: +- ≤3 tools +- Includes validate_mermaid_syntax for self-correction +- Minimal prompt +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + +# MINIMAL system prompt +MERMAID_PROMPT = """You are a Mermaid Diagram Generator. Create architecture diagrams. + +**Output:** 1-2 Mermaid diagrams: +1. System Architecture (components and connections) +2. Optional: Data Flow or Sequence Diagram + +**Tools:** +- find_entry_points() - find main files +- read_file(path) - read file (smart strategy for structure) +- validate_mermaid_syntax(code) - validate diagram + +**Mermaid Rules (CRITICAL):** +- Node IDs: alphanumeric + underscore only (no spaces, slashes) +- Node labels: use quotes for multi-word: NodeID["Label Text"] +- Edge labels: avoid special chars (/, :, `, ") +- Start with: graph TD or flowchart TD + +**Example:** +```mermaid +graph TD + User["User"] + API["API Server"] + DB["Database"] + User -->|Request| API + API -->|Query| DB +``` + +**Process:** +1. Find entry points +2. Read key files +3. Generate diagram +4. VALIDATE with validate_mermaid_syntax() +5. Fix errors if validation fails + +**FORBIDDEN Patterns (will cause syntax errors):** +- ❌ NO self-referencing edges: Service -->|X| Service +- ❌ NO curly braces in labels: -->|GET {id}| +- ❌ NO endpoint paths in labels: -->|POST /upload| +- ❌ NO colons in labels: -->|Method: POST| +- ✅ GOOD: -->|Upload File|, -->|Process Request| + +**Required Architecture Pattern:** +``` +User → Frontend → Backend Service(s) → Database/Storage +``` +Show the FLOW of data, NOT the API endpoints. 
+ +**CRITICAL - Final Response Format:** +Your FINAL response MUST contain ONLY the mermaid diagram code wrapped in triple backticks. +Do NOT add any explanatory text before or after the diagram. +Format your final response EXACTLY like this: + +```mermaid +graph TD + [your diagram here] +``` + +**Limit:** 12 tool calls.""" + + +async def run_mermaid_agent( + llm: BaseChatModel, + repo_path: str, + job_id: str, + api_endpoints: list = None +) -> Dict[str, Any]: + """ + Simplified Mermaid Agent + + Args: + llm: Language model + repo_path: Repository path + job_id: Job ID + api_endpoints: Optional list of API endpoints extracted by API Reference agent + + Returns: + Results dict with success flag and output + """ + try: + # Create minimal tool set (3 tools) + @tool + def find_entry_points() -> str: + """Find main entry point files. No args.""" + from tools.repo_tools import find_entry_points_tool + return find_entry_points_tool.func(repo_path=repo_path) + + @tool + def read_file(file_path: str) -> str: + """Read file with smart sampling. Args: file_path (str)""" + from tools.repo_tools import read_file_tool + # Use smart strategy: signatures only (good for architecture) + return read_file_tool.func(repo_path=repo_path, file_path=file_path, strategy="smart") + + @tool + def validate_mermaid_syntax(mermaid_code: str) -> str: + """Validate Mermaid diagram syntax. Args: mermaid_code (str)""" + from tools.repo_tools import validate_mermaid_syntax_tool + return validate_mermaid_syntax_tool.func(mermaid_code=mermaid_code) + + tools = [find_entry_points, read_file, validate_mermaid_syntax] + + # Create agent + agent = create_react_agent(model=llm, tools=tools) + + # Create callback logger + event_logger = create_agent_logger(job_id=job_id, agent_name="Mermaid") + + + # Build user message with optional API endpoints + user_message = "Generate Mermaid architecture diagram. Start with find_entry_points()." 
+ + if api_endpoints and len(api_endpoints) > 0: + user_message += f"\n\nAPI Endpoints available (include these in diagram if relevant):\n" + for ep in api_endpoints[:10]: # Limit to 10 to avoid context overflow + method = ep.get("method", "GET") + path = ep.get("path", "/") + desc = ep.get("description", "") + user_message += f"- {method} {path}: {desc}\n" + + # Execute + result = await agent.ainvoke( + {"messages": [ + ("system", MERMAID_PROMPT), + ("user", user_message) + ]}, + config={ + "recursion_limit": 15, + "callbacks": [event_logger] + } + ) + + # Extract output + messages = result.get("messages", []) + final_output = messages[-1].content if messages else "" + + # Extract metrics from messages + metrics = extract_agent_metrics(messages) + + return { + "success": True, + "output": final_output, + "agent": "Mermaid", + **metrics + } + + except Exception as e: + logger.error(f"Mermaid failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "Mermaid" + } diff --git a/sample_solutions/Docugen-Microagents/api/agents/planner_agent.py b/sample_solutions/Docugen-Microagents/api/agents/planner_agent.py new file mode 100644 index 00000000..f85475a7 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/planner_agent.py @@ -0,0 +1,151 @@ +""" +Planner Agent - SIMPLIFIED for 8K context models + +Analyzes project type and plans documentation sections. + +Follows proven pattern: +- ≤3 tools +- full strategy for package files (they're small) +- Minimal prompt +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + +# MINIMAL system prompt - MATCHES DOCUGEN TEMPLATE +PLANNER_PROMPT = """You are a Documentation Planner. 
Determine project type and plan sections. + +**Task:** Analyze project and output recommended README sections. + +**Tools:** +- detect_languages() - get language breakdown +- extract_dependencies() - get dependencies +- find_ui_files() - check for frontend + +**SECTION TEMPLATE (from DocuGen):** +Always include base sections: "Project Overview", "Features", "Architecture", "Prerequisites", "Quick Start Deployment", "Troubleshooting" +Add conditionally: +- "User Interface" - if find_ui_files() returns has_ui=true +- "Configuration" - if project has .env or config files + +**Output:** JSON with: +```json +{ + "project_type": "fastapi-backend" (or "web app", "CLI tool", etc.), + "sections": ["Project Overview", "Features", "Architecture", "Prerequisites", "Quick Start Deployment", "User Interface", "Configuration", "Troubleshooting"] +} +``` + +**Limit:** 8 tool calls.""" + + +async def run_planner_agent( + llm: BaseChatModel, + repo_path: str, + job_id: str +) -> Dict[str, Any]: + """ + Simplified Planner Agent + + Args: + llm: Language model + repo_path: Repository path + job_id: Job ID + + Returns: + Results dict with success flag and output + """ + try: + # Create minimal tool set (3 core tools + 2 optional) + @tool + def detect_languages() -> str: + """Detect programming languages. No args.""" + from tools.repo_tools import detect_languages_tool + return detect_languages_tool.func(repo_path=repo_path) + + @tool + def extract_dependencies() -> str: + """Extract dependencies from package files. No args.""" + from tools.repo_tools import extract_dependencies_tool + return extract_dependencies_tool.func(repo_path=repo_path) + + @tool + def find_ui_files() -> str: + """Check if project has UI/frontend. 
No args.""" + from tools.repo_tools import find_ui_files_tool + return find_ui_files_tool.func(repo_path=repo_path) + + tools = [detect_languages, extract_dependencies, find_ui_files] + + # Create agent + agent = create_react_agent(model=llm, tools=tools) + + # Create callback logger + event_logger = create_agent_logger(job_id=job_id, agent_name="Planner") + + + # Execute + result = await agent.ainvoke( + {"messages": [ + ("system", PLANNER_PROMPT), + ("user", "Analyze project type and plan README sections using DocuGen template. Start with detect_languages().") + ]}, + config={ + "recursion_limit": 8, + "callbacks": [event_logger] + } + ) + + # Extract output + messages = result.get("messages", []) + final_output = messages[-1].content if messages else "" + + # Parse and validate output has correct section names + import json + try: + # Try to extract JSON + content = final_output.strip() + if '```json' in content: + content = content.split('```json')[1].split('```')[0] + elif '```' in content: + content = content.split('```')[1].split('```')[0] + + plan = json.loads(content.strip()) + + # Ensure sections match DocuGen template + if "sections" not in plan or not plan["sections"]: + plan["sections"] = ["Project Overview", "Features", "Architecture", "Prerequisites", "Quick Start Deployment", "Troubleshooting"] + + final_output = json.dumps(plan, indent=2) + except: + # Fallback to default DocuGen sections + plan = { + "project_type": "Unknown", + "sections": ["Project Overview", "Features", "Architecture", "Prerequisites", "Quick Start Deployment", "Troubleshooting"] + } + final_output = json.dumps(plan, indent=2) + + # Extract metrics from messages + metrics = extract_agent_metrics(messages) + + return { + "success": True, + "output": final_output, + "agent": "Planner", + **metrics + } + + except Exception as e: + logger.error(f"Planner failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "Planner" + } diff --git 
a/sample_solutions/Docugen-Microagents/api/agents/pr_agent_mcp.py b/sample_solutions/Docugen-Microagents/api/agents/pr_agent_mcp.py new file mode 100644 index 00000000..5f868b30 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/pr_agent_mcp.py @@ -0,0 +1,386 @@ +""" +PR Agent with TRUE MCP Integration +Uses official GitHub MCP server via MCP protocol (not direct API calls) +With full ReAct-style logging (Thought → Action → Observation) +""" + +import logging +import re +import json +from typing import Dict, Any +from datetime import datetime + +from models import get_log_manager, LogType +from mcp_client import get_github_mcp_client + +logger = logging.getLogger(__name__) + + +async def create_pr_with_mcp( + repo_full_name: str, + readme_content: str, + project_name: str, + base_branch: str, + github_token: str, + job_id: str +) -> Dict[str, Any]: + """ + Create GitHub PR using TRUE MCP protocol with ReAct logging + + This function: + 1. Connects to GitHub MCP server (Docker) + 2. Discovers available tools via MCP + 3. Calls tools to create branch, commit, and PR + 4. Uses stdio protocol (not direct API) + 5. 
Logs full ReAct cycle (Thought → Action → Observation) + + Args: + repo_full_name: Repository "owner/repo" + readme_content: Generated README content + project_name: Project name for PR title + base_branch: Base branch to create PR against (e.g., "main" or "dev") + github_token: GitHub Personal Access Token + job_id: Job ID for logging + + Returns: + Dict with success status, PR URL, and details + """ + log_manager = get_log_manager() + + try: + # Log start + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_START, + message="PR Agent initialized with MCP protocol", + agent_name="PR Agent (MCP)" + ) + + # 💭 Thought: Planning to connect + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_THINKING, + message=f"💭 Thought: I need to connect to GitHub MCP server for {repo_full_name}", + agent_name="PR Agent (MCP)" + ) + + # Get MCP client + mcp_client = get_github_mcp_client(github_token) + + # Connect to GitHub MCP server (Docker container via stdio) + async with mcp_client.connect() as session: + # 💭 Thought: Discovering tools + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_THINKING, + message="💭 Thought: Connected to MCP server. 
I need to discover available GitHub tools", + agent_name="PR Agent (MCP)" + ) + + # List available tools + tools = await mcp_client.list_available_tools() + logger.info(f"Available MCP tools: {tools}") + + # 📊 Observation: Tools discovered + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: Found {len(tools)} GitHub MCP tools including: {', '.join(tools[:5])}...", + agent_name="PR Agent (MCP)", + metadata={"tools_count": len(tools), "tools": tools} + ) + + # Generate unique branch name with project name + timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + # Create slug from project name (lowercase, replace spaces/special chars with hyphens) + project_slug = re.sub(r'[^a-z0-9]+', '-', project_name.lower()).strip('-') + branch_name = f"docs/{project_slug}-readme-{timestamp}" + + # Parse repo owner and name + owner, repo = repo_full_name.split("/") + + # Extract actual project name from README title (H1 or H2) + readme_title = project_name # fallback to provided name + lines = readme_content.split('\n') + for line in lines[:15]: # Check first 15 lines for title + # Look for H1 (# Title) or H2 (## Title) + if line.startswith('# ') and not line.startswith('## '): + readme_title = line[2:].strip() + break + elif line.startswith('## '): + # H2 title found - extract it + readme_title = line[3:].strip() + break + + # === STEP 1: Create Branch === + + # 💭 Thought: Need to create branch + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_THINKING, + message=f"💭 Thought: I need to create a new branch '{branch_name}' from {base_branch}", + agent_name="PR Agent (MCP)" + ) + + # 🔧 Action: Create branch + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_ACTION, + message=f"🔧 Action: create_branch(owner={owner}, repo={repo}, branch={branch_name}, from_branch={base_branch})", + agent_name="PR Agent (MCP)", + metadata={"tool": "create_branch", "params": {"owner": owner, "repo": 
repo, "branch": branch_name, "from_branch": base_branch}} + ) + + try: + branch_result = await mcp_client.call_tool( + "create_branch", + { + "owner": owner, + "repo": repo, + "branch": branch_name, + "from_branch": base_branch + } + ) + logger.info(f"Branch creation result: {branch_result}") + + # 📊 Observation: Branch created + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: Branch '{branch_name}' created successfully from {base_branch}", + agent_name="PR Agent (MCP)", + metadata={"branch_created": True, "branch_name": branch_name} + ) + except Exception as e: + logger.warning(f"Branch creation via MCP failed: {e}") + # 📊 Observation: Branch might exist + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: Branch creation failed ({str(e)}). Branch may already exist - continuing anyway", + agent_name="PR Agent (MCP)", + metadata={"branch_created": False, "error": str(e)} + ) + + # === STEP 2: Commit README.md using push_files (FOOLPROOF - No SHA needed) === + + # 💭 Thought: Need to commit README + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_THINKING, + message=f"💭 Thought: I need to commit README.md to branch '{branch_name}' using push_files (no SHA required)", + agent_name="PR Agent (MCP)" + ) + + # Use push_files tool (more reliable, no SHA needed) + push_params = { + "owner": owner, + "repo": repo, + "branch": branch_name, + "files": [ + { + "path": "README.md", + "content": readme_content + } + ], + "message": f"docs: Updated README for {readme_title}" + } + + logger.info(f"Using push_files to commit README.md to branch '{branch_name}'") + + # 🔧 Action: Commit file using push_files + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_ACTION, + message=f"🔧 Action: push_files(owner={owner}, repo={repo}, branch={branch_name}, files=[README.md])", + agent_name="PR Agent (MCP)", + metadata={"tool": 
"push_files", "params": push_params, "content_length": len(readme_content)} + ) + + commit_success = False + try: + commit_result = await mcp_client.call_tool( + "push_files", + push_params + ) + logger.info(f"Commit result: {commit_result}") + + # Check if commit was successful + if commit_result and not (hasattr(commit_result, 'isError') and commit_result.isError): + commit_success = True + # 📊 Observation: File committed + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: README.md committed successfully to branch '{branch_name}'", + agent_name="PR Agent (MCP)", + metadata={"file_committed": True} + ) + else: + error_text = str(commit_result.content[0].text if hasattr(commit_result, 'content') else commit_result) + logger.error(f"File commit returned error: {error_text}") + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: Failed to commit README.md - {error_text[:200]}", + agent_name="PR Agent (MCP)", + metadata={"file_committed": False, "error": error_text} + ) + raise Exception(f"push_files failed: {error_text}") + + except Exception as e: + logger.error(f"File commit failed: {e}") + # 📊 Observation: Commit failed + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: Failed to commit README.md - {str(e)}", + agent_name="PR Agent (MCP)", + metadata={"file_committed": False, "error": str(e)} + ) + raise + + # === STEP 3: Create Pull Request === + + # 💭 Thought: Ready to create PR + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_THINKING, + message=f"💭 Thought: README.md is committed. Now I need to create a pull request from '{branch_name}' to '{base_branch}'", + agent_name="PR Agent (MCP)" + ) + + pr_body = f"""## Summary +This PR adds comprehensive AI-generated documentation for **{readme_title}**. 
+ +## What's Included +- **Project Overview** - High-level description and key features +- **Architecture** - System design with Mermaid diagrams +- **Installation Guide** - Step-by-step setup instructions with actual repository URLs +- **Configuration** - Environment variables and settings documentation +- **Deployment** - Quick start guide with Docker/manual deployment options +- **Troubleshooting** - Common issues and solutions +- **API Documentation** - Endpoints and usage examples (if applicable) + +## About This Documentation +This README was automatically generated by **DocuGen Micro-Agents**, an advanced AI system that uses specialized agents to analyze your repository: + +--- +*Generated by DocuGen Micro-Agents AI-powered documentation with specialized micro-agent system* +- Agents Used: Code Explorer, API Reference, Call Graph Analyzer, Environment Config, Dependency Analyzer, Error Analysis, Planner, Mermaid Generator, QA Validator +- Integration: Model Context Protocol (MCP) for GitHub""" + + # 🔧 Action: Create PR + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_ACTION, + message=f"🔧 Action: create_pull_request(owner={owner}, repo={repo}, title='docs: Updated README for {readme_title}', head={branch_name}, base={base_branch})", + agent_name="PR Agent (MCP)", + metadata={"tool": "create_pull_request", "params": {"owner": owner, "repo": repo, "head": branch_name, "base": base_branch}} + ) + + pr_url = None + try: + pr_result = await mcp_client.call_tool( + "create_pull_request", + { + "owner": owner, + "repo": repo, + "title": f"docs: Updated README for {readme_title}", + "body": pr_body, + "head": branch_name, + "base": base_branch + } + ) + + logger.info(f"PR creation result: {pr_result}") + logger.info(f"PR result type: {type(pr_result)}") + + # Check if PR creation failed + if hasattr(pr_result, 'isError') and pr_result.isError: + error_text = str(pr_result.content[0].text if hasattr(pr_result, 'content') else pr_result) + 
logger.error(f"PR creation returned error: {error_text}") + raise Exception(f"create_pull_request failed: {error_text}") + + # Extract PR URL from result + if hasattr(pr_result, 'content') and pr_result.content: + if isinstance(pr_result.content, list) and len(pr_result.content) > 0: + result_text = str(pr_result.content[0].text if hasattr(pr_result.content[0], 'text') else pr_result.content[0]) + logger.info(f"Extracted result_text: {result_text[:500]}") + else: + result_text = str(pr_result.content) + logger.info(f"Result content (non-list): {result_text[:500]}") + + # Try multiple methods to extract URL + # Method 1: Parse as JSON + try: + data = json.loads(result_text) + pr_url = data.get("html_url") + logger.info(f"Extracted PR URL from JSON: {pr_url}") + except: + logger.info("Could not parse result as JSON, trying regex") + # Method 2: Regex search for GitHub PR URL + url_match = re.search(r'https://github\.com/[^\s"\'<>]+/pull/\d+', result_text) + if url_match: + pr_url = url_match.group(0) + logger.info(f"Extracted PR URL from regex: {pr_url}") + else: + # Method 3: Construct URL from owner/repo if PR number is available + pr_number_match = re.search(r'"number":\s*(\d+)', result_text) + if pr_number_match: + pr_number = pr_number_match.group(1) + pr_url = f"https://github.com/{owner}/{repo}/pull/{pr_number}" + logger.info(f"Constructed PR URL from number: {pr_url}") + + # 📊 Observation: PR created + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: Pull request created successfully! 
URL: {pr_url or 'Check GitHub'}", + agent_name="PR Agent (MCP)", + metadata={"pr_created": True, "pr_url": pr_url} + ) + + except Exception as e: + logger.error(f"PR creation failed: {e}") + # 📊 Observation: PR creation failed + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation: Failed to create pull request - {str(e)}", + agent_name="PR Agent (MCP)", + metadata={"pr_created": False, "error": str(e)} + ) + raise + + # Log completion + await log_manager.log_async( + job_id=job_id, + log_type=LogType.AGENT_COMPLETE, + message=f"PR workflow complete! PR created at: {pr_url or 'Check GitHub'}", + agent_name="PR Agent (MCP)", + metadata={"pr_url": pr_url, "branch": branch_name} + ) + + return { + "success": True, + "pr_url": pr_url, + "branch_name": branch_name, + "output": f"Successfully created PR via MCP protocol: {pr_url or 'See GitHub'}" + } + + except Exception as e: + logger.error(f"MCP PR creation failed: {e}", exc_info=True) + await log_manager.log_async( + job_id=job_id, + log_type=LogType.ERROR, + message=f"MCP PR Agent error: {str(e)}", + agent_name="PR Agent (MCP)" + ) + + return { + "success": False, + "pr_url": None, + "branch_name": None, + "output": f"Failed to create PR via MCP: {str(e)}" + } diff --git a/sample_solutions/Docugen-Microagents/api/agents/qa_validator_agent.py b/sample_solutions/Docugen-Microagents/api/agents/qa_validator_agent.py new file mode 100644 index 00000000..a6d0bb9a --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/qa_validator_agent.py @@ -0,0 +1,222 @@ +""" +QA Validator Agent - SIMPLIFIED for 8K context models + +Validates README quality, completeness, and structure. + +KEY DIFFERENCE: QA Validator does NOT have file reading tools. +It receives the final_readme from state and validates it. 
+ +Follows proven pattern: +- NO file reading tools (works from final_readme input) +- Minimal prompt +- Validates output quality +""" + +import logging +from typing import Dict, Any +from langgraph.prebuilt import create_react_agent +from langchain_core.language_models import BaseChatModel +from langchain.tools import tool +from core.agent_event_logger import create_agent_logger +from utils.metrics_extractor import extract_agent_metrics + +logger = logging.getLogger(__name__) + +# MINIMAL system prompt +QA_VALIDATOR_PROMPT = """You are a README Quality Validator. Validate documentation quality. + +**Input:** You'll receive a generated README markdown. + +**Task:** Evaluate the README on these criteria: +1. **Completeness** - Are all expected sections present? (Overview, Installation, Usage, etc.) +2. **Clarity** - Is the content clear and well-written? +3. **Structure** - Proper markdown formatting, headings hierarchy +4. **Code Examples** - Are code blocks properly formatted? +5. **Mermaid Diagrams** - If present, are they valid? 
+ +**Output:** JSON validation report: +```json +{ + "qa_score": 85, + "qa_passed": true, + "issues": [ + {"severity": "warning", "message": "Installation section is brief"}, + {"severity": "error", "message": "No usage examples provided"} + ], + "recommendations": ["Add more usage examples", "Include configuration details"] +} +``` + +**Tools:** +- validate_readme_structure(readme) - check structure and completeness +- validate_mermaid_diagrams(readme) - extract and validate Mermaid diagrams + +**Scoring:** +- 90-100: Excellent +- 75-89: Good +- 60-74: Acceptable (needs minor improvements) +- <60: Poor (needs major improvements) + +**Pass Threshold:** 60 + +**Limit:** 5 tool calls.""" + + +async def run_qa_validator_agent( + llm: BaseChatModel, + readme_sections: dict, + job_id: str, + evidence_packet=None +) -> Dict[str, Any]: + """ + Enhanced QA Validator - Validates sections with evidence-based guardrails + + Checks if expected sections are present, non-empty, AND don't contain + forbidden phrases without evidence. 
+ + Args: + llm: Language model (not used in fast mode) + readme_sections: Dict of section name -> content + job_id: Job ID + evidence_packet: EvidencePacket for checking forbidden phrases + + Returns: + Results dict with success flag and validation report + """ + try: + import json + + # Expected sections + EXPECTED_SECTIONS = [ + "Project Overview", + "Features", + "Architecture", + "Prerequisites", + "Quick Start Deployment", + "Configuration", + "Troubleshooting" + ] + + issues = [] + score = 100 + missing_sections = [] + empty_sections = [] + forbidden_violations = [] + + # Check for missing sections + for section in EXPECTED_SECTIONS: + if section not in readme_sections: + missing_sections.append(section) + issues.append({"severity": "warning", "message": f"Missing section: {section}"}) + score -= 5 + elif len(readme_sections[section].strip()) < 50: + empty_sections.append(section) + issues.append({"severity": "error", "message": f"Section too short or empty: {section}"}) + score -= 10 + + # Check for code blocks in deployment section + if "Quick Start Deployment" in readme_sections: + if '```' not in readme_sections["Quick Start Deployment"]: + issues.append({"severity": "warning", "message": "Quick Start Deployment lacks code examples"}) + score -= 5 + + # Check for architecture diagram mention + if "Architecture" in readme_sections: + if "diagram" not in readme_sections["Architecture"].lower(): + issues.append({"severity": "info", "message": "Architecture section should mention diagram"}) + + # === NEW: Evidence-Based Forbidden Phrase Checks (FIX 3 applied) === + if evidence_packet: + # Combine all section content for checking + full_content = "\n".join(readme_sections.values()) + + # Define forbidden checks (removed weak port checks per FIX 3) + forbidden_checks = [ + { + "phrase": "npm", + "evidence_required": lambda e: len(e.node_deps) > 0, + "message": "Claims 'npm' commands but no package.json found" + }, + { + "phrase": "docker-compose", + 
"evidence_required": lambda e: e.has_docker and "docker-compose.yml" in e.docker_files, + "message": "Claims 'docker-compose' but no docker-compose.yml found" + }, + { + "phrase": "Dockerfile", + "evidence_required": lambda e: e.has_docker and "Dockerfile" in e.docker_files, + "message": "Claims 'Dockerfile' but no Dockerfile found" + }, + { + "phrase": "Whisper", + "evidence_required": lambda e: any("whisper" in dep.lower() for dep in e.python_deps), + "message": "Claims 'Whisper' without dependency evidence" + }, + { + "phrase": "Keycloak", + "evidence_required": lambda e: any("keycloak" in dep.lower() for dep in (e.python_deps + e.node_deps)), + "message": "Claims 'Keycloak' without dependency evidence" + }, + { + "phrase": "React", + "evidence_required": lambda e: "react" in [d.lower() for d in e.node_deps] or e.frontend_framework == "React", + "message": "Claims 'React' without evidence" + }, + { + "phrase": "Vue", + "evidence_required": lambda e: "vue" in [d.lower() for d in e.node_deps] or e.frontend_framework == "Vue", + "message": "Claims 'Vue' without evidence" + } + ] + + # Check each forbidden phrase + for check in forbidden_checks: + phrase = check["phrase"] + if phrase in full_content: + # Check if evidence exists + if not check["evidence_required"](evidence_packet): + forbidden_violations.append(check["message"]) + issues.append({ + "severity": "error", + "message": f"HALLUCINATION: {check['message']}" + }) + score -= 15 # Heavy penalty for hallucinations + + if forbidden_violations: + logger.warning(f"[QA] Found {len(forbidden_violations)} forbidden phrase violations") + + # Build validation report + qa_report = { + "qa_score": max(score, 0), # Allow scores below 60 to show severity + "qa_passed": score >= 60, + "missing_sections": missing_sections, + "empty_sections": empty_sections, + "forbidden_violations": forbidden_violations, + "issues": issues, + "recommendations": [] if score >= 90 else [ + "Ensure all sections have meaningful content", + 
"Add code examples to deployment instructions", + "Remove hallucinated commands/technologies without evidence" + ] + } + + output_json = json.dumps(qa_report, indent=2) + + # Note: QA Validator doesn't use LangGraph messages, so metrics will be empty + # But we include the call for consistency with other agents + metrics = extract_agent_metrics([]) + + return { + "success": True, + "output": output_json, + "agent": "QAValidator", + **metrics + } + + except Exception as e: + logger.error(f"QAValidator failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "QAValidator" + } diff --git a/sample_solutions/Docugen-Microagents/api/agents/writer_agent_sectioned.py b/sample_solutions/Docugen-Microagents/api/agents/writer_agent_sectioned.py new file mode 100644 index 00000000..8b19c7b7 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/agents/writer_agent_sectioned.py @@ -0,0 +1,296 @@ +""" +Sectioned Writer Agent - TIMEOUT-RESISTANT DESIGN + +Generates README in multiple small calls instead of one large call. + +Architecture: +1. Generate outline (fast) +2. Generate sections one-by-one (small, bounded calls) +3. Stitch sections together + +This prevents 504 Gateway Timeouts by keeping each LLM call small and fast. +""" + +import logging +import json +from typing import Dict, Any, List +from langchain_core.language_models import BaseChatModel +from langchain_core.messages import SystemMessage, HumanMessage + +logger = logging.getLogger(__name__) + + +def build_structured_evidence(state: Dict[str, Any]) -> Dict[str, Any]: + """ + Convert raw agent outputs into compact structured evidence. + + This dramatically reduces tokens vs raw text concatenation. 
+ """ + + # Parse code summary into bullets + code_summary = state.get("code_summary", "") + code_bullets = _extract_bullets(code_summary, max_bullets=10) + + # Parse API documentation + api_docs = state.get("api_documentation", "") + api_bullets = _extract_bullets(api_docs, max_bullets=8) + + # Parse dependencies + dependency_report = state.get("dependency_report", {}) + if isinstance(dependency_report, dict): + dep_output = dependency_report.get("output", "") + else: + dep_output = str(dependency_report) + dep_bullets = _extract_bullets(dep_output, max_bullets=6) + + # Parse env config + env_config = state.get("env_config_output", "") + env_bullets = _extract_bullets(env_config, max_bullets=5) + + # Parse call graph (already dict) + call_graph = state.get("call_graph", {}) + if isinstance(call_graph, dict) and "output" in call_graph: + call_graph_text = str(call_graph["output"])[:500] # Truncate + else: + call_graph_text = str(call_graph)[:500] + + # Parse error analysis + error_analysis = state.get("error_analysis", {}) + if isinstance(error_analysis, dict): + error_output = error_analysis.get("output", "") + else: + error_output = str(error_analysis) + error_bullets = _extract_bullets(error_output, max_bullets=5) + + # Project metadata + project_type = state.get("project_type", "Unknown") + repo_name = state.get("repo_name", "Project") + + # Build compact structured evidence + structured = { + "project_name": repo_name, + "project_type": project_type, + "overview": code_bullets[:3] if code_bullets else ["No overview available"], + "architecture": code_bullets[3:8] if len(code_bullets) > 3 else [], + "api_endpoints": api_bullets, + "dependencies": dep_bullets, + "environment": env_bullets, + "call_graph_summary": call_graph_text, + "error_handling": error_bullets + } + + return structured + + +def _extract_bullets(text: str, max_bullets: int = 10) -> List[str]: + """Extract bullet points or key sentences from text""" + if not text: + return [] + + bullets = [] + 
+ # Try to find existing bullets (lines starting with -, *, •, or numbers) + lines = text.split('\n') + for line in lines: + line = line.strip() + if line and (line.startswith('-') or line.startswith('*') or + line.startswith('•') or (len(line) > 2 and line[0].isdigit() and line[1] in '.)')): + # Remove bullet marker + clean = line.lstrip('-*•0123456789.) ').strip() + if clean: + bullets.append(clean) + if len(bullets) >= max_bullets: + break + + # If no bullets found, extract first N sentences + if not bullets: + sentences = text.replace('\n', ' ').split('. ') + for sentence in sentences[:max_bullets]: + sentence = sentence.strip() + if sentence: + bullets.append(sentence) + + return bullets[:max_bullets] + + +async def run_writer_agent_sectioned( + llm: BaseChatModel, + state: Dict[str, Any], + job_id: str +) -> Dict[str, Any]: + """ + Sectioned Writer Agent - generates README in multiple small calls. + Uses planner's section list instead of generating own outline. + + Args: + llm: Language model + state: Full workflow state (we'll extract structured evidence) + job_id: Job ID + + Returns: + Results dict with sections dict (not a single readme string) + """ + try: + # Step 1: Build structured evidence (compact) + print(f"[Writer] Building structured evidence...") + evidence = build_structured_evidence(state) + evidence_json = json.dumps(evidence, indent=2) + + print(f"[Writer] Structured evidence: {len(evidence_json)} chars") + logger.info(f"[Writer] Structured evidence: {len(evidence_json)} chars") + + # Step 2: Get sections from planner (NO outline generation!) 
+ planned_sections = state.get("documentation_sections", ["Project Overview", "Features", "Architecture", "Prerequisites", "Quick Start Deployment", "Troubleshooting"]) + print(f"[Writer] Using planner sections: {planned_sections}") + logger.info(f"[Writer] Using planner sections: {planned_sections}") + + # Step 3: Generate sections one-by-one (small, fast calls) + sections_dict = {} + for section_name in planned_sections: + logger.info(f"[Writer] Generating section: {section_name}") + section_content = await _generate_section(llm, section_name, evidence) + # Store with ## heading included + sections_dict[section_name] = f"## {section_name}\n\n{section_content}" + + logger.info(f"[Writer] Generated {len(sections_dict)} sections") + + # Return sections dict (NOT a single assembled readme) + # Assembly will happen in assembly_node + return { + "success": True, + "output": sections_dict, # Dict of section_name: content + "agent": "Writer" + } + + except Exception as e: + logger.error(f"Writer failed: {e}") + return { + "success": False, + "error": str(e), + "agent": "Writer" + } + + +async def _generate_section( + llm: BaseChatModel, + section_name: str, + evidence: Dict[str, Any] +) -> str: + """ + Generate a single README section (small, fast call). 
+ + Returns: Markdown content for this section + """ + + # Build section-specific evidence (only relevant data) + # Map DocuGen section names to evidence + if section_name == "Project Overview": + context = f""" +Project: {evidence.get('project_name')} +Type: {evidence.get('project_type')} +Overview: {json.dumps(evidence.get('overview', []))} +Architecture: {json.dumps(evidence.get('architecture', [])[:3])} +""" + + elif section_name == "Features": + context = f""" +Architecture: {json.dumps(evidence.get('architecture', []))} +Key capabilities: {json.dumps(evidence.get('overview', []))} +API Endpoints: {json.dumps(evidence.get('api_endpoints', [])[:5])} +""" + + elif section_name == "Prerequisites": + context = f""" +Dependencies: {json.dumps(evidence.get('dependencies', []))} +Project type: {evidence.get('project_type')} +""" + + elif section_name == "Quick Start Deployment": + context = f""" +Dependencies: {json.dumps(evidence.get('dependencies', []))} +Environment variables: {json.dumps(evidence.get('environment', [])[:5])} +Project type: {evidence.get('project_type')} +""" + + elif section_name == "Configuration": + context = f""" +Environment variables: {json.dumps(evidence.get('environment', []))} +""" + + elif section_name == "User Interface": + context = f""" +Project type: {evidence.get('project_type')} +Overview: {json.dumps(evidence.get('overview', []))} +""" + + elif section_name == "Architecture": + context = f""" +Architecture: {json.dumps(evidence.get('architecture', []))} +Call graph: {evidence.get('call_graph_summary', '')} +API Endpoints: {json.dumps(evidence.get('api_endpoints', [])[:3])} +""" + + elif section_name == "Troubleshooting": + context = f""" +Error handling: {json.dumps(evidence.get('error_handling', []))} +Common issues: {json.dumps(evidence.get('overview', []))} +""" + + else: + # Fallback for any other section names + context = json.dumps(evidence, indent=2)[:1000] + + # Section-specific requirements + if section_name == 
"Prerequisites": + requirements = """List ONLY the actual dependencies and requirements found in the context. +Format as: +- Runtime/language versions (e.g., Python 3.8+, Node.js 16+) +- Required tools (Docker, npm, pip, etc.) +- System requirements +If no specific prerequisites found, state: "Standard development environment for [project_type]." +""" + elif section_name == "Quick Start Deployment": + requirements = """Provide deployment instructions based ONLY on evidence: +1. Installation steps (based on actual dependencies found) +2. Configuration steps (based on actual env variables found) +3. How to run the application (based on project type and entry points) +4. Docker commands if Docker files were found +If insufficient information, provide minimal: "Refer to dependency files and configuration for setup details." +""" + elif section_name == "Configuration": + requirements = """List ONLY the actual environment variables found. Format as: +- Variable name: Brief purpose +If no variables found, state: "No environment configuration required." +""" + else: + requirements = "Write based on the evidence provided. Be factual and concise." + + prompt = f"""Write the "{section_name}" section for a README based STRICTLY on the provided evidence. + +**CRITICAL RULES:** +1. ONLY use information from the Context below +2. If Context is empty or says "No X available", write a minimal 1-2 sentence section acknowledging what's missing +3. DO NOT invent features, files, commands, or technical details that aren't in the Context +4. DO NOT add placeholder examples or generic instructions +5. Be factual and concise - accuracy over completeness + +**Section-Specific Requirements:** +{requirements} + +Context: +{context} + +Write clear, concise markdown using ONLY the facts above. +If the context indicates the project is minimal/empty, reflect that accurately. +Keep it under 300 words. 
+ +Section content:""" + + messages = [ + SystemMessage(content="You are a technical writer who ONLY writes based on provided evidence. Never invent or assume information. Be factual and concise."), + HumanMessage(content=prompt) + ] + + response = await llm.ainvoke(messages) + + return response.content.strip() diff --git a/sample_solutions/Docugen-Microagents/api/config.py b/sample_solutions/Docugen-Microagents/api/config.py new file mode 100644 index 00000000..a80cbf70 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/config.py @@ -0,0 +1,77 @@ +""" +Configuration management for DocuGen AI +Supports GenAI Gateway and Keycloak authentication +""" + +import os +from enum import Enum +from typing import Optional +from pydantic_settings import BaseSettings +from dotenv import load_dotenv + +load_dotenv() + + +class Settings(BaseSettings): + """Application settings with unified inference configuration""" + + # Application Info + APP_TITLE: str = "DocuGen Micro-Agents" + APP_DESCRIPTION: str = "AI-powered documentation generation with specialized micro-agent system" + APP_VERSION: str = "1.0.0" + + # Server Configuration + API_PORT: int = 5001 + HOST: str = "0.0.0.0" + + # CORS Settings + CORS_ORIGINS: list = ["http://localhost:3000", "http://localhost:3001", "http://localhost:5173"] + + # Inference API Configuration + # Supports multiple inference deployment patterns: + # - GenAI Gateway: Provide your GenAI Gateway URL and API key + # - APISIX Gateway: Provide your APISIX Gateway URL and authentication token + INFERENCE_API_ENDPOINT: Optional[str] = None + INFERENCE_API_TOKEN: Optional[str] = None + + # Security Configuration + VERIFY_SSL: bool = True + + # Docker Network Configuration + LOCAL_URL_ENDPOINT: str = "not-needed" + + # Micro-Agent Model Configuration (Using SLM - Qwen3-4B) + CODE_EXPLORER_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + API_REFERENCE_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + CALL_GRAPH_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" 
+ ERROR_ANALYSIS_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + ENV_CONFIG_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + DEPENDENCY_ANALYZER_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + PLANNER_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + MERMAID_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + QA_VALIDATOR_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + WRITER_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + + + # Repository Settings + TEMP_REPO_DIR: str = "./tmp/repos" + MAX_REPO_SIZE: int = 10737418240 # 10GB in bytes + MAX_FILE_SIZE: int = 1000000 # 1MB + MAX_FILES_TO_SCAN: int = 500 + MAX_LINES_PER_FILE: int = 500 # Line budget per file (pattern_window extracts ~150-300 lines) + + # GitHub Integration (for MCP PR creation) + GITHUB_TOKEN: Optional[str] = None + + # Agent Settings + AGENT_TEMPERATURE: float = 0.7 + AGENT_MAX_TOKENS: int = 1000 + AGENT_TIMEOUT: int = 300 # 5 minutes + + class Config: + env_file = ".env" + case_sensitive = True + + +# Global settings instance +settings = Settings() diff --git a/sample_solutions/Docugen-Microagents/api/core/__init__.py b/sample_solutions/Docugen-Microagents/api/core/__init__.py new file mode 100644 index 00000000..32dabaa6 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/core/__init__.py @@ -0,0 +1,13 @@ +""" +Core infrastructure for Docugen-Microagents +Provides metrics tracking and agent event logging +""" + +from .metrics_collector import MetricsCollector, AgentMetrics +from .agent_event_logger import create_agent_logger + +__all__ = [ + 'MetricsCollector', + 'AgentMetrics', + 'create_agent_logger' +] diff --git a/sample_solutions/Docugen-Microagents/api/core/agent_event_logger.py b/sample_solutions/Docugen-Microagents/api/core/agent_event_logger.py new file mode 100644 index 00000000..e2ce0191 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/core/agent_event_logger.py @@ -0,0 +1,309 @@ +""" +Shared Agent Event Logger - Callback Handler for ReAct Pattern Visibility + +Captures tool calls and LLM 
events from LangChain agents and streams them +to the UI via SSE for real-time "Thinking → Action → Observation" display. + +Works with both LangChain and LangGraph agents. +""" + +import logging +from typing import Any, Dict, List, Optional +from langchain_core.callbacks.base import AsyncCallbackHandler +from models import get_log_manager, LogType + +logger = logging.getLogger(__name__) + + +class AgentEventLogger(AsyncCallbackHandler): + """ + Callback handler that captures agent events and streams them to UI. + + Captures: + - Tool calls (Actions) + - Tool outputs (Observations) + - LLM starts (Thinking) + + Usage: + logger = AgentEventLogger(job_id="job_123", agent_name="CodeExplorer") + result = await agent.ainvoke(..., config={"callbacks": [logger]}) + """ + + def __init__(self, job_id: str, agent_name: str): + """ + Initialize the event logger + + Args: + job_id: Job ID for streaming logs + agent_name: Name of the agent (e.g., "CodeExplorer", "Writer") + """ + super().__init__() + self.job_id = job_id + self.agent_name = agent_name + self.log_manager = get_log_manager() + self.cycle_count = 0 + self.tool_call_count = 0 + self.current_tool = None + + # Debouncing flags to prevent duplicate logs + self._agent_started = False + self._agent_completed = False + + # Tool-specific thinking messages (context-aware) + self.thinking_templates = { + "list_directory": "📂 Examining directory structure...", + "read_file": "📖 Reading file to extract information...", + "detect_languages": "🔍 Analyzing programming languages...", + "extract_dependencies": "📦 Extracting project dependencies...", + "analyze_code_structure": "🏗️ Analyzing code structure and patterns...", + "find_entry_points": "🎯 Locating application entry points...", + "find_api_routes": "🌐 Discovering API routes and endpoints...", + "find_config_files": "⚙️ Finding configuration files...", + "validate_mermaid_syntax": "✅ Validating diagram syntax...", + "validate_readme_structure": "📋 Checking README 
structure...", + "default": "💭 Analyzing repository and planning next step..." + } + + async def on_tool_start( + self, + serialized: Dict[str, Any], + input_str: str, + **kwargs: Any + ) -> None: + """ + Called when a tool starts executing (Action step) + + Args: + serialized: Tool metadata + input_str: Tool input arguments + """ + self.cycle_count += 1 + self.tool_call_count += 1 + + # Extract tool name + tool_name = serialized.get("name", "unknown_tool") + self.current_tool = tool_name + + # Log thinking step before action + thinking_msg = self.thinking_templates.get(tool_name, self.thinking_templates["default"]) + await self.log_manager.log_async( + job_id=self.job_id, + log_type=LogType.AGENT_THINKING, + message=f"💭 Thinking (Cycle {self.cycle_count}): {thinking_msg}", + agent_name=self.agent_name, + metadata={ + "cycle": self.cycle_count, + "type": "thinking", + "next_tool": tool_name + } + ) + + # Format tool arguments for display + try: + # Try to parse input_str as it might be JSON or formatted string + args_display = input_str[:200] if input_str else "(no args)" + if len(input_str) > 200: + args_display += "..." + except: + args_display = "(args)" + + # Log action step + await self.log_manager.log_async( + job_id=self.job_id, + log_type=LogType.AGENT_ACTION, + message=f"🔧 Action (Cycle {self.cycle_count}): {tool_name}({args_display})", + agent_name=self.agent_name, + metadata={ + "cycle": self.cycle_count, + "type": "action", + "tool": tool_name, + "input": input_str, + "call_number": self.tool_call_count + } + ) + + async def on_tool_end( + self, + output: str, + **kwargs: Any + ) -> None: + """ + Called when a tool finishes (Observation step) + + Args: + output: Tool output/result + """ + tool_name = self.current_tool or "unknown" + + # Create intelligent summary of output + output_str = str(output) + output_lines = output_str.count('\n') + 1 + + # Smart truncation + if len(output_str) > 300: + output_preview = output_str[:300] + f"... 
[{len(output_str)} chars total, {output_lines} lines]" + else: + output_preview = output_str + + # Log observation step + await self.log_manager.log_async( + job_id=self.job_id, + log_type=LogType.AGENT_OBSERVATION, + message=f"📊 Observation (Cycle {self.cycle_count}): {tool_name} returned:\n{output_preview}", + agent_name=self.agent_name, + metadata={ + "cycle": self.cycle_count, + "type": "observation", + "tool": tool_name, + "output_length": len(output_str), + "line_count": output_lines + } + ) + + async def on_tool_error( + self, + error: Exception, + **kwargs: Any + ) -> None: + """ + Called when a tool encounters an error + + Args: + error: The exception that occurred + """ + tool_name = self.current_tool or "unknown" + + await self.log_manager.log_async( + job_id=self.job_id, + log_type=LogType.ERROR, + message=f"❌ Error (Cycle {self.cycle_count}): {tool_name} failed - {str(error)}", + agent_name=self.agent_name, + metadata={ + "cycle": self.cycle_count, + "type": "error", + "tool": tool_name, + "error": str(error) + } + ) + + async def on_llm_start( + self, + serialized: Dict[str, Any], + prompts: List[str], + **kwargs: Any + ) -> None: + """ + Called when LLM starts generating (optional - for detailed logging) + + Args: + serialized: LLM metadata + prompts: Input prompts + """ + # Optional: Log LLM reasoning start + # Only log if we want very detailed visibility + pass + + async def on_llm_end( + self, + response: Any, + **kwargs: Any + ) -> None: + """ + Called when LLM finishes generating + + Args: + response: LLM response + """ + # Optional: Log LLM completion + pass + + async def on_chain_start( + self, + serialized: Dict[str, Any], + inputs: Dict[str, Any], + **kwargs: Any + ) -> None: + """ + Called when agent chain starts + """ + # Debounce: Only log the first chain start + if self._agent_started: + return + + self._agent_started = True + + # Log agent start + await self.log_manager.log_async( + job_id=self.job_id, + log_type=LogType.AGENT_START, + 
message=f"🚀 Starting {self.agent_name} agent...", + agent_name=self.agent_name, + metadata={"type": "agent_start"} + ) + + async def on_chain_end( + self, + outputs: Dict[str, Any], + **kwargs: Any + ) -> None: + """ + Called when agent chain completes + """ + # Debounce: Only log the first chain end + if self._agent_completed: + return + + self._agent_completed = True + + # Log completion summary + await self.log_manager.log_async( + job_id=self.job_id, + log_type=LogType.INFO, + message=f"✅ Completed {self.cycle_count} ReAct cycle(s) with {self.tool_call_count} tool call(s)", + agent_name=self.agent_name, + metadata={ + "type": "agent_complete", + "total_cycles": self.cycle_count, + "total_tool_calls": self.tool_call_count + } + ) + + async def on_chain_error( + self, + error: Exception, + **kwargs: Any + ) -> None: + """ + Called when agent chain encounters an error + + Args: + error: The exception that occurred + """ + await self.log_manager.log_async( + job_id=self.job_id, + log_type=LogType.ERROR, + message=f"❌ {self.agent_name} agent failed: {str(error)}", + agent_name=self.agent_name, + metadata={ + "type": "agent_error", + "error": str(error) + } + ) + + +def create_agent_logger(job_id: str, agent_name: str) -> AgentEventLogger: + """ + Factory function to create an AgentEventLogger + + Args: + job_id: Job ID for log streaming + agent_name: Name of the agent + + Returns: + Configured AgentEventLogger instance + + Example: + logger = create_agent_logger("job_123", "CodeExplorer") + result = await agent.ainvoke(..., config={"callbacks": [logger]}) + """ + return AgentEventLogger(job_id=job_id, agent_name=agent_name) diff --git a/sample_solutions/Docugen-Microagents/api/core/metrics_collector.py b/sample_solutions/Docugen-Microagents/api/core/metrics_collector.py new file mode 100644 index 00000000..f887d373 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/core/metrics_collector.py @@ -0,0 +1,220 @@ +""" +Metrics Collection for Workflow Execution 
+ +Tracks real metrics from execution: +- Token usage per agent +- Time to first token (TTFT) +- Tokens per second (TPS) +- Tool calls per agent +- Workflow-level aggregated metrics + +Key principle: Measure everything, estimate nothing +""" + +import time +import logging +from typing import Dict, Optional, Any +from dataclasses import dataclass, asdict + +logger = logging.getLogger(__name__) + + +@dataclass +class AgentMetrics: + """Metrics for a single agent execution""" + agent_name: str + job_id: str + + # Execution metrics + start_time_ms: float = 0 + end_time_ms: float = 0 + duration_ms: float = 0 + + # Token usage + input_tokens: int = 0 + output_tokens: int = 0 + total_tokens: int = 0 + + # Tool calling + llm_calls: int = 0 + tool_calls: int = 0 + + # Status + success: bool = False + error_message: Optional[str] = None + + def to_dict(self) -> Dict: + """Convert to dictionary""" + return asdict(self) + + def calculate_cost(self, cost_per_million_tokens: float = 0.15) -> float: + """ + Calculate estimated cost in USD + + Args: + cost_per_million_tokens: Cost per 1M tokens (default: $0.15 for Qwen3-4B estimate) + + Returns: + Estimated cost in USD + """ + return (self.total_tokens / 1_000_000) * cost_per_million_tokens + + def calculate_tps(self) -> float: + """ + Calculate tokens per second (TPS) + + Returns: + Tokens per second + """ + if self.duration_ms == 0: + return 0.0 + duration_seconds = self.duration_ms / 1000 + return self.output_tokens / duration_seconds if duration_seconds > 0 else 0.0 + + +class MetricsCollector: + """ + Centralized metrics collection for all agents + + Tracks real execution metrics for workflow analysis + """ + + def __init__(self, job_id: str): + """ + Initialize MetricsCollector + + Args: + job_id: Unique job identifier + """ + self.job_id = job_id + self.agents: Dict[str, AgentMetrics] = {} + self.workflow_start_time = time.time() * 1000 # Convert to ms + self.workflow_end_time: Optional[float] = None + + 
logger.info(f"[{job_id}] MetricsCollector initialized") + + def start_agent(self, agent_name: str) -> AgentMetrics: + """ + Start tracking an agent + + Args: + agent_name: Name of agent + + Returns: + AgentMetrics instance for this agent + """ + if agent_name in self.agents: + logger.warning(f"[{self.job_id}] Agent {agent_name} already started, resetting") + + metrics = AgentMetrics( + agent_name=agent_name, + job_id=self.job_id, + start_time_ms=time.time() * 1000 + ) + + self.agents[agent_name] = metrics + logger.debug(f"[{self.job_id}] Started tracking: {agent_name}") + + return metrics + + def end_agent( + self, + agent_name: str, + success: bool = True, + input_tokens: int = 0, + output_tokens: int = 0, + llm_calls: int = 0, + tool_calls: int = 0, + error_message: Optional[str] = None + ): + """ + End tracking an agent + + Args: + agent_name: Name of agent + success: Whether agent succeeded + input_tokens: Input tokens used + output_tokens: Output tokens generated + llm_calls: Number of LLM calls + tool_calls: Number of tool calls + error_message: Error message if failed + """ + if agent_name not in self.agents: + logger.error(f"[{self.job_id}] Agent {agent_name} not started") + return + + metrics = self.agents[agent_name] + metrics.end_time_ms = time.time() * 1000 + metrics.duration_ms = metrics.end_time_ms - metrics.start_time_ms + metrics.success = success + metrics.input_tokens = input_tokens + metrics.output_tokens = output_tokens + metrics.total_tokens = input_tokens + output_tokens + metrics.llm_calls = llm_calls + metrics.tool_calls = tool_calls + metrics.error_message = error_message + + tps = metrics.calculate_tps() + + logger.info( + f"[{self.job_id}] {agent_name} completed: " + f"success={success}, tokens={metrics.total_tokens}, " + f"duration={metrics.duration_ms:.0f}ms, TPS={tps:.2f}" + ) + + def finalize_workflow(self): + """Mark workflow as complete and calculate final metrics""" + self.workflow_end_time = time.time() * 1000 + workflow_duration 
= self.workflow_end_time - self.workflow_start_time + + logger.info( + f"[{self.job_id}] Workflow completed in {workflow_duration:.0f}ms " + f"({len(self.agents)} agents)" + ) + + def get_summary(self) -> Dict[str, Any]: + """ + Get comprehensive metrics summary + + Returns: + Dictionary with all metrics + """ + if not self.workflow_end_time: + self.finalize_workflow() + + # Calculate totals + total_tokens = sum(m.total_tokens for m in self.agents.values()) + total_input_tokens = sum(m.input_tokens for m in self.agents.values()) + total_output_tokens = sum(m.output_tokens for m in self.agents.values()) + total_tool_calls = sum(m.tool_calls for m in self.agents.values()) + total_llm_calls = sum(m.llm_calls for m in self.agents.values()) + workflow_duration_ms = self.workflow_end_time - self.workflow_start_time + + # Agent-level metrics + agent_metrics = [m.to_dict() for m in self.agents.values()] + + # Calculate workflow-level TPS + workflow_duration_seconds = workflow_duration_ms / 1000 + workflow_tps = total_output_tokens / workflow_duration_seconds if workflow_duration_seconds > 0 else 0.0 + + # Count failed agents + failed_agents = [m for m in self.agents.values() if not m.success] + + return { + "job_id": self.job_id, + "workflow": { + "total_agents": len(self.agents), + "successful_agents": len([m for m in self.agents.values() if m.success]), + "failed_agents": len(failed_agents), + "total_duration_ms": workflow_duration_ms, + "total_duration_seconds": round(workflow_duration_seconds, 2), + "total_tokens": total_tokens, + "total_input_tokens": total_input_tokens, + "total_output_tokens": total_output_tokens, + "total_tool_calls": total_tool_calls, + "total_llm_calls": total_llm_calls, + "average_tps": round(workflow_tps, 2) + }, + "agents": agent_metrics, + "failed_agent_names": [m.agent_name for m in failed_agents] + } diff --git a/sample_solutions/Docugen-Microagents/api/mcp_client/__init__.py b/sample_solutions/Docugen-Microagents/api/mcp_client/__init__.py 
new file mode 100644 index 00000000..15cca45f --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/mcp_client/__init__.py @@ -0,0 +1,8 @@ +""" +MCP Client Module +Provides connections to MCP servers (Model Context Protocol) +""" + +from .github_mcp_client import GitHubMCPClient, get_github_mcp_client + +__all__ = ['GitHubMCPClient', 'get_github_mcp_client'] diff --git a/sample_solutions/Docugen-Microagents/api/mcp_client/github_mcp_client.py b/sample_solutions/Docugen-Microagents/api/mcp_client/github_mcp_client.py new file mode 100644 index 00000000..cf235d3f --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/mcp_client/github_mcp_client.py @@ -0,0 +1,131 @@ +""" +GitHub MCP Client - Connects to official GitHub MCP server +Uses MCP protocol (stdio) to communicate with Docker container +""" + +import os +import asyncio +import logging +from typing import Dict, Any, List, Optional +from contextlib import asynccontextmanager + +from mcp import ClientSession, StdioServerParameters +from mcp.client.stdio import stdio_client + +logger = logging.getLogger(__name__) + + +class GitHubMCPClient: + """Client for GitHub MCP Server""" + + def __init__(self, github_token: str): + """ + Initialize GitHub MCP client + + Args: + github_token: GitHub Personal Access Token + """ + self.github_token = github_token + self.session: Optional[ClientSession] = None + self._client_context = None + + @asynccontextmanager + async def connect(self): + """ + Connect to GitHub MCP server running in Docker + + Yields: + ClientSession for making tool calls + """ + # Define server parameters - run GitHub MCP server via Docker + server_params = StdioServerParameters( + command="docker", + args=[ + "run", + "-i", + "--rm", + "-e", f"GITHUB_PERSONAL_ACCESS_TOKEN={self.github_token}", + "ghcr.io/github/github-mcp-server:latest" + ], + env=None + ) + + logger.info("Starting GitHub MCP server via Docker...") + + async with stdio_client(server_params) as (read, write): + async with 
ClientSession(read, write) as session: + self.session = session + + # Initialize session + await session.initialize() + + # List available tools + tools = await session.list_tools() + logger.info(f"Connected to GitHub MCP server. Available tools: {[t.name for t in tools.tools]}") + + yield session + + logger.info("Disconnected from GitHub MCP server") + self.session = None + + async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Any: + """ + Call a tool on the GitHub MCP server + + Args: + tool_name: Name of the tool to call + arguments: Tool arguments + + Returns: + Tool result + """ + if not self.session: + raise RuntimeError("Not connected to MCP server. Use 'async with client.connect():'") + + logger.debug(f"Calling MCP tool: {tool_name} with args: {arguments}") + + result = await self.session.call_tool(tool_name, arguments=arguments) + + logger.debug(f"MCP tool {tool_name} result: {result}") + + return result + + async def list_available_tools(self) -> List[str]: + """ + Get list of available tools from GitHub MCP server + + Returns: + List of tool names + """ + if not self.session: + raise RuntimeError("Not connected to MCP server") + + tools = await self.session.list_tools() + return [tool.name for tool in tools.tools] + + +# Singleton instance +_github_mcp_client: Optional[GitHubMCPClient] = None + + +def get_github_mcp_client(github_token: Optional[str] = None) -> GitHubMCPClient: + """ + Get or create GitHub MCP client instance + + Args: + github_token: GitHub PAT (uses env var if not provided) + + Returns: + GitHubMCPClient instance + """ + global _github_mcp_client + + if _github_mcp_client is None: + token = github_token or os.getenv("GITHUB_TOKEN") + if not token: + raise ValueError("GitHub token required. 
Set GITHUB_TOKEN env var or pass token parameter.") + + _github_mcp_client = GitHubMCPClient(token) + logger.info("GitHub MCP client created") + + return _github_mcp_client diff --git a/sample_solutions/Docugen-Microagents/api/models/__init__.py b/sample_solutions/Docugen-Microagents/api/models/__init__.py new file mode 100644 index 00000000..84e35905 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/models/__init__.py @@ -0,0 +1,28 @@ +""" +Data models for DocuGen AI +""" + +from .state import DocGenState +from .schemas import ( + GenerateDocsRequest, + GenerateDocsResponse, + JobStatusResponse, + ProjectSelectionRequest, + ProjectSelectionResponse, + AgentLog, + LogType +) +from .log_manager import LogManager, get_log_manager + +__all__ = [ + "DocGenState", + "GenerateDocsRequest", + "GenerateDocsResponse", + "JobStatusResponse", + "ProjectSelectionRequest", + "ProjectSelectionResponse", + "AgentLog", + "LogType", + "LogManager", + "get_log_manager" +] diff --git a/sample_solutions/Docugen-Microagents/api/models/evidence.py b/sample_solutions/Docugen-Microagents/api/models/evidence.py new file mode 100644 index 00000000..b8818d10 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/models/evidence.py @@ -0,0 +1,126 @@ +""" +Evidence Packet - Central Evidence Store for README Generation + +This module defines the schema for storing all repository evidence +with provenance tracking to prevent hallucinations. + +Key Principles: +- Single source of truth for all README content +- Every evidence item includes source_files for traceability +- Deterministic derivation (no LLM-generated values) +- Supports both file-based and structured agent outputs +""" + +from dataclasses import dataclass, field +from typing import List, Dict, Any + + +@dataclass +class EvidenceItem: + """Single piece of evidence with provenance.""" + category: str # "dependency", "config", "route", "tool", etc. 
+ key: str # Identifier (e.g., "fastapi", "PORT", "/upload") + value: str # The evidence value + source_files: List[str] = field(default_factory=list) # Where it was found + confidence: str = "high" # "high", "medium", "low" + + +@dataclass +class EvidencePacket: + """ + Central evidence store for README generation. + + Contains all factual information extracted from repository + with provenance tracking. + """ + + # Repository metadata (deterministic) + repo_name: str = "" + repo_path: str = "" + + # Language detection + languages: Dict[str, int] = field(default_factory=dict) # {language: file_count} + + # Dependencies (from package files) + python_deps: List[str] = field(default_factory=list) # From requirements.txt + node_deps: List[str] = field(default_factory=list) # From package.json + + # Technology flags + has_docker: bool = False + has_frontend: bool = False + has_backend: bool = False + + # Configuration + env_files: List[str] = field(default_factory=list) # [".env.example", ".env"] + env_vars: Dict[str, str] = field(default_factory=dict) # {VAR: description} + + # Docker evidence + docker_files: List[str] = field(default_factory=list) # ["Dockerfile", "docker-compose.yml"] + + # API endpoints (if backend detected) + api_endpoints: List[Dict[str, str]] = field(default_factory=list) # [{method, path, file}] + + # Entry points + entry_points: List[str] = field(default_factory=list) # ["server.py", "main.py"] + + # Frontend info + frontend_framework: str = "" # "React", "Vue", "Angular", "" + frontend_files: List[str] = field(default_factory=list) + + # Raw evidence items + items: List[EvidenceItem] = field(default_factory=list) + + def add_evidence(self, item: EvidenceItem) -> None: + """Add evidence item with deduplication.""" + # Avoid duplicate evidence + for existing in self.items: + if existing.key == item.key and existing.category == item.category: + return + self.items.append(item) + + def has_evidence_for(self, key: str, category: str = None) -> 
bool: + """Check if evidence exists for a key.""" + for item in self.items: + if item.key == key: + if category is None or item.category == category: + return True + return False + + def get_evidence(self, key: str, category: str = None) -> List[EvidenceItem]: + """Get all evidence items matching key and optional category.""" + results = [] + for item in self.items: + if item.key == key: + if category is None or item.category == category: + results.append(item) + return results + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for JSON serialization.""" + return { + "repo_name": self.repo_name, + "repo_path": self.repo_path, + "languages": self.languages, + "python_deps": self.python_deps, + "node_deps": self.node_deps, + "has_docker": self.has_docker, + "has_frontend": self.has_frontend, + "has_backend": self.has_backend, + "env_files": self.env_files, + "env_vars": self.env_vars, + "docker_files": self.docker_files, + "api_endpoints": self.api_endpoints, + "entry_points": self.entry_points, + "frontend_framework": self.frontend_framework, + "frontend_files": self.frontend_files, + "items": [ + { + "category": item.category, + "key": item.key, + "value": item.value, + "source_files": item.source_files, + "confidence": item.confidence + } + for item in self.items + ] + } diff --git a/sample_solutions/Docugen-Microagents/api/models/log_manager.py b/sample_solutions/Docugen-Microagents/api/models/log_manager.py new file mode 100644 index 00000000..422b52d3 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/models/log_manager.py @@ -0,0 +1,209 @@ +""" +Log Manager for storing and streaming agent activity logs +Supports Server-Sent Events (SSE) for real-time streaming +""" + +import asyncio +import threading +import logging +from collections import defaultdict +from typing import Dict, List, Optional +from datetime import datetime +from .schemas import AgentLog, LogType + +logger = logging.getLogger(__name__) + + +class LogManager: + """ + 
Manages agent activity logs with SSE streaming support. + Stores logs per job_id and provides async iteration for SSE. + """ + + def __init__(self, max_logs_per_job: int = 1000): + """ + Initialize log manager + + Args: + max_logs_per_job: Maximum logs to keep per job (circular buffer) + """ + self.max_logs_per_job = max_logs_per_job + self._logs: Dict[str, List[AgentLog]] = defaultdict(list) + self._subscribers: Dict[str, List[asyncio.Queue]] = defaultdict(list) + self._lock = threading.Lock() + + async def add_log(self, log: AgentLog) -> None: + """ + Add a log entry and notify all subscribers + + Args: + log: Agent log entry + """ + with self._lock: + # Add to storage + job_logs = self._logs[log.job_id] + job_logs.append(log) + + # Maintain circular buffer + if len(job_logs) > self.max_logs_per_job: + job_logs.pop(0) + + # Notify all subscribers for this job + dead_queues = [] + for queue in self._subscribers[log.job_id]: + try: + queue.put_nowait(log) + except asyncio.QueueFull: + dead_queues.append(queue) + logger.warning(f"Queue full for job {log.job_id}") + + # Clean up dead queues + for queue in dead_queues: + self._subscribers[log.job_id].remove(queue) + + # Log to console for debugging + logger.info(f"[{log.job_id}] {log.log_type.value}: {log.message}") + + def log_sync( + self, + job_id: str, + log_type: LogType, + message: str, + agent_name: Optional[str] = None, + metadata: Optional[Dict] = None + ) -> None: + """ + Synchronous logging helper (for use in sync code) + + Args: + job_id: Job identifier + log_type: Type of log + message: Log message + agent_name: Name of the agent (if applicable) + metadata: Additional metadata + """ + log = AgentLog( + job_id=job_id, + log_type=log_type, + message=message, + agent_name=agent_name, + metadata=metadata + ) + # Schedule the async add_log in the event loop + try: + loop = asyncio.get_event_loop() + if loop.is_running(): + asyncio.create_task(self.add_log(log)) + else: + 
loop.run_until_complete(self.add_log(log)) + except RuntimeError: + # No event loop, just log to console + logger.info(f"[{job_id}] {log_type.value}: {message}") + + async def log_async( + self, + job_id: str, + log_type: LogType, + message: str, + agent_name: Optional[str] = None, + metadata: Optional[Dict] = None + ) -> None: + """ + Asynchronous logging helper + + Args: + job_id: Job identifier + log_type: Type of log + message: Log message + agent_name: Name of the agent (if applicable) + metadata: Additional metadata + """ + log = AgentLog( + job_id=job_id, + log_type=log_type, + message=message, + agent_name=agent_name, + metadata=metadata + ) + await self.add_log(log) + + def get_logs(self, job_id: str, limit: Optional[int] = None) -> List[AgentLog]: + """ + Get all logs for a job + + Args: + job_id: Job identifier + limit: Maximum number of logs to return (most recent) + + Returns: + List of logs + """ + logs = self._logs.get(job_id, []) + if limit: + return logs[-limit:] + return logs + + async def subscribe(self, job_id: str) -> asyncio.Queue: + """ + Subscribe to real-time logs for a job (for SSE) + + Args: + job_id: Job identifier + + Returns: + Queue that will receive new logs + """ + queue: asyncio.Queue = asyncio.Queue(maxsize=100) + with self._lock: + self._subscribers[job_id].append(queue) + + # Send existing logs immediately + for log in self._logs.get(job_id, []): + try: + queue.put_nowait(log) + except asyncio.QueueFull: + logger.warning(f"Initial logs queue full for job {job_id}") + break + + return queue + + async def unsubscribe(self, job_id: str, queue: asyncio.Queue) -> None: + """ + Unsubscribe from logs + + Args: + job_id: Job identifier + queue: Queue to remove + """ + with self._lock: + if queue in self._subscribers[job_id]: + self._subscribers[job_id].remove(queue) + + def clear_job_logs(self, job_id: str) -> None: + """ + Clear all logs for a job + + Args: + job_id: Job identifier + """ + if job_id in self._logs: + del 
self._logs[job_id] + if job_id in self._subscribers: + del self._subscribers[job_id] + + +# Global log manager instance +_log_manager: Optional[LogManager] = None + + +def get_log_manager() -> LogManager: + """ + Get or create the global log manager instance + + Returns: + LogManager instance + """ + global _log_manager + if _log_manager is None: + _log_manager = LogManager() + return _log_manager diff --git a/sample_solutions/Docugen-Microagents/api/models/schemas.py b/sample_solutions/Docugen-Microagents/api/models/schemas.py new file mode 100644 index 00000000..7668e567 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/models/schemas.py @@ -0,0 +1,75 @@ +""" +Pydantic schemas for API requests and responses +""" + +from datetime import datetime +from enum import Enum +from typing import Optional, Dict, Any, List +from pydantic import BaseModel, HttpUrl, Field + + +class GenerateDocsRequest(BaseModel): + """Request to generate documentation""" + repo_url: HttpUrl = Field(..., description="GitHub repository URL") + + +class GenerateDocsResponse(BaseModel): + """Response with job ID for tracking""" + job_id: str + status: str + message: str + + +class JobStatusResponse(BaseModel): + """Job status information""" + job_id: str + status: str + progress_percentage: int + current_agent: Optional[str] = None + error_message: Optional[str] = None + readme_preview: Optional[str] = None + awaiting_project_selection: Optional[bool] = False + detected_projects: Optional[List[Dict[str, Any]]] = None + skipped_folders: Optional[List[Dict[str, Any]]] = None + + +class ProjectSelectionRequest(BaseModel): + """User's project selection""" + selected_project_paths: List[str] = Field(..., description="List of selected project paths") + + +class ProjectSelectionResponse(BaseModel): + """Response after project selection""" + status: str + message: str + + +class LogType(str, Enum): + """Types of log entries""" + AGENT_START = "agent_start" + AGENT_THINKING = "agent_thinking" 
    AGENT_ACTION = "agent_action"  # Agent calling a tool (ReAct Action step)
    AGENT_OBSERVATION = "agent_observation"  # Agent receiving tool result (ReAct Observation step)
    AGENT_TOOL_USE = "agent_tool_use"
    AGENT_DECISION = "agent_decision"
    AGENT_COMPLETE = "agent_complete"
    WORKFLOW_PROGRESS = "workflow_progress"
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"
    SUCCESS = "success"


class AgentLog(BaseModel):
    """Agent activity log entry"""
    # One timestamped event produced by an agent; streamed to the UI over SSE.
    timestamp: datetime = Field(default_factory=datetime.now)
    job_id: str
    log_type: LogType
    agent_name: Optional[str] = None
    message: str
    metadata: Optional[Dict[str, Any]] = None

    class Config:
        # NOTE(review): `json_encoders` under `class Config` is Pydantic v1
        # style and is deprecated in pydantic>=2 (which requirements.txt
        # requires) — confirm and consider migrating to `model_config` with a
        # `field_serializer`.
        json_encoders = {
            datetime: lambda v: v.isoformat()
        }
diff --git a/sample_solutions/Docugen-Microagents/api/models/state.py b/sample_solutions/Docugen-Microagents/api/models/state.py
new file mode 100644
index 00000000..b1144781
--- /dev/null
+++ b/sample_solutions/Docugen-Microagents/api/models/state.py
@@ -0,0 +1,98 @@
"""
LangGraph State Definition for DocuGen Micro-Agents
Enhanced with agent messaging, evidence packs, and metrics tracking
"""

from typing import TypedDict, Dict, List, Any, Optional
from models.evidence import EvidencePacket


class DocGenState(TypedDict):
    """
    Shared state for the micro-agent documentation generation workflow.
    Each agent reads from and writes to this state.
    """
    # NOTE(review): most keys are Optional because agents fill them in
    # incrementally, but TypedDict is total by default, so a constructor must
    # still supply every key — confirm whether `total=False` was intended.

    # Job Information
    job_id: str
    repo_url: str

    # Repository Data (populated by clone step)
    repo_path: Optional[str]
    repo_name: Optional[str]
    default_branch: Optional[str]
    is_subfolder_target: Optional[bool]

    # Project Detection (populated by project detector)
    is_monorepo: Optional[bool]
    detected_projects: Optional[List[Dict[str, Any]]]
    skipped_folders: Optional[List[Dict[str, Any]]]
    selected_projects: Optional[List[str]]
    awaiting_project_selection: Optional[bool]

    # Evidence-Based Architecture
    evidence_packet: Optional[EvidencePacket]  # Central evidence store

    # Analysis Agent Outputs
    file_structure: Optional[str]
    languages: Optional[Dict[str, int]]
    key_files: Optional[List[str]]
    code_summary: Optional[str]

    # Dependency Analysis (NEW - from DependencyAnalyzer)
    dependency_report: Optional[Dict[str, Any]]  # Full dependency analysis
    security_warnings: Optional[List[Dict[str, str]]]  # Vulnerability warnings

    # Environment Config (NEW - from EnvConfigParser)
    env_variables: Optional[List[Dict[str, Any]]]  # Extracted env vars
    config_files_found: Optional[List[str]]

    # Code Analysis Agent Outputs
    # API Reference (NEW - from APIReferenceAgent)
    api_endpoints: Optional[List[Dict[str, Any]]]  # Detected endpoints
    api_documentation: Optional[str]

    # Call Graph (NEW - from CallGraphAgent)
    call_graph: Optional[Dict[str, Any]]  # Function call relationships

    # Error Analysis (NEW - from ErrorAnalysisAgent)
    error_handlers: Optional[List[Dict[str, Any]]]  # Exception handlers found
    error_analysis: Optional[Dict[str, Any]]

    # Planning
    project_type: Optional[str]
    documentation_sections: Optional[List[str]]
    section_instructions: Optional[Dict[str, str]]

    # Content Generation
    readme_sections: Optional[Dict[str, str]]

    # Visualization
    mermaid_diagrams: Optional[Dict[str, str]]

    # Quality Assurance
    qa_validation_result: Optional[Dict[str, Any]]  # QA validator output
    qa_score: Optional[int]
    qa_passed: Optional[bool]

    # Final Output
    final_readme: Optional[str]

    # Agent-to-Agent Communication (NEW)
    agent_messages: Optional[List[Dict[str, Any]]]  # Messages between agents
    # Format: [{"from": "APIReference", "to": "Mermaid", "type": "data", "content": {...}}]

    # Performance Metrics (NEW)
    agent_metrics: Optional[Dict[str, Dict[str, Any]]]  # Per-agent metrics
    blast_radius_report: Optional[Dict[str, Any]]  # Blast radius calculation
    total_tokens_used: Optional[int]
    total_duration_ms: Optional[float]

    # Error Handling
    error: Optional[str]
    retry_count: int
    failed_agents: Optional[List[str]]  # List of agents that failed

    # Metadata
    workflow_status: str  # pending, in_progress, completed, failed
    current_agent: Optional[str]
diff --git a/sample_solutions/Docugen-Microagents/api/requirements.txt b/sample_solutions/Docugen-Microagents/api/requirements.txt
new file mode 100644
index 00000000..e58f151c
--- /dev/null
+++ b/sample_solutions/Docugen-Microagents/api/requirements.txt
@@ -0,0 +1,38 @@
# Web Framework
fastapi>=0.109.0
uvicorn[standard]>=0.27.0
python-multipart>=0.0.22
sse-starlette>=1.6.5

# Environment & Config
python-dotenv>=1.0.0
pydantic>=2.5.0
pydantic-settings>=2.1.0

# LangChain & LangGraph (Agentic Framework)
langchain>=0.1.0
langchain-openai>=0.0.5
langgraph>=0.0.20
langgraph-checkpoint-sqlite>=1.0.0
langchain-community>=0.0.20

# LLM APIs
openai>=1.10.0
httpx>=0.24.0
requests>=2.31.0

# Git Operations
gitpython>=3.1.40

# GitHub Integration
PyGithub>=2.1.1

# MCP (Model Context Protocol)
mcp>=0.9.0

# Code Analysis
astroid>=3.0.0

# Utilities
aiofiles>=23.0.0
aiosqlite>=0.20.0
diff --git a/sample_solutions/Docugen-Microagents/api/server.py b/sample_solutions/Docugen-Microagents/api/server.py
new file mode 100644
index 00000000..600228a1
--- /dev/null
+++ b/sample_solutions/Docugen-Microagents/api/server.py
@@ -0,0 +1,563 @@
"""
FastAPI Server for DocuGen AI
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook bound to the FastAPI application lifecycle."""
    logger.info("DocuGen AI starting up...")
    logger.info(f"Inference endpoint: {settings.INFERENCE_API_ENDPOINT}")

    # PR creation via the GitHub MCP server needs a token; surface its
    # absence at startup instead of failing later on the PR endpoint.
    if settings.GITHUB_TOKEN:
        logger.info("GitHub token configured - MCP PR creation enabled")
    else:
        logger.warning("GITHUB_TOKEN not configured - PR creation will be disabled")

    yield  # application handles requests while suspended here

    logger.info("DocuGen AI shutting down...")
@app.get("/")
def root():
    """Service banner: application name, version, and running status."""
    banner = {
        "name": settings.APP_TITLE,
        "version": settings.APP_VERSION,
        "status": "running"
    }
    return banner


@app.get("/health")
def health_check():
    """Liveness probe; also reports the configured inference endpoint."""
    report = {
        "status": "healthy",
        "inference_endpoint": settings.INFERENCE_API_ENDPOINT
    }
    return report
async def run_workflow(job_id: str, repo_url: str):
    """
    Execute the documentation workflow for one job as a background task.

    Streams LangGraph events, mirroring each node's state into the in-memory
    ``job_storage`` entry so ``/api/status/{job_id}`` can report progress.
    Returns early (leaving the checkpoint paused) when a node sets
    ``awaiting_project_selection``; ``resume_workflow`` picks it up later.

    Args:
        job_id: UUID created by /api/generate-docs; also the LangGraph thread id.
        repo_url: Repository URL the workflow should document.
    """
    try:
        workflow = await get_workflow()

        # Initial state: every workflow key pre-seeded so downstream nodes
        # can rely on the keys existing even before they are populated.
        initial_state = {
            "job_id": job_id,
            "repo_url": repo_url,
            "repo_path": None,
            "repo_name": None,
            "default_branch": None,
            "is_monorepo": None,
            "detected_projects": None,
            "skipped_folders": None,  # List of skipped folders with reasons
            "selected_projects": None,
            "awaiting_project_selection": None,
            "is_subfolder_target": None,  # NEW: Track if user provided subfolder URL
            "file_structure": None,
            "languages": None,
            "dependencies": None,
            "key_files": None,
            "code_summary": None,
            "project_type": None,
            "documentation_sections": None,
            "section_instructions": None,
            "readme_sections": None,
            "mermaid_diagrams": None,
            "final_readme": None,
            "error": None,
            "retry_count": 0,
            "workflow_status": "pending",
            "current_agent": None
        }

        # Run workflow (will interrupt after each agent); thread_id ties the
        # LangGraph checkpoint to this job so it can be resumed later.
        config = {"configurable": {"thread_id": job_id}}

        # Execute workflow - it will pause at interrupt points
        async for event in workflow.graph.astream(initial_state, config):
            logger.info(f"Workflow event: {event}")

            # Update job storage with latest state from each emitted node
            for node_name, node_state in event.items():
                if isinstance(node_state, dict):
                    # Always update status from workflow_status
                    workflow_status = node_state.get("workflow_status", "in_progress")
                    job_storage[job_id]["status"] = workflow_status
                    job_storage[job_id]["current_agent"] = node_state.get("current_agent")

                    # Store repo_name for PR creation
                    if node_state.get("repo_name"):
                        job_storage[job_id]["repo_name"] = node_state["repo_name"]

                    # CRITICAL: Update final_readme whenever present
                    if node_state.get("final_readme"):
                        job_storage[job_id]["final_readme"] = node_state["final_readme"]
                        logger.info(f"✅ Updated job_storage[{job_id}]['final_readme'] = {len(node_state['final_readme'])} chars (workflow_status={workflow_status})")

                    if node_state.get("error"):
                        job_storage[job_id]["error"] = node_state["error"]

                    # Handle project selection: persist the choices the UI
                    # needs, then bail out — the checkpoint stays paused until
                    # the user answers via /api/select-projects.
                    if node_state.get("awaiting_project_selection"):
                        job_storage[job_id]["awaiting_project_selection"] = True
                        job_storage[job_id]["detected_projects"] = node_state.get("detected_projects")
                        job_storage[job_id]["skipped_folders"] = node_state.get("skipped_folders", [])

                        # DEBUG: Log what we're storing
                        logger.info(f"DEBUG server.py: Storing skipped_folders: {job_storage[job_id]['skipped_folders']}")
                        logger.info(f"DEBUG server.py: Number of skipped folders: {len(job_storage[job_id]['skipped_folders'])}")

                        logger.info(f"Workflow paused for project selection: {job_id}")
                        return  # Stop workflow execution until user selects projects

        logger.info(f"Workflow completed for job: {job_id}")

    except Exception as e:
        # Any failure marks the job failed so pollers see a terminal state.
        logger.error(f"Workflow failed for job {job_id}: {e}", exc_info=True)
        job_storage[job_id]["status"] = "failed"
        job_storage[job_id]["error"] = str(e)
@app.get("/api/logs/{job_id}")
async def stream_logs(job_id: str):
    """
    Server-Sent Events (SSE) endpoint streaming agent activity logs.

    Client usage:
    ```javascript
    const eventSource = new EventSource(`/api/logs/${jobId}`)
    eventSource.onmessage = (event) => {
        const log = JSON.parse(event.data)
        console.log(log)
    }
    ```
    """
    log_manager = get_log_manager()

    async def generate_events():
        """Pump queued log records for this job out as SSE messages."""
        subscription = await log_manager.subscribe(job_id)

        try:
            while True:
                try:
                    record = await asyncio.wait_for(subscription.get(), timeout=30.0)
                except asyncio.TimeoutError:
                    # No activity for 30s: emit a keep-alive so idle
                    # connections are not dropped by proxies.
                    yield {
                        "data": json.dumps({"type": "keepalive"})
                    }
                    continue

                payload = {
                    "timestamp": record.timestamp.isoformat(),
                    "log_type": record.log_type.value,
                    "agent_name": record.agent_name,
                    "message": record.message,
                    "metadata": record.metadata
                }

                yield {
                    "data": json.dumps(payload)
                }

        except asyncio.CancelledError:
            logger.info(f"SSE stream cancelled for job: {job_id}")
        finally:
            # Always release the subscription, even on disconnect.
            await log_manager.unsubscribe(job_id, subscription)

    return EventSourceResponse(generate_events())
@app.post("/api/reject/{job_id}")
async def reject_agent_output(job_id: str, feedback: str = ""):
    """
    Reject the current agent's output so the agent can retry with feedback.

    Args:
        job_id: Identifier returned by /api/generate-docs.
        feedback: Optional reviewer feedback for the retry. Currently only
            acknowledged — the retry wiring is not implemented yet.

    Raises:
        HTTPException: 404 if the job is unknown, 500 on checkpoint errors.
    """
    if job_id not in job_storage:
        raise HTTPException(status_code=404, detail="Job not found")

    # Resolve the workflow checkpoint for this job's thread.
    workflow = await get_workflow()
    config = {"configurable": {"thread_id": job_id}}

    try:
        # Touch the checkpoint so a missing/corrupt thread surfaces as a 500.
        # (Fix: the returned state used to be bound to an unused local.)
        workflow.graph.get_state(config)

        # TODO: push `feedback` into the graph state and re-run the agent.
        # (Implementation depends on how retries should be handled.)

        return {"status": "rejected", "message": "Agent will retry with feedback"}

    except Exception as e:
        logger.error(f"Failed to reject and retry: {e}")
        raise HTTPException(status_code=500, detail=str(e))
async def resume_workflow(job_id: str, selected_projects: List[str]):
    """
    Resume a paused workflow after the user has chosen project(s).

    Writes the selection into the LangGraph checkpoint, then continues
    streaming from where ``run_workflow`` left off, mirroring node state
    into ``job_storage`` exactly as the initial run does.

    Args:
        job_id: Job/thread identifier of the paused workflow.
        selected_projects: Project paths the user selected in the UI.
    """
    try:
        workflow = await get_workflow()
        config = {"configurable": {"thread_id": job_id}}

        # Log selection so it shows up in the SSE activity stream
        log_manager = get_log_manager()
        await log_manager.log_async(
            job_id=job_id,
            log_type=LogType.WORKFLOW_PROGRESS,
            message=f"✅ User selected {len(selected_projects)} project(s). Continuing workflow..."
        )

        # DEBUG: Log the selected projects
        logger.info(f"DEBUG: Updating state with selected_projects = {selected_projects}")

        # Update the state directly in the graph (clears the pause flag so
        # the interrupted node can proceed on resume)
        await workflow.graph.aupdate_state(
            config,
            {
                "selected_projects": selected_projects,
                "awaiting_project_selection": False
            }
        )

        # Resume workflow from updated state; astream(None, ...) continues
        # from the existing checkpoint instead of starting over
        async for event in workflow.graph.astream(None, config):
            logger.info(f"Resumed workflow event: {event}")

            # Update job storage (same mirroring as run_workflow)
            for node_name, node_state in event.items():
                if isinstance(node_state, dict):
                    workflow_status = node_state.get("workflow_status", "in_progress")
                    job_storage[job_id]["status"] = workflow_status
                    job_storage[job_id]["current_agent"] = node_state.get("current_agent")

                    # Store repo_name for PR creation
                    if node_state.get("repo_name"):
                        job_storage[job_id]["repo_name"] = node_state["repo_name"]

                    # CRITICAL: Update final_readme whenever present
                    if node_state.get("final_readme"):
                        job_storage[job_id]["final_readme"] = node_state["final_readme"]
                        logger.info(f"✅ [RESUME] Updated job_storage[{job_id}]['final_readme'] = {len(node_state['final_readme'])} chars (workflow_status={workflow_status})")

                    if node_state.get("error"):
                        job_storage[job_id]["error"] = node_state["error"]

        logger.info(f"Workflow resumed and completed for job: {job_id}")
        logger.info(f"✅ Final job_storage[{job_id}] has final_readme: {'final_readme' in job_storage[job_id]}")

    except Exception as e:
        # Mark the job failed so pollers see a terminal state.
        logger.error(f"Failed to resume workflow: {e}", exc_info=True)
        job_storage[job_id]["status"] = "failed"
        job_storage[job_id]["error"] = str(e)
@app.post("/api/create-pr/{job_id}")
async def create_pull_request(job_id: str):
    """
    Create a GitHub Pull Request for the generated README via the MCP protocol
    (connects to the official GitHub MCP server over stdio).

    Preconditions (each rejected with an HTTPException):
      - job exists (404), README generated (400), GITHUB_TOKEN set (503),
        repo URL is a github.com URL (400).

    Returns:
        Dict with status/message, plus pr_url and branch_name on success.

    Raises:
        HTTPException: See preconditions; 500 on unexpected MCP failures.
    """
    if job_id not in job_storage:
        raise HTTPException(status_code=404, detail="Job not found")

    job = job_storage[job_id]

    if not job.get("final_readme"):
        raise HTTPException(status_code=400, detail="README not ready yet")

    # Check if GitHub token is configured — PR creation is impossible without it
    github_token = settings.GITHUB_TOKEN
    if not github_token:
        raise HTTPException(
            status_code=503,
            detail="GitHub integration not configured. Please set GITHUB_TOKEN in environment variables."
        )

    try:
        # Extract repo info from URL
        repo_url = job["repo_url"]

        # Parse GitHub repo from URL
        # Expected format: https://github.com/owner/repo or https://github.com/owner/repo/tree/branch/...
        if "github.com" not in repo_url:
            raise HTTPException(status_code=400, detail="Only GitHub repositories are supported for PR creation")

        # Parse the full GitHub URL to extract owner, repo, and branch
        from services.git_service import parse_github_url
        parsed_url = parse_github_url(repo_url)

        repo_full_name = f"{parsed_url['owner']}/{parsed_url['repo']}"
        base_branch = parsed_url['branch']  # Extract branch from URL (e.g., "dev")

        # Extract project name from workflow state (already set correctly)
        # Use the actual repo_name from job storage which comes from workflow
        project_name = job.get("repo_name", parsed_url['display_name'])

        # Call MCP-based PR creation
        result = await create_pr_with_mcp(
            repo_full_name=repo_full_name,
            readme_content=job["final_readme"],
            project_name=project_name,
            base_branch=base_branch,
            github_token=github_token,
            job_id=job_id
        )

        if result["success"]:
            return {
                "status": "success",
                "message": "Pull request created successfully via MCP",
                "pr_url": result["pr_url"],
                "branch_name": result["branch_name"]
            }
        else:
            # MCP call completed but reported failure; surface its output
            return {
                "status": "error",
                "message": f"Failed to create PR via MCP: {result['output']}",
                "details": result["output"]
            }

    except HTTPException:
        # Re-raise our own precondition errors untouched (don't wrap as 500)
        raise
    except Exception as e:
        logger.error(f"Failed to create PR via MCP: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to create PR via MCP: {str(e)}")
+__all__ = [ + "get_llm", + "GitService" +] diff --git a/sample_solutions/Docugen-Microagents/api/services/git_service.py b/sample_solutions/Docugen-Microagents/api/services/git_service.py new file mode 100644 index 00000000..69abd381 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/services/git_service.py @@ -0,0 +1,288 @@ +""" +Git Service - Handles repository cloning and cleanup +""" + +import os +import shutil +import logging +import re +import requests +from typing import Tuple, Dict, Optional, Callable +from git import Repo, GitCommandError, RemoteProgress +from config import settings +import uuid + +logger = logging.getLogger(__name__) + + +def parse_github_url(url: str) -> Dict[str, Optional[str]]: + """ + Parse GitHub URL to extract components + + Supports: + - Base repo: https://github.com/owner/repo + - Base repo with .git: https://github.com/owner/repo.git + - Tree URL: https://github.com/owner/repo/tree/branch + - Subfolder URL: https://github.com/owner/repo/tree/branch/path/to/folder + + Args: + url: GitHub URL + + Returns: + Dict with keys: owner, repo, branch, subfolder, clone_url, is_subfolder + + Raises: + ValueError: If URL is invalid + """ + # Remove trailing slashes + url = url.rstrip('/') + + # Pattern: https://github.com/owner/repo[.git][/tree/branch[/path]] + pattern = r'^https?://github\.com/([^/]+)/([^/]+?)(?:\.git)?(?:/tree/([^/]+)(?:/(.*))?)?$' + + match = re.match(pattern, url) + + if not match: + raise ValueError( + f"Invalid GitHub URL format. 
Expected: https://github.com/owner/repo[/tree/branch[/path]]\n" + f"Got: {url}" + ) + + owner, repo, branch, subfolder = match.groups() + + # Clean repo name (remove .git if present) + repo = repo.replace('.git', '') + + result = { + "owner": owner, + "repo": repo, + "branch": branch or "main", # Default to main if not specified + "subfolder": subfolder if subfolder else None, + "clone_url": f"https://github.com/{owner}/{repo}.git", + "is_subfolder": bool(subfolder), + "display_name": subfolder.split('/')[-1] if subfolder else repo + } + + logger.info(f"Parsed URL: {url} -> {result}") + + return result + + +class CloneProgress(RemoteProgress): + """Progress reporter for git clone operations""" + + def __init__(self, callback: Optional[Callable[[str], None]] = None): + super().__init__() + self.callback = callback + self._last_message = "" + + def update(self, op_code, cur_count, max_count=None, message=''): + """Called by GitPython during clone operation""" + if self.callback: + # Parse operation + if op_code & self.COUNTING: + msg = f"📊 Counting objects: {cur_count}" + elif op_code & self.COMPRESSING: + msg = f"🗜️ Compressing objects: {cur_count}/{max_count}" if max_count else f"🗜️ Compressing objects: {cur_count}" + elif op_code & self.RECEIVING: + percentage = int((cur_count / max_count * 100)) if max_count else 0 + msg = f"⬇️ Receiving objects: {cur_count}/{max_count} ({percentage}%)" + elif op_code & self.RESOLVING: + percentage = int((cur_count / max_count * 100)) if max_count else 0 + msg = f"🔧 Resolving deltas: {cur_count}/{max_count} ({percentage}%)" + else: + msg = f"⏳ Cloning: {cur_count}" + + # Only report if message changed (avoid spam) + if msg != self._last_message: + self._last_message = msg + self.callback(msg) + + +class GitService: + """Service for Git operations""" + + def __init__(self): + self.temp_dir = settings.TEMP_REPO_DIR + os.makedirs(self.temp_dir, exist_ok=True) + + def clone_repository( + self, + 
repo_url: str, + branch: Optional[str] = None, + progress_callback: Optional[Callable[[str], None]] = None + ) -> Tuple[str, Dict[str, str]]: + """ + Clone a Git repository to a temporary directory + + Args: + repo_url: GitHub repository URL + branch: Optional branch name to checkout after cloning + progress_callback: Optional callback for progress updates + + Returns: + Tuple of (repo_path, metadata_dict) + """ + try: + # Generate unique directory name + repo_id = str(uuid.uuid4()) + repo_path = os.path.join(self.temp_dir, repo_id) + + logger.info(f"Cloning repository: {repo_url}") + + # Check repository size before cloning + try: + # Extract owner and repo name from URL + # Format: https://github.com/owner/repo or https://github.com/owner/repo.git + parts = repo_url.rstrip('/').replace('.git', '').split('/') + if 'github.com' in repo_url and len(parts) >= 2: + owner = parts[-2] + repo_name = parts[-1] + + # Query GitHub API for repo info + api_url = f"https://api.github.com/repos/{owner}/{repo_name}" + headers = {} + if settings.GITHUB_TOKEN: + headers['Authorization'] = f'token {settings.GITHUB_TOKEN}' + + response = requests.get(api_url, headers=headers, timeout=10) + if response.status_code == 200: + repo_data = response.json() + repo_size_kb = repo_data.get('size', 0) # GitHub returns size in KB + repo_size_bytes = repo_size_kb * 1024 + + max_size_gb = settings.MAX_REPO_SIZE / (1024 ** 3) + if repo_size_bytes > settings.MAX_REPO_SIZE: + error_msg = f"Repository size ({repo_size_bytes / (1024 ** 3):.2f}GB) exceeds maximum allowed size ({max_size_gb:.0f}GB). Please try a smaller repository." 
+ logger.error(error_msg) + if progress_callback: + progress_callback(f"❌ {error_msg}") + raise ValueError(error_msg) + else: + logger.info(f"Repository size check passed: {repo_size_bytes / (1024 ** 3):.2f}GB") + if progress_callback: + progress_callback(f"✅ Repository size: {repo_size_bytes / (1024 ** 3):.2f}GB") + except requests.RequestException as e: + # Don't fail if we can't check size, just log warning + logger.warning(f"Could not verify repository size: {e}") + except ValueError: + # Re-raise size limit errors + raise + + if progress_callback: + progress_callback(f"🚀 Starting clone of {repo_url}") + + # Clone the repository with progress tracking + progress = CloneProgress(callback=progress_callback) + + # Add GitHub PAT authentication for private repos + clone_url = repo_url + if settings.GITHUB_TOKEN and 'github.com' in repo_url: + # Inject GitHub PAT into URL: https://TOKEN@github.com/owner/repo.git + clone_url = repo_url.replace('https://github.com/', f'https://{settings.GITHUB_TOKEN}@github.com/') + logger.info(f"Using authenticated clone for private repository") + + repo = Repo.clone_from(clone_url, repo_path, progress=progress) + + if progress_callback: + progress_callback(f"✅ Clone completed successfully") + + # Checkout specified branch if provided + if branch: + try: + current_branch = None + try: + current_branch = repo.active_branch.name + except TypeError: + # Detached HEAD state or no branches yet + pass + + # Only checkout if not already on the requested branch + if current_branch != branch: + # Check if branch exists locally + if branch in [ref.name for ref in repo.heads]: + repo.git.checkout(branch) + else: + # Create local branch tracking remote branch + repo.git.checkout('-b', branch, f'origin/{branch}') + + logger.info(f"Checked out branch: {branch}") + if progress_callback: + progress_callback(f"✅ Checked out branch: {branch}") + except Exception as e: + 
logger.warning(f"Failed to checkout branch {branch}: {e}") + if progress_callback: + progress_callback(f"⚠️ Warning: Could not checkout branch {branch}, using default branch") + + # Extract metadata + metadata = { + "repo_url": repo_url, + "repo_name": repo_url.split('/')[-1].replace('.git', ''), + "default_branch": repo.active_branch.name, + "repo_path": repo_path + } + + logger.info(f"Repository cloned successfully to: {repo_path}") + + return repo_path, metadata + + except GitCommandError as e: + logger.error(f"Git clone failed: {e}") + + # Parse git error and provide user-friendly message + error_message = str(e) + user_friendly_message = None + + # Exit code 128: Repository not found or access denied + if "exit code(128)" in error_message: + if "not found" in error_message.lower() or "could not read" in error_message.lower(): + user_friendly_message = "Repository not found. Please check the URL and verify the repository exists." + elif "authentication" in error_message.lower() or "permission" in error_message.lower(): + user_friendly_message = "Access denied. This repository may be private or you may not have permission to access it." + else: + user_friendly_message = "Repository not found or access denied. Please verify the URL and your permissions." + + # Exit code 403: Rate limit or access forbidden + elif "403" in error_message: + user_friendly_message = "Access forbidden. You may have hit GitHub's rate limit or don't have permission to access this repository." + + # Network errors + elif "connection" in error_message.lower() or "network" in error_message.lower(): + user_friendly_message = "Network error. Please check your internet connection and try again." 
+ + # Use user-friendly message if detected, otherwise use generic message + final_message = user_friendly_message or f"Failed to clone repository: {str(e)}" + + if progress_callback: + progress_callback(f"❌ {final_message}") + + raise ValueError(final_message) + + except Exception as e: + logger.error(f"Unexpected error cloning repository: {e}") + error_message = f"Unexpected error while cloning: {str(e)}" + + if progress_callback: + progress_callback(f"❌ {error_message}") + + raise ValueError(error_message) + + def cleanup_repository(self, repo_path: str) -> bool: + """ + Clean up cloned repository + + Args: + repo_path: Path to the repository + + Returns: + True if successful + """ + try: + if os.path.exists(repo_path): + shutil.rmtree(repo_path, ignore_errors=True) + logger.info(f"Cleaned up repository: {repo_path}") + return True + return False + except Exception as e: + logger.warning(f"Failed to cleanup repository {repo_path}: {e}") + return False diff --git a/sample_solutions/Docugen-Microagents/api/services/llm_service.py b/sample_solutions/Docugen-Microagents/api/services/llm_service.py new file mode 100644 index 00000000..d259727d --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/services/llm_service.py @@ -0,0 +1,44 @@ +""" +LLM Service - Handles LLM initialization for inference endpoints +Supports GenAI Gateway, APISIX Gateway, and any OpenAI-compatible endpoint +""" + +import logging +from typing import Optional +from langchain_openai import ChatOpenAI +from config import settings + +logger = logging.getLogger(__name__) + + +def get_llm(model_name: Optional[str] = None, temperature: float = 0.7) -> ChatOpenAI: + """ + Get LLM instance configured for inference endpoint + + Args: + model_name: Override model name (required - specify which agent model to use) + temperature: Temperature for generation + + Returns: + ChatOpenAI instance configured for the inference endpoint + """ + if model_name is None: + raise ValueError("model_name is 
required. Use settings.CODE_EXPLORER_MODEL, settings.PLANNER_MODEL, etc.") + + if not settings.INFERENCE_API_ENDPOINT or not settings.INFERENCE_API_TOKEN: + raise ValueError("INFERENCE_API_ENDPOINT and INFERENCE_API_TOKEN are required") + + logger.info(f"Initializing LLM with model: {model_name}") + + # Create httpx async client with configurable SSL verification + import httpx + async_http_client = httpx.AsyncClient(verify=settings.VERIFY_SSL) + + return ChatOpenAI( + model=model_name, + temperature=temperature, + openai_api_key=settings.INFERENCE_API_TOKEN, + openai_api_base=f"{settings.INFERENCE_API_ENDPOINT}/v1", + max_tokens=settings.AGENT_MAX_TOKENS, + http_async_client=async_http_client + ) diff --git a/sample_solutions/Docugen-Microagents/api/tools/__init__.py b/sample_solutions/Docugen-Microagents/api/tools/__init__.py new file mode 100644 index 00000000..d7897a09 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/tools/__init__.py @@ -0,0 +1,31 @@ +""" +LangChain Tools for repository analysis +These tools are used by agents to interact with code repositories +""" + +from .repo_tools import ( + list_directory_tool, + read_file_tool, + detect_languages_tool, + extract_dependencies_tool, + analyze_code_structure_tool +) + +from .new_analysis_tools import ( + analyze_call_graph_tool, + find_error_handlers_tool, + analyze_exceptions_tool, + extract_env_vars_tool +) + +__all__ = [ + "list_directory_tool", + "read_file_tool", + "detect_languages_tool", + "extract_dependencies_tool", + "analyze_code_structure_tool", + "analyze_call_graph_tool", + "find_error_handlers_tool", + "analyze_exceptions_tool", + "extract_env_vars_tool" +] diff --git a/sample_solutions/Docugen-Microagents/api/tools/new_analysis_tools.py b/sample_solutions/Docugen-Microagents/api/tools/new_analysis_tools.py new file mode 100644 index 00000000..c7fffbf0 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/tools/new_analysis_tools.py @@ -0,0 +1,443 @@ +""" +New Analysis 
Tools for Micro-Agent Architecture + +Four specialized tools for code analysis: +1. analyze_call_graph - Python function call relationships +2. find_error_handlers - Exception handling patterns +3. analyze_exceptions - Error handling analysis +4. extract_env_vars - Environment variable extraction + +All tools use RepoReadService for cached, bounded file reads +""" + +import os +import ast +import re +import json +import logging +from typing import Dict, List, Set, Optional +from pathlib import Path +from langchain.tools import tool + +logger = logging.getLogger(__name__) + + +@tool +def analyze_call_graph_tool(repo_path: str, entry_file: str) -> str: + """ + Analyze Python function call relationships to build call graph. + + Extracts which functions call which other functions, useful for + understanding code flow and dependencies. + + Args: + repo_path: Absolute path to repository root + entry_file: Relative path to Python file to analyze + + Returns: + JSON string with call graph data + """ + try: + full_path = os.path.normpath(os.path.join(repo_path, entry_file)) + + # Security check + if not full_path.startswith(os.path.normpath(repo_path)): + return json.dumps({"error": "Access denied - path traversal detected"}) + + if not os.path.exists(full_path): + return json.dumps({"error": f"File not found: {entry_file}"}) + + if not entry_file.endswith('.py'): + return json.dumps({"error": "Only Python files supported for call graph analysis"}) + + # Read file (respects RepoReadService caps via shared config) + with open(full_path, 'r', encoding='utf-8', errors='ignore') as f: + content = f.read() + + # Parse AST + try: + tree = ast.parse(content) + except SyntaxError as e: + return json.dumps({ + "error": f"Python syntax error: {str(e)}", + "file": entry_file + }) + + # Extract function definitions and calls + functions = {} + + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef): + func_name = node.name + calls = [] + + # Find function calls within this 
@tool
def find_error_handlers_tool(repo_path: str) -> str:
    """
    Find exception handlers (try/except blocks) across Python files.

    Identifies error handling patterns and which exceptions are caught.

    Args:
        repo_path: Absolute path to repository root

    Returns:
        JSON string with error handler locations and types
    """
    try:
        error_handlers = []
        files_scanned = 0
        max_files = 50  # Limit scan to prevent timeout

        # Walk repository
        for root, dirs, files in os.walk(repo_path):
            # Filter ignored directories (in-place mutation so os.walk
            # does not descend into them)
            dirs[:] = [d for d in dirs if d not in {
                '.git', '__pycache__', 'node_modules', '.venv', 'venv',
                'dist', 'build', 'target', 'coverage'
            }]

            for file in files:
                if not file.endswith('.py'):
                    continue

                if files_scanned >= max_files:
                    break

                file_path = os.path.join(root, file)
                rel_path = os.path.relpath(file_path, repo_path)

                try:
                    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        content = f.read()

                    # Parse AST (SyntaxError is swallowed by the except below,
                    # so unparseable files are simply skipped)
                    tree = ast.parse(content)

                    # Find try/except blocks
                    for node in ast.walk(tree):
                        if isinstance(node, ast.Try):
                            exceptions_caught = []

                            for handler in node.handlers:
                                if handler.type:
                                    if isinstance(handler.type, ast.Name):
                                        exceptions_caught.append(handler.type.id)
                                    elif isinstance(handler.type, ast.Tuple):
                                        # except (A, B): record each caught name
                                        for exc in handler.type.elts:
                                            if isinstance(exc, ast.Name):
                                                exceptions_caught.append(exc.id)
                                else:
                                    exceptions_caught.append("Exception")  # Bare except

                            error_handlers.append({
                                "file": rel_path,
                                "line": node.lineno,
                                "exceptions": exceptions_caught,
                                "has_finally": len(node.finalbody) > 0,
                                "has_else": len(node.orelse) > 0
                            })

                    # NOTE: only successfully parsed files count toward the
                    # max_files cap; skipped files do not consume budget
                    files_scanned += 1

                except Exception as e:
                    logger.debug(f"Skipping {rel_path}: {e}")
                    continue

            # Second cap check so the outer os.walk loop also stops early
            if files_scanned >= max_files:
                break

        # Aggregate statistics: count occurrences of each caught exception type
        exception_types = {}
        for handler in error_handlers:
            for exc in handler["exceptions"]:
                exception_types[exc] = exception_types.get(exc, 0) + 1

        result = {
            "error_handlers": error_handlers[:100],  # Limit output
            "total_handlers": len(error_handlers),
            "files_scanned": files_scanned,
            "exception_types": dict(sorted(exception_types.items(), key=lambda x: x[1], reverse=True))
        }

        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error finding error handlers: {e}")
        return json.dumps({"error": f"Scan failed: {str(e)}"})
@tool
def analyze_exceptions_tool(repo_path: str, file_path: str) -> str:
    """
    Analyze exception handling in a specific Python file.

    Provides detailed analysis of try/except patterns, custom exceptions,
    and error handling quality.

    Args:
        repo_path: Absolute path to repository root
        file_path: Relative path to Python file

    Returns:
        JSON string with exception analysis
    """
    try:
        repo_root = os.path.normpath(repo_path)
        full_path = os.path.normpath(os.path.join(repo_root, file_path))

        # Security check: the resolved path must be inside the repo root. A
        # bare startswith() prefix test would wrongly accept sibling paths
        # such as /repo-other when the root is /repo.
        if full_path != repo_root and not full_path.startswith(repo_root + os.sep):
            return json.dumps({"error": "Access denied"})

        if not os.path.exists(full_path):
            return json.dumps({"error": f"File not found: {file_path}"})

        # Read file
        with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
            content = f.read()

        # Parse AST
        try:
            tree = ast.parse(content)
        except SyntaxError as e:
            return json.dumps({"error": f"Syntax error: {str(e)}"})

        # Find custom exception classes. Heuristic: a base class whose name
        # contains "Exception"; bases referenced via attributes
        # (e.g. errors.BaseError) are not detected.
        custom_exceptions = []
        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef):
                for base in node.bases:
                    if isinstance(base, ast.Name) and 'Exception' in base.id:
                        custom_exceptions.append({
                            "name": node.name,
                            "line": node.lineno
                        })

        # Analyze try/except blocks
        exception_handlers = []
        bare_excepts = []

        for node in ast.walk(tree):
            if isinstance(node, ast.Try):
                handler_info = {
                    "line": node.lineno,
                    "handlers": []
                }

                for handler in node.handlers:
                    if handler.type is None:
                        bare_excepts.append(node.lineno)
                        handler_info["handlers"].append({"type": "bare_except", "warning": "Catches all exceptions"})
                    elif isinstance(handler.type, ast.Name):
                        handler_info["handlers"].append({"type": handler.type.id})
                    elif isinstance(handler.type, ast.Tuple):
                        # except (A, B): collect each simple name
                        types = [exc.id for exc in handler.type.elts if isinstance(exc, ast.Name)]
                        handler_info["handlers"].append({"types": types})

                exception_handlers.append(handler_info)

        # Find raise statements
        raises = []
        for node in ast.walk(tree):
            if isinstance(node, ast.Raise):
                if node.exc:
                    if isinstance(node.exc, ast.Call) and isinstance(node.exc.func, ast.Name):
                        # raise SomeError(...)
                        raises.append({
                            "line": node.lineno,
                            "exception": node.exc.func.id
                        })
                    elif isinstance(node.exc, ast.Name):
                        # raise some_error_instance / raise SomeError
                        raises.append({
                            "line": node.lineno,
                            "exception": node.exc.id
                        })

        result = {
            "file": file_path,
            "custom_exceptions": custom_exceptions,
            "exception_handlers": exception_handlers,
            "bare_excepts_count": len(bare_excepts),
            "bare_except_lines": bare_excepts,
            "raises": raises,
            "quality_notes": []
        }

        # Add quality notes
        if len(bare_excepts) > 0:
            result["quality_notes"].append(f"Found {len(bare_excepts)} bare except clauses (not recommended)")

        if len(custom_exceptions) > 0:
            result["quality_notes"].append(f"Defines {len(custom_exceptions)} custom exception classes")

        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error analyzing exceptions in {file_path}: {e}")
        return json.dumps({"error": f"Analysis failed: {str(e)}"})
@tool
def extract_env_vars_tool(repo_path: str) -> str:
    """
    Extract environment variables from .env example files.

    Finds .env.example, .env.sample, .env.template files and extracts
    variable names (not values, for security).

    Args:
        repo_path: Absolute path to repository root

    Returns:
        JSON string with environment variable names and descriptions
    """
    try:
        env_vars = []
        files_found = []

        # Common env file patterns (checked at the repo root only)
        env_file_patterns = [
            '.env.example',
            '.env.sample',
            '.env.template',
            'env.example',
            'example.env'
        ]

        # Search for env files
        for pattern in env_file_patterns:
            file_path = os.path.join(repo_path, pattern)
            if os.path.exists(file_path):
                files_found.append(pattern)

                try:
                    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        lines = f.readlines()

                    for line_num, line in enumerate(lines, 1):
                        line = line.strip()

                        # Skip comments and empty lines
                        if not line or line.startswith('#'):
                            continue

                        # Parse KEY=value format
                        if '=' in line:
                            key, value = line.split('=', 1)
                            key = key.strip()
                            value = value.strip()

                            # Extract inline comment if present.
                            # FIX: re-strip the value after splitting so
                            # "KEY=x  # note" does not leave trailing spaces
                            # that distort the checks below.
                            comment = ""
                            if '#' in value:
                                value, comment = value.split('#', 1)
                                value = value.strip()
                                comment = comment.strip()

                            # Check if value looks like a placeholder
                            is_placeholder = bool(re.match(r'^(<.*?>|".*?"|\'.*?\'|\$\{.*?\}|your_.*|example_.*)', value, re.IGNORECASE))

                            env_vars.append({
                                "key": key,
                                "file": pattern,
                                "line": line_num,
                                "has_default": len(value) > 0 and not is_placeholder,
                                "description": comment if comment else None,
                                "is_placeholder": is_placeholder
                            })

                except Exception as e:
                    logger.warning(f"Error reading {pattern}: {e}")
                    continue

        # Also check for variables in README or docs
        readme_vars = set()
        for readme_name in ['README.md', 'readme.md', 'README']:
            readme_path = os.path.join(repo_path, readme_name)
            if os.path.exists(readme_path):
                try:
                    with open(readme_path, 'r', encoding='utf-8', errors='ignore') as f:
                        content = f.read()

                    # Heuristic: any ALL_CAPS identifier of length >= 3
                    # counts as a possible env var mention
                    var_pattern = r'\b([A-Z_][A-Z0-9_]{2,})\b'
                    readme_vars.update(re.findall(var_pattern, content))

                except OSError as e:
                    # FIX: was a bare "except: pass". Keep the best-effort
                    # behavior but catch only I/O errors and log the skip.
                    logger.debug(f"Could not read {readme_name}: {e}")

        # Categorize variables by keyword heuristics on the key name
        categories = {
            "database": [],
            "api_keys": [],
            "urls": [],
            "ports": [],
            "auth": [],
            "other": []
        }

        for var in env_vars:
            key_lower = var["key"].lower()
            if any(word in key_lower for word in ['db', 'database', 'postgres', 'mysql', 'mongo']):
                categories["database"].append(var)
            elif any(word in key_lower for word in ['key', 'token', 'secret', 'password']):
                categories["api_keys"].append(var)
            elif any(word in key_lower for word in ['url', 'endpoint', 'host', 'domain']):
                categories["urls"].append(var)
            elif 'port' in key_lower:
                categories["ports"].append(var)
            elif any(word in key_lower for word in ['auth', 'client_id', 'client_secret', 'oauth']):
                categories["auth"].append(var)
            else:
                categories["other"].append(var)

        result = {
            "env_files_found": files_found,
            "total_variables": len(env_vars),
            "variables": env_vars,
            "categorized": categories,
            "readme_mentions": list(readme_vars)[:20]  # Limit to 20
        }

        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error extracting env vars: {e}")
        return json.dumps({"error": f"Extraction failed: {str(e)}"})
@tool
def list_directory_tool(repo_path: str, relative_path: str = ".") -> str:
    """
    List contents of a directory in the repository.
    Shows files and subdirectories with their types.

    Args:
        repo_path: Absolute path to the repository root
        relative_path: Relative path from repo root (default: ".")

    Returns:
        Formatted string listing files and directories
    """
    try:
        repo_root = os.path.normpath(repo_path)
        full_path = os.path.normpath(os.path.join(repo_root, relative_path))

        # Security check: the resolved path must be the root itself or live
        # under it. A bare startswith() prefix test would wrongly accept
        # sibling paths such as /repo2 when the root is /repo.
        if full_path != repo_root and not full_path.startswith(repo_root + os.sep):
            return "Error: Access denied - path traversal detected"

        if not os.path.exists(full_path):
            return f"Error: Directory not found: {relative_path}"

        if not os.path.isdir(full_path):
            return f"Error: Path is not a directory: {relative_path}"

        items = []
        for entry in os.listdir(full_path):
            # Hide VCS/build artifacts and dotfiles from the listing
            if entry in IGNORE_DIRS or entry.startswith('.'):
                continue

            entry_path = os.path.join(full_path, entry)
            item_type = "DIR" if os.path.isdir(entry_path) else "FILE"
            items.append(f"[{item_type}] {entry}")

        if not items:
            return f"Directory '{relative_path}' is empty or contains only ignored files"

        return f"Contents of {relative_path}:\n" + "\n".join(sorted(items))

    except Exception as e:
        logger.error(f"Error listing directory {relative_path}: {e}")
        return f"Error listing directory: {str(e)}"
+ Smart sampling: top + function/class signatures + bottom + + Args: + lines: All file lines + max_lines: Budget for total lines + + Returns: + (selected_lines, was_truncated) + """ + if len(lines) <= max_lines: + return lines, False + + top_n = min(50, max_lines) + bottom_n = min(30, max_lines - top_n) + middle_budget = max(0, max_lines - top_n - bottom_n) + + top = lines[:top_n] + bottom = lines[-bottom_n:] if bottom_n > 0 else [] + + # Extract function/class signatures + sigs = [] + for ln in lines[top_n:len(lines) - bottom_n]: + s = ln.lstrip() + if s.startswith(("def ", "async def ", "class ")): + sigs.append(ln) + if len(sigs) >= middle_budget: + break + + out = top + ["\n... [middle omitted] ...\n"] + sigs + ["\n... [end] ...\n"] + bottom + return out[:max_lines], True + + +def _pattern_window_lines(lines: List[str], max_lines: int) -> tuple[list[str], bool]: + """ + Pattern window sampling: detect high-value patterns, extract ±6 lines around matches + + Patterns detected: + - FastAPI/Flask routes: @app.get, @router.post, APIRouter, include_router + - Error handling: try, except, raise + - Entry points: if __name__ == "__main__", uvicorn.run, def main + + Args: + lines: All file lines + max_lines: Budget for total lines + + Returns: + (selected_lines, was_truncated) + """ + if len(lines) <= max_lines: + return lines, False + + patterns = [ + r"@app\.(get|post|put|delete)\b", + r"@router\.(get|post|put|delete)\b", + r"\bAPIRouter\b", + r"\binclude_router\b", + r"\btry:\b", + r"\bexcept\b", + r"\braise\b", + r'if __name__\s*==\s*[\'"]__main__[\'"]', + r"\buvicorn\.run\b", + r"\bdef main\(", + ] + + top_n = min(40, max_lines) + bottom_n = min(25, max_lines - top_n) + budget = max_lines - top_n - bottom_n + + # Find match line numbers + match_lines: list[int] = [] + for i, ln in enumerate(lines): + for pat in patterns: + if re.search(pat, ln): + match_lines.append(i) + break + + # Fallback to smart if no patterns found + if not match_lines: + logger.debug("No 
def _pattern_window_lines(lines: List[str], max_lines: int) -> tuple[list[str], bool]:
    """
    Pattern window sampling: detect high-value patterns, extract ±6 lines around matches

    Patterns detected:
    - FastAPI/Flask routes: @app.get, @router.post, APIRouter, include_router
    - Error handling: try, except, raise
    - Entry points: if __name__ == "__main__", uvicorn.run, def main

    Args:
        lines: All file lines
        max_lines: Budget for total lines

    Returns:
        (selected_lines, was_truncated)
    """
    # Small files fit entirely within the budget.
    if len(lines) <= max_lines:
        return lines, False

    patterns = [
        r"@app\.(get|post|put|delete)\b",
        r"@router\.(get|post|put|delete)\b",
        r"\bAPIRouter\b",
        r"\binclude_router\b",
        r"\btry:\b",
        r"\bexcept\b",
        r"\braise\b",
        r'if __name__\s*==\s*[\'"]__main__[\'"]',
        r"\buvicorn\.run\b",
        r"\bdef main\(",
    ]

    # Reserve fixed head/tail slices; the remainder is the window budget.
    top_n = min(40, max_lines)
    bottom_n = min(25, max_lines - top_n)
    budget = max_lines - top_n - bottom_n

    # Find match line numbers (first matching pattern per line is enough).
    match_lines: list[int] = []
    for i, ln in enumerate(lines):
        for pat in patterns:
            if re.search(pat, ln):
                match_lines.append(i)
                break

    # Fallback to smart if no patterns found
    if not match_lines:
        logger.debug("No patterns found, falling back to smart strategy")
        return _smart_sample_lines(lines, max_lines)

    # Make windows ±6 lines around matches
    windows: list[tuple[int, int]] = []
    for i in match_lines[:50]:  # Cap matches
        start = max(0, i - 6)
        end = min(len(lines), i + 7)
        windows.append((start, end))

    # Merge overlapping windows (sorted, then coalesce adjacent/overlapping)
    windows.sort()
    merged: list[tuple[int, int]] = []
    for s, e in windows:
        if not merged or s > merged[-1][1]:
            merged.append((s, e))
        else:
            merged[-1] = (merged[-1][0], max(merged[-1][1], e))

    # Emit lines within budget, starting from the fixed head slice
    out = lines[:top_n]
    used = len(out)
    last_end = top_n

    for s, e in merged:
        if used >= top_n + budget:
            break
        # Skip windows fully inside the head or tail regions
        if e <= top_n or s >= len(lines) - bottom_n:
            continue

        # Insert an ellipsis marker when there is a gap before this window
        if s > last_end:
            out.append("\n... [omitted] ...\n")
            used += 1

        chunk = lines[s:e]
        room = top_n + budget - used
        if room <= 0:
            break
        # A window may be partially emitted when the budget runs out
        out.extend(chunk[:room])
        used = len(out)
        last_end = e

    out.append("\n... [end] ...\n")
    out.extend(lines[-bottom_n:])

    # Final clamp (markers may push the total slightly over budget)
    out = out[:max_lines]
    return out, True
@tool
def read_file_tool(
    repo_path: str,
    file_path: str,
    max_lines: int = None,
    strategy: Literal["full", "smart", "pattern_window"] = "full"
) -> str:
    """
    Read contents of a file from the repository.

    Args:
        repo_path: Absolute path to the repository root
        file_path: Relative path to the file from repo root
        max_lines: Maximum lines to read (default: from settings.MAX_LINES_PER_FILE)
        strategy: Reading strategy:
            - "full": first N lines (default, backwards compatible)
            - "smart": top + signatures + bottom
            - "pattern_window": extract windows around high-value patterns (routes, errors, entrypoints)

    Returns:
        File contents as string (prefixed with a deterministic header)
    """
    try:
        # Use config setting if not specified
        if max_lines is None:
            max_lines = settings.MAX_LINES_PER_FILE

        repo_root = os.path.normpath(repo_path)
        full_path = os.path.normpath(os.path.join(repo_root, file_path))

        # Security check: the resolved path must be the root itself or sit
        # below it. A bare startswith() prefix test would also accept
        # sibling paths such as /repo-backup when the root is /repo.
        if full_path != repo_root and not full_path.startswith(repo_root + os.sep):
            return "Error: Access denied - path traversal detected"

        if not os.path.exists(full_path):
            return f"Error: File not found: {file_path}"

        if not os.path.isfile(full_path):
            return f"Error: Path is not a file: {file_path}"

        # Check file size using config setting
        file_size = os.path.getsize(full_path)
        max_file_size_mb = settings.MAX_FILE_SIZE / 1_000_000
        if file_size > settings.MAX_FILE_SIZE:
            return f"Error: File too large ({file_size} bytes). Maximum is {max_file_size_mb:.1f}MB."

        # Read file
        with open(full_path, "r", encoding="utf-8", errors="ignore") as f:
            lines = f.readlines()

        # Strategy dispatch. Any unrecognized strategy falls back to "full"
        # (the previous duplicated else-branch is merged here).
        if strategy == "smart":
            selected, truncated = _smart_sample_lines(lines, max_lines=max_lines)
        elif strategy == "pattern_window":
            selected, truncated = _pattern_window_lines(lines, max_lines=max_lines)
        else:
            selected = lines[:max_lines]
            truncated = len(lines) > max_lines

        content = "".join(selected)

        # Deterministic header for verification
        header = f"File: {file_path} | strategy: {strategy} | lines: {len(selected)}/{len(lines)}"
        if truncated:
            return f"{header}\n\n{content}\n\n[File truncated...]"
        return f"{header}\n\n{content}"

    except Exception as e:
        logger.error(f"Error reading file {file_path}: {e}")
        return f"Error reading file: {str(e)}"
@tool
def detect_languages_tool(repo_path: str) -> str:
    """
    Detect programming languages used in the repository.
    Analyzes file extensions and provides statistics.

    Args:
        repo_path: Absolute path to the repository root

    Returns:
        JSON string with language statistics
    """
    try:
        counts = Counter()

        for root, dirs, files in os.walk(repo_path):
            # Prune ignored and hidden directories in place.
            dirs[:] = [d for d in dirs if d not in IGNORE_DIRS and not d.startswith('.')]

            for name in files:
                if name.startswith('.'):
                    continue
                language = LANGUAGE_MAP.get(Path(name).suffix.lower())
                if language is not None:
                    counts[language] += 1

        if not counts:
            return "No recognized programming languages detected"

        ranked = counts.most_common()
        return json.dumps({
            "total_files": sum(counts.values()),
            "languages": dict(ranked),
            "primary_language": ranked[0][0],
        }, indent=2)

    except Exception as e:
        logger.error(f"Error detecting languages: {e}")
        return f"Error detecting languages: {str(e)}"
@tool
def extract_dependencies_tool(repo_path: str) -> str:
    """
    Extract dependencies from common dependency files.
    Recursively scans subdirectories for package.json, requirements.txt, go.mod, Cargo.toml, etc.

    Args:
        repo_path: Absolute path to the repository root

    Returns:
        JSON string with dependencies by ecosystem
    """
    try:
        dependencies = {}
        found_files = []

        # Recursively walk directory tree to find dependency files
        for root, dirs, files in os.walk(repo_path):
            # Filter out ignored directories
            dirs[:] = [d for d in dirs if d not in IGNORE_DIRS and not d.startswith('.')]

            # Python - requirements.txt
            if 'requirements.txt' in files:
                req_file = os.path.join(root, 'requirements.txt')
                rel_path = os.path.relpath(req_file, repo_path)
                try:
                    with open(req_file, 'r') as f:
                        # Strip version pins and extras: "pkg[extra]>=1.0" -> "pkg"
                        deps = [line.strip().split('==')[0].split('>=')[0].split('[')[0]
                                for line in f if line.strip() and not line.startswith('#')]
                    if deps and "Python" not in dependencies:
                        dependencies["Python"] = deps[:20]  # Limit to 20
                        found_files.append(f"requirements.txt ({rel_path})")
                except Exception:
                    # Best-effort: an unreadable manifest must not abort the scan
                    pass

            # Node.js - package.json
            if 'package.json' in files:
                pkg_file = os.path.join(root, 'package.json')
                rel_path = os.path.relpath(pkg_file, repo_path)
                try:
                    with open(pkg_file, 'r') as f:
                        pkg_data = json.load(f)
                    deps = list(pkg_data.get("dependencies", {}).keys())
                    if deps and "Node.js" not in dependencies:
                        dependencies["Node.js"] = deps[:20]
                        found_files.append(f"package.json ({rel_path})")
                except Exception:
                    pass

            # Go - go.mod
            # BUG FIX: the previous parser took line.split()[0] of "require"
            # lines, which always yielded the literal word "require" instead
            # of the module path. Parse both single-line requires and
            # "require ( ... )" blocks.
            if 'go.mod' in files:
                go_file = os.path.join(root, 'go.mod')
                rel_path = os.path.relpath(go_file, repo_path)
                try:
                    deps = []
                    in_require_block = False
                    with open(go_file, 'r') as f:
                        for line in f:
                            stripped = line.strip()
                            if stripped.startswith('require ('):
                                in_require_block = True
                                continue
                            if in_require_block:
                                if stripped == ')':
                                    in_require_block = False
                                elif stripped and not stripped.startswith('//'):
                                    # "module/path v1.2.3" -> "module/path"
                                    deps.append(stripped.split()[0])
                            elif stripped.startswith('require '):
                                parts = stripped.split()
                                if len(parts) >= 2:
                                    deps.append(parts[1])
                    if deps and "Go" not in dependencies:
                        dependencies["Go"] = deps[:20]
                        found_files.append(f"go.mod ({rel_path})")
                except Exception:
                    pass

            # Rust - Cargo.toml
            if 'Cargo.toml' in files:
                cargo_file = os.path.join(root, 'Cargo.toml')
                rel_path = os.path.relpath(cargo_file, repo_path)
                try:
                    with open(cargo_file, 'r') as f:
                        in_deps = False
                        deps = []
                        for line in f:
                            if '[dependencies]' in line:
                                in_deps = True
                                continue
                            # Any new section header ends the dependency table
                            if in_deps and line.strip() and line.startswith('['):
                                break
                            if in_deps and '=' in line:
                                dep_name = line.split('=')[0].strip()
                                deps.append(dep_name)
                    if deps and "Rust" not in dependencies:
                        dependencies["Rust"] = deps[:20]
                        found_files.append(f"Cargo.toml ({rel_path})")
                except Exception:
                    pass

        if not dependencies:
            return "No dependency files found"

        result = {
            "dependencies": dependencies,
            "files_found": found_files
        }
        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error extracting dependencies: {e}")
        return f"Error extracting dependencies: {str(e)}"
f"Error parsing Python file: {str(e)}" + + # For other languages, provide basic info + with open(full_path, 'r', encoding='utf-8', errors='ignore') as f: + content = f.read() + lines = content.split('\n') + + result = { + "language": LANGUAGE_MAP.get(ext, "Unknown"), + "lines_of_code": len(lines), + "non_empty_lines": len([l for l in lines if l.strip()]), + "note": "Detailed analysis only available for Python files" + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.error(f"Error analyzing code structure: {e}") + return f"Error analyzing code structure: {str(e)}" + + +# ============================================================================ +# SPECIALIZED TOOLS FOR PLANNER AGENT +# ============================================================================ + +@tool +def find_ui_files_tool(repo_path: str) -> str: + """ + Check if the project has a UI/frontend component. + Looks for frontend directories and files (React, Vue, Angular, HTML, etc.). + + Args: + repo_path: Absolute path to the repository root + + Returns: + JSON string with UI detection results + """ + try: + ui_indicators = { + "directories": [], + "files": [], + "frameworks": [] + } + + # Check for common frontend directories + ui_dirs = ['frontend', 'ui', 'client', 'web', 'app', 'src/components', 'public'] + for dir_name in ui_dirs: + dir_path = os.path.join(repo_path, dir_name) + if os.path.exists(dir_path) and os.path.isdir(dir_path): + ui_indicators["directories"].append(dir_name) + + # Check for frontend files + ui_files = ['index.html', 'App.jsx', 'App.tsx', 'App.vue', 'index.jsx', 'index.tsx'] + for root, dirs, files in os.walk(repo_path): + dirs[:] = [d for d in dirs if d not in IGNORE_DIRS] + for file in files: + if file in ui_files: + rel_path = os.path.relpath(os.path.join(root, file), repo_path) + ui_indicators["files"].append(rel_path) + + # Check package.json for frontend frameworks + pkg_file = os.path.join(repo_path, "package.json") + if 
@tool
def find_ui_files_tool(repo_path: str) -> str:
    """
    Check if the project has a UI/frontend component.
    Looks for frontend directories and files (React, Vue, Angular, HTML, etc.).

    Args:
        repo_path: Absolute path to the repository root

    Returns:
        JSON string with UI detection results
    """
    try:
        ui_indicators = {
            "directories": [],
            "files": [],
            "frameworks": []
        }

        # Check for common frontend directories
        ui_dirs = ['frontend', 'ui', 'client', 'web', 'app', 'src/components', 'public']
        for dir_name in ui_dirs:
            dir_path = os.path.join(repo_path, dir_name)
            if os.path.exists(dir_path) and os.path.isdir(dir_path):
                ui_indicators["directories"].append(dir_name)

        # Check for frontend files
        ui_files = ['index.html', 'App.jsx', 'App.tsx', 'App.vue', 'index.jsx', 'index.tsx']
        for root, dirs, files in os.walk(repo_path):
            dirs[:] = [d for d in dirs if d not in IGNORE_DIRS]
            for file in files:
                if file in ui_files:
                    rel_path = os.path.relpath(os.path.join(root, file), repo_path)
                    ui_indicators["files"].append(rel_path)

        # Check package.json for frontend frameworks.
        # BUG FIX: a malformed package.json used to raise out of this block
        # and make the whole tool return an error string even when
        # directory/file indicators had already been found. Parse failures
        # are now non-fatal.
        deps = {}
        pkg_file = os.path.join(repo_path, "package.json")
        if os.path.exists(pkg_file):
            try:
                with open(pkg_file, 'r') as f:
                    pkg_data = json.load(f)
                deps = {**pkg_data.get("dependencies", {}), **pkg_data.get("devDependencies", {})}
            except (OSError, json.JSONDecodeError) as e:
                logger.warning(f"Could not parse package.json: {e}")

        if "react" in deps:
            ui_indicators["frameworks"].append("React")
        if "vue" in deps:
            ui_indicators["frameworks"].append("Vue.js")
        if "@angular/core" in deps:
            ui_indicators["frameworks"].append("Angular")
        if "next" in deps:
            ui_indicators["frameworks"].append("Next.js")
        if "svelte" in deps:
            ui_indicators["frameworks"].append("Svelte")

        has_ui = bool(ui_indicators["directories"] or ui_indicators["files"] or ui_indicators["frameworks"])

        result = {
            "has_ui": has_ui,
            "indicators": ui_indicators
        }

        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error finding UI files: {e}")
        return f"Error finding UI files: {str(e)}"


@tool
def find_docker_files_tool(repo_path: str) -> str:
    """
    Check for Docker/container deployment files.
    Only the repository root is checked (no recursive walk).

    Args:
        repo_path: Absolute path to the repository root

    Returns:
        JSON string with Docker file detection results
    """
    try:
        docker_files = []

        # Check for Docker files
        docker_indicators = [
            'Dockerfile',
            'docker-compose.yml',
            'docker-compose.yaml',
            '.dockerignore',
            'Dockerfile.prod',
            'Dockerfile.dev'
        ]

        for file_name in docker_indicators:
            file_path = os.path.join(repo_path, file_name)
            if os.path.exists(file_path):
                docker_files.append(file_name)

        result = {
            "has_docker": bool(docker_files),
            "docker_files": docker_files
        }

        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error finding Docker files: {e}")
        return f"Error finding Docker files: {str(e)}"
@tool
def find_config_files_tool(repo_path: str) -> str:
    """
    Find configuration files (.env, config files, etc.).
    Recursively scans subdirectories.

    Args:
        repo_path: Absolute path to the repository root

    Returns:
        JSON string with configuration file detection results
    """
    try:
        # Filenames that indicate project configuration.
        wanted = {
            '.env.example', '.env.sample', '.env.template',
            'config.json', 'config.yaml', 'config.yml',
            'settings.py', 'config.py', 'appsettings.json',
        }

        matches = []
        for root, dirs, files in os.walk(repo_path):
            # Prune ignored and hidden directories in place.
            dirs[:] = [d for d in dirs if d not in IGNORE_DIRS and not d.startswith('.')]
            matches.extend(
                os.path.relpath(os.path.join(root, name), repo_path)
                for name in files
                if name in wanted
            )

        return json.dumps({
            "has_config": bool(matches),
            "config_files": matches,
            "count": len(matches),
        }, indent=2)

    except Exception as e:
        logger.error(f"Error finding config files: {e}")
        return f"Error finding config files: {str(e)}"
@tool
def find_dependency_files_tool(repo_path: str) -> str:
    """
    Find dependency/package files in the repository.
    Recursively scans subdirectories for requirements.txt, package.json, go.mod, Cargo.toml, pom.xml, etc.

    Args:
        repo_path: Absolute path to the repository root

    Returns:
        JSON string with dependency file locations
    """
    try:
        # Manifest filenames that identify a package ecosystem.
        manifest_names = {
            'requirements.txt', 'package.json', 'go.mod', 'Cargo.toml',
            'pom.xml', 'build.gradle', 'Gemfile', 'composer.json',
            'Pipfile', 'poetry.lock', 'yarn.lock', 'package-lock.json',
        }

        located = []
        for root, dirs, files in os.walk(repo_path):
            # Prune ignored and hidden directories in place.
            dirs[:] = [d for d in dirs if d not in IGNORE_DIRS and not d.startswith('.')]
            located.extend(
                os.path.relpath(os.path.join(root, name), repo_path)
                for name in files
                if name in manifest_names
            )

        return json.dumps({
            "dependency_files": located,
            "count": len(located),
        }, indent=2)

    except Exception as e:
        logger.error(f"Error finding dependency files: {e}")
        return f"Error finding dependency files: {str(e)}"
@tool
def find_entry_points_tool(repo_path: str) -> str:
    """
    Find main entry point files (main.py, server.py, index.js, etc.).

    Args:
        repo_path: Absolute path to the repository root

    Returns:
        JSON string with entry point file locations
    """
    try:
        entry_points = []

        # Common entry point patterns
        entry_patterns = [
            'main.py', 'app.py', 'server.py', '__main__.py',
            'index.js', 'index.ts', 'server.js', 'app.js',
            'main.go', 'main.rs', 'Main.java'
        ]

        for root, dirs, files in os.walk(repo_path):
            # CONSISTENCY FIX: also prune hidden directories, matching the
            # other repo-walking tools in this module (detect_languages,
            # extract_dependencies, find_config_files, ...).
            dirs[:] = [d for d in dirs if d not in IGNORE_DIRS and not d.startswith('.')]

            for file in files:
                if file in entry_patterns:
                    rel_path = os.path.relpath(os.path.join(root, file), repo_path)
                    entry_points.append(rel_path)

        result = {
            "entry_points": entry_points,
            "count": len(entry_points)
        }

        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error finding entry points: {e}")
        return f"Error finding entry points: {str(e)}"
@tool
def find_api_routes_tool(repo_path: str, entry_file: str) -> str:
    """
    Analyze an entry file to find API routes/endpoints.
    Looks for common patterns like @app.route, @router.get, etc.

    Args:
        repo_path: Absolute path to the repository root
        entry_file: Relative path to entry file from repo root

    Returns:
        JSON string with discovered API routes
    """
    try:
        repo_root = os.path.normpath(repo_path)
        full_path = os.path.normpath(os.path.join(repo_root, entry_file))

        # Security check: the resolved path must live under the repo root;
        # a bare startswith() prefix test would accept sibling directories.
        if full_path != repo_root and not full_path.startswith(repo_root + os.sep):
            return "Error: Access denied"

        if not os.path.exists(full_path):
            return "Error: File not found"

        routes = []

        with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
            content = f.read()
            lines = content.split('\n')

        # Pattern matching for routes.
        # FIX: the patterns no longer require a closing ')' right after the
        # quoted path, so decorators with extra arguments — e.g.
        # @app.route('/x', methods=['POST']) — are matched too.
        route_patterns = [
            r'@app\.route\(["\'](.+?)["\']',                     # Flask
            r'@router\.(get|post|put|delete)\(["\'](.+?)["\']',  # FastAPI
            r'app\.(get|post|put|delete)\(["\'](.+?)["\']',      # Express
            r'@(Get|Post|Put|Delete)Mapping\(["\'](.+?)["\']'    # Spring
        ]

        for line in lines:
            for pattern in route_patterns:
                matches = re.findall(pattern, line)
                if matches:
                    # Multi-group patterns yield tuples (method, path);
                    # keep only the path component.
                    routes.extend([str(m) if isinstance(m, str) else m[-1] for m in matches])

        result = {
            "file": entry_file,
            "routes": routes[:50],  # Limit to 50 routes
            "route_count": len(routes)
        }

        return json.dumps(result, indent=2)

    except Exception as e:
        logger.error(f"Error finding API routes: {e}")
        return f"Error finding API routes: {str(e)}"
+ + Args: + readme_content: README markdown content to validate + + Returns: + JSON string with validation results + """ + try: + issues = [] + sections_found = [] + + # Extract sections (markdown headers) + import re + header_pattern = r'^##\s+(.+)$' + sections = re.findall(header_pattern, readme_content, re.MULTILINE) + sections_found = sections + + # Expected sections (at least some of these should be present) + expected_sections = ["Overview", "Features", "Installation", "Usage", "Configuration", "API"] + found_expected = [s for s in expected_sections if any(exp.lower() in s.lower() for exp in sections)] + + if len(found_expected) < 3: + issues.append({ + "severity": "warning", + "message": f"Only {len(found_expected)} standard sections found. Consider adding more." + }) + + # Check for code blocks + code_blocks = readme_content.count("```") + if code_blocks == 0: + issues.append({ + "severity": "info", + "message": "No code examples found. Consider adding usage examples." + }) + elif code_blocks % 2 != 0: + issues.append({ + "severity": "error", + "message": "Unbalanced code blocks (missing closing ```)" + }) + + # Check for mermaid diagrams + has_mermaid = "```mermaid" in readme_content + if not has_mermaid: + issues.append({ + "severity": "info", + "message": "No Mermaid diagrams found. Visual diagrams enhance documentation." + }) + + # Check minimum length + if len(readme_content) < 500: + issues.append({ + "severity": "warning", + "message": "README is very short. Consider adding more detail." 
+ }) + + result = { + "sections_found": sections_found, + "section_count": len(sections_found), + "code_blocks": code_blocks // 2 if code_blocks % 2 == 0 else 0, + "has_mermaid": has_mermaid, + "content_length": len(readme_content), + "issues": issues + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.error(f"Error validating README structure: {e}") + return json.dumps({ + "error": str(e), + "issues": [{"severity": "error", "message": f"Validation failed: {str(e)}"}] + }) + + +@tool +def validate_mermaid_syntax_tool(mermaid_code: str) -> str: + """ + Validate Mermaid diagram syntax with STRICT Mermaid 8.14.0 render-safe checks. + Returns JSON string with validation results. + + FIXED VERSION: Enforces render-safe patterns that prevent "Syntax error in graph" + + Checks: + - Node IDs must be alphanumeric + underscore only + - Node labels must be quoted: ID["Label"] + - Edge labels must not contain: / ( ) : ` " + - Basic structure (brackets, arrows, diagram type) + + Args: + mermaid_code: Mermaid diagram code to validate + + Returns: + JSON: {"valid": bool, "errors": [...], "warnings": [...]} + """ + try: + errors = [] + warnings = [] + + # Basic checks + if not mermaid_code or not mermaid_code.strip(): + errors.append("Empty diagram code") + return json.dumps({"valid": False, "errors": errors, "warnings": warnings}) + + code = mermaid_code.strip() + + # Check for diagram type + diagram_types = [ + "graph", "flowchart", "sequenceDiagram", "classDiagram", + "stateDiagram", "erDiagram", "journey", "gantt", "pie" + ] + has_type = any(code.startswith(dt) for dt in diagram_types) + if not has_type: + errors.append(f"Must start with diagram type: {diagram_types[:4]}...") + + # Check for common syntax errors + open_brackets = code.count("[") + close_brackets = code.count("]") + if open_brackets != close_brackets: + errors.append(f"Unbalanced square brackets: {open_brackets} open vs {close_brackets} close") + + open_parens = code.count("(") + 
close_parens = code.count(")") + if open_parens != close_parens: + errors.append(f"Unbalanced parentheses: {open_parens} open vs {close_parens} close") + + open_braces = code.count("{") + close_braces = code.count("}") + if open_braces != close_braces: + errors.append(f"Unbalanced curly braces: {open_braces} open vs {close_braces} close") + + # Check for arrow syntax (graph/flowchart) + if code.startswith(("graph", "flowchart")): + # Must have at least one arrow + has_arrow = ("-->" in code or "---" in code or "==>" in code or + "-.->" in code or "-..->" in code) + if not has_arrow: + warnings.append("Graph/flowchart diagrams typically need arrows (-->, --->, etc.)") + + # Check for node definitions + lines = code.split("\n") + node_lines = [l for l in lines if "-->" in l or "---" in l] + if len(node_lines) == 0: + errors.append("No connections found - graph needs edges like: A --> B") + + # =================================================================== + # MERMAID 8.14.0 RENDER-SAFE VALIDATION (NEW) + # =================================================================== + + # Check 1: Node IDs must be alphanumeric + underscore only + # Pattern: extract node IDs from lines like "NodeID[...]" or "NodeID --> OtherNode" + node_id_pattern = r'\b([A-Za-z][A-Za-z0-9_]*)\s*(?:\[|-->|---|\||$)' + node_ids = [] + for line in lines[1:]: # Skip first line (graph TD/LR) + if not line.strip() or line.strip().startswith('#'): + continue + matches = re.findall(node_id_pattern, line) + node_ids.extend(matches) + + # Check for invalid node IDs (contain special chars besides underscore) + invalid_node_pattern = r'\b([A-Za-z0-9_]*[^A-Za-z0-9_\s\[\](){}|>-]+[A-Za-z0-9_]*)\s*(?:\[|-->)' + for line in lines[1:]: + if not line.strip(): + continue + invalid_matches = re.findall(invalid_node_pattern, line) + for invalid_id in invalid_matches: + if invalid_id and not invalid_id.isspace(): + errors.append( + f"Invalid node ID '{invalid_id}' - must be alphanumeric + underscore only. 
" + f"Use: {re.sub(r'[^A-Za-z0-9_]', '', invalid_id)}" + ) + + # Check 2: Node labels must be quoted if they contain special characters + # Pattern: NodeID[unquoted text with spaces or special chars] + unquoted_label_pattern = r'([A-Za-z][A-Za-z0-9_]*)\[([^\]"]+[^"\]])\]' + for line in lines[1:]: + if not line.strip(): + continue + matches = re.findall(unquoted_label_pattern, line) + for node_id, label in matches: + # Check if label has spaces or special chars and is not quoted + if ' ' in label or '(' in label or ')' in label or '/' in label: + if not (label.strip().startswith('"') and label.strip().endswith('"')): + errors.append( + f"Unquoted label in node '{node_id}[{label}]' - must use quotes: {node_id}[\"{label}\"]" + ) + + # Check 3: Edge labels must not contain unsafe characters + # Pattern: |label text| + edge_label_pattern = r'\|([^|]+)\|' + unsafe_edge_chars = ['/', '(', ')', ':', '`', '"', '{', '}'] + for line in lines[1:]: + if not line.strip(): + continue + edge_matches = re.findall(edge_label_pattern, line) + for edge_label in edge_matches: + for unsafe_char in unsafe_edge_chars: + if unsafe_char in edge_label: + safe_label = edge_label + for char in unsafe_edge_chars: + safe_label = safe_label.replace(char, '') + safe_label = safe_label.strip() + errors.append( + f"Unsafe edge label '|{edge_label}|' contains '{unsafe_char}'. " + f"Use simple label: |{safe_label}| or create a separate node for complex paths." + ) + break + + # Check 4: No self-referencing edges (Node --> Node) + self_ref_pattern = r'([A-Za-z][A-Za-z0-9_]*)\s*-->\s*\1(?:\s|$|\|)' + for line in lines[1:]: + if not line.strip(): + continue + matches = re.findall(self_ref_pattern, line) + for node_id in matches: + errors.append( + f"Self-referencing edge detected: {node_id} --> {node_id}. " + f"Show data flow between different components instead." 
+ ) + + # Check sequenceDiagram syntax + if code.startswith("sequenceDiagram"): + if "->" not in code and "->>" not in code: + errors.append("sequenceDiagram needs arrows like: Alice->>Bob: Hello") + + # Warning for common mistakes + if "graph TD" in code and "TB" not in code: + pass # TD (top-down) is valid + elif "graph TB" in code: + pass # TB (top-bottom) is valid + elif "graph LR" in code: + pass # LR (left-right) is valid + elif "graph RL" in code: + pass # RL (right-left) is valid + elif "graph" in code and not any(d in code for d in ["TD", "TB", "LR", "RL", "BT"]): + warnings.append("graph should specify direction: graph TD, graph LR, etc.") + + valid = len(errors) == 0 + + result = { + "valid": valid, + "errors": errors, + "warnings": warnings, + "diagram_type": next((dt for dt in diagram_types if code.startswith(dt)), "unknown") + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.error(f"Error validating Mermaid syntax: {e}") + return json.dumps({ + "valid": False, + "errors": [f"Validation error: {str(e)}"], + "warnings": [] + }) + + +# ============================================================================ +# TOOL BINDING HELPERS - Bind repo_path to simplify LLM tool usage +# ============================================================================ + +def make_bound_tools_for_code_explorer(repo_path: str) -> List: + """ + Create tools for Code Explorer with repo_path pre-bound. + This simplifies the tool signatures the LLM sees. + + Args: + repo_path: Absolute path to repository root + + Returns: + List of tool instances with bound repo_path + """ + # Create wrapper functions with repo_path bound + @tool + def list_directory(relative_path: str = ".") -> str: + """List contents of a directory. 
Args: relative_path (str, default '.'): Path from repo root""" + return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path) + + @tool + def read_file( + file_path: str, + max_lines: int = None, + strategy: Literal["full", "smart", "pattern_window"] = "pattern_window" + ) -> str: + """Read a file. Args: file_path (str), max_lines (int, optional), strategy (full|smart|pattern_window, default: pattern_window)""" + return read_file_tool.func( + repo_path=repo_path, + file_path=file_path, + max_lines=max_lines, + strategy=strategy + ) + + @tool + def detect_languages() -> str: + """Detect programming languages in the repository. No arguments needed.""" + return detect_languages_tool.func(repo_path=repo_path) + + @tool + def extract_dependencies() -> str: + """Extract dependencies from package files (requirements.txt, package.json, etc.). No arguments needed.""" + return extract_dependencies_tool.func(repo_path=repo_path) + + @tool + def analyze_code_structure(file_path: str) -> str: + """Analyze code structure (functions, classes, imports). Args: file_path (str): Relative path from repo root""" + return analyze_code_structure_tool.func(repo_path=repo_path, file_path=file_path) + + return [list_directory, read_file, detect_languages, extract_dependencies, analyze_code_structure] + + +def make_bound_tools_for_planner(repo_path: str) -> List: + """ + Create tools for Planner with repo_path pre-bound. + + Args: + repo_path: Absolute path to repository root + + Returns: + List of tool instances with bound repo_path + """ + # Create wrapper functions with repo_path bound + @tool + def list_directory(relative_path: str = ".") -> str: + """List contents of a directory. 
Args: relative_path (str, default '.')""" + return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path) + + @tool + def read_file( + file_path: str, + max_lines: int = None, + strategy: Literal["full", "smart", "pattern_window"] = "pattern_window" + ) -> str: + """Read a file. Args: file_path (str), max_lines (int, optional), strategy (full|smart|pattern_window, default: pattern_window)""" + return read_file_tool.func( + repo_path=repo_path, + file_path=file_path, + max_lines=max_lines, + strategy=strategy + ) + + @tool + def detect_languages() -> str: + """Detect programming languages. No arguments.""" + return detect_languages_tool.func(repo_path=repo_path) + + @tool + def extract_dependencies() -> str: + """Extract dependencies from package files. No arguments.""" + return extract_dependencies_tool.func(repo_path=repo_path) + + @tool + def analyze_code_structure(file_path: str) -> str: + """Analyze code structure. Args: file_path (str)""" + return analyze_code_structure_tool.func(repo_path=repo_path, file_path=file_path) + + @tool + def find_ui_files() -> str: + """Check if project has UI/frontend components. No arguments.""" + return find_ui_files_tool.func(repo_path=repo_path) + + @tool + def find_docker_files() -> str: + """Check for Docker deployment files. No arguments.""" + return find_docker_files_tool.func(repo_path=repo_path) + + @tool + def find_config_files() -> str: + """Find configuration files (.env, config files). No arguments.""" + return find_config_files_tool.func(repo_path=repo_path) + + return [list_directory, read_file, detect_languages, extract_dependencies, analyze_code_structure, + find_ui_files, find_docker_files, find_config_files] + + +def make_bound_tools_for_writer(repo_path: str) -> List: + """ + Create tools for Writer with repo_path pre-bound. 
+ + Args: + repo_path: Absolute path to repository root + + Returns: + List of StructuredTool instances with bound repo_path + """ + # Same as code explorer + return make_bound_tools_for_code_explorer(repo_path) + + +def make_bound_tools_for_diagram_generator(repo_path: str) -> List: + """ + Create tools for Diagram Generator with repo_path pre-bound. + + FIXED: Adds validate_mermaid_syntax wrapper (without _tool suffix) to match prompt. + + Args: + repo_path: Absolute path to repository root + + Returns: + List of tool instances with bound repo_path + """ + # Create wrapper functions with repo_path bound + @tool + def list_directory(relative_path: str = ".") -> str: + """List contents of a directory. Args: relative_path (str, default '.')""" + return list_directory_tool.func(repo_path=repo_path, relative_path=relative_path) + + @tool + def read_file( + file_path: str, + max_lines: int = None, + strategy: Literal["full", "smart", "pattern_window"] = "pattern_window" + ) -> str: + """Read a file. Args: file_path (str), max_lines (int, optional), strategy (full|smart|pattern_window, default: pattern_window)""" + return read_file_tool.func( + repo_path=repo_path, + file_path=file_path, + max_lines=max_lines, + strategy=strategy + ) + + @tool + def detect_languages() -> str: + """Detect programming languages. No arguments.""" + return detect_languages_tool.func(repo_path=repo_path) + + @tool + def find_entry_points() -> str: + """Find main entry point files (main.py, server.py, index.js, etc.). No arguments.""" + return find_entry_points_tool.func(repo_path=repo_path) + + @tool + def find_api_routes(entry_file: str) -> str: + """Find API routes in an entry file. Args: entry_file (str): Relative path to entry file""" + return find_api_routes_tool.func(repo_path=repo_path, entry_file=entry_file) + + @tool + def find_docker_files() -> str: + """Check for Docker files. 
No arguments.""" + return find_docker_files_tool.func(repo_path=repo_path) + + # FIXED: Add wrapper for validate_mermaid_syntax (without _tool suffix) + # This matches the tool name used in the diagram generator prompt + @tool + def validate_mermaid_syntax(mermaid_code: str) -> str: + """ + Validate Mermaid diagram syntax with strict Mermaid 8.14.0 render-safe checks. + + CRITICAL: Use this to validate EVERY diagram before finalizing output. + + Args: + mermaid_code (str): The complete Mermaid diagram code to validate + + Returns: + JSON with {"valid": bool, "errors": [...], "warnings": [...]} + """ + return validate_mermaid_syntax_tool.func(mermaid_code=mermaid_code) + + return [list_directory, read_file, detect_languages, find_entry_points, find_api_routes, + find_docker_files, validate_mermaid_syntax] diff --git a/sample_solutions/Docugen-Microagents/api/utils/__init__.py b/sample_solutions/Docugen-Microagents/api/utils/__init__.py new file mode 100644 index 00000000..2b0e869f --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/utils/__init__.py @@ -0,0 +1,5 @@ +"""Utility functions for DocuGen AI""" + +from .project_detector import detect_projects, ProjectDetector + +__all__ = ["detect_projects", "ProjectDetector"] diff --git a/sample_solutions/Docugen-Microagents/api/utils/metrics_extractor.py b/sample_solutions/Docugen-Microagents/api/utils/metrics_extractor.py new file mode 100644 index 00000000..3d5e70c0 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/utils/metrics_extractor.py @@ -0,0 +1,56 @@ +""" +Utility for extracting metrics from LangGraph agent execution results +""" + +from typing import Dict, List, Any + + +def extract_agent_metrics(messages: List[Any]) -> Dict[str, int]: + """ + Extract token usage and call counts from LangGraph messages. 
+ + Args: + messages: List of messages from LangGraph agent execution + + Returns: + Dict with input_tokens, output_tokens, tool_calls, llm_calls + """ + input_tokens = 0 + output_tokens = 0 + tool_calls = 0 + llm_calls = 0 + + for msg in messages: + # Count tool calls (messages with tool_calls attribute) + if hasattr(msg, 'tool_calls') and msg.tool_calls: + tool_calls += len(msg.tool_calls) + + # Count LLM calls and extract token usage from AIMessage responses ONLY + # Check if this is an AIMessage by looking for response_metadata attribute + if hasattr(msg, 'response_metadata') and msg.__class__.__name__ == 'AIMessage': + llm_calls += 1 + metadata = msg.response_metadata + + # Try different token usage formats (different LLM providers use different keys) + if 'usage_metadata' in metadata: + # LangChain format + usage = metadata['usage_metadata'] + input_tokens += usage.get('input_tokens', 0) + output_tokens += usage.get('output_tokens', 0) + elif 'token_usage' in metadata: + # OpenAI format + usage = metadata['token_usage'] + input_tokens += usage.get('prompt_tokens', 0) + output_tokens += usage.get('completion_tokens', 0) + elif 'usage' in metadata: + # Alternative format + usage = metadata['usage'] + input_tokens += usage.get('input_tokens', usage.get('prompt_tokens', 0)) + output_tokens += usage.get('output_tokens', usage.get('completion_tokens', 0)) + + return { + "input_tokens": input_tokens, + "output_tokens": output_tokens, + "tool_calls": tool_calls, + "llm_calls": llm_calls + } diff --git a/sample_solutions/Docugen-Microagents/api/utils/project_detector.py b/sample_solutions/Docugen-Microagents/api/utils/project_detector.py new file mode 100644 index 00000000..dd3c6065 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/utils/project_detector.py @@ -0,0 +1,431 @@ +""" +Smart Project Detector +Analyzes repository structure and identifies individual projects within a monorepo +Includes grouping logic to treat parent folders (with api/ui children) 
as single projects +""" + +import os +import logging +import fnmatch +from typing import List, Dict, Any +from pathlib import Path + +logger = logging.getLogger(__name__) + +# Project indicators - files that typically mark a project root +PROJECT_INDICATORS = { + "python": ["requirements.txt", "setup.py", "pyproject.toml", "Pipfile", "poetry.lock"], + "nodejs": ["package.json"], + "java": ["pom.xml", "build.gradle", "build.gradle.kts"], + "go": ["go.mod"], + "rust": ["Cargo.toml"], + "php": ["composer.json"], + "ruby": ["Gemfile"], + "dotnet": ["*.csproj", "*.sln", "*.fsproj", "*.vbproj"], + "cpp": ["CMakeLists.txt", "Makefile"], + "r": ["DESCRIPTION", "NAMESPACE"], +} + +# Directories to ignore +IGNORE_DIRS = { + "node_modules", ".git", "__pycache__", "venv", "env", ".venv", + "dist", "build", ".idea", ".vscode", "target", "out", + ".next", ".nuxt", "coverage", ".pytest_cache" +} + +# Common subproject folder names (generic patterns, not hardcoded to specific names) +SUBPROJECT_PATTERNS = ["api", "ui", "frontend", "backend", "web", "server", "client", "app", "service"] + + +class ProjectDetector: + """Detects and analyzes projects within a repository""" + + def __init__(self, repo_path: str, max_depth: int = 3, group_subprojects: bool = True): + """ + Args: + repo_path: Path to repository root + max_depth: Maximum depth to scan + group_subprojects: If True, group api/ui siblings under parent project + """ + self.repo_path = Path(repo_path) + self.max_depth = max_depth + self.group_subprojects = group_subprojects + + def detect_projects(self) -> Dict[str, Any]: + """ + Scan repository and detect individual projects + + Returns: + Dict with: + - is_monorepo: bool + - project_count: int + - projects: List[Dict] with project metadata + - skipped_folders: List[Dict] with skipped folder info and reasons + """ + projects = [] + skipped_folders = [] + + # Check if root itself is a project + root_project = self._analyze_directory(self.repo_path, depth=0) + if root_project: 
+ projects.append(root_project) + + # Scan subdirectories + self._scan_recursive(self.repo_path, projects, depth=1, skipped=skipped_folders) + + # Apply grouping logic if enabled + if self.group_subprojects: + projects = self._group_composite_projects(projects) + + # Filter out nested skipped folders (children of other skipped folders) + skipped_folders = self._filter_nested_skipped_folders(skipped_folders) + + # Filter out standalone media/asset folders when there's a doc folder + skipped_folders = self._filter_redundant_asset_folders(skipped_folders) + + # Classify as monorepo or single project + is_monorepo = len(projects) > 1 + + logger.info(f"Detected {len(projects)} project(s) - Monorepo: {is_monorepo}") + if skipped_folders: + logger.info(f"Skipped {len(skipped_folders)} folder(s) without project indicators") + + return { + "is_monorepo": is_monorepo, + "project_count": len(projects), + "projects": projects, + "skipped_folders": skipped_folders + } + + def _scan_recursive(self, directory: Path, projects: List[Dict], depth: int, skipped: List[Dict] = None): + """Recursively scan directories for projects""" + if depth > self.max_depth: + return + + if skipped is None: + skipped = [] + + try: + for item in directory.iterdir(): + # Skip ignored directories + if not item.is_dir() or item.name in IGNORE_DIRS or item.name.startswith('.'): + continue + + # Check if this directory is a project + project_info = self._analyze_directory(item, depth) + if project_info: + projects.append(project_info) + # Don't scan deeper if we found a project (avoid nested projects) + continue + + # Directory has no project indicators - analyze why and track it + skip_reason = self._analyze_skipped_folder(item) + if skip_reason: + rel_path = item.relative_to(self.repo_path) + skipped.append({ + "name": item.name, + "path": str(rel_path), + "reason": skip_reason["reason"], + "details": skip_reason["details"], + "depth": depth + }) + + # Continue scanning deeper + 
self._scan_recursive(item, projects, depth + 1, skipped) + + except PermissionError: + logger.warning(f"Permission denied: {directory}") + + def _analyze_skipped_folder(self, directory: Path) -> Dict[str, str]: + """ + Analyze why a folder was skipped (no project indicators) + + Returns: + Dict with 'reason' and 'details', or None if folder should be ignored + """ + try: + items = list(directory.iterdir()) + files = [f for f in items if f.is_file()] + subdirs = [d for d in items if d.is_dir() and d.name not in IGNORE_DIRS] + + # Empty folder + if not files and not subdirs: + return { + "reason": "Empty folder", + "details": "Contains no files or subfolders" + } + + # Analyze file types + doc_extensions = {'.md', '.txt', '.rst', '.adoc', '.pdf'} + image_extensions = {'.png', '.jpg', '.jpeg', '.gif', '.svg', '.ico', '.webp', '.bmp'} + + doc_files = [f for f in files if f.suffix.lower() in doc_extensions] + image_files = [f for f in files if f.suffix.lower() in image_extensions] + other_files = [f for f in files if f.suffix.lower() not in doc_extensions | image_extensions] + + # Documentation-only folder + if doc_files and not other_files and not image_files: + return { + "reason": "Documentation only", + "details": f"Contains only documentation files ({len(doc_files)} markdown/text files)" + } + + # Images/media-only folder + if image_files and not other_files and not doc_files: + return { + "reason": "Media only", + "details": f"Contains only image files ({len(image_files)} images)" + } + + # Mixed docs and images, no code + if (doc_files or image_files) and not other_files and len(files) < 5: + return { + "reason": "Documentation and media", + "details": f"Contains {len(doc_files)} docs, {len(image_files)} images, no code" + } + + # Has files but no project indicators + if files and not subdirs: + return { + "reason": "No project indicators", + "details": f"Contains {len(files)} files but no package.json, requirements.txt, etc." 
+ } + + # Has subdirectories - likely being scanned recursively + # Don't report these as skipped since we'll scan their children + return None + + except PermissionError: + return { + "reason": "Permission denied", + "details": "Cannot access folder contents" + } + + def _analyze_directory(self, directory: Path, depth: int) -> Dict[str, Any]: + """ + Analyze a directory to determine if it's a project + + Returns: + Project metadata dict if project detected, None otherwise + """ + try: + files = [f.name for f in directory.iterdir() if f.is_file()] + except PermissionError: + return None + + # Check for project indicators (with glob pattern support) + detected_types = [] + indicator_files = [] + + for proj_type, indicators in PROJECT_INDICATORS.items(): + for indicator in indicators: + # Support glob patterns like *.csproj + if '*' in indicator or '?' in indicator: + # Use fnmatch for glob patterns + matches = [f for f in files if fnmatch.fnmatch(f, indicator)] + if matches: + detected_types.append(proj_type) + indicator_files.extend(matches) + else: + # Exact match + if indicator in files: + detected_types.append(proj_type) + indicator_files.append(indicator) + + # If we found project indicators, it's likely a project + if detected_types: + # Calculate relative path from repo root + rel_path = directory.relative_to(self.repo_path) + + # Estimate project complexity + file_count = len(files) + dir_count = len([d for d in directory.iterdir() if d.is_dir() and d.name not in IGNORE_DIRS]) + + return { + "name": directory.name, + "path": str(rel_path) if str(rel_path) != "." 
else "/", + "full_path": str(directory), + "types": list(set(detected_types)), # Remove duplicates + "indicators": indicator_files, + "depth": depth, + "file_count": file_count, + "dir_count": dir_count, + "is_root": depth == 0, + "is_composite": False # Will be updated by grouping logic + } + + return None + + def _group_composite_projects(self, projects: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + Group projects that are subprojects under a common parent. + + Logic: If multiple sibling folders at the same depth are projects, + and they match common subproject patterns (api, ui, frontend, backend, etc.), + group them under their parent folder as a single composite project. + + Example: + rag-chatbot/api (project) + rag-chatbot/ui (project) + → Groups into: rag-chatbot (composite project) + + Returns: + New list with grouped projects + """ + # Build a parent-to-children mapping + parent_map: Dict[str, List[Dict]] = {} + + for project in projects: + project_path = Path(project["full_path"]) + parent_path = str(project_path.parent) + + if parent_path not in parent_map: + parent_map[parent_path] = [] + parent_map[parent_path].append(project) + + # Identify parents with multiple subproject children + grouped_projects = [] + processed_projects = set() + + for parent_path, children in parent_map.items(): + # Skip root-level projects (no grouping needed) + if len(children) == 1: + if children[0]["path"] not in processed_projects: + grouped_projects.append(children[0]) + processed_projects.add(children[0]["path"]) + continue + + # Check if these siblings look like subprojects (api, ui, etc.) 
+ child_names = [child["name"].lower() for child in children] + subproject_count = sum(1 for name in child_names if any(pattern in name for pattern in SUBPROJECT_PATTERNS)) + + # If 2+ children match subproject patterns, group them under parent + if len(children) >= 2 and subproject_count >= 2: + parent_dir = Path(parent_path) + parent_name = parent_dir.name + + # Skip if parent is the repo root + if str(parent_dir) == str(self.repo_path): + # Don't group root-level subprojects, keep them separate + for child in children: + if child["path"] not in processed_projects: + grouped_projects.append(child) + processed_projects.add(child["path"]) + continue + + # Create composite project for the parent + composite_project = { + "name": parent_name, + "path": str(parent_dir.relative_to(self.repo_path)), + "full_path": str(parent_dir), + "types": list(set([t for child in children for t in child["types"]])), + "indicators": [f"{child['name']}/{ind}" for child in children for ind in child["indicators"]], + "depth": children[0]["depth"] - 1, # Parent is one level up + "file_count": sum(child["file_count"] for child in children), + "dir_count": len(children), + "is_root": False, + "is_composite": True, + "subprojects": [child["name"] for child in children] + } + + grouped_projects.append(composite_project) + + # Mark children as processed + for child in children: + processed_projects.add(child["path"]) + + logger.info(f"Grouped {len(children)} subprojects under '{parent_name}': {', '.join(child_names)}") + + else: + # Not a composite project pattern, keep children separate + for child in children: + if child["path"] not in processed_projects: + grouped_projects.append(child) + processed_projects.add(child["path"]) + + return grouped_projects + + def _filter_nested_skipped_folders(self, skipped_folders: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + Filter out skipped folders that are children of other skipped folders. 
+ Only keep top-level skipped folders to avoid redundant information. + + Example: If 'code-generation' is skipped, don't also show 'code-generation/src' and 'code-generation/img' + """ + if not skipped_folders: + return [] + + # Sort by depth (shallowest first) and then by path + sorted_folders = sorted(skipped_folders, key=lambda x: (x['depth'], x['path'])) + + filtered = [] + for folder in sorted_folders: + # Normalize path using Path to handle both / and \ correctly + folder_path = Path(folder['path']) + + # Check if this folder is a child of any already-added skipped folder + is_nested = False + for parent in filtered: + parent_path = Path(parent['path']) + # Check if folder_path is a child of parent_path + try: + folder_path.relative_to(parent_path) + # If no exception, folder_path is under parent_path + is_nested = True + break + except ValueError: + # Not a subpath, continue checking + pass + + # Only add if it's not nested under another skipped folder + if not is_nested: + filtered.append(folder) + + logger.info(f"Filtered nested folders: {len(skipped_folders)} -> {len(filtered)}") + return filtered + + def _filter_redundant_asset_folders(self, skipped_folders: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + Filter out standalone asset folders (images, media) when there's a documentation folder. + + Rationale: If a repo has both a 'docs' folder and an 'images' folder at the same level, + the images likely belong to the docs, so we only need to show the docs folder. 
+ """ + if not skipped_folders: + return [] + + # Check if there's at least one documentation folder + has_doc_folder = any( + folder['reason'] in ['Documentation only', 'Documentation and media'] + for folder in skipped_folders + ) + + # If there's a doc folder, filter out standalone media folders + if has_doc_folder: + filtered = [ + folder for folder in skipped_folders + if folder['reason'] != 'Media only' + ] + if len(filtered) < len(skipped_folders): + logger.info(f"Filtered asset folders: {len(skipped_folders)} -> {len(filtered)} (removed media-only folders)") + return filtered + + # No doc folder, keep all skipped folders + return skipped_folders + + +def detect_projects(repo_path: str) -> Dict[str, Any]: + """ + Convenience function to detect projects in a repository + + Args: + repo_path: Path to the cloned repository + + Returns: + Dict with: + - is_monorepo: bool + - project_count: int + - projects: List[Dict] with project metadata + """ + detector = ProjectDetector(repo_path, group_subprojects=True) + return detector.detect_projects() diff --git a/sample_solutions/Docugen-Microagents/api/workflow.py b/sample_solutions/Docugen-Microagents/api/workflow.py new file mode 100644 index 00000000..a1ffc02a --- /dev/null +++ b/sample_solutions/Docugen-Microagents/api/workflow.py @@ -0,0 +1,1322 @@ +""" +Simplified LangGraph Workflow for 10 Micro-Agents +Optimized for 8K context models with evidence-based architecture +""" + +import logging +from typing import Dict, Any +from langgraph.graph import StateGraph, END +from langgraph.checkpoint.memory import MemorySaver + +from models.state import DocGenState +from models.evidence import EvidencePacket, EvidenceItem +from models import get_log_manager, LogType +from services import get_llm, GitService +from services.git_service import parse_github_url +from utils import detect_projects +from config import settings +from langchain_core.messages import AIMessage +import os +import json +import re + +# Import all agents 
(6 section writers + planner + mermaid + QA) +from agents.code_explorer_agent import run_code_explorer_agent +from agents.api_reference_agent import run_api_reference_agent +from agents.call_graph_agent import run_call_graph_agent +from agents.error_analysis_agent import run_error_analysis_agent +from agents.env_config_agent import run_env_config_agent +from agents.dependency_analyzer_agent import run_dependency_analyzer_agent +from agents.planner_agent import run_planner_agent +from agents.mermaid_agent import run_mermaid_agent +from agents.qa_validator_agent import run_qa_validator_agent +from core.metrics_collector import MetricsCollector + +logger = logging.getLogger(__name__) + + +class SimplifiedDocuGenWorkflow: + """ + Simplified workflow with 9 micro-agents optimized for 8K context models. + + Architecture: + - 6 Section Writer Agents (write sections directly) + - Code Explorer: Project Overview + Features + - API Reference: API Reference + - Call Graph: Architecture + - Error Analysis: Troubleshooting + - Env Config: Configuration + - Dependency Analyzer: Prerequisites + Quick Start Deployment + - 1 Planner Agent (decides which sections to include) + - 1 Mermaid Agent (generates architecture diagram) + - 1 QA Agent (validates quality) + """ + + def __init__(self): + self.git_service = GitService() + self.log_manager = get_log_manager() + self.graph = None + self.checkpointer = None + self.metrics_collectors = {} # Store metrics collector per job_id + + def _get_metrics_collector(self, job_id: str) -> MetricsCollector: + """Get or create metrics collector for a job""" + if job_id not in self.metrics_collectors: + self.metrics_collectors[job_id] = MetricsCollector(job_id) + return self.metrics_collectors[job_id] + + def _get_target_path(self, state: DocGenState) -> str: + """ + Get the target path for agent analysis. + If user selected specific projects, return path to that project. + Otherwise, return full repo path. 
+ """ + repo_path = state["repo_path"] + selected_projects = state.get("selected_projects") + + if selected_projects and len(selected_projects) > 0: + import os + return os.path.join(repo_path, selected_projects[0]) + return repo_path + + def _get_final_assistant_text(self, messages) -> str: + """ + Extract the last non-empty AIMessage content from LangGraph result. + + FIX: messages[-1] is not guaranteed to be the final assistant answer. + It can be a ToolMessage, intermediate AIMessage, or truncated stub. + """ + # Walk backwards and return the last assistant AIMessage with non-empty content + for m in reversed(messages or []): + if isinstance(m, AIMessage) and isinstance(getattr(m, "content", None), str): + txt = m.content.strip() + if txt: + return txt + # Fallback + return (messages[-1].content or "").strip() if messages else "" + + def _store_section(self, sections_dict: Dict[str, str], heading: str, section_md: str): + """ + Store section with guard against overwriting good content with stubs. + + FIX: Don't replace a complete section with an empty header-only stub. + """ + new_md = (section_md or "").strip() + old_md = (sections_dict.get(heading) or "").strip() + + # Don't overwrite a real section (>= 80 chars) with a stub (< 80 chars) + if len(new_md) < 80 and len(old_md) >= 80: + logger.warning(f"[Parser] Skipping stub overwrite for '{heading}': {len(new_md)} < {len(old_md)} chars") + return + + sections_dict[heading] = new_md + + def _parse_and_store_sections(self, output: str, sections_dict: Dict[str, str]): + """ + Parse agent output to extract markdown sections with subsection handling. + + FIX: Merges ### subsections back into their parent ## sections. 
+ + Logic: + - If we see "## Parent" followed by "### Child", merge them into one section + - If we see standalone "## Section" with content, store it as-is + """ + # Split by ## headings but preserve them + lines = output.split('\n') + current_section = None + current_content = [] + + i = 0 + while i < len(lines): + line = lines[i] + + # Check if this is a ## heading + if line.startswith('## '): + # Store previous section if exists + if current_section and current_content: + full_content = '\n'.join(current_content).strip() + self._store_section(sections_dict, current_section, full_content) + + # Start new section + current_section = line.replace('##', '').strip() + current_content = [line] # Include the heading itself + + elif line.strip(): # Non-empty line + if current_section: + current_content.append(line) + else: # Empty line + if current_section: + current_content.append(line) + + i += 1 + + # Store last section + if current_section and current_content: + full_content = '\n'.join(current_content).strip() + self._store_section(sections_dict, current_section, full_content) + + async def create_workflow(self) -> StateGraph: + """Build the simplified 10-agent workflow""" + workflow = StateGraph(DocGenState) + + # Add nodes + workflow.add_node("clone_repository", self.clone_repository_node) + workflow.add_node("project_detection", self.project_detection_node) + + # Section Writer Agents + workflow.add_node("code_explorer", self.code_explorer_node) + workflow.add_node("api_reference", self.api_reference_node) + workflow.add_node("call_graph", self.call_graph_node) + workflow.add_node("error_analysis", self.error_analysis_node) + workflow.add_node("env_config", self.env_config_node) + workflow.add_node("dependency_analyzer", self.dependency_analyzer_node) + + # Evidence Aggregation + workflow.add_node("evidence_aggregator", self.evidence_aggregator_node) + + # Planning + workflow.add_node("planner", self.planner_node) + + # Mermaid Diagram Generation + 
workflow.add_node("mermaid", self.mermaid_node) + + # QA Validation + workflow.add_node("qa_validator", self.qa_validator_node) + + # Final assembly + workflow.add_node("assembly", self.assembly_node) + + # Define workflow flow (NEW: analysis → evidence_aggregator → planner → mermaid → QA → assembly) + workflow.set_entry_point("clone_repository") + workflow.add_edge("clone_repository", "project_detection") + workflow.add_edge("project_detection", "code_explorer") + workflow.add_edge("code_explorer", "api_reference") + workflow.add_edge("api_reference", "call_graph") + workflow.add_edge("call_graph", "error_analysis") + workflow.add_edge("error_analysis", "env_config") + workflow.add_edge("env_config", "dependency_analyzer") + workflow.add_edge("dependency_analyzer", "evidence_aggregator") # NEW: aggregate evidence after all analysis + workflow.add_edge("evidence_aggregator", "planner") # Planner uses evidence for routing + workflow.add_edge("planner", "mermaid") + workflow.add_edge("mermaid", "qa_validator") + workflow.add_edge("qa_validator", "assembly") + workflow.add_edge("assembly", END) + + # Use memory checkpointer (ephemeral) + checkpointer = MemorySaver() + compiled = workflow.compile(checkpointer=checkpointer) + + self.graph = compiled + self.checkpointer = checkpointer + return compiled + + async def clone_repository_node(self, state: DocGenState) -> DocGenState: + """Clone the GitHub repository (reuse existing implementation)""" + job_id = state["job_id"] + repo_url = state["repo_url"] + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message=f"📦 Cloning repository: {repo_url}" + ) + + try: + parsed_url = parse_github_url(repo_url) + + def progress_callback(message: str): + import asyncio + try: + loop = asyncio.get_event_loop() + loop.create_task(self.log_manager.log_async( + job_id=job_id, + log_type=LogType.INFO, + message=message + )) + except RuntimeError: + logger.info(message) + + repo_path, metadata = 
self.git_service.clone_repository( + parsed_url["clone_url"], + branch=parsed_url["branch"], + progress_callback=progress_callback + ) + + if parsed_url["is_subfolder"]: + import os + target_path = os.path.join(repo_path, parsed_url["subfolder"]) + if not os.path.exists(target_path): + raise ValueError(f"Subfolder '{parsed_url['subfolder']}' not found") + state["repo_path"] = target_path + state["repo_name"] = parsed_url["display_name"] + state["is_subfolder_target"] = True + else: + state["repo_path"] = repo_path + state["repo_name"] = parsed_url["repo"] + state["is_subfolder_target"] = False + + state["default_branch"] = metadata["default_branch"] + state["workflow_status"] = "detecting" + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Repository cloned successfully" + ) + + return state + + except Exception as e: + logger.error(f"Clone failed: {e}") + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.ERROR, + message=f"❌ Clone failed: {str(e)}" + ) + state["error"] = str(e) + state["workflow_status"] = "failed" + raise + + async def project_detection_node(self, state: DocGenState) -> DocGenState: + """Detect projects in repository (reuse existing implementation)""" + job_id = state["job_id"] + repo_path = state["repo_path"] + is_subfolder = state.get("is_subfolder_target", False) + + if is_subfolder: + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message=f"🎯 Targeting subfolder: {state['repo_name']}" + ) + state["awaiting_project_selection"] = False + state["selected_projects"] = None + state["is_monorepo"] = False + state["detected_projects"] = [] + state["workflow_status"] = "analyzing" + return state + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="🔍 Detecting projects..." 
+ ) + + try: + detection_result = detect_projects(repo_path) + state["is_monorepo"] = detection_result["is_monorepo"] + state["detected_projects"] = detection_result["projects"] + state["skipped_folders"] = detection_result.get("skipped_folders", []) + + if detection_result["project_count"] <= 1: + state["awaiting_project_selection"] = False + state["selected_projects"] = None + state["workflow_status"] = "analyzing" + else: + # Multiple projects - need selection (handled by server) + state["awaiting_project_selection"] = True + state["workflow_status"] = "awaiting_selection" + + return state + + except Exception as e: + logger.error(f"Detection failed: {e}") + state["awaiting_project_selection"] = False + state["selected_projects"] = None + state["workflow_status"] = "analyzing" + return state + + # Section Writer Agents + async def code_explorer_node(self, state: DocGenState) -> DocGenState: + """Run Code Explorer agent - writes Overview + Features sections""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("CodeExplorer") + + # CRITICAL: Update repo_name to selected project name if project was selected + selected_projects = state.get("selected_projects") + if selected_projects and len(selected_projects) > 0: + # Update repo_name to the selected project folder name + state["repo_name"] = selected_projects[0] + logger.info(f"[CodeExplorer] Updated repo_name to selected project: {state['repo_name']}") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message=f"🔍 Running Overview & Features Writer (1/7) for {state['repo_name']}...", + agent_name="CodeExplorer" + ) + + llm = get_llm(model_name=settings.CODE_EXPLORER_MODEL, temperature=0.7) + result = await run_code_explorer_agent(llm=llm, repo_path=target_path, job_id=job_id) + + if result.get("success"): + # Parse output to extract two sections: 
Project Overview and Features + output = result.get("output", "") + sections_dict = state.get("readme_sections") or {} + + # Extract sections from output (they're in format: ## Section Name\n\nContent...) + self._parse_and_store_sections(output, sections_dict) + state["readme_sections"] = sections_dict + + logger.info(f"[CodeExplorer] Stored sections: {list(sections_dict.keys())}") + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Overview & Features sections completed", + agent_name="CodeExplorer" + ) + + # End metrics tracking (success) + metrics.end_agent( + "CodeExplorer", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"CodeExplorer failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("CodeExplorer", success=False, error_message=result.get('error')) + + state["current_agent"] = "CodeExplorer" + return state + + async def api_reference_node(self, state: DocGenState) -> DocGenState: + """Run API Reference agent - extracts endpoint data (no markdown sections)""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("APIReference") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="📡 Running API Endpoint Extractor (2/7)...", + agent_name="APIReference" + ) + + llm = get_llm(model_name=settings.API_REFERENCE_MODEL, temperature=0.7) + result = await run_api_reference_agent(llm=llm, repo_path=target_path, job_id=job_id) + + if result.get("success"): + output = result.get("output", "") + # Parse JSON output to extract endpoint data + import json + import re + try: + # Try to find JSON in output (might be wrapped in markdown code block) + json_match = 
re.search(r'```json\s*(\{.*?\})\s*```', output, re.DOTALL) + if json_match: + json_str = json_match.group(1) + else: + # Try to find raw JSON object + json_match = re.search(r'\{.*\}', output, re.DOTALL) + if json_match: + json_str = json_match.group(0) + else: + json_str = output + + endpoint_data = json.loads(json_str) + state["api_endpoints"] = endpoint_data.get("endpoints", []) + logger.info(f"[APIReference] Extracted {len(state['api_endpoints'])} endpoints") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message=f"✅ Extracted {len(state['api_endpoints'])} API endpoints", + agent_name="APIReference" + ) + # End metrics tracking (success) + metrics.end_agent( + "APIReference", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + except (json.JSONDecodeError, AttributeError) as e: + logger.warning(f"[APIReference] Failed to parse JSON: {e}. 
Storing empty endpoint list.") + state["api_endpoints"] = [] + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WARNING, + message="⚠️ No API endpoints extracted", + agent_name="APIReference" + ) + # End metrics tracking (success with warning) + metrics.end_agent( + "APIReference", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"APIReference failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("APIReference", success=False, error_message=result.get('error')) + + state["current_agent"] = "APIReference" + return state + + async def call_graph_node(self, state: DocGenState) -> DocGenState: + """Run Call Graph agent - writes Architecture section""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("CallGraph") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="🔗 Running Architecture Writer (3/7)...", + agent_name="CallGraph" + ) + + llm = get_llm(model_name=settings.CALL_GRAPH_MODEL, temperature=0.7) + result = await run_call_graph_agent(llm=llm, repo_path=target_path, job_id=job_id) + + if result.get("success"): + output = result.get("output", "") + sections_dict = state.get("readme_sections") or {} + self._parse_and_store_sections(output, sections_dict) + state["readme_sections"] = sections_dict + + logger.info(f"[CallGraph] Stored sections: {list(sections_dict.keys())}") + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Architecture section completed", + agent_name="CallGraph" + ) + # End metrics tracking (success) + metrics.end_agent( + "CallGraph", + success=True, + input_tokens=result.get("input_tokens", 0), + 
output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"CallGraph failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("CallGraph", success=False, error_message=result.get('error')) + + state["current_agent"] = "CallGraph" + return state + + async def error_analysis_node(self, state: DocGenState) -> DocGenState: + """Run Error Analysis agent - writes Troubleshooting section""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("ErrorAnalysis") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="⚠️ Running Troubleshooting Writer (4/7)...", + agent_name="ErrorAnalysis" + ) + + llm = get_llm(model_name=settings.ERROR_ANALYSIS_MODEL, temperature=0.7) + result = await run_error_analysis_agent(llm=llm, repo_path=target_path, job_id=job_id) + + if result.get("success"): + output = result.get("output", "") + sections_dict = state.get("readme_sections") or {} + self._parse_and_store_sections(output, sections_dict) + state["readme_sections"] = sections_dict + + logger.info(f"[ErrorAnalysis] Stored sections: {list(sections_dict.keys())}") + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Troubleshooting section completed", + agent_name="ErrorAnalysis" + ) + # End metrics tracking (success) + metrics.end_agent( + "ErrorAnalysis", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"ErrorAnalysis failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("ErrorAnalysis", success=False, error_message=result.get('error')) + + 
state["current_agent"] = "ErrorAnalysis" + return state + + async def env_config_node(self, state: DocGenState) -> DocGenState: + """Run Environment Config agent - writes Configuration section""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("EnvConfig") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="⚙️ Running Configuration Writer (5/7)...", + agent_name="EnvConfig" + ) + + llm = get_llm(model_name=settings.ENV_CONFIG_MODEL, temperature=0.7) + result = await run_env_config_agent(llm=llm, repo_path=target_path, job_id=job_id) + + if result.get("success"): + output = result.get("output", "") + + # DEBUG: Log raw output + logger.info(f"[EnvConfig] raw_output_head={output[:400]!r}") + logger.info(f"[EnvConfig] raw_output_tail={output[-400:]!r}") + + sections_dict = state.get("readme_sections") or {} + self._parse_and_store_sections(output, sections_dict) + state["readme_sections"] = sections_dict + + logger.info(f"[EnvConfig] Stored sections: {list(sections_dict.keys())}") + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Configuration section completed", + agent_name="EnvConfig" + ) + # End metrics tracking (success) + metrics.end_agent( + "EnvConfig", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"EnvConfig failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("EnvConfig", success=False, error_message=result.get('error')) + + state["current_agent"] = "EnvConfig" + return state + + async def dependency_analyzer_node(self, state: DocGenState) -> DocGenState: + """Run Dependency Analyzer agent - writes Prerequisites & Deployment sections""" + 
job_id = state["job_id"] + target_path = self._get_target_path(state) + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("DependencyAnalyzer") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="📦 Running Prerequisites & Deployment Writer (6/7)...", + agent_name="DependencyAnalyzer" + ) + + llm = get_llm(model_name=settings.DEPENDENCY_ANALYZER_MODEL, temperature=0.7) + repo_url = state.get("repo_url", "") + result = await run_dependency_analyzer_agent(llm=llm, repo_path=target_path, job_id=job_id, repo_url=repo_url) + + if result.get("success"): + output = result.get("output", "") + + # DEBUG: Log raw output + logger.info(f"[DependencyAnalyzer] raw_output_head={output[:400]!r}") + logger.info(f"[DependencyAnalyzer] raw_output_tail={output[-400:]!r}") + + sections_dict = state.get("readme_sections") or {} + self._parse_and_store_sections(output, sections_dict) + state["readme_sections"] = sections_dict + + logger.info(f"[DependencyAnalyzer] Stored sections: {list(sections_dict.keys())}") + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Prerequisites & Deployment sections completed", + agent_name="DependencyAnalyzer" + ) + # End metrics tracking (success) + metrics.end_agent( + "DependencyAnalyzer", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"DependencyAnalyzer failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("DependencyAnalyzer", success=False, error_message=result.get('error')) + + state["current_agent"] = "DependencyAnalyzer" + return state + + # Evidence Aggregation + async def evidence_aggregator_node(self, state: DocGenState) -> DocGenState: + """ + DUAL-MODE Evidence Aggregator - Collects evidence from file 
system and agent outputs. + + This node creates the central EvidencePacket by: + 1. Direct file system checks (requirements.txt, package.json, etc.) + 2. Extracting structured data from agent outputs (dual-mode: supports both strings and JSON) + + FIX 1: Dual-mode aggregator - works with current string outputs AND future JSON outputs + FIX 2: Deterministic repo_name derivation from URL or folder path + FIX: Uses target_path (selected project) instead of repo_path (root) + """ + job_id = state["job_id"] + repo_url = state.get("repo_url", "") + readme_sections = state.get("readme_sections", {}) + + # CRITICAL: Use target path (respects project selection) + target_path = self._get_target_path(state) + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="📊 Aggregating evidence from repository...", + agent_name="EvidenceAggregator" + ) + + try: + # FIX 2: Derive repo_name deterministically (never from LLM output) + # Use selected project name if available, otherwise derive from URL/path + selected_projects = state.get("selected_projects") + if selected_projects and len(selected_projects) > 0: + repo_name = selected_projects[0] + elif repo_url: + # Parse from GitHub URL: github.com/user/repo-name + match = re.search(r'github\.com/[^/]+/([^/\.]+)', repo_url) + repo_name = match.group(1) if match else os.path.basename(target_path) + else: + # Use folder name + repo_name = os.path.basename(target_path) if target_path else "Repository" + + # Initialize evidence packet with target_path + evidence = EvidencePacket(repo_name=repo_name, repo_path=target_path) + + # === DIRECT FILE SYSTEM CHECKS (don't rely on agent outputs) === + + # Check for Python dependencies (root and subdirectories) + python_dep_locations = [ + os.path.join(target_path, "requirements.txt"), + os.path.join(target_path, "api", "requirements.txt"), + os.path.join(target_path, "backend", "requirements.txt"), + os.path.join(target_path, "server", 
"requirements.txt") + ] + + for requirements_path in python_dep_locations: + if os.path.exists(requirements_path): + rel_path = os.path.relpath(requirements_path, target_path) + evidence.add_evidence(EvidenceItem( + category="dependency", + key=rel_path, + value="Python dependencies", + source_files=[rel_path] + )) + try: + with open(requirements_path, 'r', encoding='utf-8', errors='ignore') as f: + deps = [ + line.split('==')[0].split('>=')[0].split('~=')[0].strip() + for line in f + if line.strip() and not line.startswith('#') + ] + evidence.python_deps.extend(deps) + logger.info(f"[Evidence] Found {len(deps)} Python dependencies in {rel_path}") + except Exception as e: + logger.warning(f"[Evidence] Failed to parse {rel_path}: {e}") + + if evidence.python_deps: + evidence.has_backend = True + # Remove duplicates + evidence.python_deps = list(set(evidence.python_deps)) + + # Check for Node.js dependencies (root and subdirectories) + node_dep_locations = [ + os.path.join(target_path, "package.json"), + os.path.join(target_path, "ui", "package.json"), + os.path.join(target_path, "frontend", "package.json"), + os.path.join(target_path, "client", "package.json") + ] + + for package_json_path in node_dep_locations: + if os.path.exists(package_json_path): + rel_path = os.path.relpath(package_json_path, target_path) + evidence.has_frontend = True + try: + with open(package_json_path, 'r', encoding='utf-8') as f: + pkg = json.load(f) + deps = list(pkg.get("dependencies", {}).keys()) + evidence.node_deps.extend(deps) + + # Detect frontend framework + if "react" in deps and not evidence.frontend_framework: + evidence.frontend_framework = "React" + elif "vue" in deps and not evidence.frontend_framework: + evidence.frontend_framework = "Vue" + elif "@angular/core" in deps and not evidence.frontend_framework: + evidence.frontend_framework = "Angular" + + evidence.add_evidence(EvidenceItem( + category="dependency", + key=rel_path, + value=f"Node.js project with {len(deps)} 
dependencies", + source_files=[rel_path] + )) + logger.info(f"[Evidence] Found {len(deps)} Node dependencies in {rel_path}") + except Exception as e: + logger.warning(f"[Evidence] Failed to parse {rel_path}: {e}") + + if evidence.node_deps: + # Remove duplicates + evidence.node_deps = list(set(evidence.node_deps)) + + # Check for Docker + dockerfile_path = os.path.join(target_path, "Dockerfile") + compose_path = os.path.join(target_path, "docker-compose.yml") + if os.path.exists(dockerfile_path): + evidence.has_docker = True + evidence.docker_files.append("Dockerfile") + evidence.add_evidence(EvidenceItem( + category="infrastructure", + key="Dockerfile", + value="Docker containerization", + source_files=["Dockerfile"] + )) + if os.path.exists(compose_path): + evidence.has_docker = True + evidence.docker_files.append("docker-compose.yml") + evidence.add_evidence(EvidenceItem( + category="infrastructure", + key="docker-compose.yml", + value="Docker Compose orchestration", + source_files=["docker-compose.yml"] + )) + + if evidence.has_docker: + logger.info(f"[Evidence] Found Docker files: {evidence.docker_files}") + + # Check for .env files + env_example_path = os.path.join(target_path, ".env.example") + if os.path.exists(env_example_path): + evidence.env_files.append(".env.example") + evidence.add_evidence(EvidenceItem( + category="config", + key=".env.example", + value="Environment configuration template", + source_files=[".env.example"] + )) + logger.info("[Evidence] Found .env.example") + + # Extract API endpoints from state (populated by API Reference agent) + api_endpoints = state.get("api_endpoints", []) + if api_endpoints: + evidence.api_endpoints = api_endpoints + logger.info(f"[Evidence] Extracted {len(api_endpoints)} API endpoints from state") + + # Detect languages from file extensions + try: + for root, dirs, files in os.walk(target_path): + # Skip node_modules, .git, venv, etc. 
+ dirs[:] = [d for d in dirs if d not in ['.git', 'node_modules', 'venv', '__pycache__', 'dist', 'build']] + + for file in files: + ext = os.path.splitext(file)[1] + if ext in ['.py', '.js', '.jsx', '.ts', '.tsx', '.java', '.go', '.rs', '.cpp', '.c', '.rb']: + lang_map = { + '.py': 'Python', '.js': 'JavaScript', '.jsx': 'JavaScript', + '.ts': 'TypeScript', '.tsx': 'TypeScript', '.java': 'Java', + '.go': 'Go', '.rs': 'Rust', '.cpp': 'C++', '.c': 'C', '.rb': 'Ruby' + } + lang = lang_map.get(ext, 'Unknown') + evidence.languages[lang] = evidence.languages.get(lang, 0) + 1 + + logger.info(f"[Evidence] Detected languages: {evidence.languages}") + except Exception as e: + logger.warning(f"[Evidence] Failed to detect languages: {e}") + + # Store evidence packet in state + state["evidence_packet"] = evidence + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message=f"✅ Evidence aggregated: {len(evidence.python_deps)} Python deps, {len(evidence.node_deps)} Node deps, {len(evidence.docker_files)} Docker files", + agent_name="EvidenceAggregator" + ) + + logger.info(f"[Evidence] Final evidence: {evidence.to_dict()}") + + except Exception as e: + logger.error(f"Evidence aggregation failed: {e}") + # Create minimal evidence packet on failure + state["evidence_packet"] = EvidencePacket( + repo_name=os.path.basename(target_path) if target_path else "Repository", + repo_path=target_path + ) + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WARNING, + message=f"⚠️ Evidence aggregation failed, using minimal evidence: {str(e)}", + agent_name="EvidenceAggregator" + ) + + state["current_agent"] = "EvidenceAggregator" + return state + + # Planning + async def planner_node(self, state: DocGenState) -> DocGenState: + """Run Planner agent - decides which sections to include""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + 
metrics.start_agent("Planner") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="📋 Running Planner (7/7)...", + agent_name="Planner" + ) + + llm = get_llm(model_name=settings.PLANNER_MODEL, temperature=0.7) + result = await run_planner_agent(llm=llm, repo_path=target_path, job_id=job_id) + + if result.get("success"): + output = result.get("output", "") + # Try to parse JSON output + import json + try: + plan_data = json.loads(output) + state["project_type"] = plan_data.get("project_type", "Unknown") + state["documentation_sections"] = plan_data.get("sections", ["Project Overview", "Features", "Architecture", "Prerequisites", "Quick Start Deployment", "Troubleshooting"]) + logger.info(f"[Planner] Planned sections: {state['documentation_sections']}") + except: + state["project_type"] = "Unknown" + state["documentation_sections"] = ["Project Overview", "Features", "Architecture", "Prerequisites", "Quick Start Deployment", "Troubleshooting"] + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message=f"✅ Planner completed - {len(state['documentation_sections'])} sections planned", + agent_name="Planner" + ) + # End metrics tracking (success) + metrics.end_agent( + "Planner", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"Planner failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("Planner", success=False, error_message=result.get('error')) + + state["current_agent"] = "Planner" + return state + + async def mermaid_node(self, state: DocGenState) -> DocGenState: + """Run Mermaid Diagram agent - generates architecture diagram with semantic validation""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + evidence_packet = state.get("evidence_packet") + + # 
Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("Mermaid") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="📊 Generating architecture diagram...", + agent_name="Mermaid" + ) + + llm = get_llm(model_name=settings.MERMAID_MODEL, temperature=0.7) + # Pass API endpoints from state to Mermaid agent + api_endpoints = state.get("api_endpoints", []) + result = await run_mermaid_agent(llm=llm, repo_path=target_path, job_id=job_id, api_endpoints=api_endpoints) + + if result.get("success"): + diagram_output = result.get("output", "") + # Extract only the mermaid code (remove markdown blocks and extra text) + diagram_code = self._extract_mermaid_code(diagram_output) + if diagram_code: + # === NEW: Semantic Validation (FIX 4 applied) === + is_valid, errors = self._validate_mermaid_semantics(diagram_code, evidence_packet) + + if not is_valid: + logger.warning(f"[Mermaid] Semantic validation failed: {errors}") + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WARNING, + message=f"⚠️ Diagram has semantic issues: {', '.join(errors[:2])}", + agent_name="Mermaid" + ) + # Note: We still use the diagram, but log the issues + # Future enhancement: add a retry mechanism here + + state["mermaid_diagrams"] = {"architecture": diagram_code} + logger.info(f"[Mermaid] Extracted diagram: {len(diagram_code)} chars") + else: + state["mermaid_diagrams"] = {} + logger.warning("[Mermaid] Could not extract valid mermaid code") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Mermaid Generator completed", + agent_name="Mermaid" + ) + # End metrics tracking (success) + metrics.end_agent( + "Mermaid", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"Mermaid failed: 
{result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("Mermaid", success=False, error_message=result.get('error')) + + state["current_agent"] = "Mermaid" + return state + + def _extract_mermaid_code(self, text: str) -> str: + """Extract clean mermaid code from agent output""" + # Try to find mermaid code block + patterns = [ + r'```mermaid\s+(.*?)\s+```', # ```mermaid ... ``` + r'```\s+(graph\s+TD.*?)```', # ``` graph TD ... ``` + r'(graph\s+TD.*?)(?=\n\n|\Z)' # graph TD ... (until double newline or end) + ] + + for pattern in patterns: + match = re.search(pattern, text, re.DOTALL) + if match: + code = match.group(1).strip() + # Validate it starts with graph/flowchart + if code.startswith(('graph ', 'flowchart ', 'sequenceDiagram', 'classDiagram')): + return code + + # Fallback: if text starts with graph/flowchart directly + if text.strip().startswith(('graph ', 'flowchart ')): + return text.strip() + + return "" + + def _validate_mermaid_semantics(self, diagram_code: str, evidence_packet) -> tuple: + """ + Validate Mermaid diagram semantics (FIX 4 applied - minimal rules only). 
+ + Returns: + (is_valid, errors_list) + """ + errors = [] + lines = diagram_code.split('\n') + + # Rule 1: No endpoint nodes with paths/methods + endpoint_patterns = ['/upload', '/query', '/health', '/api/', 'GET ', 'POST ', 'PUT ', 'DELETE '] + for line in lines: + for pattern in endpoint_patterns: + if pattern in line and '[' in line: + errors.append(f"Diagram contains endpoint/route node: {pattern}") + break + + # Rule 2: Must include User or Client + has_user = any(('User' in line or 'Client' in line) and '[' in line for line in lines) + if not has_user: + errors.append("Diagram should include User or Client node") + + # Rule 3: Must have Backend if backend exists + if evidence_packet and evidence_packet.has_backend: + has_backend = any(('API' in line or 'Backend' in line or 'Server' in line) + and '[' in line for line in lines) + if not has_backend: + errors.append("Diagram missing Backend/API node (backend detected in repo)") + + # Rule 4: Must have Frontend if frontend exists + if evidence_packet and evidence_packet.has_frontend: + has_frontend = any(('Frontend' in line or 'UI' in line or 'Client' in line or 'Web' in line) + and '[' in line for line in lines) + if not has_frontend: + errors.append(f"Diagram missing Frontend/UI node ({evidence_packet.frontend_framework} detected)") + + # Rule 5: Must have Database if common DB deps found + if evidence_packet: + db_deps = ['sqlalchemy', 'psycopg2', 'pymongo', 'mysql', 'redis', 'elasticsearch'] + has_db_dep = any(dep in [d.lower() for d in evidence_packet.python_deps] for dep in db_deps) + if has_db_dep: + has_db_node = any(('Database' in line or 'DB' in line or 'Storage' in line or 'Cache' in line) + and '[' in line for line in lines) + if not has_db_node: + errors.append("Diagram missing Database node (database dependency detected)") + + return (len(errors) == 0, errors) + + # QA Validation + async def qa_validator_node(self, state: DocGenState) -> DocGenState: + """Run QA Validator agent - validates 
sections BEFORE assembly with evidence-based guardrails""" + job_id = state["job_id"] + readme_sections = state.get("readme_sections", {}) + evidence_packet = state.get("evidence_packet") + + # Start metrics tracking + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("QAValidator") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="✅ Validating README sections with evidence checks...", + agent_name="QAValidator" + ) + + llm = get_llm(model_name=settings.QA_VALIDATOR_MODEL, temperature=0.7) + result = await run_qa_validator_agent( + llm=llm, + readme_sections=readme_sections, + job_id=job_id, + evidence_packet=evidence_packet + ) + + if result.get("success"): + qa_output = result.get("output", "") + # Try to parse QA score + import json + try: + qa_data = json.loads(qa_output) + state["qa_score"] = qa_data.get("qa_score", 75) + state["qa_passed"] = qa_data.get("qa_passed", True) + state["qa_validation_result"] = qa_data + except: + state["qa_score"] = 75 + state["qa_passed"] = True + state["qa_validation_result"] = {"output": qa_output} + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message=f"✅ QA Validator completed (Score: {state.get('qa_score', 'N/A')})", + agent_name="QAValidator" + ) + # End metrics tracking (success) + metrics.end_agent( + "QAValidator", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + logger.error(f"QAValidator failed: {result.get('error')}") + # End metrics tracking (failure) + metrics.end_agent("QAValidator", success=False, error_message=result.get('error')) + + state["current_agent"] = "QAValidator" + return state + + async def assembly_node(self, state: DocGenState) -> DocGenState: + """Assemble final README with GenAISamples structure (EXACT logic from docugen)""" + job_id = 
state["job_id"] + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="🔨 Assembling final README..." + ) + + readme_parts = [] + readme_sections = state.get("readme_sections", {}) + mermaid_diagrams = state.get("mermaid_diagrams", {}) + repo_name = state.get('repo_name', 'Project') + + # Helper function to convert kebab-case/snake_case to Title Case + def to_title_case(text: str) -> str: + """Convert 'rag-chatbot' or 'doc_summarization' to 'Rag Chatbot' or 'Doc Summarization'""" + return ' '.join(word.capitalize() for word in text.replace('-', ' ').replace('_', ' ').split()) + + # 1. ## Title (H2, not H1!) - Convert to Title Case + readme_parts.append(f"## {to_title_case(repo_name)}\n\n") + + # 2. Brief intro (extract ONLY first 1-2 sentences from Project Overview as teaser) + if "Project Overview" in readme_sections: + overview_content = readme_sections["Project Overview"] + # Extract content after heading + lines = overview_content.split('\n') + content_text = [] + found_heading = False + + for line in lines: + if line.startswith("## Project Overview"): + found_heading = True + continue + if found_heading and line.strip(): + content_text.append(line.strip()) + + # Join all content and split by sentences + full_text = ' '.join(content_text) + # Split by period followed by space (basic sentence splitting) + sentences = [s.strip() + '.' for s in full_text.split('. ') if s.strip()] + + # Take only the first sentence as intro (to avoid duplication) + if sentences: + intro = sentences[0] + readme_parts.append(f"{intro}\n\n") + + # Define explicit section order (chronological) + SECTION_ORDER = [ + "Project Overview", + "Features", + "Architecture", + "Prerequisites", + "Quick Start Deployment", + "User Interface", + "Configuration", + "Troubleshooting" + ] + + # 3. 
## Table of Contents (use explicit order) + readme_parts.append("## Table of Contents\n\n") + for section in SECTION_ORDER: + if section in readme_sections: + anchor = section.lower().replace(" ", "-").replace("/", "") + readme_parts.append(f"- [{section}](#{anchor})\n") + readme_parts.append("\n") + + # 4. --- separator + readme_parts.append("---\n\n") + + # 5. Add all sections in explicit order with --- separators + architecture_section_found = False + + for section in SECTION_ORDER: + if section not in readme_sections: + continue + + content = readme_sections[section] + # Add section content + readme_parts.append(content) + readme_parts.append("\n\n") + + # Insert Architecture diagram RIGHT after Architecture section content + if section.lower() == "architecture": + architecture_section_found = True + if mermaid_diagrams: + for diagram_name, diagram_code in mermaid_diagrams.items(): + readme_parts.append(f"```mermaid\n{diagram_code}\n```\n\n") + mermaid_diagrams = {} # Clear so we don't add again + + # Add --- separator after each section + readme_parts.append("---\n\n") + + # 6. If diagrams were generated but no Architecture section exists, add minimal Architecture section with diagram + if mermaid_diagrams and not architecture_section_found: + readme_parts.append("## Architecture\n\n") + for diagram_name, diagram_code in mermaid_diagrams.items(): + readme_parts.append(f"```mermaid\n{diagram_code}\n```\n\n") + readme_parts.append("---\n\n") + + state["final_readme"] = "".join(readme_parts) + state["workflow_status"] = "completed" + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.SUCCESS, + message="✅ Documentation generation complete!" 
+ ) + + # Finalize metrics and log summary + metrics = self._get_metrics_collector(job_id) + metrics.finalize_workflow() + summary = metrics.get_summary() + + # Build per-agent metrics display + agent_metrics_lines = [] + for agent in summary['agents']: + if agent['success']: + # Calculate per-agent TPS (output tokens per second) + agent_duration_sec = agent['duration_ms'] / 1000 if agent['duration_ms'] > 0 else 0 + agent_tps = agent['output_tokens'] / agent_duration_sec if agent_duration_sec > 0 else 0.0 + + agent_metrics_lines.append( + f" ├─ {agent['agent_name']}: total={agent['total_tokens']:,} " + f"(out={agent['output_tokens']:,}), {agent['duration_ms']:,.0f}ms, outTPS={agent_tps:.2f}" + ) + + agent_metrics_str = "\n".join(agent_metrics_lines) + + # Log metrics summary to agent logs panel + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.INFO, + message=f"\n📊 **Workflow Metrics Summary**\n" + f"├─ Total Agents: {summary['workflow']['total_agents']}\n" + f"├─ Successful: {summary['workflow']['successful_agents']}\n" + f"├─ Failed: {summary['workflow']['failed_agents']}\n" + f"├─ Total Duration: {summary['workflow']['total_duration_seconds']}s\n" + f"├─ Total Tokens: {summary['workflow']['total_tokens']:,}\n" + f"│ ├─ Input: {summary['workflow']['total_input_tokens']:,}\n" + f"│ └─ Output: {summary['workflow']['total_output_tokens']:,}\n" + f"├─ Total Tool Calls: {summary['workflow']['total_tool_calls']}\n" + f"├─ Total LLM Calls: {summary['workflow']['total_llm_calls']}\n" + f"├─ Workflow Average TPS: {summary['workflow']['average_tps']} tokens/sec\n" + f"│\n" + f"├─ **Per-Agent Metrics (Model TPS)**\n" + f"{agent_metrics_str}\n" + ) + + logger.info(f"[{job_id}] Metrics Summary: {summary}") + + # Cleanup + if state.get("repo_path"): + self.git_service.cleanup_repository(state["repo_path"]) + + return state + + +# Global workflow instance +_workflow: SimplifiedDocuGenWorkflow = None + + +async def get_workflow() -> 
SimplifiedDocuGenWorkflow: + """Get or create workflow instance""" + global _workflow + if _workflow is None: + _workflow = SimplifiedDocuGenWorkflow() + await _workflow.create_workflow() + return _workflow diff --git a/sample_solutions/Docugen-Microagents/docker-compose.yml b/sample_solutions/Docugen-Microagents/docker-compose.yml new file mode 100644 index 00000000..d3a973e0 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/docker-compose.yml @@ -0,0 +1,66 @@ +services: + backend: + build: + context: ./api + dockerfile: Dockerfile + container_name: Docugen-Microagents-backend + ports: + - "5001:5001" + env_file: + - ./api/.env + environment: + - PYTHONUNBUFFERED=1 + - HOST=0.0.0.0 + - API_PORT=5001 + volumes: + - ./api:/app + - backend-tmp:/app/tmp + - /var/run/docker.sock:/var/run/docker.sock # Docker socket for MCP server + extra_hosts: + - "${LOCAL_URL_ENDPOINT}:host-gateway" + networks: + - docugen-network + restart: unless-stopped + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:5001/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + frontend: + build: + context: ./ui + dockerfile: Dockerfile + container_name: Docugen-Microagents-frontend + ports: + - "3000:3000" + environment: + - HOST=0.0.0.0 + - PORT=3000 + - VITE_API_URL=/api + - VITE_API_TARGET=http://backend:5001 + volumes: + - ./ui:/app + - /app/node_modules + depends_on: + backend: + condition: service_healthy + networks: + - docugen-network + restart: unless-stopped + healthcheck: + test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:3000', (r) => process.exit(r.statusCode === 200 ? 
0 : 1))\""] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + +networks: + docugen-network: + driver: bridge + name: Docugen-Microagents-network + +volumes: + backend-tmp: + driver: local diff --git a/sample_solutions/Docugen-Microagents/docs/CUSTOMIZATION.md b/sample_solutions/Docugen-Microagents/docs/CUSTOMIZATION.md new file mode 100644 index 00000000..d769a212 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/docs/CUSTOMIZATION.md @@ -0,0 +1,1015 @@ +# DocuGen Micro-Agents - Customization Guide + +This documentation covers how to customize the DocuGen Micro-Agents application for organizational workflows, including modifying agent behavior, strategic file sampling, workflow logic, and model configurations. + +--- + +## Table of Contents + +1. [Overview](#overview) +2. [Architecture Summary](#architecture-summary) +3. [Strategic File Sampling](#strategic-file-sampling) +4. [Customizing Section Writer Agents](#customizing-section-writer-agents) +5. [Customizing Coordination Agents](#customizing-coordination-agents) +6. [Customizing Workflow Logic](#customizing-workflow-logic) +7. [Adding Custom Tools](#adding-custom-tools) +8. [Model Configuration](#model-configuration) +9. [Advanced Scenarios](#advanced-scenarios) + +--- + +## Overview + +DocuGen Micro-Agents is designed for extension at multiple layers: +- **Prompt engineering** - Modify agent-specific system prompts +- **Strategic sampling** - Configure file reading strategies for context optimization +- **Section customization** - Modify which sections are generated +- **Workflow modification** - Change execution flow, add validation gates +- **Tool extension** - Add custom repository analysis tools +- **Model configuration** - Use different LLM configurations per agent + +All customization points are centralized in well-defined files. This guide provides concrete examples for common modification scenarios. 
+ +--- + +## Architecture Summary + +### File Structure + +``` +Docugen-Microagents/api/ +├── agents/ +│ ├── code_explorer_agent.py # Overview + Features writer +│ ├── api_reference_agent.py # API endpoint extractor +│ ├── call_graph_agent.py # Architecture writer +│ ├── error_analysis_agent.py # Troubleshooting writer +│ ├── env_config_agent.py # Configuration writer +│ ├── dependency_analyzer_agent.py # Prerequisites + Deployment writer +│ ├── planner_agent.py # Section selector +│ ├── mermaid_agent.py # Diagram generator + validator +│ └── qa_validator_agent.py # Quality assurance +├── tools/ +│ └── repo_tools.py # Strategic file reading + analysis tools +├── core/ +│ └── metrics_collector.py # Token usage, TPS, duration tracking +├── models/ +│ ├── state.py # LangGraph state definition +│ └── evidence.py # Evidence-based architecture +├── workflow.py # LangGraph workflow orchestration +├── config.py # Configuration and settings +└── server.py # FastAPI server with SSE streaming +``` + +### Micro-Agent Execution Flow + +**Section Writer Agents** (6 agents - write sections directly): +1. **Code Explorer** - Writes "Project Overview" + "Features" sections +2. **API Reference** - Extracts API endpoints (structured data, not markdown) +3. **Call Graph** - Writes "Architecture" section +4. **Error Analysis** - Writes "Troubleshooting" section +5. **Env Config** - Writes "Configuration" section +6. **Dependency Analyzer** - Writes "Prerequisites" + "Quick Start Deployment" sections + +**Coordination Agents** (3 agents - orchestration): +7. **Evidence Aggregator** (node, not agent) - Consolidates filesystem evidence +8. **Planner** - Decides which sections to include in final README +9. **Mermaid Generator** - Creates and validates architecture diagram +10. **QA Validator** - Validates sections against evidence, checks for hallucinations + +**Post-Workflow:** +11. **Assembly** (node) - Combines sections into final README +12. 
**PR Agent** (optional) - Creates GitHub pull request via MCP + +--- + +## Strategic File Sampling + +**Location:** `Docugen-Microagents/api/tools/repo_tools.py` + +### Understanding Strategic Sampling + +The system implements three intelligent file reading strategies to optimize context usage with small language models (Qwen3-4B with 8K context window). All strategies work within a configurable line budget. + +**Configuration:** +```python +# In api/config.py +MAX_LINES_PER_FILE: int = 500 # Line budget per file (pattern_window extracts ~150-300 lines) + +# In api/.env +MAX_LINES_PER_FILE=500 # Line budget per file (pattern_window strategy extracts ~150-300 lines focusing on key patterns) +``` + +### Three Strategies + +#### 1. Full Strategy (simple) +```python +# Reads first N lines within budget +strategy="full" # Returns first 500 lines +``` + +**When to use:** +- Small files (< 200 lines) +- Configuration files +- Simple scripts + +**Behavior:** +- Deterministic and simple +- May miss important code at end of file +- Fast execution + +#### 2. Smart Strategy (structural) +```python +# Extracts top + function/class signatures + bottom +strategy="smart" # Returns ~100-200 lines +``` + +**Logic:** +```python +# From tools/repo_tools.py lines 95-126 +def _smart_sample_lines(lines: List[str], max_lines: int): + top_n = min(50, max_lines) + bottom_n = min(30, max_lines - top_n) + middle_budget = max(0, max_lines - top_n - bottom_n) + + # Extract function/class signatures from middle + sigs = [] + for ln in lines[top_n:len(lines) - bottom_n]: + if ln.lstrip().startswith(("def ", "async def ", "class ")): + sigs.append(ln) + + return top + sigs + bottom +``` + +**When to use:** +- Python modules with many functions +- Class-heavy OOP code +- Preserving file structure overview + +**Behavior:** +- Preserves imports (top) +- Captures all function/class signatures +- Includes file ending (bottom) +- Typical extraction: 100-200 lines + +#### 3. 
Pattern Window Strategy (context-aware) ⭐ DEFAULT +```python +# Extracts ±6 lines around high-value patterns +strategy="pattern_window" # Returns ~150-300 lines +``` + +**Logic:** +```python +# From tools/repo_tools.py lines 129-222 +def _pattern_window_lines(lines: List[str], max_lines: int): + patterns = [ + r"@app\.(get|post|put|delete)\b", # FastAPI routes + r"@router\.(get|post|put|delete)\b", # FastAPI router + r"\bAPIRouter\b", # Router definitions + r"\btry:\b", # Error handling + r"\bexcept\b", # Exception handlers + r"\braise\b", # Error raising + r'if __name__\s*==\s*[\'"]__main__[\'"]', # Entry points + r"\buvicorn\.run\b", # Server startup + r"\bdef main\(", # Main functions + ] + + # Find all pattern matches + # Create windows of ±6 lines around each match + # Merge overlapping windows + # Return within max_lines budget +``` + +**When to use:** (DEFAULT for all agents) +- API route files +- Error handling logic +- Entry point detection +- High-value code extraction + +**Behavior:** +- Detects FastAPI/Flask routes, error handlers, entry points +- Extracts ±6 lines around each match +- Falls back to smart strategy if no patterns found +- Typical extraction: 150-300 lines +- Maximizes information density + +### Customizing Sampling Strategy + +#### Example 1: Change Default Strategy for All Agents + +```python +# In tools/repo_tools.py, line 912 (Code Explorer binding) +@tool +def read_file( + file_path: str, + max_lines: int = None, + strategy: Literal["full", "smart", "pattern_window"] = "smart" # Change from "pattern_window" +) -> str: + """Read a file with smart strategy as default""" + return read_file_tool.func( + repo_path=repo_path, + file_path=file_path, + max_lines=max_lines, + strategy=strategy + ) +``` + +#### Example 2: Add Custom Pattern for Domain-Specific Code + +```python +# In tools/repo_tools.py, add to _pattern_window_lines patterns list +def _pattern_window_lines(lines: List[str], max_lines: int): + patterns = [ + # Existing 
patterns... + r"@app\.(get|post|put|delete)\b", + + # NEW: Domain-specific patterns for your organization + r"@dataclass", # Python dataclasses + r"@celery\.task", # Celery background tasks + r"class\s+\w+\(BaseModel\)", # Pydantic models + r"def\s+test_\w+", # Test functions + r"@pytest\.fixture", # Pytest fixtures + r"@tool", # LangChain tools + r"@app\.middleware", # Middleware definitions + r"@app\.exception_handler", # Exception handlers + ] +``` + +#### Example 3: Adjust Line Budget for Larger Context Models + +```python +# If using larger models with more context (e.g., 32K context) +# In api/.env +MAX_LINES_PER_FILE=800 # Increase budget for larger models + +# Pattern window will extract ~300-500 lines +# Smart strategy will extract ~200-400 lines +``` + +#### Example 4: Force Full Strategy for Specific File Types + +```python +# In tools/repo_tools.py, modify read_file_tool +@tool +def read_file_tool( + repo_path: str, + file_path: str, + max_lines: int = None, + strategy: Literal["full", "smart", "pattern_window"] = "full" +) -> str: + """Read file with strategy selection""" + if max_lines is None: + max_lines = settings.MAX_LINES_PER_FILE + + # Force full strategy for config files + if file_path.endswith(('.json', '.yaml', '.yml', '.toml', '.ini')): + strategy = "full" + + # Force pattern_window for API files + elif 'routes/' in file_path or 'api/' in file_path: + strategy = "pattern_window" + + # Original logic continues... +``` + +--- + +## Customizing Section Writer Agents + +### Code Explorer Agent (Overview + Features Writer) + +**Location:** `Docugen-Microagents/api/agents/code_explorer_agent.py` + +The Code Explorer writes two sections directly: "Project Overview" and "Features". + +#### Example: Customize Output Format + +```python +# In agents/code_explorer_agent.py +CODE_EXPLORER_SYSTEM_PROMPT = """You are an expert Code Explorer agent... 
+ +**Output Format (CRITICAL - FOLLOW EXACTLY):** + +## Project Overview + +[Write 2-3 paragraphs describing the application's purpose, architecture pattern, and value proposition] + +## Features + +**Backend Features:** +- Feature 1 (with file reference) +- Feature 2 (with evidence from code) +- Feature 3 (extracted from actual implementation) + +**Frontend Features:** +- Feature 1 (with component reference) +- Feature 2 (with framework details) + +**NEW REQUIREMENT - Security Features:** +- Authentication mechanism (with file:line reference) +- Authorization model (with evidence) +- Data encryption approach (if detected) +""" +``` + +#### Example: Force Deeper File Analysis + +```python +CODE_EXPLORER_SYSTEM_PROMPT = """... + +**Required Tool Usage Pattern (ENHANCED):** + +1. list_directory(".") - Get root structure +2. detect_languages() - Get language breakdown +3. extract_dependencies() - Get all dependencies +4. read_file("README.md", strategy="full") - Check existing docs +5. read_file("main entry file", strategy="pattern_window") - Analyze entry point + +**NEW: Deep Security Analysis** +6. list_directory("auth") or list_directory("middleware") - Find auth code +7. read_file("auth/main.py", strategy="pattern_window") - Extract auth logic +8. read_file(".env.example", strategy="full") - Identify secret requirements + +**NEW: API Analysis** +9. list_directory("routes") or list_directory("api") - Find API routes +10. read_file("routes/*.py", strategy="pattern_window") - Extract all endpoints +""" +``` + +### API Reference Agent (Endpoint Extractor) + +**Location:** `Docugen-Microagents/api/agents/api_reference_agent.py` + +This agent extracts API endpoints as structured JSON data (not markdown). + +#### Example: Customize Endpoint Extraction Format + +```python +# In agents/api_reference_agent.py +API_REFERENCE_SYSTEM_PROMPT = """... 
+ +**Output Format (JSON ONLY - NO MARKDOWN):** + +```json +{ + "endpoints": [ + { + "method": "POST", + "path": "/api/documents", + "file": "api/routes/documents.py", + "line": 45, + "description": "Upload and process document", + "request_body": "DocumentInput model", + "response": "DocumentResponse model", + "auth_required": true, + "rate_limit": "10/minute" + } + ] +} +``` + +**NEW FIELDS TO EXTRACT:** +- auth_required: Check for @requires_auth, Depends(get_current_user), etc. +- rate_limit: Look for @limiter.limit decorators +- deprecated: Check for @deprecated decorators +""" +``` + +### Dependency Analyzer Agent (Prerequisites + Deployment Writer) + +**Location:** `Docugen-Microagents/api/agents/dependency_analyzer_agent.py` + +#### Example: Add Docker Compose Analysis + +```python +DEPENDENCY_ANALYZER_SYSTEM_PROMPT = """... + +**Required Analysis:** + +1. **Prerequisites Section:** + - Runtime versions (Python 3.11+, Node 20+, etc.) + - System dependencies (Redis, PostgreSQL, etc.) + - API keys and credentials + +2. **Quick Start Deployment Section:** + - Installation commands + - Setup instructions + - Running the application + +**NEW: Docker Compose Analysis** +3. If docker-compose.yml exists: + - Read docker-compose.yml with strategy="full" + - List all services defined + - Extract port mappings + - Document volume mounts + - Include docker-compose up command + + Example output: + ```markdown + ## Quick Start Deployment + + ### Docker Deployment (Recommended) + + The application uses Docker Compose with 3 services: + - backend (FastAPI) - Port 8000 + - frontend (React) - Port 3000 + - database (PostgreSQL) - Port 5432 + + \`\`\`bash + docker-compose up -d + \`\`\` + ``` +""" +``` + +--- + +## Customizing Coordination Agents + +### Planner Agent (Section Selector) + +**Location:** `Docugen-Microagents/api/agents/planner_agent.py` + +The Planner decides which sections to include in the final README based on project type. 
+ +#### Example: Add Custom "Performance" Section + +```python +# In agents/planner_agent.py +PLANNER_SYSTEM_PROMPT = """... + +**Standard Sections (REQUIRED for ALL projects):** +1. Project Overview +2. Features +3. Architecture +4. Prerequisites +5. Quick Start Deployment + +**Conditional Sections (include if applicable):** +6. User Interface - If frontend detected (React, Vue, Angular) +7. Configuration - If .env.example exists +8. Troubleshooting - If error handlers or try/except blocks found + +**NEW: Performance Section (conditional)** +9. Performance - Include if: + - Caching detected (Redis, Memcached dependencies) + - Performance monitoring (Prometheus, Datadog integrations) + - Load balancing configuration (nginx.conf, HAProxy) + - Database optimization (indexes, query optimization comments) + - CDN configuration (Cloudflare, Fastly) + +**Detection Logic for Performance Section:** +```python +# Check for performance indicators +if any(dep in dependencies for dep in ['redis', 'memcached', 'prometheus-client']): + sections.append("Performance") +if 'nginx.conf' in config_files or 'load_balancer' in directories: + sections.append("Performance") +``` +""" +``` + +### Mermaid Generator Agent (Diagram Creator) + +**Location:** `Docugen-Microagents/api/agents/mermaid_agent.py` + +#### Example: Customize Diagram Style + +```python +MERMAID_SYSTEM_PROMPT = """... + +**Diagram Requirements:** + +1. **Architecture Diagram (graph TB):** + - Maximum 8-10 nodes + - Show user flow: User/Client → Frontend → Backend → Database + - Include external integrations (LLM Provider, Auth Service, etc.) + +**NEW: Corporate Color Scheme** +2. 
**Styling (REQUIRED):** + ```mermaid + graph TB + A[Frontend]:::frontend + B[Backend API]:::backend + C[Database]:::database + D[External Service]:::external + + classDef frontend fill:#3b82f6,stroke:#1e40af,color:#fff + classDef backend fill:#10b981,stroke:#059669,color:#fff + classDef database fill:#f59e0b,stroke:#d97706,color:#fff + classDef external fill:#8b5cf6,stroke:#6d28d9,color:#fff + ``` + +3. **Node Naming Rules:** + - Use service names, not technical jargon + - Good: "User Authentication", "Document Processor" + - Bad: "JWT Middleware", "PDF Parser Class" +""" +``` + +### QA Validator Agent (Quality Assurance) + +**Location:** `Docugen-Microagents/api/agents/qa_validator_agent.py` + +#### Example: Add Custom Validation Rules + +```python +QA_VALIDATOR_SYSTEM_PROMPT = """... + +**Validation Checks (Evidence-Based):** + +1. **Hallucination Detection:** + - Verify all dependencies mentioned exist in evidence.python_deps or evidence.node_deps + - Verify all environment variables match evidence.env_variables + - Verify technology stack matches evidence.languages + +2. **Completeness Checks:** + - All sections have > 100 characters + - Prerequisites lists actual version requirements + - Configuration section covers all .env.example variables + +**NEW: Custom Corporate Validation** +3. **Security Documentation (REQUIRED for ALL projects):** + - Must mention authentication mechanism (if any) + - Must document all *_SECRET, *_KEY, *_TOKEN env vars + - Must warn about credential security + +4. 
**Compliance Check (REQUIRED for enterprise):** + - If project handles PII: Must mention data protection + - If project logs data: Must mention audit logging + - If project uses external APIs: Must mention API key security + +**Scoring:** +- Base score: 75 +- +5 for each security item documented +- +10 for compliance documentation +- -10 for each hallucination detected +- -5 for incomplete sections +""" +``` + +--- + +## Customizing Workflow Logic + +**Location:** `Docugen-Microagents/api/workflow.py` + +### Example: Add Approval Gate After Section Writers + +Require human approval after all section writers complete, before coordination agents run. + +**Step 1:** Add approval state in `models/state.py`: + +```python +class DocGenState(TypedDict): + # Existing fields + job_id: str + repo_url: str + # ... + + # NEW: Approval gate + sections_approved: Optional[bool] +``` + +**Step 2:** Add conditional routing in `workflow.py`: + +```python +class SimplifiedDocuGenWorkflow: + async def create_workflow(self) -> StateGraph: + workflow = StateGraph(DocGenState) + + # Existing nodes... 
+ workflow.add_node("dependency_analyzer", self.dependency_analyzer_node) + workflow.add_node("evidence_aggregator", self.evidence_aggregator_node) + + # Add edges + workflow.add_edge("dependency_analyzer", "evidence_aggregator") + + # NEW: Add approval gate before planner + workflow.add_conditional_edges( + "evidence_aggregator", + self._check_sections_approval, + { + "approved": "planner", + "pending": END # Pause for approval + } + ) + + def _check_sections_approval(self, state: DocGenState) -> str: + """Check if sections were approved""" + if state.get("sections_approved") is None: + # First time: pause for approval + return "pending" + elif state["sections_approved"]: + return "approved" + else: + # Rejected: end workflow + return END +``` + +**Step 3:** Add API endpoint in `server.py`: + +```python +@app.post("/api/approve-sections/{job_id}") +async def approve_sections(job_id: str, approved: bool = True): + """Approve section writer outputs before coordination""" + workflow = await get_workflow() + config = {"configurable": {"thread_id": job_id}} + + # Update state with approval + await workflow.graph.aupdate_state( + config, + {"sections_approved": approved} + ) + + if approved: + # Resume workflow + async for event in workflow.graph.astream(None, config): + pass + + return {"status": "approved" if approved else "rejected"} +``` + +### Example: Skip Mermaid Generation for APIs + +```python +def _should_generate_diagram(self, state: DocGenState) -> str: + """Skip diagrams for API-only projects""" + evidence = state.get("evidence_packet") + + # Skip if only backend, no frontend + if evidence and evidence.has_backend and not evidence.has_frontend: + logger.info("Skipping diagram: API-only project") + return "skip" + + return "generate" + +# In workflow setup: +workflow.add_conditional_edges( + "planner", + self._should_generate_diagram, + { + "generate": "mermaid", + "skip": "qa_validator" + } +) +``` + +--- + +## Adding Custom Tools + +**Location:** 
`Docugen-Microagents/api/tools/repo_tools.py` + +### Example: Add Database Schema Extraction Tool + +```python +from langchain.tools import tool +import re +from pathlib import Path + +@tool +def extract_database_schema_tool(repo_path: str) -> str: + """ + Extract database schema from SQLAlchemy models or Prisma schema files. + + Args: + repo_path: Absolute path to repository root + + Returns: + Formatted database schema with tables and columns + """ + schema_info = [] + + # Check for Prisma schema + prisma_file = Path(repo_path) / "prisma" / "schema.prisma" + if prisma_file.exists(): + with open(prisma_file) as f: + content = f.read() + models = re.findall(r'model\s+(\w+)\s*\{([^}]+)\}', content, re.DOTALL) + for model_name, model_body in models: + fields = re.findall(r'(\w+)\s+(\w+)', model_body) + schema_info.append(f"Table: {model_name}") + for field_name, field_type in fields: + schema_info.append(f" - {field_name}: {field_type}") + + # Check for SQLAlchemy models + models_dir = Path(repo_path) / "models" + if models_dir.exists(): + for model_file in models_dir.glob("*.py"): + with open(model_file) as f: + content = f.read() + tables = re.findall(r'class\s+(\w+)\([^)]*Base[^)]*\):', content) + for table in tables: + schema_info.append(f"Table: {table} ({model_file.name})") + + if not schema_info: + return "No database schema files found" + + return "\n".join(schema_info) +``` + +**Bind tool for agents:** + +```python +def make_bound_tools_for_code_explorer(repo_path: str) -> List: + """Create tools with repo_path pre-bound""" + + @tool + def extract_database_schema() -> str: + """Extract database schema. No arguments needed.""" + return extract_database_schema_tool.func(repo_path=repo_path) + + return [ + list_directory, + read_file, + detect_languages, + extract_dependencies, + extract_database_schema # NEW + ] +``` + +**Update agent prompt:** + +```python +CODE_EXPLORER_SYSTEM_PROMPT = """... + +**Tool Usage:** +1. list_directory(".") +2. 
detect_languages() +3. extract_dependencies() +4. extract_database_schema() # NEW - Get DB schema +5. read_file("main.py", strategy="pattern_window") + +**Required Analysis:** +... +4. **Database Schema (if database detected):** + - Use extract_database_schema() to get table definitions + - List primary tables and relationships + - Example: "Database: PostgreSQL with 5 tables (users, documents, sessions, ...)" +""" +``` + +--- + +## Model Configuration + +**Location:** `Docugen-Microagents/api/config.py` + +### Current Configuration (Qwen3-4B for all agents) + +```python +# In .env file +AUTH_MODE=genai_gateway +GENAI_GATEWAY_URL=https://your-gateway-url.com +GENAI_GATEWAY_API_KEY=your-api-key + +# All agents use Qwen3-4B-Instruct (optimized for Intel Xeon) +CODE_EXPLORER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +API_REFERENCE_MODEL=Qwen/Qwen3-4B-Instruct-2507 +CALL_GRAPH_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ERROR_ANALYSIS_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ENV_CONFIG_MODEL=Qwen/Qwen3-4B-Instruct-2507 +DEPENDENCY_ANALYZER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +PLANNER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +MERMAID_MODEL=Qwen/Qwen3-4B-Instruct-2507 +QA_VALIDATOR_MODEL=Qwen/Qwen3-4B-Instruct-2507 +WRITER_MODEL=Qwen/Qwen3-4B-Instruct-2507 # Legacy, not used +``` + +### Example: Use Larger Model for QA Validator + +```python +# If you have access to larger models for quality assurance +QA_VALIDATOR_MODEL=Qwen/Qwen2.5-32B-Instruct +``` + +### Example: Add Custom Model for Compliance Agent + +```python +# In config.py +class Settings(BaseSettings): + # Existing model configs... 
+ CODE_EXPLORER_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + + # NEW: Custom agent model + COMPLIANCE_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" +``` + +--- + +## Advanced Scenarios + +### Scenario 1: Add Compliance Section Writer Agent + +**Step 1:** Create `agents/compliance_agent.py`: + +```python +from langchain_core.language_models import BaseChatModel +from langchain_core.prompts import ChatPromptTemplate +from tools.repo_tools import make_bound_tools_for_code_explorer + +COMPLIANCE_SYSTEM_PROMPT = """You are a Compliance Documentation Specialist. + +**Your Task:** +Write a comprehensive "Compliance" section for the README. + +**Required Content:** +1. Data Classification - What types of data does this system process? +2. Access Controls - Authentication and authorization mechanisms +3. Audit Logging - What actions are logged and where? +4. Encryption - Data at rest and in transit +5. Compliance Frameworks - SOC2, HIPAA, GDPR considerations + +**Tools Available:** +- list_directory(path) - List directory contents +- read_file(file_path, strategy) - Read file with strategy +- detect_languages() - Get language breakdown +- extract_dependencies() - Get all dependencies + +**Output Format:** + +## Compliance + +### Data Classification +[Describe what types of data the system processes...] + +### Access Controls +[Document authentication and authorization...] + +### Audit Logging +[Describe what is logged and retention...] + +### Encryption +[Document encryption at rest and in transit...] + +### Compliance Frameworks +[List applicable frameworks and how system complies...] 
+""" + +async def run_compliance_agent(llm: BaseChatModel, repo_path: str, job_id: str) -> dict: + """Run compliance documentation agent""" + from langchain.agents import create_react_agent, AgentExecutor + from langchain_core.prompts import PromptTemplate + + # Bind tools + tools = make_bound_tools_for_code_explorer(repo_path) + + # Create ReAct agent + agent_prompt = PromptTemplate.from_template("""...React template...""") + agent = create_react_agent(llm, tools, agent_prompt) + + # Execute + executor = AgentExecutor( + agent=agent, + tools=tools, + verbose=True, + max_iterations=15, + handle_parsing_errors=True + ) + + result = await executor.ainvoke({ + "system_prompt": COMPLIANCE_SYSTEM_PROMPT, + "repo_path": repo_path + }) + + return { + "success": True, + "output": result["output"], + "input_tokens": 0, # Extract from result + "output_tokens": 0, + "tool_calls": 0, + "llm_calls": 0 + } +``` + +**Step 2:** Add node to workflow: + +```python +# In workflow.py +from agents.compliance_agent import run_compliance_agent + +class SimplifiedDocuGenWorkflow: + async def create_workflow(self) -> StateGraph: + # Add compliance node + workflow.add_node("compliance", self.compliance_node) + + # Insert after dependency_analyzer, before evidence_aggregator + workflow.add_edge("dependency_analyzer", "compliance") + workflow.add_edge("compliance", "evidence_aggregator") + + async def compliance_node(self, state: DocGenState) -> DocGenState: + """Run Compliance agent""" + job_id = state["job_id"] + target_path = self._get_target_path(state) + + metrics = self._get_metrics_collector(job_id) + metrics.start_agent("Compliance") + + await self.log_manager.log_async( + job_id=job_id, + log_type=LogType.WORKFLOW_PROGRESS, + message="📋 Running Compliance Writer (7/8)...", + agent_name="Compliance" + ) + + llm = get_llm(model_name=settings.COMPLIANCE_MODEL, temperature=0.7) + result = await run_compliance_agent(llm=llm, repo_path=target_path, job_id=job_id) + + if 
result.get("success"): + output = result.get("output", "") + sections_dict = state.get("readme_sections") or {} + self._parse_and_store_sections(output, sections_dict) + state["readme_sections"] = sections_dict + + metrics.end_agent( + "Compliance", + success=True, + input_tokens=result.get("input_tokens", 0), + output_tokens=result.get("output_tokens", 0), + tool_calls=result.get("tool_calls", 0), + llm_calls=result.get("llm_calls", 0) + ) + else: + metrics.end_agent("Compliance", success=False) + + return state +``` + +**Step 3:** Add to config: + +```python +# In config.py +COMPLIANCE_MODEL: str = "Qwen/Qwen3-4B-Instruct-2507" + +# In .env +COMPLIANCE_MODEL=Qwen/Qwen3-4B-Instruct-2507 +``` + +--- + +## Configuration Reference + +### Environment Variables + +```bash +# LLM Configuration +AUTH_MODE=genai_gateway +GENAI_GATEWAY_URL=https://your-gateway-url.com +GENAI_GATEWAY_API_KEY=your-api-key + +# Micro-Agent Model Configuration (all use Qwen3-4B) +CODE_EXPLORER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +API_REFERENCE_MODEL=Qwen/Qwen3-4B-Instruct-2507 +CALL_GRAPH_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ERROR_ANALYSIS_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ENV_CONFIG_MODEL=Qwen/Qwen3-4B-Instruct-2507 +DEPENDENCY_ANALYZER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +PLANNER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +MERMAID_MODEL=Qwen/Qwen3-4B-Instruct-2507 +QA_VALIDATOR_MODEL=Qwen/Qwen3-4B-Instruct-2507 + +# GitHub Integration (MCP) +GITHUB_TOKEN=ghp_... 
+ +# Repository Analysis Limits +MAX_REPO_SIZE=10737418240 # 10GB in bytes +MAX_FILE_SIZE=1000000 # 1MB in bytes +MAX_FILES_TO_SCAN=500 # Maximum files to analyze +MAX_LINES_PER_FILE=500 # Line budget per file (pattern_window extracts ~150-300 lines) + +# Agent Settings +AGENT_TEMPERATURE=0.7 +AGENT_MAX_TOKENS=1000 +AGENT_TIMEOUT=300 # 5 minutes + +# Server Configuration +API_PORT=5001 +HOST=0.0.0.0 +CORS_ORIGINS=["http://localhost:3000"] +``` + +--- + +## Testing Customizations + +After making customizations, test with representative repositories: + +```bash +# Test with different project types +1. Simple web app (React + FastAPI) +2. Complex monorepo (multiple services) +3. API-only service (FastAPI/Spring Boot) +4. CLI tool (Python script) + +# Validate outputs: +- Check all custom sections appear +- Verify custom tools are called in logs +- Confirm tone and format match requirements +- Test strategic sampling extracts key patterns +- Verify metrics tracking (tokens, TPS, duration) +- Test with private repos (authentication) +``` + +Review agent logs in real-time via the UI to debug ReAct reasoning loops, tool usage, and file sampling strategies. + +--- diff --git a/sample_solutions/Docugen-Microagents/docs/GENERIC_USAGE.md b/sample_solutions/Docugen-Microagents/docs/GENERIC_USAGE.md new file mode 100644 index 00000000..58c59403 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/docs/GENERIC_USAGE.md @@ -0,0 +1,1402 @@ +# DocuGen Micro-Agents - Generic Usage Guide + +This documentation covers how to deploy and use DocuGen Micro-Agents to automatically generate comprehensive README documentation for any software repository. + +--- + +## Table of Contents + +1. [Overview](#overview) +2. [System Architecture](#system-architecture) +3. [Prerequisites](#prerequisites) +4. [Deployment](#deployment) +5. [Generating Documentation](#generating-documentation) +6. [Supported Project Types](#supported-project-types) +7. 
[Understanding Output](#understanding-output) +8. [Limitations](#limitations) +9. [Troubleshooting](#troubleshooting) + +--- + +## Overview + +DocuGen Micro-Agents is a specialized multi-agent system that autonomously generates comprehensive README documentation for software repositories. The system uses **9 micro-agents** that work together to analyze code structure, extract dependencies, understand architecture, and produce professional technical documentation based on actual code rather than templates. + +### Core Capabilities + +- **Micro-agent architecture** with 9 specialized agents for targeted analysis +- **Strategic file sampling** to optimize context usage and processing speed +- **Evidence-based documentation** - every statement verified through code analysis +- **Autonomous repository exploration** using ReAct (Reason-Act-Observe) pattern +- **Multi-language support** with automatic technology stack detection +- **Monorepo project detection** with human-in-the-loop selection +- **Architecture diagram generation** with Mermaid syntax validation +- **GitHub pull request creation** for documentation submission +- **Real-time agent activity streaming** via Server-Sent Events +- **Performance metrics tracking** with per-agent token usage and blast radius reports + +### Design Philosophy + +DocuGen generates documentation from code analysis, not assumptions. Every feature, dependency, and configuration mentioned in the output is extracted from actual files in your repository. If information cannot be verified through code inspection, it is omitted rather than fabricated. 
+ +**Key Principles:** +- **Evidence-first**: All agents write findings to a central EvidencePacket +- **Specialized roles**: Each agent focuses on a specific analysis domain +- **Context-efficient**: Strategic file sampling keeps processing under LLM context limits +- **Quality-assured**: QA Validator scores all sections before final assembly + +--- + +## System Architecture + +### Micro-Agent Overview + +DocuGen uses **9 specialized micro-agents** organized into three categories: + +#### 1. Section Writer Agents (6 agents) +These agents analyze code and contribute specific sections to the documentation: + +- **Code Explorer** - Filesystem structure, languages, key files +- **API Reference** - REST/GraphQL endpoints, request/response schemas +- **Call Graph** - Function relationships, entry points, call paths +- **Error Analysis** - Exception handlers, error patterns, logging +- **Env Config** - Environment variables, configuration files +- **Dependency Analyzer** - Package dependencies, security vulnerabilities + +#### 2. Coordination Agents (3 agents) +These agents orchestrate documentation generation: + +- **Planner** - Determines sections to generate based on project type +- **Mermaid Generator** - Creates validated architecture diagrams +- **QA Validator** - Scores documentation quality (0-100) and validates completeness + +#### 3. Infrastructure Nodes +- **Evidence Aggregator** - Consolidates filesystem data for all agents +- **PR Agent** (via MCP) - Creates GitHub pull requests + +### Micro-Agent Execution Flow + +```mermaid +graph TB + subgraph "Frontend Layer" + UI[React UI
Repository URL input<br/>Agent log streaming<br/>Project selection<br/>README preview] + end + + subgraph "Backend Layer" + API[FastAPI Server<br/>REST API endpoints<br/>SSE streaming<br/>Job orchestration] + WF[LangGraph Workflow<br/>State management<br/>Agent orchestration<br/>Checkpointing] + GIT[Git Service<br/>Repository cloning<br/>Branch operations] + MCP[GitHub MCP<br/>PR creation<br/>Docker integration] + end + + subgraph "Micro-Agent Pipeline" + direction TB + A1[1. Clone Repository<br/>GitService clones repo] + A2[2. Evidence Aggregator<br/>Scans filesystem, builds EvidencePacket] + A3[3. Section Writer Agents<br/>6 agents run in parallel] + A4[4. Planner<br/>Determines sections based on project type] + A5[5. Project Selection<br/>Human-in-the-loop gate for monorepos] + A6[6. Writer Coordination<br/>Generates markdown sections] + A7[7. Mermaid Generator<br/>Creates validated diagrams] + A8[8. QA Validator<br/>Scores quality 0-100] + A9[9. Assembly<br/>Combines all sections into final README] + + A1 --> A2 + A2 --> A3 + A3 --> A4 + A4 --> A5 + A5 --> A6 + A6 --> A7 + A7 --> A8 + A8 --> A9 + end + + subgraph "External Services" + LLM[LLM Provider<br/>Qwen3-4B via GenAI Gateway or Keycloak] + GITHUB[GitHub
Public/private repos] + end + + UI -->|HTTP/SSE| API + API -->|Executes| WF + WF -->|Orchestrates| A1 + A1 -->|Uses| GIT + GIT -->|Clones from| GITHUB + A3 & A6 & A7 & A8 -->|LLM calls| LLM + A9 -->|Creates PR via| MCP + MCP -->|Pushes to| GITHUB + API -->|Streams logs| UI + + classDef frontend fill:#3b82f6,stroke:#1e40af,color:#fff + classDef backend fill:#10b981,stroke:#059669,color:#fff + classDef agent fill:#f59e0b,stroke:#d97706,color:#fff + classDef external fill:#6366f1,stroke:#4f46e5,color:#fff + + class UI frontend + class API,WF,GIT,MCP backend + class A1,A2,A3,A4,A5,A6,A7,A8,A9 agent + class LLM,GITHUB external +``` + +### Strategic File Sampling + +DocuGen uses **intelligent file sampling** to work within LLM context limits: + +**MAX_LINES_PER_FILE=500** (configurable) acts as a budget, not a guarantee. Three strategies are available: + +#### 1. Pattern Window Strategy (DEFAULT) +- Detects high-value patterns (routes, error handlers, entry points) +- Extracts ±6 lines around each match +- **Typical extraction: 150-300 lines from 500 budget** +- Used by all agents by default + +**Detected patterns:** +```python +@app.get(), @router.post() # FastAPI/Flask routes +try:, except:, raise # Error handling +if __name__ == "__main__": # Entry points +def main(, uvicorn.run() # Main functions +``` + +#### 2. Smart Strategy +- Extracts top + function/class signatures + bottom +- **Typical extraction: 100-200 lines** +- Good for structural overviews + +#### 3. 
Full Strategy +- Takes first N lines (up to 500) +- Simple but can waste context on comments + +**Why this matters:** +- Qwen3-4B has 8K context window +- pattern_window keeps each file read to ~150-300 lines +- Allows processing more files within context limits +- Maximizes information density per token + +**Configuration location:** `api/tools/repo_tools.py` (lines 95-222) + +### Agent Communication Model + +Each micro-agent operates autonomously with access to repository tools: + +#### Section Writer Agents + +**Code Explorer Agent** +- **Tools**: list_directory, read_file, detect_languages, extract_dependencies +- **Output**: File structure tree, language distribution, key files list +- **Strategy**: pattern_window for key files, smart for structure + +**API Reference Agent** +- **Tools**: read_file, grep_search +- **Output**: Endpoint list with HTTP methods, request/response schemas +- **Strategy**: pattern_window for route files + +**Call Graph Agent** +- **Tools**: read_file, grep_search +- **Output**: Function call relationships, entry points, call paths +- **Strategy**: pattern_window for function definitions + +**Error Analysis Agent** +- **Tools**: read_file, grep_search +- **Output**: Exception handlers, error patterns, logging strategies +- **Strategy**: pattern_window for error handling code + +**Env Config Agent** +- **Tools**: read_file +- **Output**: Environment variable list, configuration files found +- **Strategy**: full for .env.example files (typically small) + +**Dependency Analyzer Agent** +- **Tools**: read_file, extract_dependencies +- **Output**: Dependency report, security warnings, version conflicts +- **Strategy**: full for package files (small) + +#### Coordination Agents + +**Planner Agent** +- **Input**: Evidence from all section writers +- **Output**: List of sections to generate (e.g., Prerequisites, Architecture, Configuration) +- **Logic**: Determines sections based on project type and detected features + +**Mermaid Generator 
Agent** +- **Tools**: Repository access + Mermaid syntax validation +- **Output**: Validated Mermaid diagrams showing system architecture +- **Strategy**: pattern_window for architecture-relevant files + +**QA Validator Agent** +- **Input**: All generated sections +- **Output**: Quality score (0-100), validation result with feedback +- **Criteria**: Completeness, accuracy, clarity, technical depth + +--- + +## Prerequisites + +### System Requirements + +**For Docker Deployment:** +- Docker Engine 20.10+ +- Docker Compose 2.0+ +- 4GB RAM minimum (8GB recommended for large repos) +- 20GB disk space + +**For Local Development:** +- Python 3.11+ +- Node.js 20+ +- pip and npm package managers +- Git 2.30+ + +### Required Credentials + +**LLM API Configuration** (required) + +Choose one authentication mode: + +**Option 1: GenAI Gateway (Recommended)** +```bash +AUTH_MODE=genai_gateway +GENAI_GATEWAY_URL=https://your-gateway-url.com +GENAI_GATEWAY_API_KEY=your-api-key +``` + +**Option 2: Keycloak (Enterprise)** +```bash +AUTH_MODE=keycloak +BASE_URL=https://your-inference-endpoint.company.com +KEYCLOAK_REALM=master +KEYCLOAK_CLIENT_ID=api +KEYCLOAK_CLIENT_SECRET=your-client-secret +``` + +**GitHub Personal Access Token** (optional, for private repos and PR creation) +```bash +# Generate at: https://github.com/settings/tokens +# Required scopes: repo (full control of private repositories) +GITHUB_TOKEN=ghp_... +``` + +### Repository Requirements + +The target repository must meet these criteria: +- Cloneable via HTTPS (public or with valid token) +- Size under 10GB (configurable via MAX_REPO_SIZE) +- Contains at least one package manager file (package.json, requirements.txt, Gemfile, pom.xml, etc.) 
+- Has readable file permissions + +--- + +## Deployment + +### Docker Deployment (Production) + +**Step 1:** Configure environment + +Create `api/.env`: +```bash +# ========================================== +# Authentication Configuration +# ========================================== +AUTH_MODE=genai_gateway +GENAI_GATEWAY_URL=https://your-gateway-url.com +GENAI_GATEWAY_API_KEY=your-api-key + +# OR for enterprise Keycloak authentication: +# AUTH_MODE=keycloak +# BASE_URL=https://your-inference-endpoint.company.com +# KEYCLOAK_REALM=master +# KEYCLOAK_CLIENT_ID=api +# KEYCLOAK_CLIENT_SECRET=your-client-secret + +# ========================================== +# Micro-Agent Model Configuration +# ========================================== +# All agents use Qwen3-4B-Instruct (optimized SLM for code analysis) +CODE_EXPLORER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +API_REFERENCE_MODEL=Qwen/Qwen3-4B-Instruct-2507 +CALL_GRAPH_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ERROR_ANALYSIS_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ENV_CONFIG_MODEL=Qwen/Qwen3-4B-Instruct-2507 +DEPENDENCY_ANALYZER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +PLANNER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +MERMAID_MODEL=Qwen/Qwen3-4B-Instruct-2507 +QA_VALIDATOR_MODEL=Qwen/Qwen3-4B-Instruct-2507 +WRITER_MODEL=Qwen/Qwen3-4B-Instruct-2507 + +# ========================================== +# Repository Analysis Limits +# ========================================== +TEMP_REPO_DIR=./tmp/repos +MAX_REPO_SIZE=10737418240 # 10GB in bytes +MAX_FILE_SIZE=1000000 # 1MB in bytes +MAX_FILES_TO_SCAN=500 # Maximum number of files to analyze +MAX_LINES_PER_FILE=500 # Line budget per file (pattern_window extracts ~150-300 lines) + +# ========================================== +# Agent Execution Settings +# ========================================== +AGENT_TEMPERATURE=0.7 # Model temperature (0.0-1.0) +AGENT_MAX_TOKENS=1000 # Maximum tokens per agent response +AGENT_TIMEOUT=300 # Agent timeout in seconds (5 minutes) + +# 
========================================== +# GitHub Integration (MCP) +# ========================================== +# Optional: Required for automatic PR creation +GITHUB_TOKEN=your-github-token-here + +# ========================================== +# Server Configuration +# ========================================== +API_PORT=5001 +HOST=0.0.0.0 +CORS_ORIGINS=["http://localhost:3000", "http://localhost:3001", "http://localhost:5173"] +``` + +**Step 2:** Deploy services + +```bash +cd Docugen-Microagents +docker-compose up -d +``` + +**Step 3:** Verify deployment + +```bash +# Check container status +docker-compose ps + +# Expected output: +# docugen-backend Up (healthy) +# docugen-frontend Up (healthy) + +# View logs +docker-compose logs -f + +# Access application +# Frontend: http://localhost:3000 +# Backend: http://localhost:5001 +# Health: http://localhost:5001/health +``` + +### Local Development Deployment + +**Step 1:** Backend setup + +```bash +cd Docugen-Microagents/api + +# Create virtual environment +python -m venv venv +source venv/bin/activate # Windows: venv\Scripts\activate + +# Install dependencies +pip install -r requirements.txt + +# Create .env file (see Docker deployment section for full configuration) +cp .env.example .env +# Edit .env with your credentials + +# Run server +python server.py +``` + +**Step 2:** Frontend setup + +```bash +cd Docugen-Microagents/ui + +# Install dependencies +npm install + +# Run development server +npm run dev +``` + +**Step 3:** Access application + +``` +Frontend: http://localhost:3000 +Backend: http://localhost:5001 +``` + +--- + +## Generating Documentation + +### Basic Workflow + +**Step 1:** Enter repository URL + +Supported formats: +``` +# Full repository +https://github.com/owner/repository + +# Specific branch +https://github.com/owner/repository/tree/dev + +# Subfolder (for monorepos) +https://github.com/owner/repository/tree/main/services/api +``` + 
+**Step 2:** Initiate generation + +Click "Generate Documentation". The system will: +1. Parse and validate the URL +2. Clone the repository +3. Run Evidence Aggregator to scan filesystem +4. Execute 6 section writer agents in parallel +5. Run coordination agents (Planner → Writer → Mermaid → QA) + +**Step 3:** Monitor micro-agent activity + +Real-time logs display: +- **Repository cloning progress** +- **Evidence Aggregator** - Filesystem scanning +- **Section Writer Agents** (parallel execution): + - Code Explorer - Language detection, key files + - API Reference - Endpoint discovery + - Call Graph - Function relationships + - Error Analysis - Exception handling patterns + - Env Config - Environment variables + - Dependency Analyzer - Package analysis +- **Planner** - Section determination +- **Writer** - Documentation generation with ReAct loops +- **Mermaid Generator** - Diagram creation and validation +- **QA Validator** - Quality scoring (0-100) +- **Performance Metrics** - Token usage, TPS, blast radius + +**Step 4:** Project selection (monorepos only) + +If multiple projects are detected: +1. Review detected projects list +2. Select ONE project to document +3. Click "Continue" + +The workflow resumes automatically after selection. + +**Step 5:** Review output + +When complete: +- **QA Score displayed** (e.g., "Quality Score: 87/100") +- **Preview README** in the UI +- **Download** as README.md file +- **Create GitHub pull request** (if GITHUB_TOKEN configured) +- **View performance metrics** - Total tokens, duration, agent-level breakdown + +### Project Selection Logic + +DocuGen automatically detects projects based on: +- Package manager files (package.json, requirements.txt, pom.xml, etc.) +- Directory structure patterns +- Configuration files (docker-compose.yml, Dockerfile, etc.) 
+ +**Detection Rules:** +``` +Project indicators: +- package.json → Node.js project +- requirements.txt → Python project +- pom.xml or build.gradle → Java project +- Cargo.toml → Rust project +- go.mod → Go project +- Gemfile → Ruby project +- composer.json → PHP project +- *.csproj → .NET project +``` + +Monorepo example: +``` +repo/ +├── services/ +│ ├── auth-api/ ← Detected project 1 (package.json) +│ ├── data-pipeline/ ← Detected project 2 (requirements.txt) +│ └── frontend/ ← Detected project 3 (package.json) +``` + +**Selection UI:** +``` +Detected Projects: +[x] services/auth-api (Node.js) +[ ] services/data-pipeline (Python) +[ ] services/frontend (Node.js) + +[Continue with Selected Project] +``` + +### Creating Pull Requests + +**Prerequisites:** +- GITHUB_TOKEN configured in .env +- Token has `repo` scope +- Target repository is on GitHub + +**Process:** +1. Generate documentation +2. Review README output +3. Click "Create PR" +4. System will: + - Create branch: `docs/update-readme-{timestamp}` + - Commit README.md + - Create pull request to base branch + - Return PR URL + +**PR Contents:** +- **Title**: `[AI] Update documentation` +- **Body**: Generated by DocuGen Micro-Agents with metadata +- **Files**: README.md +- **Base branch**: Extracted from URL (e.g., `dev`, `main`) + +--- + +## Supported Project Types + +### Fully Supported + +**Web Applications** +- React, Vue, Angular, Svelte frontends +- FastAPI, Flask, Django, Express, Spring Boot backends +- Full-stack applications (MERN, MEAN, LAMP) + +**REST APIs** +- OpenAPI/Swagger documented services +- RESTful microservices +- GraphQL APIs (via API Reference Agent) + +**CLI Tools** +- Python click/argparse applications +- Node.js commander applications +- Go cobra applications + +**Libraries and SDKs** +- NPM packages +- PyPI packages +- Maven/Gradle libraries +- Go modules +- RubyGems + +**Microservices** +- Docker Compose multi-service applications +- Kubernetes-deployed services +- Monorepo 
microservice architectures + +### Partially Supported + +**Mobile Applications** +- React Native (JavaScript dependencies detected) +- Flutter (Dart dependencies detected) +- Native iOS/Android (limited package manager detection) + +**Data Pipelines** +- Apache Airflow DAGs +- Apache Spark jobs +- ETL scripts + +**Infrastructure as Code** +- Terraform configurations +- CloudFormation templates +- Ansible playbooks + +### Not Supported + +- Pure documentation repositories (no code to analyze) +- Binary-only releases +- Repositories without package managers +- Obfuscated or minified code without source + +--- + +## Understanding Output + +### Generated Sections + +The **Planner Agent** determines which sections to generate based on project type. Common sections: + +**Project Overview** +- Purpose and value proposition +- High-level architecture pattern +- Technology stack summary + +**Features** +- User-facing capabilities +- Organized by component (Backend/Frontend) +- Based on code analysis from **Code Explorer** and **API Reference** agents + +Example: +```markdown +## Features + +**Backend (FastAPI)** +- Multiple input format support (text, PDF, audio, video) +- Streaming and non-streaming response modes +- Support for vLLM, TGI, and Ollama providers +- File validation with size limits + +**Frontend (React)** +- Drag-and-drop file upload +- Real-time summary display +- Mobile-responsive design +``` + +**API Reference** +- Generated by **API Reference Agent** +- Detected endpoints with HTTP methods +- Request/response schemas +- Authentication requirements + +Example: +```markdown +## API Reference + +### POST /api/summarize +Generates a summary from uploaded document. 
+ +**Request:** +- Content-Type: multipart/form-data +- Body: `file` (PDF, TXT, MP3, MP4) + +**Response:** +{ + "summary": "Generated summary text...", + "duration": 1.23 +} +``` + +**Architecture** +- Generated by **Mermaid Generator Agent** +- Validated Mermaid diagrams +- System component visualization +- Data flow diagrams + +Example: +```mermaid +graph TB + A[React Frontend] --> B[FastAPI Backend] + B --> C[LLM Provider] + B --> D[Document Processor] + D --> E[PDF Service] + D --> F[Audio Transcription] +``` + +**Prerequisites** +- Required tools and versions from **Dependency Analyzer Agent** +- API keys and credentials from **Env Config Agent** +- System requirements + +**Configuration** +- Environment variables from **Env Config Agent** +- Actual variable names and purposes from .env.example +- Configuration file examples + +**Error Handling** +- Exception handling patterns from **Error Analysis Agent** +- Logging strategies +- Common error scenarios + +**Quick Start** +- Installation commands +- Setup instructions +- Running the application + +### Quality Assurance + +The **QA Validator Agent** scores documentation on a 0-100 scale: + +**Scoring Criteria:** +- **Completeness** (25 points) - All expected sections present +- **Accuracy** (25 points) - Technically correct information +- **Clarity** (25 points) - Clear, understandable language +- **Technical Depth** (25 points) - Sufficient detail for developers + +**Score Interpretation:** +- **90-100**: Excellent - Production-ready documentation +- **80-89**: Good - Minor improvements needed +- **70-79**: Fair - Some sections need enhancement +- **Below 70**: Needs work - Significant gaps or errors + +**QA Report Example:** +```json +{ + "score": 87, + "passed": true, + "feedback": { + "strengths": [ + "Comprehensive API documentation with examples", + "Clear installation instructions", + "Well-structured architecture diagram" + ], + "improvements": [ + "Add troubleshooting section for common errors", + 
"Include performance benchmarks", + "Expand testing documentation" + ] + } +} +``` + +### Performance Metrics + +DocuGen tracks detailed metrics for each agent: + +**Per-Agent Metrics:** +- **Tokens used** - Input + output tokens +- **Duration** - Processing time in milliseconds +- **TPS** (Tokens Per Second) - Generation speed +- **LLM calls** - Number of API requests +- **Tool calls** - Number of tool invocations + +**Blast Radius Report:** +- Total tokens across all agents +- Total duration +- Most expensive agent +- Context efficiency score + +Example metrics display: +``` +Performance Metrics: +- Total tokens: 45,231 +- Total duration: 23.4s +- Code Explorer: 12,450 tokens (5.2s) +- API Reference: 8,120 tokens (3.1s) +- Dependency Analyzer: 6,340 tokens (2.8s) +- Writer: 15,221 tokens (8.9s) +- QA Validator: 3,100 tokens (1.4s) +``` + +### Quality Characteristics + +Documentation generated by DocuGen exhibits these traits: + +**Accuracy** +- All dependencies extracted from package files by **Dependency Analyzer** +- Code snippets are actual code from repository +- Configuration variables match .env.example files (via **Env Config Agent**) +- Commands match package.json scripts or Makefiles + +**Specificity** +- File paths reference actual repository files +- Version numbers match package manager specifications +- Port numbers match configuration files +- Directory structures match actual layout + +**Completeness** +- All detected features documented (via **Code Explorer**) +- All package manager dependencies listed (via **Dependency Analyzer**) +- All environment variables explained (via **Env Config Agent**) +- All major components represented in diagrams (via **Mermaid Generator**) +- Error handling documented (via **Error Analysis Agent**) +- API endpoints cataloged (via **API Reference Agent**) + +--- + +## Limitations + +### Functional Limitations + +**Code Analysis Depth** +- Analyzes structure and configuration, not runtime behavior +- Does not execute 
code or run tests +- Cannot verify if setup instructions actually work +- Does not understand complex business logic +- Limited to static analysis patterns + +**Context Boundaries** +- Only analyzes files in the repository +- Does not read external documentation (wikis, Confluence) +- Does not access git history or issue trackers +- Does not read inline comments extensively +- MAX_LINES_PER_FILE budget limits depth per file + +**Single Project Focus** +- Monorepos require selecting one project at a time +- Cannot document all services in one generation pass +- Each project must have clear boundaries + +**Git Provider Support** +- PR creation only works with GitHub repositories +- GitLab and Bitbucket not supported for PR creation +- Cloning works with any Git provider + +### Technical Constraints + +**Repository Size** +- Default limit: 10GB +- Large repositories may timeout or hit memory limits +- Use subfolder URLs for focused analysis + +**File Count** +- Default: 500 files scanned +- Very large codebases may not be fully analyzed +- Adjust MAX_FILES_TO_SCAN in configuration + +**Token Limits** +- Large repositories generate extensive context +- Qwen3-4B has 8K context window +- Strategic file sampling mitigates this (pattern_window extracts ~150-300 lines per file) +- Processing time scales with repository size + +**Language Support** +- Best support for Python, JavaScript/TypeScript, Java, Go +- Limited support for less common languages +- Depends on package manager file formats + +**Micro-Agent Constraints** +- Each agent has AGENT_TIMEOUT (default 300s) +- Failed agents logged in state.failed_agents +- Retry logic limited to 3 attempts per agent + +--- + +## Troubleshooting + +### Repository Access Issues + +**Symptom:** "Repository not found or access denied" + +**Diagnosis:** +```bash +# Test repository URL +curl -I https://github.com/owner/repo + +# If 404: Repository does not exist or is private +# If 200: Repository exists and is public 
+``` + +**Resolution:** +1. Verify repository URL spelling +2. For private repositories: + - Add GITHUB_TOKEN to api/.env + - Ensure token has `repo` scope + - Regenerate token if expired +3. Restart backend to load new token: + ```bash + docker-compose restart backend + ``` + +### Micro-Agent Failures + +**Symptom:** Specific agent fails (e.g., "API Reference Agent failed") + +**Diagnosis:** +```bash +# Check backend logs for agent-specific errors +docker-compose logs backend | grep -i "agent.*error" + +# Look for: +# - "Failed to parse JSON from agent output" +# - LLM API errors (connection refused, invalid key, rate limits) +# - Tool execution failures +# - Timeout errors (exceeds AGENT_TIMEOUT) +``` + +**Resolution:** +1. **JSON parsing errors:** + - Agent output format invalid + - Check agent prompt in agents/ directory + - Verify model supports JSON output + +2. **LLM API errors:** + - Verify LLM endpoint is accessible + - Check API key is valid (GENAI_GATEWAY_API_KEY or KEYCLOAK_CLIENT_SECRET) + - Verify model names match available models + +3. **Timeout errors:** + - Increase AGENT_TIMEOUT in .env (default 300s) + - Reduce MAX_FILES_TO_SCAN or MAX_LINES_PER_FILE + - Use subfolder URL for smaller scope + +4. 
**Tool execution failures:** + - Check repository permissions (read access) + - Verify file paths are valid + - Check TEMP_REPO_DIR has write permissions + +**View failed agents:** +```bash +# Check state.failed_agents in logs +docker-compose logs backend | grep "failed_agents" +``` + +### Log Streaming Issues + +**Symptom:** Agent logs appear all at once at the end + +**Cause:** Python output buffering + +**Resolution:** + +For Docker deployment (already configured): +```yaml +# docker-compose.yml includes: +environment: + - PYTHONUNBUFFERED=1 +``` + +For local deployment: +```bash +# Run with unbuffered output +PYTHONUNBUFFERED=1 python server.py +``` + +### Pull Request Creation Failures + +**Symptom:** "MCP PR Agent error: No such file or directory" + +**Cause:** Docker CLI not installed in backend container + +**Resolution:** + +Verify Docker CLI installation in backend: +```bash +docker-compose exec backend which docker +# Should return: /usr/bin/docker +``` + +If not installed, rebuild backend: +```bash +docker-compose up -d --build backend +``` + +### Monorepo Project Detection + +**Symptom:** Projects not detected or wrong projects detected + +**Diagnosis:** +```bash +# Check for package manager files +docker-compose exec backend ls -la ./tmp/repos/*/ + +# Look for: +# - package.json (Node.js) +# - requirements.txt (Python) +# - pom.xml (Java) +# - go.mod (Go) +``` + +**Resolution:** +1. Ensure projects have package manager files +2. Use subfolder URL to target specific project +3. Customize detection logic in utils/__init__.py (see CUSTOMIZATION.md) + +### Memory and Performance Issues + +**Symptom:** Container OOM errors or slow performance + +**Diagnosis:** +```bash +# Check container resource usage +docker stats docugen-backend +``` + +**Resolution:** +1. Increase Docker memory limits: + ```yaml + # docker-compose.yml + services: + backend: + deploy: + resources: + limits: + memory: 8G # Increase from default 4G + ``` + +2. 
Reduce repository scan scope: + ```bash + # In .env + MAX_FILES_TO_SCAN=300 + MAX_LINES_PER_FILE=300 + ``` + +3. Use subfolder URLs for targeted analysis + +4. Check agent metrics for expensive agents: + ```bash + docker-compose logs backend | grep "agent_metrics" + ``` + +### Context Window Exceeded + +**Symptom:** "Context length exceeded" errors from LLM + +**Diagnosis:** +```bash +# Check blast_radius_report in logs +docker-compose logs backend | grep "blast_radius" + +# Look for total_tokens_used exceeding 8000 (Qwen3-4B context window) +``` + +**Resolution:** +1. Reduce MAX_LINES_PER_FILE: + ```bash + # In .env + MAX_LINES_PER_FILE=300 # Reduce from 500 + ``` + +2. Use pattern_window strategy (already default): + - Automatically limits to ~150-300 lines per file + - Verify in logs: "Using pattern_window strategy" + +3. Reduce MAX_FILES_TO_SCAN: + ```bash + # In .env + MAX_FILES_TO_SCAN=300 # Reduce from 500 + ``` + +4. Use subfolder URL for smaller scope + +### QA Validator Low Scores + +**Symptom:** QA scores consistently below 70 + +**Diagnosis:** +```bash +# Check QA validation results +docker-compose logs backend | grep -A 20 "qa_validation_result" + +# Review feedback in logs: +# - Missing sections +# - Incomplete information +# - Technical inaccuracies +``` + +**Resolution:** +1. **Missing sections:** + - Add missing package manager files to repository + - Ensure key configuration files exist (.env.example, docker-compose.yml) + +2. **Incomplete information:** + - Increase MAX_LINES_PER_FILE for more context + - Run specific agents with verbose logging + +3. **Technical inaccuracies:** + - Review agent prompts in agents/ directory + - Adjust model temperature (AGENT_TEMPERATURE) + - Try different model (upgrade from Qwen3-4B to larger model) + +4. 
**Customize QA criteria:** + - Edit agents/qa_validator.py + - Adjust scoring weights (see CUSTOMIZATION.md) + +### Health Check Failures + +**Symptom:** Containers show "unhealthy" status + +**Diagnosis:** +```bash +# Check health endpoint +curl http://localhost:5001/health + +# Check logs +docker-compose logs backend +``` + +**Resolution:** +1. Verify ports are not in use: + ```bash + netstat -an | grep 5001 + netstat -an | grep 3000 + ``` + +2. Check if services are running: + ```bash + docker-compose ps + ``` + +3. Restart services: + ```bash + docker-compose restart + ``` + +--- + +## Best Practices + +### Repository Preparation + +**Include Configuration Examples** +```bash +# Provide .env.example with all variables +cp .env .env.example + +# Document each variable with inline comments +AUTH_MODE=genai_gateway # Required: Authentication mode +GENAI_GATEWAY_URL=https://your-gateway.com # Required: GenAI Gateway URL +GENAI_GATEWAY_API_KEY=your-key-here # Required: GenAI Gateway API key +DATABASE_URL=postgresql://... # Required: PostgreSQL connection string +REDIS_URL=redis://... 
# Optional: Redis for caching +``` + +**Write Clear README Sections** +- Even basic setup instructions help +- DocuGen extracts and improves existing content +- Better input leads to better output + +**Use Standard Project Structure** +``` +project/ +├── src/ or app/ # Source code +├── tests/ # Tests +├── docs/ # Documentation +├── config/ # Configuration +├── package.json # Dependencies +├── .env.example # Environment template +└── README.md # Basic docs +``` + +**Add API Documentation** +- Use OpenAPI/Swagger specs +- Add docstrings to route handlers +- **API Reference Agent** extracts this automatically + +**Document Error Handling** +- Use try/except blocks consistently +- **Error Analysis Agent** detects these patterns + +### Documentation Review + +**Always Review Generated Output** +- AI-generated content is 80-90% accurate +- Verify technical details and commands +- Add domain-specific context AI cannot infer +- Correct any misinterpretations + +**Check QA Score** +- Scores below 80 warrant review +- Review QA feedback for specific improvements +- Re-run generation after fixing issues + +**Iterative Improvement** +- Run DocuGen on updated code +- Compare with previous output +- Identify and fix recurring issues +- Customize prompts if needed (see CUSTOMIZATION.md) + +### Model Selection + +**Configure models for all agents** +```bash +# Qwen3-4B-Instruct (default) - Balanced cost/quality +CODE_EXPLORER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +API_REFERENCE_MODEL=Qwen/Qwen3-4B-Instruct-2507 +CALL_GRAPH_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ERROR_ANALYSIS_MODEL=Qwen/Qwen3-4B-Instruct-2507 +ENV_CONFIG_MODEL=Qwen/Qwen3-4B-Instruct-2507 +DEPENDENCY_ANALYZER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +PLANNER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +MERMAID_MODEL=Qwen/Qwen3-4B-Instruct-2507 +QA_VALIDATOR_MODEL=Qwen/Qwen3-4B-Instruct-2507 +WRITER_MODEL=Qwen/Qwen3-4B-Instruct-2507 +``` + +**Model Selection Guidance:** +- **Qwen3-4B-Instruct**: Fast, cost-effective, good for most projects 
(8K context) +- **Qwen2.5-32B-Instruct**: Higher quality, slower, better for complex codebases (32K context) +- **Mix and match**: Use 4B for simple agents (Env Config), 32B for complex agents (Writer) + +**Batching and Caching** +- Generate docs once per release +- Cache results for repeated queries +- Use git hooks to trigger only on significant changes + +### Performance Optimization + +**Monitor Metrics** +- Check blast_radius_report after each run +- Identify expensive agents +- Optimize prompts or reduce scope + +**Adjust Sampling Strategy** +```python +# In tools/repo_tools.py, customize for your domain: +strategy = "pattern_window" # Default - best for most projects +strategy = "smart" # Faster, less context per file +strategy = "full" # Maximum detail, highest token usage +``` + +**Parallel Execution** +- Section writer agents already run in parallel +- 6 agents = ~6x speedup over sequential execution +- Monitor logs to verify parallel execution + +**Token Budget Management** +```bash +# Conservative (faster, lower cost) +MAX_LINES_PER_FILE=300 +MAX_FILES_TO_SCAN=300 + +# Balanced (default) +MAX_LINES_PER_FILE=500 +MAX_FILES_TO_SCAN=500 + +# Aggressive (slower, higher quality for large repos) +MAX_LINES_PER_FILE=700 +MAX_FILES_TO_SCAN=700 +``` + +--- + +## Integration Patterns + +### CI/CD Integration + +Run DocuGen automatically on pull requests or releases. 
Example GitHub Actions workflow:
+
+```yaml
+name: Generate Documentation
+
+on:
+  pull_request:
+    branches: [main]
+
+jobs:
+  generate-docs:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Run DocuGen Micro-Agents
+        env:
+          GENAI_GATEWAY_URL: ${{ secrets.GENAI_GATEWAY_URL }}
+          GENAI_GATEWAY_API_KEY: ${{ secrets.GENAI_GATEWAY_API_KEY }}
+        run: |
+          docker run -v $(pwd):/repo \
+            -e AUTH_MODE=genai_gateway \
+            -e GENAI_GATEWAY_URL=$GENAI_GATEWAY_URL \
+            -e GENAI_GATEWAY_API_KEY=$GENAI_GATEWAY_API_KEY \
+            docugen-microagents:latest \
+            /repo
+
+      - name: Commit README
+        run: |
+          git config user.name "DocuGen Bot"
+          git config user.email "bot@example.com"
+          git add README.md
+          git commit -m "docs: Update README [automated]"
+          git push
+```
+
+### API Usage
+
+Programmatic access for automation:
+
+```python
+import requests
+import time
+
+# Start documentation generation
+response = requests.post(
+    "http://localhost:5001/api/generate-docs",
+    json={"repo_url": "https://github.com/owner/repo"}
+)
+job_id = response.json()["job_id"]
+
+# Poll for completion
+while True:
+    status = requests.get(f"http://localhost:5001/api/status/{job_id}")
+    data = status.json()
+
+    if data["status"] == "complete":
+        # Check QA score
+        qa_score = data.get("qa_score", 0)
+        print(f"Documentation generated with QA score: {qa_score}/100")
+
+        # Get performance metrics
+        metrics = data.get("agent_metrics", {})
+        total_tokens = data.get("total_tokens_used", 0)
+        print(f"Total tokens: {total_tokens}")
+        break
+
+    time.sleep(5)
+
+# Download README
+readme = requests.get(f"http://localhost:5001/api/download/{job_id}")
+with open("README.md", "w") as f:
+    f.write(readme.text)
+```
+
+### Webhook Integration
+
+Trigger documentation generation on git push:
+
+```python
+# webhook_server.py
+from flask import Flask, request
+import requests
+
+app = Flask(__name__)
+
+@app.route("/webhook", methods=["POST"])
+def github_webhook():
+    payload = request.json
+ + # Trigger on push to main branch + if payload["ref"] == "refs/heads/main": + repo_url = payload["repository"]["html_url"] + + # Start DocuGen + response = requests.post( + "http://localhost:5001/api/generate-docs", + json={"repo_url": repo_url} + ) + + return {"status": "started", "job_id": response.json()["job_id"]} + + return {"status": "ignored"} + +if __name__ == "__main__": + app.run(port=8000) +``` + +**Configure GitHub webhook:** +``` +Payload URL: http://your-server:8000/webhook +Content type: application/json +Events: Push events +``` + +--- + +## Advanced Usage + +### Customizing Micro-Agents + +See **CUSTOMIZATION.md** for detailed customization guides: + +- Customizing section writer agents (Code Explorer, API Reference, etc.) +- Customizing coordination agents (Planner, Mermaid, QA Validator) +- Adding custom tools +- Modifying strategic file sampling +- Adding new micro-agents +- Workflow customization + +### Environment Variables Reference + +See **api/.env.example** for complete list with descriptions. 
+ +**Key variables:** +- `AUTH_MODE` - Authentication mode (genai_gateway | keycloak) +- `MAX_LINES_PER_FILE` - File sampling budget (default 500) +- `AGENT_TIMEOUT` - Agent execution timeout in seconds (default 300) +- `AGENT_TEMPERATURE` - Model temperature 0.0-1.0 (default 0.7) +- `AGENT_MAX_TOKENS` - Max tokens per agent response (default 1000) + +### Metrics and Observability + +**Available metrics:** +- Per-agent token usage +- Per-agent execution duration +- LLM call counts +- Tool call counts +- TPS (Tokens Per Second) +- Blast radius (total context usage) + +**Accessing metrics:** +```bash +# View in logs +docker-compose logs backend | grep "agent_metrics" + +# Or via API +curl http://localhost:5001/api/metrics/{job_id} +``` + +--- + +**For customization and development guides, see CUSTOMIZATION.md** diff --git a/sample_solutions/Docugen-Microagents/docs/images/01-homepage-input.png b/sample_solutions/Docugen-Microagents/docs/images/01-homepage-input.png new file mode 100644 index 00000000..07645f29 Binary files /dev/null and b/sample_solutions/Docugen-Microagents/docs/images/01-homepage-input.png differ diff --git a/sample_solutions/Docugen-Microagents/docs/images/02-agent-workflow.png b/sample_solutions/Docugen-Microagents/docs/images/02-agent-workflow.png new file mode 100644 index 00000000..23e7bb3a Binary files /dev/null and b/sample_solutions/Docugen-Microagents/docs/images/02-agent-workflow.png differ diff --git a/sample_solutions/Docugen-Microagents/docs/images/03-progress-tracking.png b/sample_solutions/Docugen-Microagents/docs/images/03-progress-tracking.png new file mode 100644 index 00000000..731cddf3 Binary files /dev/null and b/sample_solutions/Docugen-Microagents/docs/images/03-progress-tracking.png differ diff --git a/sample_solutions/Docugen-Microagents/docs/images/04-generated-readme.png b/sample_solutions/Docugen-Microagents/docs/images/04-generated-readme.png new file mode 100644 index 00000000..0d7460e5 Binary files /dev/null and 
b/sample_solutions/Docugen-Microagents/docs/images/04-generated-readme.png differ diff --git a/sample_solutions/Docugen-Microagents/docs/images/05-pr-agent-execution.png b/sample_solutions/Docugen-Microagents/docs/images/05-pr-agent-execution.png new file mode 100644 index 00000000..bd34fcfd Binary files /dev/null and b/sample_solutions/Docugen-Microagents/docs/images/05-pr-agent-execution.png differ diff --git a/sample_solutions/Docugen-Microagents/docs/images/06-pr-created.png b/sample_solutions/Docugen-Microagents/docs/images/06-pr-created.png new file mode 100644 index 00000000..5b822a26 Binary files /dev/null and b/sample_solutions/Docugen-Microagents/docs/images/06-pr-created.png differ diff --git a/sample_solutions/Docugen-Microagents/docs/images/07-metrics.png b/sample_solutions/Docugen-Microagents/docs/images/07-metrics.png new file mode 100644 index 00000000..db3989c4 Binary files /dev/null and b/sample_solutions/Docugen-Microagents/docs/images/07-metrics.png differ diff --git a/sample_solutions/Docugen-Microagents/ui/.dockerignore b/sample_solutions/Docugen-Microagents/ui/.dockerignore new file mode 100644 index 00000000..aca46af5 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/.dockerignore @@ -0,0 +1,39 @@ +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +yarn.lock + +dist/ +build/ +.next/ +out/ + +.vscode/ +.idea/ +*.swp +*.swo +*~ + +coverage/ +.nyc_output/ + +.env +.env.local +.env.*.local + +.git/ +.gitignore + +README.md +*.md + +Dockerfile +.dockerignore + +.DS_Store +Thumbs.db + +*.log +.cache/ diff --git a/sample_solutions/Docugen-Microagents/ui/Dockerfile b/sample_solutions/Docugen-Microagents/ui/Dockerfile new file mode 100644 index 00000000..c50854e6 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/Dockerfile @@ -0,0 +1,35 @@ +# Multi-stage build for DocuGen AI Frontend +FROM node:20-slim + +# Set working directory +WORKDIR /app + +# Copy package.json only +COPY package.json ./ + +# 
Install dependencies +RUN npm install + +# Copy application code +COPY . . + +# Use existing node user (already has UID 1000 in node:20-slim) +RUN chown -R node:node /app + +# Expose port +EXPOSE 3000 + +# Set environment for Vite +ENV HOST=0.0.0.0 +ENV PORT=3000 +ENV VITE_API_TARGET=http://backend:5001 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ + CMD node -e "require('http').get('http://localhost:3000', (r) => process.exit(r.statusCode === 200 ? 0 : 1))" + +# Switch to non-root user +USER node + +# Run development server with environment variable +CMD ["sh", "-c", "VITE_API_TARGET=http://backend:5001 npm run dev -- --host 0.0.0.0 --port 3000"] diff --git a/sample_solutions/Docugen-Microagents/ui/index.html b/sample_solutions/Docugen-Microagents/ui/index.html new file mode 100644 index 00000000..d8acf776 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/index.html @@ -0,0 +1,13 @@ + + + + + + + DocuGen AI - Automatic Documentation Generator + + +
+ + + diff --git a/sample_solutions/Docugen-Microagents/ui/nginx.conf b/sample_solutions/Docugen-Microagents/ui/nginx.conf new file mode 100644 index 00000000..080a1028 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/nginx.conf @@ -0,0 +1,64 @@ +server { + listen 8080; + server_name _; + + root /usr/share/nginx/html; + index index.html; + + # Gzip compression + gzip on; + gzip_vary on; + gzip_min_length 1024; + gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json; + + # Security headers + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + add_header Referrer-Policy "no-referrer-when-downgrade" always; + + # SPA routing - serve index.html for all routes + location / { + try_files $uri $uri/ /index.html; + } + + # API proxy to backend + location /api/ { + proxy_pass http://backend:5001; + proxy_http_version 1.1; + + # SSE support + proxy_set_header Connection ''; + proxy_set_header Cache-Control 'no-cache'; + proxy_buffering off; + chunked_transfer_encoding on; + + # Standard proxy headers + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Timeouts for long-running requests + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + } + + # Static files caching + location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ { + expires 1y; + add_header Cache-Control "public, immutable"; + } + + # Disable access log for static files + location = /favicon.ico { + log_not_found off; + access_log off; + } + + location = /robots.txt { + log_not_found off; + access_log off; + } +} diff --git a/sample_solutions/Docugen-Microagents/ui/package-lock.json b/sample_solutions/Docugen-Microagents/ui/package-lock.json new file mode 
100644 index 00000000..4d368bc5 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/package-lock.json @@ -0,0 +1,8548 @@ +{ + "name": "docugen-ui", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "docugen-ui", + "version": "1.0.0", + "dependencies": { + "axios": "^1.13.5", + "github-markdown-css": "^5.8.1", + "highlight.js": "^11.11.1", + "lucide-react": "^0.294.0", + "mermaid": "^11.4.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-markdown": "^9.0.1", + "rehype-highlight": "^7.0.0", + "remark-gfm": "^4.0.1" + }, + "devDependencies": { + "@types/react": "^18.2.43", + "@types/react-dom": "^18.2.17", + "@vitejs/plugin-react": "^4.2.1", + "autoprefixer": "^10.4.16", + "eslint": "^8.55.0", + "eslint-plugin-react": "^7.33.2", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "postcss": "^8.4.32", + "tailwindcss": "^3.3.6", + "vite": "^5.0.8" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@antfu/install-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@antfu/install-pkg/-/install-pkg-1.1.0.tgz", + "integrity": "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==", + "license": "MIT", + "dependencies": { + "package-manager-detector": "^1.3.0", + "tinyexec": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + 
"integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + 
"@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + 
"node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } 
+ }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": 
"7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@braintree/sanitize-url": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.1.2.tgz", + "integrity": "sha512-jigsZK+sMF/cuiB7sERuo9V7N9jx+dhmHHnQyDSVdpZwVutaBu7WvNYqMDLSgFgfB30n452TP3vjDAvFC973mA==", + "license": "MIT" + }, + "node_modules/@chevrotain/cst-dts-gen": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.1.1.tgz", + "integrity": "sha512-fRHyv6/f542qQqiRGalrfJl/evD39mAvbJLCekPazhiextEatq1Jx1K/i9gSd5NNO0ds03ek0Cbo/4uVKmOBcw==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/gast": "11.1.1", + "@chevrotain/types": "11.1.1", + "lodash-es": "4.17.23" + } + }, + "node_modules/@chevrotain/gast": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.1.1.tgz", + "integrity": "sha512-Ko/5vPEYy1vn5CbCjjvnSO4U7GgxyGm+dfUZZJIWTlQFkXkyym0jFYrWEU10hyCjrA7rQtiHtBr0EaZqvHFZvg==", + "license": "Apache-2.0", + "dependencies": { + 
"@chevrotain/types": "11.1.1", + "lodash-es": "4.17.23" + } + }, + "node_modules/@chevrotain/regexp-to-ast": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.1.1.tgz", + "integrity": "sha512-ctRw1OKSXkOrR8VTvOxrQ5USEc4sNrfwXHa1NuTcR7wre4YbjPcKw+82C2uylg/TEwFRgwLmbhlln4qkmDyteg==", + "license": "Apache-2.0" + }, + "node_modules/@chevrotain/types": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.1.1.tgz", + "integrity": "sha512-wb2ToxG8LkgPYnKe9FH8oGn3TMCBdnwiuNC5l5y+CtlaVRbCytU0kbVsk6CGrqTL4ZN4ksJa0TXOYbxpbthtqw==", + "license": "Apache-2.0" + }, + "node_modules/@chevrotain/utils": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.1.1.tgz", + "integrity": "sha512-71eTYMzYXYSFPrbg/ZwftSaSDld7UYlS8OQa3lNnn9jzNtpFbaReRRyghzqS7rI3CDaorqpPJJcXGHK+FE1TVQ==", + "license": "Apache-2.0" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": 
"sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + 
"node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", 
+ "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", 
+ "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + 
"minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@iconify/types": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/@iconify/types/-/types-2.0.0.tgz", + "integrity": "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==", + "license": "MIT" + }, + "node_modules/@iconify/utils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@iconify/utils/-/utils-3.1.0.tgz", + "integrity": "sha512-Zlzem1ZXhI1iHeeERabLNzBHdOa4VhQbqAcOQaMKuTuyZCpwKbC2R4Dd0Zo3g9EAc+Y4fiarO8HIHRAth7+skw==", + "license": "MIT", + "dependencies": { + "@antfu/install-pkg": "^1.1.0", + "@iconify/types": "^2.0.0", + "mlly": "^1.8.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": 
"sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@mermaid-js/parser": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-1.0.0.tgz", + "integrity": "sha512-vvK0Hi/VWndxoh03Mmz6wa1KDriSPjS2XMZL/1l19HFwygiObEEoEwSDxOqyLzzAI6J2PU3261JjTMTO7x+BPw==", + "license": "MIT", + "dependencies": { + "langium": "^4.0.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": 
"^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", + "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", + "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", + "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", + "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + 
"node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", + "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", + "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", + "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", + "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", + "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": 
"MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", + "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", + "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", + "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", + "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", + "integrity": 
"sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", + "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", + "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", + "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", + "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.57.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", + "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", + "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", + "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", + "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", + "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + 
"node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", + "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", + "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/d3": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", + "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/d3-axis": "*", + "@types/d3-brush": "*", + "@types/d3-chord": "*", + "@types/d3-color": "*", + "@types/d3-contour": "*", + "@types/d3-delaunay": "*", + "@types/d3-dispatch": "*", + "@types/d3-drag": "*", + "@types/d3-dsv": "*", + "@types/d3-ease": "*", + "@types/d3-fetch": "*", + "@types/d3-force": "*", + "@types/d3-format": "*", + "@types/d3-geo": "*", + "@types/d3-hierarchy": "*", + "@types/d3-interpolate": "*", + "@types/d3-path": "*", + "@types/d3-polygon": "*", + "@types/d3-quadtree": "*", + "@types/d3-random": "*", + "@types/d3-scale": "*", + "@types/d3-scale-chromatic": "*", + "@types/d3-selection": "*", + "@types/d3-shape": "*", + "@types/d3-time": "*", + "@types/d3-time-format": "*", + "@types/d3-timer": "*", + "@types/d3-transition": "*", + "@types/d3-zoom": "*" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", + "license": "MIT" + }, + "node_modules/@types/d3-axis": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", + "integrity": 
"sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-brush": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", + "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-chord": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", + "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-contour": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", + "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==", + "license": "MIT" + }, + "node_modules/@types/d3-dispatch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz", + "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==", + "license": "MIT" + }, + 
"node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-dsv": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", + "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==", + "license": "MIT" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-fetch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", + "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", + "license": "MIT", + "dependencies": { + "@types/d3-dsv": "*" + } + }, + "node_modules/@types/d3-force": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz", + "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==", + "license": "MIT" + }, + "node_modules/@types/d3-format": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", + "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==", + "license": "MIT" + }, + "node_modules/@types/d3-geo": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", + "integrity": 
"sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", + "license": "MIT", + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-hierarchy": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", + "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==", + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-polygon": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", + "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==", + "license": "MIT" + }, + "node_modules/@types/d3-quadtree": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", + "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==", + "license": "MIT" + }, + "node_modules/@types/d3-random": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", + "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + 
"resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==", + "license": "MIT" + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-shape": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.8.tgz", + "integrity": "sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, + "node_modules/@types/d3-time-format": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", + "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==", + "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": 
"sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/geojson": { + "version": "7946.0.16", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz", + "integrity": 
"sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.28", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.28.tgz", + "integrity": "sha512-z9VXpC7MWrhfWipitjNdgCauoMLRdIILQsAEV+ZesIzBq/oUlxk0m3ApZuMFCXdnS4U7KrI+l3WRUEGQ8K1QKw==", + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^18.0.0" + } + }, + 
"node_modules/@types/trusted-types": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "license": "MIT", + "optional": true + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": 
"https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", 
+ "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", + "integrity": "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "is-array-buffer": "^3.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.9", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz", + "integrity": "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.0", + "es-object-atoms": "^1.1.1", + "get-intrinsic": "^1.3.0", + "is-string": "^1.1.1", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": 
"^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.3.tgz", + "integrity": "sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz", + "integrity": "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + 
"node_modules/arraybuffer.prototype.slice": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", + "integrity": "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/async-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", + "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.24", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.24.tgz", + "integrity": "sha512-uHZg7N9ULTVbutaIsDRoUkoS8/h3bdsmVJYZ5l3wv8Cp/6UIIoRDm90hZ+BwxUj/hGBEzLxdHNSKuFpn8WOyZw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001766", + "fraction.js": "^5.3.4", + "picocolors": 
"^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axios": { + "version": "1.13.5", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz", + "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.11", + "form-data": "^4.0.5", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.19", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.19.tgz", + "integrity": 
"sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + 
"baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": 
"sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001770", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001770.tgz", + "integrity": "sha512-x/2CLQ1jHENRbHg5PSId2sXq1CIO1CISvwWAj027ltMVG2UNgW+w9oH2+HzgEIRFembL8bUlXtfbBHR1fCg2xw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": 
"2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chevrotain": { + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.1.1.tgz", + "integrity": "sha512-f0yv5CPKaFxfsPTBzX7vGuim4oIC1/gcS7LUGdBSwl2dU6+FON6LVUksdOo1qJjoUvXNn45urgh8C+0a24pACQ==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/cst-dts-gen": "11.1.1", + "@chevrotain/gast": "11.1.1", + "@chevrotain/regexp-to-ast": 
"11.1.1", + "@chevrotain/types": "11.1.1", + "@chevrotain/utils": "11.1.1", + "lodash-es": "4.17.23" + } + }, + "node_modules/chevrotain-allstar": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/chevrotain-allstar/-/chevrotain-allstar-0.3.1.tgz", + "integrity": "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==", + "license": "MIT", + "dependencies": { + "lodash-es": "^4.17.21" + }, + "peerDependencies": { + "chevrotain": "^11.0.0" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/chokidar/node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } 
+ }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": 
"sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/confbox": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz", + "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==", + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cose-base": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", + "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", + "license": "MIT", + "dependencies": { + "layout-base": "^1.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": 
true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/cytoscape": { + "version": "3.33.1", + "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.33.1.tgz", + "integrity": "sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==", + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/cytoscape-cose-bilkent": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", + "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", + "license": "MIT", + "dependencies": { + "cose-base": "^1.0.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", + "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", + "license": "MIT", + "dependencies": { + "cose-base": "^2.2.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/cose-base": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", + "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", + "license": "MIT", + "dependencies": { + "layout-base": "^2.0.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/layout-base": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", + "integrity": 
"sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==", + "license": "MIT" + }, + "node_modules/d3": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", + "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", + "license": "ISC", + "dependencies": { + "d3-array": "3", + "d3-axis": "3", + "d3-brush": "3", + "d3-chord": "3", + "d3-color": "3", + "d3-contour": "4", + "d3-delaunay": "6", + "d3-dispatch": "3", + "d3-drag": "3", + "d3-dsv": "3", + "d3-ease": "3", + "d3-fetch": "3", + "d3-force": "3", + "d3-format": "3", + "d3-geo": "3", + "d3-hierarchy": "3", + "d3-interpolate": "3", + "d3-path": "3", + "d3-polygon": "3", + "d3-quadtree": "3", + "d3-random": "3", + "d3-scale": "4", + "d3-scale-chromatic": "3", + "d3-selection": "3", + "d3-shape": "3", + "d3-time": "3", + "d3-time-format": "4", + "d3-timer": "3", + "d3-transition": "3", + "d3-zoom": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-axis": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", + "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-brush": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", + "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + 
"d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "3", + "d3-transition": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-chord": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", + "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", + "license": "ISC", + "dependencies": { + "d3-path": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-contour": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", + "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", + "license": "ISC", + "dependencies": { + "d3-array": "^3.2.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", + "license": "ISC", + "dependencies": { + "delaunator": "5" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": 
"sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dsv": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", + "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", + "license": "ISC", + "dependencies": { + "commander": "7", + "iconv-lite": "0.6", + "rw": "1" + }, + "bin": { + "csv2json": "bin/dsv2json.js", + "csv2tsv": "bin/dsv2dsv.js", + "dsv2dsv": "bin/dsv2dsv.js", + "dsv2json": "bin/dsv2json.js", + "json2csv": "bin/json2dsv.js", + "json2dsv": "bin/json2dsv.js", + "json2tsv": "bin/json2dsv.js", + "tsv2csv": "bin/dsv2dsv.js", + "tsv2json": "bin/dsv2json.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-fetch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", + "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", + "license": "ISC", + "dependencies": { + "d3-dsv": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-force": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", + "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-quadtree": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + 
"version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.2.tgz", + "integrity": "sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-geo": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", + "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2.5.0 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-hierarchy": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", + "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-polygon": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", + "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-quadtree": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", + "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-random": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", + "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-sankey": { + "version": "0.12.3", + "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz", + "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "1 - 2", + "d3-shape": "^1.2.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-array": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz", + "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==", + "license": "BSD-3-Clause", + "dependencies": { + "internmap": "^1.0.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz", + "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==", + "license": "BSD-3-Clause" + }, + "node_modules/d3-sankey/node_modules/d3-shape": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz", + "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-path": "1" + } + }, + "node_modules/d3-sankey/node_modules/internmap": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz", + "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==", + "license": "ISC" + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-interpolate": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + 
"dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/dagre-d3-es": { + "version": "7.0.13", + "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.13.tgz", + "integrity": "sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==", + "license": "MIT", + 
"dependencies": { + "d3": "^7.9.0", + "lodash-es": "^4.17.21" + } + }, + "node_modules/data-view-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", + "integrity": "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz", + "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/inspect-js" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz", + "integrity": "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/dayjs": { + "version": "1.11.19", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.19.tgz", + "integrity": "sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + 
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", + "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/delaunator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", + "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", + "license": "ISC", + "dependencies": { + "robust-predicates": "^3.0.2" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": 
"sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dompurify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.1.tgz", + "integrity": "sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==", + "license": "(MPL-2.0 OR Apache-2.0)", + "optionalDependencies": { + "@types/trusted-types": "^2.0.7" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.286", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.286.tgz", + "integrity": "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==", + "dev": true, + "license": "ISC" + }, + "node_modules/es-abstract": { + "version": "1.24.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.1.tgz", + "integrity": "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.2", + "arraybuffer.prototype.slice": "^1.0.4", + 
"available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "data-view-buffer": "^1.0.2", + "data-view-byte-length": "^1.0.2", + "data-view-byte-offset": "^1.0.1", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "es-set-tostringtag": "^2.1.0", + "es-to-primitive": "^1.3.0", + "function.prototype.name": "^1.1.8", + "get-intrinsic": "^1.3.0", + "get-proto": "^1.0.1", + "get-symbol-description": "^1.1.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "internal-slot": "^1.1.0", + "is-array-buffer": "^3.0.5", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.2", + "is-negative-zero": "^2.0.3", + "is-regex": "^1.2.1", + "is-set": "^2.0.3", + "is-shared-array-buffer": "^1.0.4", + "is-string": "^1.1.1", + "is-typed-array": "^1.1.15", + "is-weakref": "^1.1.1", + "math-intrinsics": "^1.1.0", + "object-inspect": "^1.13.4", + "object-keys": "^1.1.1", + "object.assign": "^4.1.7", + "own-keys": "^1.0.1", + "regexp.prototype.flags": "^1.5.4", + "safe-array-concat": "^1.1.3", + "safe-push-apply": "^1.0.0", + "safe-regex-test": "^1.1.0", + "set-proto": "^1.0.0", + "stop-iteration-iterator": "^1.1.0", + "string.prototype.trim": "^1.2.10", + "string.prototype.trimend": "^1.0.9", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.3", + "typed-array-byte-length": "^1.0.3", + "typed-array-byte-offset": "^1.0.4", + "typed-array-length": "^1.0.7", + "unbox-primitive": "^1.1.0", + "which-typed-array": "^1.1.19" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": 
"sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.2.tgz", + "integrity": "sha512-BrUQ0cPTB/IwXj23HtwHjS9n7O4h9FX94b4xc5zlTHxeLgTAdzYUDyy6KdExAl9lbN5rtfe44xpjpmj9grxs5w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.1", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.1.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.3.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "iterator.prototype": "^1.1.5", + "safe-array-concat": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": 
"^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz", + "integrity": "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-to-primitive": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", + "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-symbol": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + 
"@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. 
Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.37.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.5.tgz", + "integrity": "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.3", + "array.prototype.tosorted": "^1.1.4", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.2.1", + "estraverse": "^5.3.0", + "hasown": "^2.0.2", + 
"jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.9", + "object.fromentries": "^2.0.8", + "object.values": "^1.2.1", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.12", + "string.prototype.repeat": "^1.0.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", + "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.4.26", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz", + "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "eslint": ">=8.40" + } + }, + "node_modules/eslint-plugin-react/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", 
+ "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": 
"https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": 
"sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + 
"node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz", + "integrity": "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "functions-have-names": "^1.2.3", + "hasown": "^2.0.2", + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", + "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "dev": true, + 
"license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/generator-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", + "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-symbol-description": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", + "integrity": "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/github-markdown-css": { + "version": "5.9.0", + "resolved": "https://registry.npmjs.org/github-markdown-css/-/github-markdown-css-5.9.0.tgz", + "integrity": "sha512-tmT5sY+zvg2302XLYEfH2mtkViIM1SWf2nvYoF5N1ZsO0V6B2qZTiw3GOzw4vpjLygK/KG35qRlPFweHqfzz5w==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globalthis": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.1", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + 
"node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/hachure-fill": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/hachure-fill/-/hachure-fill-0.5.2.tgz", + "integrity": "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==", + "license": "MIT" + }, + "node_modules/has-bigints": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", + "integrity": "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", + "integrity": 
"sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-is-element": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz", + "integrity": "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": 
{ + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-text": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz", + "integrity": "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "hast-util-is-element": "^3.0.0", + "unist-util-find-after": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/highlight.js": { + "version": "11.11.1", + "resolved": 
"https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", + "integrity": "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/html-url-attributes": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", + "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": 
"sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + "license": "MIT" + }, + "node_modules/internal-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", + "integrity": "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + 
"license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", + "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-async-function": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", + "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "async-function": "^1.0.0", + "call-bound": "^1.0.3", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", + "integrity": "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-boolean-object": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", + "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": 
"sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", + "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz", + "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz", + "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz", + "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.4", + "generator-function": "^2.0.0", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-map": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number-object": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz", + "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz", + "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-string": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz", + "integrity": "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz", + "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-symbols": "^1.1.0", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } 
+ }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz", + "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz", + "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": 
"sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/iterator.prototype": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz", + "integrity": "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "get-proto": "^1.0.0", + "has-symbols": "^1.1.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + 
"object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/katex": { + "version": "0.16.28", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.28.tgz", + "integrity": "sha512-YHzO7721WbmAL6Ov1uzN/l5mY5WWWhJBSW+jq4tkfZfsxmo1hu6frS0EOswvjBUnWE6NtjEs48SFn5CQESRLZg==", + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "license": "MIT", + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/katex/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/khroma": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", + "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" + }, + "node_modules/langium": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/langium/-/langium-4.2.1.tgz", + "integrity": "sha512-zu9QWmjpzJcomzdJQAHgDVhLGq5bLosVak1KVa40NzQHXfqr4eAHupvnPOVXEoLkg6Ocefvf/93d//SB7du4YQ==", + "license": "MIT", + "dependencies": { + "chevrotain": "~11.1.1", + "chevrotain-allstar": "~0.3.1", + "vscode-languageserver": "~9.0.1", + "vscode-languageserver-textdocument": "~1.0.11", + "vscode-uri": "~3.1.0" + }, + "engines": { + "node": ">=20.10.0", + "npm": ">=10.2.3" + } + }, + "node_modules/layout-base": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", + "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==", + "license": "MIT" + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash-es": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.23.tgz", + "integrity": 
"sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==", + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lowlight": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-3.3.0.tgz", + "integrity": "sha512-0JNhgFoPvP6U6lE/UdVsSq99tn6DhjjpAj5MxG49ewd2mOBVtwWYIT8ClyABhq198aXXODMU6Ox8DrGy/CpTZQ==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.0.0", + "highlight.js": "~11.11.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + 
"node_modules/lucide-react": { + "version": "0.294.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.294.0.tgz", + "integrity": "sha512-V7o0/VECSGbLHn3/1O67FUgBwWB+hmzshrgDVRJQhMh8uj5D3HBuIvhuAmQTtlupILSplwIZg5FTc4tTKMA2SA==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/marked": { + "version": "16.4.2", + "resolved": "https://registry.npmjs.org/marked/-/marked-16.4.2.tgz", + "integrity": "sha512-TI3V8YYWvkVf3KJe1dRkpnjs68JUPyEa5vjKrp1XEEJUAOaQc+Qj+L1qWbPd0SJuAdQkFU0h73sXXqwDYxsiDA==", + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": 
"sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + 
"@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + 
"unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/mermaid": { + "version": "11.12.3", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.12.3.tgz", + "integrity": "sha512-wN5ZSgJQIC+CHJut9xaKWsknLxaFBwCPwPkGTSUYrTiHORWvpT8RxGk849HPnpUAQ+/9BPRqYb80jTpearrHzQ==", + "license": "MIT", + "dependencies": { + "@braintree/sanitize-url": "^7.1.1", + "@iconify/utils": "^3.0.1", + 
"@mermaid-js/parser": "^1.0.0", + "@types/d3": "^7.4.3", + "cytoscape": "^3.29.3", + "cytoscape-cose-bilkent": "^4.1.0", + "cytoscape-fcose": "^2.2.0", + "d3": "^7.9.0", + "d3-sankey": "^0.12.3", + "dagre-d3-es": "7.0.13", + "dayjs": "^1.11.18", + "dompurify": "^3.2.5", + "katex": "^0.16.22", + "khroma": "^2.1.0", + "lodash-es": "^4.17.23", + "marked": "^16.2.1", + "roughjs": "^4.6.6", + "stylis": "^4.3.6", + "ts-dedent": "^2.2.0", + "uuid": "^11.1.0" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": 
"sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": 
"https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": 
"GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + 
"url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + 
"license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": 
"https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/mlly": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.8.0.tgz", + "integrity": "sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==", + "license": "MIT", + "dependencies": { + "acorn": "^8.15.0", + "pathe": "^2.0.3", + "pkg-types": "^1.3.1", + "ufo": "^1.6.1" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + 
"license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-exports-info": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/node-exports-info/-/node-exports-info-1.6.0.tgz", + "integrity": "sha512-pyFS63ptit/P5WqUkt+UUfe+4oevH+bFeIiPPdfb0pFeYEu/1ELnJu5l+5EcTKYL5M7zaAa7S8ddywgXypqKCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "array.prototype.flatmap": "^1.3.3", + "es-errors": "^1.3.0", + "object.entries": "^1.1.9", + "semver": "^6.3.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": 
{ + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.9.tgz", + "integrity": "sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + 
"call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.values": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.1.tgz", + "integrity": "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + 
"word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/own-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", + "integrity": "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.6", + "object-keys": "^1.1.1", + "safe-push-apply": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-manager-detector": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-1.6.0.tgz", + "integrity": "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==", + "license": "MIT" + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": 
"sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/path-data-parser": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/path-data-parser/-/path-data-parser-0.1.0.tgz", + "integrity": "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==", + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": 
"sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-types": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz", + "integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==", + "license": "MIT", + "dependencies": { + "confbox": "^0.1.8", + "mlly": "^1.7.4", + "pathe": "^2.0.1" + } + }, + "node_modules/points-on-curve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/points-on-curve/-/points-on-curve-0.2.0.tgz", + "integrity": "sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==", + "license": "MIT" + }, + "node_modules/points-on-path": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/points-on-path/-/points-on-path-0.2.1.tgz", + "integrity": "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==", + "license": "MIT", + "dependencies": { + "path-data-parser": "0.1.0", + "points-on-curve": "0.2.0" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": 
"^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-import/node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", 
+ "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + 
"node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": 
"https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/react-markdown": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-9.1.0.tgz", + "integrity": "sha512-xaijuJB0kzGiUdG7nc2MOMDUDBWPyGAjZtUrow9XxUeua8IqeP+VlIfAZ3bphpcLTnSZXz6z9jcVC/TCwbfgdw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": 
true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/read-cache/node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", + "integrity": "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.1", + "which-builtin-type": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", + "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": 
">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/rehype-highlight": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/rehype-highlight/-/rehype-highlight-7.0.2.tgz", + "integrity": "sha512-k158pK7wdC2qL3M5NcZROZ2tR/l7zOzjxXd5VGdcfIyoijjQqpHd3JKtYSBDpDZ38UI2WJWuFAtkMDxmx5kstA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-to-text": "^4.0.0", + "lowlight": "^3.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + 
"license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/resolve": { + "version": "2.0.0-next.6", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.6.tgz", + "integrity": "sha512-3JmVl5hMGtJ3kMmB3zi3DL25KfkCEyy3Tw7Gmw7z5w8M9WlwoPFnIvwChzu1+cF3iaK3sp18hhPz8ANeimdJfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "is-core-module": "^2.16.1", + "node-exports-info": "^1.6.0", + "object-keys": "^1.1.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": 
"sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/robust-predicates": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", + "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==", + "license": "Unlicense" + }, + "node_modules/rollup": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", + "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + "@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + "@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", + "@rollup/rollup-linux-arm-musleabihf": "4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + 
"@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/rollup/node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/roughjs": { + "version": "4.6.6", + "resolved": "https://registry.npmjs.org/roughjs/-/roughjs-4.6.6.tgz", + "integrity": "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==", + "license": "MIT", + "dependencies": { + "hachure-fill": "^0.5.2", + "path-data-parser": "^0.1.0", + "points-on-curve": "^0.2.0", + "points-on-path": "^0.2.1" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": 
"https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rw": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", + "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==", + "license": "BSD-3-Clause" + }, + "node_modules/safe-array-concat": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", + "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "has-symbols": "^1.1.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-push-apply": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", + "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safer-buffer": { + 
"version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-proto": 
{ + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz", + "integrity": "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": 
"sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stop-iteration-iterator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz", + "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "internal-slot": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", + "integrity": "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.6", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "regexp.prototype.flags": "^1.5.3", + "set-function-name": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.repeat": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", + "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.10", + 
"resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", + "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-data-property": "^1.1.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-object-atoms": "^1.0.0", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", + "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": 
"sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + "node_modules/style-to-object": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": 
"sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sucrase/node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", 
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tailwindcss/node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/tailwindcss/node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/tailwindcss/node_modules/micromatch": { + "version": "4.0.8", + "resolved": 
"https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/tailwindcss/node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/tailwindcss/node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": 
"sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 
|| ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/to-regex-range/node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + 
"node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "license": "MIT", + "engines": { + "node": ">=6.10" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", + "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz", + "integrity": "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz", + "integrity": "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.15", + "reflect.getprototypeof": "^1.0.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", + "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0", + "reflect.getprototypeof": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ufo": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.3.tgz", + "integrity": 
"sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==", + "license": "MIT" + }, + "node_modules/unbox-primitive": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz", + "integrity": "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-bigints": "^1.0.2", + "has-symbols": "^1.1.0", + "which-boxed-primitive": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified/node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-find-after": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz", + "integrity": "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + 
"unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz", + "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/uuid": { + "version": "11.1.0", + "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + 
"sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/vscode-jsonrpc": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", + "integrity": "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/vscode-languageserver": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.1.tgz", + "integrity": "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==", + "license": "MIT", + "dependencies": { + "vscode-languageserver-protocol": "3.17.5" + }, + "bin": { + "installServerIntoExtension": "bin/installServerIntoExtension" + } + }, + "node_modules/vscode-languageserver-protocol": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz", + "integrity": "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==", + "license": "MIT", + "dependencies": { + 
"vscode-jsonrpc": "8.2.0", + "vscode-languageserver-types": "3.17.5" + } + }, + "node_modules/vscode-languageserver-textdocument": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.12.tgz", + "integrity": "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==", + "license": "MIT" + }, + "node_modules/vscode-languageserver-types": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", + "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==", + "license": "MIT" + }, + "node_modules/vscode-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz", + "integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==", + "license": "MIT" + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz", + "integrity": "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-bigint": "^1.1.0", + "is-boolean-object": "^1.2.1", + "is-number-object": "^1.1.1", + "is-string": "^1.1.1", + "is-symbol": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz", + "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.1.0", + "is-finalizationregistry": "^1.1.0", + "is-generator-function": "^1.0.10", + "is-regex": "^1.2.1", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.1.0", + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.20", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.20.tgz", + "integrity": "sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/sample_solutions/Docugen-Microagents/ui/package.json b/sample_solutions/Docugen-Microagents/ui/package.json new file mode 100644 index 00000000..f5218f83 --- /dev/null +++ 
b/sample_solutions/Docugen-Microagents/ui/package.json @@ -0,0 +1,55 @@ +{ + "name": "docugen-ui", + "version": "1.0.0", + "private": true, + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview", + "lint": "eslint . --ext js,jsx --report-unused-disable-directives --max-warnings 0" + }, + "dependencies": { + "axios": "^1.13.5", + "github-markdown-css": "^5.8.1", + "highlight.js": "^11.11.1", + "lucide-react": "^0.294.0", + "mermaid": "^11.4.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-markdown": "^9.0.1", + "rehype-highlight": "^7.0.0", + "remark-gfm": "^4.0.1" + }, + "devDependencies": { + "@types/react": "^18.2.43", + "@types/react-dom": "^18.2.17", + "@vitejs/plugin-react": "^4.2.1", + "autoprefixer": "^10.4.16", + "eslint": "^8.55.0", + "eslint-plugin-react": "^7.33.2", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "postcss": "^8.4.32", + "tailwindcss": "^3.3.6", + "vite": "^5.0.8" + }, + "overrides": { + "mermaid": "^11.4.0", + "shell-quote": "^1.8.1", + "loader-utils": "^3.2.1", + "minimatch": "^3.0.5", + "node-forge": "^1.3.2", + "nth-check": "^2.0.1", + "object-path": "^0.11.8", + "qs": "^6.14.1", + "semver": "^7.5.2", + "serialize-javascript": "^6.0.0", + "webpack-dev-middleware": "^5.3.4", + "ansi-html": "^0.0.8", + "braces": "^3.0.3", + "cross-spawn": "^7.0.5", + "form-data": "^4.0.4", + "http-proxy-middleware": "^3.0.3" + } +} diff --git a/sample_solutions/Docugen-Microagents/ui/postcss.config.js b/sample_solutions/Docugen-Microagents/ui/postcss.config.js new file mode 100644 index 00000000..2e7af2b7 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +} diff --git a/sample_solutions/Docugen-Microagents/ui/src/App.jsx b/sample_solutions/Docugen-Microagents/ui/src/App.jsx new file mode 100644 index 00000000..b8e969c9 --- /dev/null +++ 
b/sample_solutions/Docugen-Microagents/ui/src/App.jsx @@ -0,0 +1,137 @@ +import { useState, useEffect } from 'react' +import Header from './components/Header' +import DocGenInterface from './components/DocGenInterface' +import AgentLogPanel from './components/AgentLogPanel' +import ResultsViewer from './components/ResultsViewer' +import ProjectSelector from './components/ProjectSelector' +import { api } from './services/api' + +function App() { + const [currentJob, setCurrentJob] = useState(null) + const [logs, setLogs] = useState([]) + const [logsOpen, setLogsOpen] = useState(false) + const [generatedReadme, setGeneratedReadme] = useState(null) + const [workflowStatus, setWorkflowStatus] = useState('idle') // idle, running, completed, failed + const [workflowError, setWorkflowError] = useState(null) // Store error message from backend + const [awaitingProjectSelection, setAwaitingProjectSelection] = useState(false) + const [detectedProjects, setDetectedProjects] = useState(null) + const [skippedFolders, setSkippedFolders] = useState(null) + + // Poll job status to check for project selection + useEffect(() => { + if (!currentJob || workflowStatus === 'completed' || workflowStatus === 'failed') { + return + } + + const pollInterval = setInterval(async () => { + try { + const status = await api.getJobStatus(currentJob) + + if (status.awaiting_project_selection && !awaitingProjectSelection) { + // Only update once when state changes to awaiting_selection + console.log('DEBUG: API Status Response:', status) + console.log('DEBUG: Skipped Folders from API:', status.skipped_folders) + setAwaitingProjectSelection(true) + setDetectedProjects(status.detected_projects) + setSkippedFolders(status.skipped_folders || []) + setWorkflowStatus('awaiting_selection') + } + + // Check for error in job status + if (status.error_message && status.status === 'failed') { + setWorkflowError(status.error_message) + setWorkflowStatus('failed') + } + } catch (error) { + console.error('Failed 
to poll job status:', error) + } + }, 2000) // Poll every 2 seconds + + return () => clearInterval(pollInterval) + }, [currentJob, workflowStatus, awaitingProjectSelection]) + + const handleJobStart = (jobId) => { + setCurrentJob(jobId) + setLogs([]) + setGeneratedReadme(null) + setWorkflowStatus('running') + setWorkflowError(null) // Clear previous errors + setAwaitingProjectSelection(false) + setDetectedProjects(null) + setSkippedFolders(null) + // Don't auto-open logs panel - let user click to open + } + + const handleLogReceived = (log) => { + setLogs(prevLogs => [...prevLogs, log]) + + // Check if workflow is complete - ONLY match the final completion message + if (log.log_type === 'success' && log.message.includes('Documentation generation complete')) { + setWorkflowStatus('completed') + } else if (log.log_type === 'error') { + setWorkflowStatus('failed') + // Errors are already shown in agent logs, no need for additional alert + } + } + + const handleReadmeGenerated = (readme) => { + setGeneratedReadme(readme) + } + + const handleProjectsSelected = () => { + setAwaitingProjectSelection(false) + setDetectedProjects(null) + setSkippedFolders(null) + setWorkflowStatus('running') + } + + return ( +
+
setLogsOpen(!logsOpen)} + hasActiveJob={!!currentJob} + workflowStatus={workflowStatus} + /> + +
+
+ {/* Top: Input Interface */} + + + {/* Middle: Project Selection (shown when multiple projects detected) */} + {awaitingProjectSelection && detectedProjects && ( + + )} + + {/* Bottom: Results Viewer */} + +
+
+ + setLogsOpen(false)} + logs={logs} + currentJob={currentJob} + /> +
+ ) +} + +export default App diff --git a/sample_solutions/Docugen-Microagents/ui/src/components/AgentLogPanel.jsx b/sample_solutions/Docugen-Microagents/ui/src/components/AgentLogPanel.jsx new file mode 100644 index 00000000..b43f61b9 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/components/AgentLogPanel.jsx @@ -0,0 +1,138 @@ +import { X, Bot, CheckCircle, AlertCircle, Info, Zap } from 'lucide-react' + +function AgentLogPanel({ isOpen, onClose, logs, currentJob }) { + if (!isOpen) return null + + const getLogIcon = (logType) => { + switch (logType) { + case 'agent_start': + return + case 'agent_complete': + return + case 'agent_thinking': + return + case 'error': + return + case 'success': + return + default: + return + } + } + + const getLogColor = (logType) => { + switch (logType) { + case 'agent_start': + return 'bg-blue-50 border-blue-200' + case 'agent_complete': + return 'bg-green-50 border-green-200' + case 'agent_thinking': + return 'bg-yellow-50 border-yellow-200' + case 'error': + return 'bg-red-50 border-red-200' + case 'success': + return 'bg-green-50 border-green-200' + default: + return 'bg-gray-50 border-gray-200' + } + } + + const formatTimestamp = (timestamp) => { + return new Date(timestamp).toLocaleTimeString() + } + + return ( +
+
+ {/* Header */} +
+
+

Agent Activity Log

+ {currentJob && ( +

Job ID: {currentJob}

+ )} +
+ +
+ + {/* Logs Content */} +
+ {logs.length === 0 ? ( +
+ +

No activity yet

+

Agent logs will appear here during generation

+
+ ) : ( + logs.map((log, index) => ( +
+
+
+ {getLogIcon(log.log_type)} +
+
+
+ {log.agent_name && ( + + {log.agent_name} + + )} + {log.timestamp && ( + + {formatTimestamp(log.timestamp)} + + )} +
+

{log.message}

+ + {/* Metadata */} + {log.metadata && Object.keys(log.metadata).length > 0 && ( +
+ {JSON.stringify(log.metadata, null, 2)} +
+ )} +
+
+
+ )) + )} +
+ + {/* Footer Stats */} + {logs.length > 0 && ( +
+
+ + Total Events: {logs.length} + +
+ + Agents: + {new Set(logs.filter(l => l.agent_name).map(l => l.agent_name)).size} + + + {logs.filter(l => l.log_type === 'error').length > 0 && ( + + Errors: + {logs.filter(l => l.log_type === 'error').length} + + + )} +
+
+
+ )} +
+
+ ) +} + +export default AgentLogPanel diff --git a/sample_solutions/Docugen-Microagents/ui/src/components/DocGenInterface.jsx b/sample_solutions/Docugen-Microagents/ui/src/components/DocGenInterface.jsx new file mode 100644 index 00000000..0ed55578 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/components/DocGenInterface.jsx @@ -0,0 +1,220 @@ +import { useState, useEffect } from 'react' +import { Github, Play, Loader2, CheckCircle, AlertCircle, Clock } from 'lucide-react' +import { api } from '../services/api' + +function DocGenInterface({ onJobStart, onLogReceived, currentJob, workflowStatus, workflowError }) { + const [repoUrl, setRepoUrl] = useState('') + const [loading, setLoading] = useState(false) + const [error, setError] = useState(null) + const [sseConnection, setSseConnection] = useState(null) + + // Update local error when workflow error changes + useEffect(() => { + if (workflowError) { + setError(workflowError) + setLoading(false) + } + }, [workflowError]) + + // Clean up SSE connection on unmount + useEffect(() => { + return () => { + if (sseConnection) { + sseConnection.close() + } + } + }, [sseConnection]) + + // Connect to SSE stream when job starts + useEffect(() => { + if (currentJob && !sseConnection) { + console.log('Connecting to SSE for job:', currentJob) + + const eventSource = api.connectToLogs( + currentJob, + (log) => { + console.log('Received log:', log) + onLogReceived(log) + + // Stop loading when workflow completes or fails + if (log.log_type === 'success' && log.message.includes('complete')) { + setLoading(false) + } else if (log.log_type === 'error') { + setLoading(false) + } + }, + (error) => { + console.error('SSE connection error:', error) + setError('Connection to the server was lost. 
Please try again.') + setLoading(false) + } + ) + + setSseConnection(eventSource) + + return () => { + console.log('Closing SSE connection') + eventSource.close() + } + } + }, [currentJob]) + + // Stop loading when workflow status changes to completed or failed + useEffect(() => { + if (workflowStatus === 'completed' || workflowStatus === 'failed') { + setLoading(false) + } + }, [workflowStatus]) + + const handleSubmit = async (e) => { + e.preventDefault() + setError(null) + setLoading(true) + + try { + // Validate URL + if (!repoUrl.trim()) { + throw new Error('Please enter a GitHub repository URL.') + } + + if (!repoUrl.includes('github.com')) { + throw new Error('Please provide a valid GitHub repository URL (e.g., https://github.com/username/repository)') + } + + // Note: Subfolder URLs are now supported! (e.g., /tree/main/path/to/folder) + + // Start documentation generation + const response = await api.generateDocs(repoUrl) + onJobStart(response.job_id) + + } catch (err) { + // Extract error message from axios error response or use default + const errorMessage = err.response?.data?.detail || err.response?.data?.message || err.message || 'Failed to start documentation generation. Please try again.' + setError(errorMessage) + setLoading(false) + } + } + + const isDisabled = loading || workflowStatus === 'running' + + const getStatusMessage = () => { + switch (workflowStatus) { + case 'running': + return { + icon: , + bg: 'bg-blue-50 border-blue-200', + text: 'text-blue-800', + message: 'AI agents are analyzing your repository and generating documentation. Click "Agent Logs" in the header to watch the progress.' + } + case 'completed': + return { + icon: , + bg: 'bg-green-50 border-green-200', + text: 'text-green-800', + message: 'Documentation generation complete! Check the generated README below.' 
+ } + case 'failed': + return { + icon: , + bg: 'bg-red-50 border-red-200', + text: 'text-red-800', + message: workflowError || 'Documentation generation failed. Please check the Agent Logs for details.' + } + default: + return null + } + } + + const statusInfo = getStatusMessage() + + return ( +
+
+

Generate Documentation

+

Enter a GitHub repository URL to generate comprehensive documentation

+
+ +
+ {/* Repository URL Input */} +
+ +
+
+ +
+ setRepoUrl(e.target.value)} + placeholder="https://github.com/username/repository" + disabled={isDisabled} + className="w-full pl-10 pr-4 py-3 border border-gray-300 rounded-lg focus:ring-2 focus:ring-blue-500 focus:border-transparent disabled:bg-gray-100 disabled:cursor-not-allowed" + /> +
+

+ Max repo size: 10GB. Analyzes up to 500 files (1MB max each, up to 500 lines/file). All limits configurable in backend .env

+
+ + + {/* Error Message */} + {error && ( +
+
+ +
+

{error}

+
+ +
+
+ )} + + {/* Submit Button */} + +
+ + {/* Workflow Status */} + {currentJob && statusInfo && ( +
+
+
+ {statusInfo.icon} +
+
+

+ {statusInfo.message} +

+
+
+
+ )} +
+ ) +} + +export default DocGenInterface diff --git a/sample_solutions/Docugen-Microagents/ui/src/components/Header.jsx b/sample_solutions/Docugen-Microagents/ui/src/components/Header.jsx new file mode 100644 index 00000000..9c654bad --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/components/Header.jsx @@ -0,0 +1,70 @@ +import { FileText, Activity, CheckCircle, XCircle, Clock } from 'lucide-react' + +function Header({ onLogsToggle, hasActiveJob, workflowStatus }) { + const getStatusIcon = () => { + switch (workflowStatus) { + case 'running': + return + case 'completed': + return + case 'failed': + return + default: + return null + } + } + + const getStatusText = () => { + switch (workflowStatus) { + case 'running': + return 'Generating...' + case 'completed': + return 'Completed' + case 'failed': + return 'Failed' + default: + return 'Ready' + } + } + + return ( +
+
+
+
+
+ +
+
+

+ DocuGen AI +

+

Automatic Documentation Generator

+
+
+ +
+ {/* Status Indicator */} + {hasActiveJob && ( +
+ {getStatusIcon()} + {getStatusText()} +
+ )} + + {/* Agent Logs Button */} + +
+
+
+
+ ) +} + +export default Header diff --git a/sample_solutions/Docugen-Microagents/ui/src/components/Mermaid.jsx b/sample_solutions/Docugen-Microagents/ui/src/components/Mermaid.jsx new file mode 100644 index 00000000..66368002 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/components/Mermaid.jsx @@ -0,0 +1,44 @@ +import { useEffect, useRef } from 'react' +import mermaid from 'mermaid' + +function Mermaid({ chart }) { + const containerRef = useRef(null) + + useEffect(() => { + // Initialize mermaid with config + mermaid.initialize({ + startOnLoad: false, + theme: 'default', + securityLevel: 'strict', + fontFamily: 'ui-sans-serif, system-ui, sans-serif' + }) + }, []) + + useEffect(() => { + if (containerRef.current && chart) { + const renderDiagram = async () => { + try { + // Clear previous content + containerRef.current.innerHTML = '' + + // Generate new ID for each render to avoid conflicts + const newId = `mermaid-${Date.now()}-${Math.random().toString(36).substr(2, 9)}` + + // Render the mermaid chart + const { svg } = await mermaid.render(newId, chart) + containerRef.current.innerHTML = svg + } catch (error) { + console.error('Mermaid rendering error:', error) + console.error('Chart content:', chart) + containerRef.current.innerHTML = `
Error rendering diagram: ${error.message}
` + } + } + + renderDiagram() + } + }, [chart]) + + return
+} + +export default Mermaid diff --git a/sample_solutions/Docugen-Microagents/ui/src/components/ProjectSelector.jsx b/sample_solutions/Docugen-Microagents/ui/src/components/ProjectSelector.jsx new file mode 100644 index 00000000..279da952 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/components/ProjectSelector.jsx @@ -0,0 +1,275 @@ +import { useState, useEffect } from 'react' +import { Folder, CheckSquare, Square, Play, Loader2, CheckCheck, XSquare, Search, AlertCircle } from 'lucide-react' +import { api } from '../services/api' + +function ProjectSelector({ currentJob, detectedProjects, skippedFolders, onProjectsSelected }) { + const [selectedPaths, setSelectedPaths] = useState([]) + const [submitting, setSubmitting] = useState(false) + const [searchQuery, setSearchQuery] = useState('') + const [error, setError] = useState(null) + + // Debug logging + console.log('DEBUG ProjectSelector - skippedFolders:', skippedFolders) + console.log('DEBUG ProjectSelector - skippedFolders type:', typeof skippedFolders) + console.log('DEBUG ProjectSelector - skippedFolders length:', skippedFolders?.length) + + // Initialize with empty selection (better UX for large repos) + useEffect(() => { + if (detectedProjects && detectedProjects.length > 0) { + // For small repos (< 10 projects), select all. For large repos, select none. 
+ if (detectedProjects.length <= 10) { + setSelectedPaths(detectedProjects.map(p => p.path)) + } else { + setSelectedPaths([]) + } + } + }, [detectedProjects]) + + const selectAll = () => { + const filtered = getFilteredProjects() + setSelectedPaths(prev => { + const newPaths = filtered.map(p => p.path) + return Array.from(new Set([...prev, ...newPaths])) + }) + } + + const deselectAll = () => { + const filtered = getFilteredProjects() + const filteredPaths = filtered.map(p => p.path) + setSelectedPaths(prev => prev.filter(path => !filteredPaths.includes(path))) + } + + const getFilteredProjects = () => { + if (!detectedProjects) return [] + if (!searchQuery.trim()) return detectedProjects + + const query = searchQuery.toLowerCase() + return detectedProjects.filter(p => + p.name.toLowerCase().includes(query) || + p.path.toLowerCase().includes(query) || + p.types.some(t => t.toLowerCase().includes(query)) + ) + } + + const toggleProject = (path) => { + setError(null) // Clear error when user changes selection + setSelectedPaths(prev => { + if (prev.includes(path)) { + return prev.filter(p => p !== path) + } else { + return [...prev, path] + } + }) + } + + const handleSubmit = async () => { + if (selectedPaths.length === 0) { + setError('Please select at least one project to document') + return + } + + setSubmitting(true) + setError(null) + try { + await api.selectProjects(currentJob, selectedPaths) + onProjectsSelected() + } catch (err) { + console.error('Failed to submit project selection:', err) + // Extract error message from backend response + const errorMessage = err.response?.data?.detail || 'Failed to submit selection. Please try again.' + setError(errorMessage) + setSubmitting(false) + } + } + + if (!detectedProjects || detectedProjects.length === 0) { + return null + } + + const filteredProjects = getFilteredProjects() + const filteredSelectedCount = filteredProjects.filter(p => selectedPaths.includes(p.path)).length + + return ( +
+
+

+ 🔍 Multiple Projects Detected +

+

+ {detectedProjects.length} project(s) found in this repository. Select a project to generate a README file.

+
+ + {/* Error Banner */} + {error && ( +
+
+ +
+

{error}

+
+
+
+ )} + + {/* Search and Bulk Actions */} +
+ {/* Search Bar */} +
+ + setSearchQuery(e.target.value)} + className="w-full pl-10 pr-4 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-purple-500 focus:border-transparent" + /> +
+ + {/* Bulk Selection Buttons */} +
+
+ + +
+

+ {selectedPaths.length} of {detectedProjects.length} selected +

+
+
+ + {/* Project List */} +
+ {filteredProjects.length === 0 ? ( +
+ +

No projects match your search criteria.

+
+ ) : ( + filteredProjects.map((project) => { + const isSelected = selectedPaths.includes(project.path) + const projectTypes = project.types.join(', ') + const isRoot = project.path === '/' + + return ( +
toggleProject(project.path)} + className={` + flex items-start space-x-3 p-4 rounded-lg border-2 cursor-pointer transition-all + ${isSelected + ? 'bg-white border-purple-500 shadow-md' + : 'bg-gray-50 border-gray-200 hover:border-purple-300' + } + `} + > +
+ {isSelected ? ( + + ) : ( + + )} +
+ +
+
+ +

+ {project.name} + {isRoot && (Root)} +

+
+ +

+ Path: {project.path} +

+ +

+ Type: {projectTypes} +

+ +
+ {project.file_count} files + {project.dir_count} directories +
+ + {project.indicators && project.indicators.length > 0 && ( +
+
+ {project.indicators.map((indicator, i) => ( + + {indicator} + + ))} +
+
+ )} +
+
+ ) + }) + )} +
+ + {/* Skipped Folders Section */} + {skippedFolders && skippedFolders.length > 0 && ( +
+

+ Skipped Folders ({skippedFolders.length}) +

+

+ The following folders were not detected as code projects: +

+
    + {skippedFolders.map((folder, idx) => ( +
  • + • {folder.name} - {folder.reason} ({folder.details}) +
  • + ))} +
+
+ )} + +
+

+ {selectedPaths.length} project{selectedPaths.length !== 1 ? 's' : ''} selected +

+ + +
+
+ ) +} + +export default ProjectSelector diff --git a/sample_solutions/Docugen-Microagents/ui/src/components/ResultsViewer.jsx b/sample_solutions/Docugen-Microagents/ui/src/components/ResultsViewer.jsx new file mode 100644 index 00000000..b85f979b --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/components/ResultsViewer.jsx @@ -0,0 +1,290 @@ +import { useState, useEffect } from 'react' +import { Download, Copy, Check, FileText, Loader2, AlertCircle, GitPullRequest, ExternalLink } from 'lucide-react' +import ReactMarkdown from 'react-markdown' +import remarkGfm from 'remark-gfm' +import rehypeHighlight from 'rehype-highlight' +import Mermaid from './Mermaid' +import { api } from '../services/api' + +// Import GitHub markdown CSS and syntax highlighting theme +import 'github-markdown-css/github-markdown.css' +import 'highlight.js/styles/github.css' + +function ResultsViewer({ readme, onReadmeGenerated, currentJob, workflowStatus }) { + const [copied, setCopied] = useState(false) + const [markdownContent, setMarkdownContent] = useState(null) + const [projectTitle, setProjectTitle] = useState(null) + const [creatingPR, setCreatingPR] = useState(false) + const [prUrl, setPrUrl] = useState(null) + const [prError, setPrError] = useState(null) + + // Helper function to convert title to proper case + const toTitleCase = (str) => { + return str + .split(/[-_\s]+/) + .map(word => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase()) + .join(' ') + } + + // Extract project title from markdown content + useEffect(() => { + if (markdownContent) { + const lines = markdownContent.split('\n') + // Look for H2 (## Title) in first 15 lines + for (let i = 0; i < Math.min(15, lines.length); i++) { + const line = lines[i].trim() + if (line.startsWith('## ')) { + const title = line.substring(3).trim() + setProjectTitle(toTitleCase(title)) + break + } else if (line.startsWith('# ') && !line.startsWith('## ')) { + const title = line.substring(2).trim() + 
setProjectTitle(toTitleCase(title)) + break + } + } + } + }, [markdownContent]) + + // Fetch final README when workflow completes + useEffect(() => { + console.log('[ResultsViewer] useEffect triggered:', { workflowStatus, currentJob, hasMarkdown: !!markdownContent }) + if (workflowStatus === 'completed' && currentJob && !markdownContent) { + console.log('[ResultsViewer] Conditions met - starting fetch') + const fetchReadme = async () => { + try { + console.log('Fetching job status for:', currentJob) + const jobStatus = await api.getJobStatus(currentJob) + console.log('Job status:', jobStatus) + + // Check if README is ready in job storage + if (jobStatus.readme_preview || jobStatus.status === 'completed') { + // Try to download the full README + try { + const readmeBlob = await api.downloadReadme(currentJob) + const readmeText = await readmeBlob.text() + console.log('Downloaded README:', readmeText.substring(0, 100)) + setMarkdownContent(readmeText) + if (onReadmeGenerated) { + onReadmeGenerated(readmeText) + } + } catch (err) { + console.warn('Could not download README, using preview:', err) + // Fallback to preview if download fails + if (jobStatus.readme_preview) { + setMarkdownContent(jobStatus.readme_preview) + if (onReadmeGenerated) { + onReadmeGenerated(jobStatus.readme_preview) + } + } + } + } + } catch (error) { + console.error('Failed to fetch README:', error) + } + } + + // Small delay to let backend finish processing + const timer = setTimeout(fetchReadme, 1000) + return () => clearTimeout(timer) + } + }, [workflowStatus, currentJob, markdownContent]) + + const handleCopy = async () => { + if (markdownContent) { + await navigator.clipboard.writeText(markdownContent) + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } + } + + const handleDownload = () => { + if (markdownContent) { + const blob = new Blob([markdownContent], { type: 'text/markdown' }) + const url = URL.createObjectURL(blob) + const a = document.createElement('a') + a.href = 
url + a.download = 'README.md' + document.body.appendChild(a) + a.click() + document.body.removeChild(a) + URL.revokeObjectURL(url) + } + } + + const handleCreatePR = async () => { + if (!currentJob) return + + setCreatingPR(true) + setPrError(null) + + try { + const result = await api.createPullRequest(currentJob) + + if (result.status === 'success') { + setPrUrl(result.pr_url) + console.log('PR created successfully:', result.pr_url) + } else { + setPrError(result.message || 'Failed to create PR') + } + } catch (error) { + console.error('Failed to create PR:', error) + setPrError(error.response?.data?.detail || error.message || 'Failed to create pull request') + } finally { + setCreatingPR(false) + } + } + + // Only render the component when README is completed and ready + if (workflowStatus !== 'completed' || !markdownContent) { + return null + } + + return ( +
+ {/* Header */} +
+
+ +
+

README

+ {projectTitle && ( +

{projectTitle}

+ )} +
+
+ +
+ + + + {/* Create PR Button */} + {prUrl ? ( + + + View PR + + + ) : ( + + )} +
+
+ + {/* Content */} +
+ {/* Markdown Content with GitHub styling */} +
+ + +
+ ) + } + + // Regular code blocks + return ( + + {children} + + ) + } + }} + > + {markdownContent} + +
+
+ + {/* Footer Info */} + {markdownContent && ( +
+
+ + Lines: {markdownContent.split('\n').length} + + + Characters: {markdownContent.length} + + + Words: {markdownContent.split(/\s+/).length} + +
+
+ )} + + {/* PR Error Display */} + {prError && ( +
+ +
+

Failed to Create Pull Request

+

{prError}

+ {prError.includes('GITHUB_TOKEN') && ( +

+ Make sure GITHUB_TOKEN is configured in your backend environment variables. +

+ )} +
+ +
+ )} +
+ ) +} + +export default ResultsViewer diff --git a/sample_solutions/Docugen-Microagents/ui/src/index.css b/sample_solutions/Docugen-Microagents/ui/src/index.css new file mode 100644 index 00000000..abd21268 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/index.css @@ -0,0 +1,156 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +body { + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', + 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', + sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +#root { + min-height: 100vh; +} + +.scrollbar-hide { + -ms-overflow-style: none; + scrollbar-width: none; +} + +.scrollbar-hide::-webkit-scrollbar { + display: none; +} + +/* GitHub Markdown Styling Overrides */ +.markdown-body { + box-sizing: border-box; + width: 100%; + max-width: 100%; + margin: 0; + padding: 0; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Helvetica Neue', sans-serif; + font-size: 16px; + line-height: 1.6; + color: #24292f !important; /* Force dark text */ + word-wrap: break-word; + overflow-wrap: break-word; +} + +/* Ensure all headings are visible */ +.markdown-body h1, +.markdown-body h2, +.markdown-body h3, +.markdown-body h4, +.markdown-body h5, +.markdown-body h6 { + color: #24292f !important; + font-weight: 600 !important; + margin-top: 24px !important; + margin-bottom: 16px !important; +} + +/* Ensure paragraphs and lists are visible */ +.markdown-body p, +.markdown-body li, +.markdown-body span, +.markdown-body div { + color: #24292f !important; + max-width: 100%; + word-wrap: break-word; + overflow-wrap: break-word; +} + +/* Prevent list overflow */ +.markdown-body ul, +.markdown-body ol { + max-width: 100%; + word-wrap: break-word; + overflow-wrap: break-word; + padding-left: 2em; +} + +/* Links */ +.markdown-body a { + color: #0969da !important; + text-decoration: none; +} + 
+.markdown-body a:hover { + text-decoration: underline; +} + +/* Syntax highlighting for code blocks */ +.markdown-body pre { + background-color: #f6f8fa !important; + padding: 16px !important; + border-radius: 6px !important; + overflow-x: auto !important; + margin: 16px 0 !important; + max-width: 100%; + white-space: pre-wrap; + word-wrap: break-word; +} + +.markdown-body pre code { + background-color: transparent !important; + padding: 0 !important; + color: #24292f !important; + white-space: pre-wrap; + word-wrap: break-word; +} + +/* Inline code */ +.markdown-body code { + background-color: rgba(175, 184, 193, 0.2) !important; + padding: 0.2em 0.4em !important; + border-radius: 6px !important; + font-family: 'SFMono-Regular', 'Consolas', 'Liberation Mono', 'Menlo', monospace !important; + font-size: 85% !important; + color: #24292f !important; + word-wrap: break-word; + overflow-wrap: break-word; +} + +/* Blockquotes */ +.markdown-body blockquote { + padding: 0 1em !important; + color: #57606a !important; + border-left: 0.25em solid #d0d7de !important; + margin: 16px 0 !important; +} + +/* Tables */ +.markdown-body table { + border-collapse: collapse; + width: 100%; + margin: 16px 0; +} + +.markdown-body th, +.markdown-body td { + padding: 6px 13px; + border: 1px solid #d0d7de; + color: #24292f !important; +} + +.markdown-body th { + background-color: #f6f8fa; + font-weight: 600; +} + +/* Horizontal rule */ +.markdown-body hr { + height: 0.25em; + padding: 0; + margin: 24px 0; + background-color: #d0d7de; + border: 0; +} + +/* Task lists */ +.markdown-body input[type="checkbox"] { + margin-right: 0.5em; +} diff --git a/sample_solutions/Docugen-Microagents/ui/src/main.jsx b/sample_solutions/Docugen-Microagents/ui/src/main.jsx new file mode 100644 index 00000000..54b39dd1 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/main.jsx @@ -0,0 +1,10 @@ +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App.jsx' 
+import './index.css' + +ReactDOM.createRoot(document.getElementById('root')).render( + + + , +) diff --git a/sample_solutions/Docugen-Microagents/ui/src/services/api.js b/sample_solutions/Docugen-Microagents/ui/src/services/api.js new file mode 100644 index 00000000..a339f53a --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/src/services/api.js @@ -0,0 +1,112 @@ +import axios from 'axios' + +// Use environment variable or default to /api for production (nginx proxy) +// In development: http://localhost:5001/api +// In production Docker: /api (proxied by nginx) +const API_BASE_URL = import.meta.env.VITE_API_URL || '/api' + +export const api = { + /** + * Start documentation generation workflow + * @param {string} repoUrl - GitHub repository URL + * @returns {Promise<{job_id: string, status: string}>} + */ + generateDocs: async (repoUrl) => { + const response = await axios.post(`${API_BASE_URL}/generate-docs`, { + repo_url: repoUrl + }) + return response.data + }, + + /** + * Get job status + * @param {string} jobId - Job ID + * @returns {Promise} + */ + getJobStatus: async (jobId) => { + const response = await axios.get(`${API_BASE_URL}/status/${jobId}`) + return response.data + }, + + /** + * Connect to SSE stream for real-time logs + * @param {string} jobId - Job ID + * @param {function} onMessage - Callback for log messages + * @param {function} onError - Callback for errors + * @returns {EventSource} SSE connection + */ + connectToLogs: (jobId, onMessage, onError) => { + const eventSource = new EventSource(`${API_BASE_URL}/logs/${jobId}`) + + eventSource.onmessage = (event) => { + try { + const data = JSON.parse(event.data) + onMessage(data) + } catch (error) { + console.error('Failed to parse SSE message:', error) + } + } + + eventSource.onerror = (error) => { + console.error('SSE connection error:', error) + if (onError) onError(error) + eventSource.close() + } + + return eventSource + }, + + /** + * Approve agent output and continue workflow + * 
@param {string} jobId - Job ID + */ + approveStep: async (jobId) => { + const response = await axios.post(`${API_BASE_URL}/approve/${jobId}`) + return response.data + }, + + /** + * Reject agent output and retry with feedback + * @param {string} jobId - Job ID + * @param {string} feedback - Feedback for retry + */ + rejectStep: async (jobId, feedback) => { + const response = await axios.post(`${API_BASE_URL}/reject/${jobId}`, { + feedback + }) + return response.data + }, + + /** + * Download generated README + * @param {string} jobId - Job ID + */ + downloadReadme: async (jobId) => { + const response = await axios.get(`${API_BASE_URL}/download/${jobId}`, { + responseType: 'blob' + }) + return response.data + }, + + /** + * Submit project selection + * @param {string} jobId - Job ID + * @param {string[]} selectedProjectPaths - Array of selected project paths + */ + selectProjects: async (jobId, selectedProjectPaths) => { + const response = await axios.post(`${API_BASE_URL}/select-projects/${jobId}`, { + selected_project_paths: selectedProjectPaths + }) + return response.data + }, + + /** + * Create GitHub Pull Request with generated README + * @param {string} jobId - Job ID + * @returns {Promise<{status: string, message: string, pr_url?: string}>} + */ + createPullRequest: async (jobId) => { + const response = await axios.post(`${API_BASE_URL}/create-pr/${jobId}`) + return response.data + } +} diff --git a/sample_solutions/Docugen-Microagents/ui/tailwind.config.js b/sample_solutions/Docugen-Microagents/ui/tailwind.config.js new file mode 100644 index 00000000..dca8ba02 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/tailwind.config.js @@ -0,0 +1,11 @@ +/** @type {import('tailwindcss').Config} */ +export default { + content: [ + "./index.html", + "./src/**/*.{js,ts,jsx,tsx}", + ], + theme: { + extend: {}, + }, + plugins: [], +} diff --git a/sample_solutions/Docugen-Microagents/ui/vite.config.js b/sample_solutions/Docugen-Microagents/ui/vite.config.js new 
file mode 100644 index 00000000..a36a5e79 --- /dev/null +++ b/sample_solutions/Docugen-Microagents/ui/vite.config.js @@ -0,0 +1,17 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react' + +export default defineConfig({ + plugins: [react()], + server: { + host: true, + port: 3000, + proxy: { + '/api': { + // Use 'backend' hostname in Docker, 'localhost' when running locally + target: process.env.VITE_API_TARGET || 'http://localhost:5001', + changeOrigin: true + } + } + } +})