diff --git a/skills/technical-change/LICENSE.txt b/skills/technical-change/LICENSE.txt new file mode 100644 index 000000000..0625970da --- /dev/null +++ b/skills/technical-change/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 Elkidogz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/skills/technical-change/SKILL.md b/skills/technical-change/SKILL.md new file mode 100644 index 000000000..c21271b29 --- /dev/null +++ b/skills/technical-change/SKILL.md @@ -0,0 +1,352 @@ +--- +name: tc +description: | + Technical Change tracking skill. Use when user says /tc, /tc init, /tc create, /tc update, /tc status, /tc resume, /tc close, /tc export, /tc dashboard, or /tc retro. Also auto-runs at session start to check for TC initialization and active TCs. Tracks code changes with structured JSON records and accessible HTML output for AI session continuity. 
+user-invocable: true +tools: Read, Write, Edit, Glob, Grep, Bash +--- + +# /tc — Technical Change Tracker + +Track every code change with structured JSON records and accessible HTML output. +Ensures AI bot sessions can resume seamlessly when previous sessions expire or are abandoned. +Designed for deployment across multiple projects. + +## First-Use Detection (MANDATORY — Every Session) + +At the start of EVERY session, before doing any work: + +1. Check if `docs/TC/tc_config.json` exists in the current working directory +2. **If it EXISTS**: follow the Session Start Protocol in the `/tc resume` section +3. **If it does NOT exist**: prompt the user: + > TC tracking is not initialized in this project. Would you like to set it up? + > This enables structured change tracking, AI session handoff, and HTML documentation. + > Run `/tc init` to get started. +4. Wait for the user's response. If they agree, run `/tc init`. +5. If the user declines, continue without TC tracking for this session. + +A global skill is installed at `~/.claude/skills/tc.md` to ensure this check runs +in every project, even those that haven't been initialized yet. 
+ +## Overview + +Each Technical Change (TC) is a structured record that documents: +- **What** changed (files, code, configuration) +- **Why** it changed (motivation, scope, design decisions) +- **Who** changed it (human or AI bot session) +- **When** it changed (revision history with timestamps) +- **How it was tested** (test cases with evidence from logs) +- **Where work stands** (session handoff data for bot continuity) + +### Storage Location +Each project stores TCs at `{project_root}/docs/TC/`: +``` +docs/TC/ +├── tc_config.json # Project settings +├── tc_registry.json # Master index +├── index.html # Dashboard +├── records/ +│ └── TC-001-MM-DD-YY-name/ +│ ├── tc_record.json # System of record +│ └── tc_record.html # Human-readable +└── evidence/ + └── TC-001/ # Log snippets, screenshots +``` + +### TC Naming Convention +- **Parent TC**: `TC-NNN-MM-DD-YY-functionality-slug` (e.g., `TC-001-04-03-26-user-authentication`) +- **Sub-TC**: `TC-NNN.A` or `TC-NNN.A.1` (letter = revision, number = sub-revision) +- NNN = sequential number, MM-DD-YY = creation date, slug = kebab-case functionality name + +### Implementation States +``` +planned → in_progress → implemented → tested → deployed + │ │ │ │ │ + └→ blocked ←┘ └→ in_progress ←──────┘ + │ (rework/hotfix) + └→ planned +``` + +--- + +## Commands + +### /tc init +Initialize TC tracking in the current project. Run this once per project. + +**Steps:** +1. Check if `docs/TC/tc_config.json` exists. If yes, report "Already initialized" with current stats and stop. +2. Detect project name: try CLAUDE.md first heading, then package.json name, then pyproject.toml name, then directory basename. Confirm with user. +3. Create directories: `docs/TC/`, `docs/TC/records/`, `docs/TC/evidence/` +4. 
Create `tc_config.json`:
   ```json
   {
     "project_name": "",
     "tc_root": "docs/TC",
     "created": "",
     "skills_library_path": "",
     "auto_track": true,
     "auto_regenerate_html": true,
     "auto_regenerate_dashboard": true,
     "default_author": "Claude",
     "categories": ["feature","bugfix","refactor","infrastructure","documentation","hotfix","enhancement"]
   }
   ```
5. Create `tc_registry.json`:
   ```json
   {
     "project_name": "",
     "created": "",
     "updated": "",
     "next_tc_number": 1,
     "records": [],
     "statistics": {
       "total": 0,
       "by_status": {"planned":0,"in_progress":0,"blocked":0,"implemented":0,"tested":0,"deployed":0},
       "by_scope": {"feature":0,"bugfix":0,"refactor":0,"infrastructure":0,"documentation":0,"hotfix":0,"enhancement":0},
       "by_priority": {"critical":0,"high":0,"medium":0,"low":0}
     }
   }
   ```
6. Generate empty dashboard: run `python "{skills_library_path}/generators/generate_dashboard.py" "docs/TC/tc_registry.json"`
7. Update CLAUDE.md: read existing file (or create new). Check for marker `## Technical Change (TC) Tracking (MANDATORY)`. If not found, append the contents of `init/claude_md_snippet.md` with `{skills_library_path}` replaced with the actual absolute path.
8. Update `.claude/settings.local.json`: read existing file (or create `{"permissions":{"allow":[]}}`). Merge TC permissions from `init/settings_template.json` (with paths substituted). Deduplicate. Write back.
9. Report all created/updated files. Suggest `/tc create` as next step.

### /tc create
Create a new TC record.

**Steps:**
1. Read `docs/TC/tc_registry.json`
2. Generate TC ID: `TC-{next_tc_number:03d}-{MM-DD-YY}-{slugify(name)}`
3. Ask user for:
   - Title (default: formatted version of the slug)
   - Scope: feature, bugfix, refactor, infrastructure, documentation, hotfix, enhancement
   - Priority: critical, high, medium, low (default: medium)
   - Summary (at least 10 characters)
   - Motivation (why is this change needed?)
4. 
Create directory: `docs/TC/records/TC-NNN-MM-DD-YY-slug/`
5. Create `tc_record.json` with all fields initialized:
   - status = "planned"
   - revision_history = [R1 creation event]
   - session_context.current_session populated with this session's info
   - All arrays initialized to []
   - approval.approved = false, test_coverage_status = "none"
6. Add entry to tc_registry.json records array. Increment next_tc_number. Recompute statistics.
7. Generate HTML: run the tc_record HTML generator
8. Regenerate dashboard: run the dashboard generator
9. Report: display TC ID, link to HTML, suggest next steps

### /tc update
Update an existing TC record. This is the general-purpose update command.

**Steps:**
1. Read the TC record from `docs/TC/records/<tc-id>/tc_record.json`
2. Determine what to update (user may specify, or you determine from context):
   - **Status change**: validate transition with state machine. Ask for reason.
   - **Add files**: append to files_affected array
   - **Add test case**: create new test entry with sequential ID (T1, T2...)
   - **Update test result**: set actual_result, status, evidence, tested_by, tested_date
   - **Add evidence**: append to a test case's evidence array
   - **Update handoff**: update session_context.handoff fields
   - **Add notes**: append to notes field
   - **Add sub-TC**: append to sub_tcs array
3. For EVERY change:
   - Append a new revision entry to revision_history (sequential R-id, timestamp, author, summary, field_changes with old/new values and reason)
   - Update the `updated` timestamp
   - Update `metadata.last_modified` and `metadata.last_modified_by`
   - Update `session_context.current_session.last_active`
4. Write tc_record.json (atomic: write to .tmp, then rename)
5. Update tc_registry.json (sync status, scope, priority, updated, test_summary). Recompute statistics.
6. If auto_regenerate_html: regenerate TC HTML
7. 
If status changed and auto_regenerate_dashboard: regenerate dashboard + +### /tc status [tc-id] +View TC status. + +**Without tc-id**: Read tc_registry.json and display a summary table of all TCs: +- TC ID, Title, Status (with badge), Scope, Priority, Tests (pass/total), Last Updated + +**With tc-id**: Read the specific TC record and display: +- Full status including handoff data, test results, revision count, files affected +- Any validation errors + +### /tc resume +Resume work on a TC from a previous session. + +**Steps:** +1. Read the TC record +2. Display the handoff section prominently: + - Progress summary + - Next steps (numbered) + - Blockers (highlighted) + - Key context + - Files in progress with their states + - Recent decisions +3. Archive the current session to session_history: + - Move current_session data to a new entry in session_history + - Set ended = now +4. Create new current_session with this session's info +5. Append revision entry: "Session resumed by [platform/model]" +6. Write tc_record.json +7. Prompt: "Ready to continue. Here are the next steps: [list from handoff]" + +### /tc close +Close a TC by transitioning it to deployed. + +**Steps:** +1. Read the TC record +2. Validate current status allows transition to `deployed` +3. Check all test cases — warn if any are pending/fail/blocked +4. Ask for: + - Approval: who is approving? (user name or "self") + - Approval notes (optional) + - Final test coverage assessment (none/partial/full) +5. Update: + - status = "deployed" + - approval.approved = true + - approval.approved_by, approved_date, approval_notes, test_coverage_status + - Append final revision entry + - Archive session to session_history +6. Write tc_record.json and update registry +7. Regenerate HTML and dashboard +8. Report: "TC-NNN closed and deployed." + +### /tc export +Regenerate ALL HTML files from their JSON records. + +**Steps:** +1. Read tc_registry.json +2. 
For each record: run the TC HTML generator on its tc_record.json +3. Run the dashboard generator +4. Report: "Regenerated X TC pages and dashboard." + +### /tc dashboard +Regenerate just the dashboard index.html. + +**Steps:** +1. Run the dashboard generator on tc_registry.json +2. Report path to generated index.html + +### /tc retro +Retroactively create TC records in bulk from a structured changelog file. +Use this when onboarding an existing project with extensive undocumented history. + +**Steps:** +1. Read the retro_changelog.json file (must match `schemas/tc_retro_changelog.schema.json`) +2. Validate the changelog structure +3. Run the batch generator: + ```bash + python "{skills_library_path}/generators/generate_retro_tcs.py" "" "docs/TC" + ``` +4. The generator will: + - Create a TC record for each entry (TC-001 through TC-NNN) + - Validate every record against the schema + - Generate HTML for every record + - Update the registry with all entries + - Regenerate the dashboard +5. Report: total created, any errors, link to dashboard + +**Retro Changelog Format** (`retro_changelog.json`): +```json +{ + "project": "Project Name", + "default_author": "retroactive", + "changes": [ + { + "title": "Feature or Change Title", + "scope": "feature|bugfix|refactor|infrastructure|documentation|hotfix|enhancement", + "priority": "critical|high|medium|low", + "status": "deployed", + "date": "YYYY-MM-DD", + "description": "What changed and why (10+ chars)", + "motivation": "Why this change was needed (optional)", + "files": ["path/to/file.py", "path/to/other.py"], + "tags": ["tag1", "tag2"], + "version": "v1.0.0" + } + ] +} +``` + +**Building the changelog**: Claude should analyze the project's git history, docs, changelogs, +README, and code to build the retro_changelog.json. Group related changes into single TCs. +Each TC should represent one logical unit of work (a feature, a fix, a refactor). 
+ +--- + +## Auto-Detection Rules — Non-Blocking Subagent Pattern + +TC tracking MUST NOT interrupt the main workflow. Use background subagents for all bookkeeping. + +### During Work +- **NEVER stop to update TC records inline.** Focus entirely on the task. +- Do not read/write TC files between code changes. +- The main agent's job is to code, not to do paperwork. + +### At Natural Milestones +When a logical unit of work is complete (feature done, test passing, stopping point): +- Spawn a **background Agent** (run_in_background=true) with this prompt: + "Read docs/TC/tc_registry.json. Find the in_progress TC. Read its tc_record.json. Update files_affected with [list files changed]. Append a revision entry summarizing what was done. Update session_context.current_session.last_active. Write the updated record. Regenerate the TC HTML and dashboard." +- The main agent continues working without waiting. + +### Only Surface Questions When Genuinely Needed +- "This work doesn't match any active TC — should I create one?" (ask once per session, not per file) +- "TC-NNN looks complete — transition to implemented?" (at milestones only, don't nag) +- Never interrupt the user for routine TC bookkeeping. + +### At Session End +Before the session closes, spawn a final background Agent to write the handoff summary: +- progress_summary: what was accomplished +- next_steps: what still needs doing +- blockers: anything preventing progress +- key_context: important decisions, gotchas, patterns the next bot needs +- files_in_progress: which files are mid-edit + +### On Session Start +1. Check if `docs/TC/` exists in the project +2. If yes: read tc_registry.json, find in_progress/blocked TCs +3. Display handoff summary for any active TCs +4. Ask user if they want to resume + +--- + +## Validation Rules (Always Enforced) + +1. **State machine**: only valid transitions allowed (see diagram above) +2. **Sequential IDs**: revision_history uses R1,R2,R3...; test_cases uses T1,T2,T3... +3. 
**Append-only history**: revision_history entries are never modified or deleted +4. **Approval consistency**: approved=true requires approved_by and approved_date +5. **TC ID format**: must match `TC-NNN-MM-DD-YY-slug` pattern +6. **Sub-TC ID format**: must match `TC-NNN.A` or `TC-NNN.A.N` pattern +7. **HTML escaping**: all user data is escaped before HTML rendering +8. **Atomic writes**: JSON files written to .tmp then renamed +9. **Registry stats**: recomputed on every registry write + +--- + +## Python Generators + +Located at `{skills_library_path}/generators/`: + +```bash +# Generate individual TC HTML +python "generators/generate_tc_html.py" "" [--output ] + +# Generate dashboard +python "generators/generate_dashboard.py" "" [--output ] + +# Validate a TC record +python "validators/validate_tc.py" "" + +# Validate the registry +python "validators/validate_tc.py" --registry "" + +# Retroactive batch creation +python "generators/generate_retro_tcs.py" "" "" +``` + +All generators use Python stdlib only — no external dependencies. +All generators validate their input before producing output. +All HTML output is self-contained with inlined CSS (works from file:// URLs). +All HTML output is WCAG AA+ accessible with rem-based fonts, high contrast dark theme, skip links, and aria labels. diff --git a/skills/technical-change/scripts/generate_dashboard.py b/skills/technical-change/scripts/generate_dashboard.py new file mode 100644 index 000000000..e5507dc52 --- /dev/null +++ b/skills/technical-change/scripts/generate_dashboard.py @@ -0,0 +1,379 @@ +#!/usr/bin/env python3 +"""TC Dashboard HTML Generator — Converts tc_registry.json into a dashboard page. + +Reads the TC registry and optionally all individual TC records to build an +interactive (CSS-only) dashboard with status metrics, filters, and activity feed. + +Usage: + python generate_dashboard.py [--output ] + +If --output is not specified, writes index.html in the same directory as the registry. 
+ +Exit codes: + 0 = SUCCESS + 1 = VALIDATION ERRORS + 2 = FILE NOT FOUND or other errors +""" + +from __future__ import annotations + +import json +import sys +from datetime import datetime, timezone +from html import escape +from pathlib import Path + +# Add parent dir to path for validator import +_SKILL_ROOT = Path(__file__).resolve().parent.parent +sys.path.insert(0, str(_SKILL_ROOT / "validators")) + +from validate_tc import validate_registry, VALID_STATUSES, VALID_SCOPES # noqa: E402 + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _esc(value) -> str: + if value is None: + return "—" + return escape(str(value)) + + +def _format_datetime(iso_str: str | None) -> str: + if not iso_str: + return "—" + try: + dt = datetime.fromisoformat(iso_str) + return dt.strftime("%Y-%m-%d %H:%M").strip() + except (ValueError, TypeError): + return _esc(iso_str) + + +def _relative_time(iso_str: str | None) -> str: + """Return a human-readable relative time string.""" + if not iso_str: + return "—" + try: + dt = datetime.fromisoformat(iso_str) + now = datetime.now(timezone.utc) + if dt.tzinfo is None: + dt = dt.replace(tzinfo=timezone.utc) + diff = now - dt + seconds = int(diff.total_seconds()) + if seconds < 60: + return "just now" + if seconds < 3600: + m = seconds // 60 + return f"{m}m ago" + if seconds < 86400: + h = seconds // 3600 + return f"{h}h ago" + d = seconds // 86400 + if d == 1: + return "yesterday" + if d < 30: + return f"{d}d ago" + return _format_datetime(iso_str) + except (ValueError, TypeError): + return _esc(iso_str) + + +def _status_display(status: str) -> str: + return status.replace("_", " ").title() + + +def _load_css() -> str: + css_path = _SKILL_ROOT / "templates" / "tc_styles.css" + if css_path.exists(): + return css_path.read_text(encoding="utf-8") + return "body { font-family: sans-serif; background: #0d0d18; color: 
#e0dcd0; }" + + +# --------------------------------------------------------------------------- +# Section Builders +# --------------------------------------------------------------------------- + +def build_status_stats(stats: dict) -> str: + """Build the 6 status stat cards.""" + by_status = stats.get("by_status", {}) + parts = [] + for status in VALID_STATUSES: + count = by_status.get(status, 0) + parts.append( + f'
' + f'{count}' + f'{_status_display(status)}' + f'
' + ) + return "\n".join(parts) + + +def build_status_bar(stats: dict) -> tuple[str, str]: + """Build the status distribution bar. Returns (html, aria_label).""" + total = stats.get("total", 0) + if total == 0: + return '

No TCs yet.

', "No technical changes" + + by_status = stats.get("by_status", {}) + parts = [] + aria_parts = [] + + for status in VALID_STATUSES: + count = by_status.get(status, 0) + if count == 0: + continue + pct = (count / total) * 100 + label = _status_display(status) + parts.append( + f'
' + f'{count}' + f'
' + ) + aria_parts.append(f"{label}: {count} ({pct:.0f}%)") + + return "\n".join(parts), ", ".join(aria_parts) + + +def build_filter_radios() -> str: + """Build the CSS-only filter radio buttons.""" + parts = ['
'] + + # All filter (default checked) + parts.append('') + parts.append('') + + for status in VALID_STATUSES: + safe = _esc(status) + parts.append(f'') + parts.append(f'') + + parts.append('
') + return "\n".join(parts) + + +def build_tc_cards(records: list[dict], tc_root: Path | None = None) -> str: + """Build the TC card list.""" + if not records: + return '

No technical changes yet. Use /tc create to start tracking.

' + + # Sort by updated descending + sorted_records = sorted(records, key=lambda r: r.get("updated", ""), reverse=True) + + parts = [] + for rec in sorted_records: + tc_id = rec.get("tc_id", "") + status = rec.get("status", "planned") + scope = rec.get("scope", "feature") + priority = rec.get("priority", "medium") + title = rec.get("title", "") + path = rec.get("path", "") + + # Build link to tc_record.html + href = f"{path}/tc_record.html" if path else "#" + + # Test summary + ts = rec.get("test_summary", {}) + test_str = "" + if ts.get("total", 0) > 0: + test_str = f'Tests: {ts.get("pass", 0)}/{ts.get("total", 0)} pass' + + parts.append( + f'') + + return "\n".join(parts) + + +def build_activity_feed(tc_root: Path | None, records: list[dict]) -> str: + """Build the recent activity feed from revision histories.""" + if not tc_root or not records: + return '

No activity yet.

' + + # Collect recent revisions across all TCs + activities: list[tuple[str, str, str, str]] = [] # (timestamp, tc_id, revision_id, summary) + + for rec in records: + tc_id = rec.get("tc_id", "") + path = rec.get("path", "") + record_path = tc_root / path / "tc_record.json" + + if record_path.exists(): + try: + with open(record_path, "r", encoding="utf-8") as f: + full_record = json.load(f) + for rev in full_record.get("revision_history", []): + activities.append(( + rev.get("timestamp", ""), + tc_id, + rev.get("revision_id", ""), + rev.get("summary", ""), + )) + except (json.JSONDecodeError, OSError): + pass + + if not activities: + return '

No activity recorded yet.

' + + # Sort by timestamp descending, take last 10 + activities.sort(key=lambda x: x[0], reverse=True) + activities = activities[:10] + + parts = [] + for ts, tc_id, rev_id, summary in activities: + parts.append( + f'
' + f'{_relative_time(ts)}' + f'' + f'{_esc(tc_id)} {_esc(rev_id)}: {_esc(summary)}' + f'' + f'
' + ) + + return "\n".join(parts) + + +def build_scope_stats(stats: dict) -> str: + """Build scope breakdown stat cards.""" + by_scope = stats.get("by_scope", {}) + parts = [] + for scope in VALID_SCOPES: + count = by_scope.get(scope, 0) + if count > 0: + parts.append( + f'
' + f'{count}' + f'{_esc(scope)}' + f'
' + ) + if not parts: + return '

No scope data.

' + return "\n".join(parts) + + +# --------------------------------------------------------------------------- +# Main Generator +# --------------------------------------------------------------------------- + +def generate_dashboard_html(registry: dict, css: str, tc_root: Path | None = None) -> str: + """Generate the complete dashboard HTML.""" + project = registry.get("project_name", "Project") + total = registry.get("statistics", {}).get("total", 0) + records = registry.get("records", []) + stats = registry.get("statistics", {}) + + now_str = datetime.now(timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M %Z").strip() + + status_bar_html, status_bar_aria = build_status_bar(stats) + + # Read the template + template_path = _SKILL_ROOT / "templates" / "tc_dashboard_template.html" + if template_path.exists(): + template = template_path.read_text(encoding="utf-8") + else: + template = ( + '' + '{{PROJECT}} — TC Dashboard' + '
' + '{{STATUS_STATS}}{{TC_CARDS}}' + '
' + ) + + replacements = { + "{{CSS}}": css, + "{{PROJECT}}": _esc(project), + "{{TOTAL_TCS}}": str(total), + "{{LAST_UPDATED}}": now_str, + "{{STATUS_STATS}}": build_status_stats(stats), + "{{STATUS_BAR}}": status_bar_html, + "{{STATUS_BAR_ARIA}}": status_bar_aria, + "{{FILTER_RADIOS}}": build_filter_radios(), + "{{TC_CARDS}}": build_tc_cards(records, tc_root), + "{{ACTIVITY_FEED}}": build_activity_feed(tc_root, records), + "{{SCOPE_STATS}}": build_scope_stats(stats), + "{{GENERATED_DATE}}": now_str, + } + + html = template + for placeholder, value in replacements.items(): + html = html.replace(placeholder, value) + + return html + + +# --------------------------------------------------------------------------- +# CLI Entry Point +# --------------------------------------------------------------------------- + +def main() -> int: + """CLI entry point.""" + if len(sys.argv) < 2: + print("Usage: python generate_dashboard.py [--output ]") + return 2 + + input_path = Path(sys.argv[1]) + output_path = None + + if "--output" in sys.argv: + idx = sys.argv.index("--output") + if idx + 1 < len(sys.argv): + output_path = Path(sys.argv[idx + 1]) + + if not input_path.exists(): + print(f"ERROR: File not found: {input_path}") + return 2 + + try: + with open(input_path, "r", encoding="utf-8") as f: + registry = json.load(f) + except json.JSONDecodeError as e: + print(f"ERROR: Invalid JSON: {e}") + return 2 + + # Validate + errors = validate_registry(registry) + if errors: + print(f"VALIDATION ERRORS ({len(errors)}):") + for i, err in enumerate(errors, 1): + print(f" {i}. 
{err}") + return 1 + + # Generate + css = _load_css() + tc_root = input_path.parent + html = generate_dashboard_html(registry, css, tc_root) + + # Write output + if output_path is None: + output_path = input_path.parent / "index.html" + + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w", encoding="utf-8") as f: + f.write(html) + + print(f"Generated: {output_path}") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/skills/technical-change/scripts/generate_retro_tcs.py b/skills/technical-change/scripts/generate_retro_tcs.py new file mode 100644 index 000000000..6500f6417 --- /dev/null +++ b/skills/technical-change/scripts/generate_retro_tcs.py @@ -0,0 +1,472 @@ +#!/usr/bin/env python3 +"""Retroactive TC Batch Generator — Creates TC records from a structured changelog. + +Reads a retro_changelog.json file and batch-creates all TC records, validates them, +generates HTML pages, updates the registry, and regenerates the dashboard. + +Usage: + python generate_retro_tcs.py + + retro_changelog.json: Path to the changelog file matching tc_retro_changelog.schema.json + tc_root_dir: Path to the project's docs/TC/ directory (must already be initialized) + +Example: + python generate_retro_tcs.py retro_changelog.json "/path/to/project/docs/TC" + +Exit codes: + 0 = SUCCESS + 1 = VALIDATION ERRORS + 2 = FILE NOT FOUND or other errors +""" + +from __future__ import annotations + +import json +import os +import re +import sys +from datetime import datetime, timezone +from pathlib import Path + +# Add parent dirs for imports +_SKILL_ROOT = Path(__file__).resolve().parent.parent +sys.path.insert(0, str(_SKILL_ROOT / "validators")) +sys.path.insert(0, str(_SKILL_ROOT / "generators")) + +from validate_tc import ( # noqa: E402 + validate_tc_record, + validate_registry, + compute_registry_statistics, + slugify, + VALID_STATUSES, + VALID_SCOPES, + VALID_PRIORITIES, +) + + +# 
--------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _now_iso() -> str: + """Return current time as ISO 8601 string with timezone.""" + return datetime.now(timezone.utc).astimezone().isoformat() + + +def _date_to_iso(date_str: str | None, fallback: str | None = None) -> str: + """Convert YYYY-MM-DD to ISO 8601 datetime string.""" + if not date_str: + return fallback or _now_iso() + try: + dt = datetime.strptime(date_str, "%Y-%m-%d") + dt = dt.replace(hour=12, tzinfo=timezone.utc).astimezone() + return dt.isoformat() + except ValueError: + return fallback or _now_iso() + + +def _date_to_mmddyy(date_str: str | None) -> str: + """Convert YYYY-MM-DD to MM-DD-YY format for TC IDs.""" + if not date_str: + now = datetime.now() + return now.strftime("%m-%d-%y") + try: + dt = datetime.strptime(date_str, "%Y-%m-%d") + return dt.strftime("%m-%d-%y") + except ValueError: + now = datetime.now() + return now.strftime("%m-%d-%y") + + +def _validate_changelog(changelog: dict) -> list[str]: + """Validate the retro changelog structure.""" + errors: list[str] = [] + + if not isinstance(changelog, dict): + return ["Changelog must be a JSON object"] + + if "project" not in changelog: + errors.append("Missing required field: 'project'") + if "changes" not in changelog: + errors.append("Missing required field: 'changes'") + return errors + + changes = changelog["changes"] + if not isinstance(changes, list): + errors.append("'changes' must be an array") + return errors + + if len(changes) == 0: + errors.append("'changes' must have at least 1 entry") + + for i, change in enumerate(changes): + prefix = f"changes[{i}]" + if not isinstance(change, dict): + errors.append(f"{prefix} must be an object") + continue + + # Required fields + for field in ("title", "scope", "description"): + if field not in change: + errors.append(f"{prefix} missing required field: '{field}'") + + 
if "scope" in change and change["scope"] not in VALID_SCOPES: + errors.append(f"{prefix}.scope '{change['scope']}' invalid") + + if "priority" in change and change["priority"] not in VALID_PRIORITIES: + errors.append(f"{prefix}.priority '{change['priority']}' invalid") + + if "status" in change and change["status"] not in VALID_STATUSES: + errors.append(f"{prefix}.status '{change['status']}' invalid") + + if "title" in change and isinstance(change["title"], str): + if len(change["title"]) < 5: + errors.append(f"{prefix}.title must be at least 5 characters") + + if "description" in change and isinstance(change["description"], str): + if len(change["description"]) < 10: + errors.append(f"{prefix}.description must be at least 10 characters") + + return errors + + +# --------------------------------------------------------------------------- +# TC Record Builder +# --------------------------------------------------------------------------- + +def build_tc_record( + tc_number: int, + change: dict, + project: str, + author: str, +) -> dict: + """Build a complete tc_record.json from a changelog entry.""" + date_str = change.get("date") + mmddyy = _date_to_mmddyy(date_str) + title_slug = slugify(change["title"]) + + # Truncate slug to keep ID reasonable + if len(title_slug) > 60: + title_slug = title_slug[:60].rstrip("-") + + tc_id = f"TC-{tc_number:03d}-{mmddyy}-{title_slug}" + iso_date = _date_to_iso(date_str) + now = _now_iso() + + status = change.get("status", "deployed") + priority = change.get("priority", "medium") + scope = change["scope"] + description_text = change["description"] + motivation = change.get("motivation") or description_text + detailed_design = change.get("detailed_design") + + # Build files_affected from the files list + files_affected = [] + for f in change.get("files", []): + files_affected.append({ + "path": f.replace("\\", "/"), + "action": "modified", + "description": None, + "lines_added": None, + "lines_removed": None, + }) + + # Build the 
record + record = { + "tc_id": tc_id, + "parent_tc": None, + "title": change["title"], + "status": status, + "priority": priority, + "created": iso_date, + "updated": now, + "created_by": author, + "project": project, + "description": { + "summary": description_text, + "motivation": motivation, + "scope": scope, + "detailed_design": detailed_design, + "breaking_changes": change.get("breaking_changes", []), + "dependencies": change.get("dependencies", []), + }, + "files_affected": files_affected, + "revision_history": [ + { + "revision_id": "R1", + "timestamp": iso_date, + "author": author, + "summary": f"Retroactive TC creation — {change['title']}", + "field_changes": [ + { + "field": "status", + "action": "set", + "new_value": status, + "reason": "Retroactive documentation of existing change", + } + ], + } + ], + "sub_tcs": [], + "test_cases": [], + "approval": { + "approved": status == "deployed", + "approved_by": author if status == "deployed" else None, + "approved_date": now if status == "deployed" else None, + "approval_notes": "Retroactive approval — change was already in production" if status == "deployed" else "", + "test_coverage_status": "none", + }, + "session_context": { + "current_session": { + "session_id": "retro-batch-generation", + "platform": "claude_code", + "model": "batch-generator", + "started": now, + "last_active": now, + }, + "handoff": { + "progress_summary": f"Retroactive TC created for: {change['title']}", + "next_steps": [], + "blockers": [], + "key_context": ["This TC was created retroactively from project history"], + "files_in_progress": [], + "decisions_made": [], + }, + "session_history": [], + }, + "tags": change.get("tags", []), + "related_tcs": [], + "notes": f"Retroactively documented.{' Version: ' + change['version'] if change.get('version') else ''}", + "metadata": { + "project": project, + "created_by": author, + "last_modified_by": author, + "last_modified": now, + "estimated_effort": None, + }, + } + + return record + + 
+# --------------------------------------------------------------------------- +# Batch Processor +# --------------------------------------------------------------------------- + +def process_retro_changelog(changelog: dict, tc_root: Path) -> tuple[int, int, list[str]]: + """Process the entire retro changelog. + + Returns: (created_count, error_count, error_messages) + """ + project = changelog["project"] + author = changelog.get("default_author", "retroactive") + changes = changelog["changes"] + + # Read existing registry + registry_path = tc_root / "tc_registry.json" + if not registry_path.exists(): + return 0, 0, ["tc_registry.json not found — run /tc init first"] + + with open(registry_path, "r", encoding="utf-8") as f: + registry = json.load(f) + + records_dir = tc_root / "records" + records_dir.mkdir(parents=True, exist_ok=True) + + created = 0 + errors_list: list[str] = [] + start_number = registry.get("next_tc_number", 1) + + # Build a mapping of index -> tc_id for related_tcs + tc_id_map: dict[int, str] = {} + + print(f"Processing {len(changes)} changes for project '{project}'...") + print() + + for i, change in enumerate(changes): + tc_number = start_number + i + record = build_tc_record(tc_number, change, project, author) + tc_id = record["tc_id"] + tc_id_map[i] = tc_id + + # Validate + validation_errors = validate_tc_record(record) + if validation_errors: + errors_list.append(f"TC-{tc_number:03d} ({change['title']}): {'; '.join(validation_errors)}") + continue + + # Create directory + tc_dir = records_dir / tc_id + tc_dir.mkdir(parents=True, exist_ok=True) + + # Write record (atomic: write to .tmp then rename) + tmp_path = tc_dir / "tc_record.json.tmp" + final_path = tc_dir / "tc_record.json" + + with open(tmp_path, "w", encoding="utf-8") as f: + json.dump(record, f, indent=2, ensure_ascii=False) + tmp_path.replace(final_path) + + # Add to registry + date_mmddyy = _date_to_mmddyy(change.get("date")) + registry["records"].append({ + "tc_id": tc_id, + 
"title": change["title"], + "status": record["status"], + "scope": change["scope"], + "priority": record["priority"], + "created": record["created"], + "updated": record["updated"], + "path": f"records/{tc_id}", + "sub_tc_count": 0, + "test_summary": { + "total": 0, "pass": 0, "fail": 0, + "pending": 0, "skip": 0, "blocked": 0, + }, + }) + + created += 1 + status_icon = "+" if record["status"] == "deployed" else "~" + print(f" [{status_icon}] {tc_id}: {change['title']}") + + # Resolve related_tcs references + for i, change in enumerate(changes): + related_indices = change.get("related_indices", []) + if related_indices and i in tc_id_map: + tc_id = tc_id_map[i] + tc_dir = records_dir / tc_id + record_path = tc_dir / "tc_record.json" + if record_path.exists(): + with open(record_path, "r", encoding="utf-8") as f: + record = json.load(f) + record["related_tcs"] = [ + tc_id_map[idx] for idx in related_indices + if idx in tc_id_map and idx != i + ] + with open(record_path, "w", encoding="utf-8") as f: + json.dump(record, f, indent=2, ensure_ascii=False) + + # Update registry + registry["next_tc_number"] = start_number + len(changes) + registry["updated"] = _now_iso() + registry["statistics"] = compute_registry_statistics(registry["records"]) + + # Write registry (atomic) + tmp_reg = tc_root / "tc_registry.json.tmp" + with open(tmp_reg, "w", encoding="utf-8") as f: + json.dump(registry, f, indent=2, ensure_ascii=False) + tmp_reg.replace(registry_path) + + print() + print(f"Created: {created}/{len(changes)} TC records") + if errors_list: + print(f"Errors: {len(errors_list)}") + for err in errors_list: + print(f" ! 
{err}") + + return created, len(errors_list), errors_list + + +def generate_all_html(tc_root: Path) -> int: + """Generate HTML for all TC records and the dashboard.""" + from generate_tc_html import generate_tc_html, _load_css # noqa: E402 + from generate_dashboard import generate_dashboard_html # noqa: E402 + + css = _load_css() + records_dir = tc_root / "records" + generated = 0 + + if records_dir.exists(): + for tc_dir in sorted(records_dir.iterdir()): + record_path = tc_dir / "tc_record.json" + if record_path.exists(): + try: + with open(record_path, "r", encoding="utf-8") as f: + record = json.load(f) + html = generate_tc_html(record, css) + html_path = tc_dir / "tc_record.html" + with open(html_path, "w", encoding="utf-8") as f: + f.write(html) + generated += 1 + except Exception as e: + print(f" ! HTML error for {tc_dir.name}: {e}") + + # Generate dashboard + registry_path = tc_root / "tc_registry.json" + if registry_path.exists(): + with open(registry_path, "r", encoding="utf-8") as f: + registry = json.load(f) + dashboard_html = generate_dashboard_html(registry, css, tc_root) + with open(tc_root / "index.html", "w", encoding="utf-8") as f: + f.write(dashboard_html) + print(f" Dashboard generated") + + return generated + + +# --------------------------------------------------------------------------- +# CLI Entry Point +# --------------------------------------------------------------------------- + +def main() -> int: + """CLI entry point.""" + if len(sys.argv) < 3: + print("Usage: python generate_retro_tcs.py ") + print() + print(" retro_changelog.json: Structured changelog file") + print(" tc_root_dir: Project's docs/TC/ directory (must be initialized)") + return 2 + + changelog_path = Path(sys.argv[1]) + tc_root = Path(sys.argv[2]) + + if not changelog_path.exists(): + print(f"ERROR: Changelog not found: {changelog_path}") + return 2 + + if not (tc_root / "tc_registry.json").exists(): + print(f"ERROR: tc_registry.json not found in {tc_root}") + 
print("Run /tc init first to initialize TC tracking.") + return 2 + + # Load and validate changelog + try: + with open(changelog_path, "r", encoding="utf-8") as f: + changelog = json.load(f) + except json.JSONDecodeError as e: + print(f"ERROR: Invalid JSON: {e}") + return 2 + + errors = _validate_changelog(changelog) + if errors: + print(f"CHANGELOG VALIDATION ERRORS ({len(errors)}):") + for i, err in enumerate(errors, 1): + print(f" {i}. {err}") + return 1 + + # Process + print(f"=== TC Retro Batch Generator ===") + print(f"Project: {changelog['project']}") + print(f"Changes: {len(changelog['changes'])}") + print(f"TC Root: {tc_root}") + print() + + created, error_count, error_msgs = process_retro_changelog(changelog, tc_root) + + if created > 0: + print() + print(f"Generating HTML for {created} records...") + html_count = generate_all_html(tc_root) + print(f"Generated {html_count} HTML pages + dashboard") + + print() + if error_count == 0: + print(f"SUCCESS: {created} TC records created, validated, and rendered.") + return 0 + else: + print(f"PARTIAL: {created} created, {error_count} errors.") + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/skills/technical-change/scripts/generate_tc_html.py b/skills/technical-change/scripts/generate_tc_html.py new file mode 100644 index 000000000..b7e3931be --- /dev/null +++ b/skills/technical-change/scripts/generate_tc_html.py @@ -0,0 +1,643 @@ +#!/usr/bin/env python3 +"""TC Record HTML Generator — Converts tc_record.json into accessible HTML. + +Reads a TC record JSON file, validates it, inlines the shared CSS, and produces +a self-contained HTML page with dark theme, WCAG AA+ accessibility, and rem-based +typography. + +Usage: + python generate_tc_html.py [--output ] + +If --output is not specified, writes tc_record.html in the same directory as the input. 
+ +Exit codes: + 0 = SUCCESS + 1 = VALIDATION ERRORS + 2 = FILE NOT FOUND or other errors +""" + +from __future__ import annotations + +import json +import sys +from datetime import datetime, timezone +from html import escape +from pathlib import Path + +# Add parent dir to path so we can import the validator +_SKILL_ROOT = Path(__file__).resolve().parent.parent +sys.path.insert(0, str(_SKILL_ROOT / "validators")) + +from validate_tc import validate_tc_record # noqa: E402 + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _esc(value) -> str: + """HTML-escape any value, converting non-strings to str first.""" + if value is None: + return '' + return escape(str(value)) + + +def _format_datetime(iso_str: str | None) -> str: + """Format an ISO datetime string for display.""" + if not iso_str: + return "—" + try: + dt = datetime.fromisoformat(iso_str) + return dt.strftime("%Y-%m-%d %H:%M %Z").strip() + except (ValueError, TypeError): + return _esc(iso_str) + + +def _status_display(status: str) -> str: + """Convert status slug to display text.""" + return status.replace("_", " ").title() + + +def _load_css() -> str: + """Load the shared CSS file.""" + css_path = _SKILL_ROOT / "templates" / "tc_styles.css" + if css_path.exists(): + return css_path.read_text(encoding="utf-8") + return "/* CSS not found — using inline fallback */\nbody { font-family: sans-serif; background: #0d0d18; color: #e0dcd0; }" + + +# --------------------------------------------------------------------------- +# Section Builders +# --------------------------------------------------------------------------- + +def build_stats_grid(record: dict) -> str: + """Build the stats grid HTML.""" + files_count = len(record.get("files_affected", [])) + revisions_count = len(record.get("revision_history", [])) + test_cases = record.get("test_cases", []) + tests_total = len(test_cases) + 
tests_pass = sum(1 for t in test_cases if t.get("status") == "pass") + sub_tc_count = len(record.get("sub_tcs", [])) + sessions_count = len(record.get("session_context", {}).get("session_history", [])) + 1 + + cards = [ + (str(files_count), "Files Affected"), + (f"{tests_pass}/{tests_total}", "Tests Passing"), + (str(revisions_count), "Revisions"), + (str(sessions_count), "Sessions"), + (str(sub_tc_count), "Sub-TCs"), + (_esc(record.get("priority", "medium")).title(), "Priority"), + ] + + html_parts = [] + for value, label in cards: + html_parts.append( + f'
' + f'{value}' + f'{_esc(label)}' + f'
' + ) + return "\n".join(html_parts) + + +def build_overview(record: dict) -> str: + """Build the overview section content.""" + desc = record.get("description", {}) + parts = [] + + parts.append(f'
') + parts.append(f'

Summary

') + parts.append(f'

{_esc(desc.get("summary", ""))}

') + parts.append(f'
') + + parts.append(f'
') + parts.append(f'

Motivation

') + parts.append(f'

{_esc(desc.get("motivation", ""))}

') + parts.append(f'
') + + if desc.get("detailed_design"): + parts.append(f'
') + parts.append(f'

Detailed Design

') + parts.append(f'

{_esc(desc["detailed_design"])}

') + parts.append(f'
') + + if desc.get("breaking_changes"): + parts.append(f'
') + parts.append(f'

Breaking Changes

') + parts.append(f'
    ') + for bc in desc["breaking_changes"]: + parts.append(f'
  • {_esc(bc)}
  • ') + parts.append(f'
') + + if desc.get("dependencies"): + parts.append(f'
') + parts.append(f'

Dependencies

') + parts.append(f'
    ') + for dep in desc["dependencies"]: + parts.append(f'
  • {_esc(dep)}
  • ') + parts.append(f'
') + + # Metadata + meta = record.get("metadata", {}) + parts.append(f'
') + parts.append(f'

Metadata

') + parts.append(f'
') + for label, value in [ + ("Created", _format_datetime(record.get("created"))), + ("Updated", _format_datetime(record.get("updated"))), + ("Created By", _esc(record.get("created_by", ""))), + ("Last Modified By", _esc(meta.get("last_modified_by", ""))), + ("Effort", _esc(meta.get("estimated_effort", "—")).title()), + ]: + parts.append( + f'
' + f'
{label}
' + f'
{value}
' + f'
' + ) + parts.append(f'
') + + # Tags + tags = record.get("tags", []) + if tags: + parts.append(f'
') + parts.append(f'

Tags

') + parts.append(f'
') + for tag in tags: + parts.append(f'{_esc(tag)}') + parts.append(f'
') + + # Related TCs + related = record.get("related_tcs", []) + if related: + parts.append(f'
') + parts.append(f'

Related TCs

') + parts.append(f'
    ') + for r in related: + parts.append(f'
  • {_esc(r)}
  • ') + parts.append(f'
') + + # Notes + notes = record.get("notes", "") + if notes: + parts.append(f'
') + parts.append(f'

Notes

') + parts.append(f'

{_esc(notes)}

') + parts.append(f'
') + + return "\n".join(parts) + + +def build_files(record: dict) -> str: + """Build the files affected section.""" + files = record.get("files_affected", []) + if not files: + return '

No files affected yet.

' + + action_badges = { + "created": "badge-feature", + "modified": "badge-refactor", + "deleted": "badge-bugfix", + "renamed": "badge-enhancement", + } + + parts = ['
', ""] + parts.append("") + parts.append("") + + for f in files: + action = f.get("action", "") + badge_cls = action_badges.get(action, "") + added = f.get("lines_added") + removed = f.get("lines_removed") + diff_str = "" + if added is not None: + diff_str += f'+{added}' + if removed is not None: + if diff_str: + diff_str += " / " + diff_str += f'-{removed}' + if not diff_str: + diff_str = "—" + + parts.append( + f"" + f'' + f'' + f'' + f"" + f"" + ) + + parts.append("
FileActionDescription+/-
{_esc(f.get("path", ""))}{_esc(action)}{_esc(f.get("description", ""))}{diff_str}
") + return "\n".join(parts) + + +def build_revisions(record: dict) -> str: + """Build the revision history timeline.""" + revisions = record.get("revision_history", []) + if not revisions: + return '

No revisions recorded.

' + + parts = ['
'] + + for rev in reversed(revisions): + parts.append('
') + parts.append( + f'{_esc(rev.get("revision_id", ""))}' + f' ' + f'{_format_datetime(rev.get("timestamp"))} by {_esc(rev.get("author", ""))}' + f'' + ) + parts.append(f'
{_esc(rev.get("summary", ""))}
') + + field_changes = rev.get("field_changes", []) + if field_changes: + for fc in field_changes: + action = fc.get("action", "") + parts.append(f'
') + parts.append(f'{_esc(fc.get("field", ""))} ') + + if action == "changed": + parts.append( + f'{_esc(fc.get("old_value"))} ' + f'→ {_esc(fc.get("new_value"))}' + ) + elif action == "set": + parts.append(f'set to {_esc(fc.get("new_value"))}') + elif action == "added": + parts.append(f'+ {_esc(fc.get("new_value"))}') + elif action == "removed": + parts.append(f'- {_esc(fc.get("old_value"))}') + + reason = fc.get("reason") + if reason: + parts.append(f'
Reason: {_esc(reason)}') + + parts.append('
') + + parts.append('
') + + parts.append('
') + return "\n".join(parts) + + +def build_sub_tcs(record: dict) -> str: + """Build the sub-TCs section.""" + subs = record.get("sub_tcs", []) + if not subs: + return '

No sub-TCs defined.

' + + parts = [] + for sub in subs: + status = sub.get("status", "planned") + parts.append(f'
') + parts.append(f'
') + parts.append(f'
') + parts.append(f'{_esc(sub.get("title", ""))}') + parts.append(f'
{_esc(sub.get("sub_id", ""))}') + parts.append(f'
') + parts.append(f'{_status_display(status)}') + parts.append(f'
') + if sub.get("description"): + parts.append(f'

{_esc(sub["description"])}

') + if sub.get("files_affected"): + parts.append(f'
Files:') + for fp in sub["files_affected"]: + parts.append(f' {_esc(fp)}') + parts.append(f'
') + parts.append(f'
') + + return "\n".join(parts) + + +def build_tests(record: dict) -> str: + """Build the test cases section.""" + tests = record.get("test_cases", []) + if not tests: + return '

No test cases defined yet.

' + + parts = [] + for tc in tests: + status = tc.get("status", "pending") + parts.append(f'
') + parts.append(f'
') + parts.append( + f'
' + f'{_esc(tc.get("test_id", ""))} ' + f'{_esc(tc.get("title", ""))}' + f'
' + f'{_esc(status).upper()}' + ) + parts.append(f'
') + + # Procedure + procedure = tc.get("procedure", []) + if procedure: + parts.append(f'
Procedure
') + parts.append(f'
    ') + for step in procedure: + parts.append(f'
  1. {_esc(step)}
  2. ') + parts.append(f'
') + + # Expected + parts.append(f'
Expected Result
') + parts.append(f'
{_esc(tc.get("expected_result", ""))}
') + + # Actual + actual = tc.get("actual_result") + if actual: + parts.append(f'
Actual Result
') + parts.append(f'
{_esc(actual)}
') + + # Evidence + evidence = tc.get("evidence", []) + if evidence: + parts.append(f'
Evidence
') + for ev in evidence: + ev_type = ev.get("type", "") + parts.append(f'
') + parts.append( + f'{_esc(ev_type)} ' + f'{_esc(ev.get("description", ""))}' + ) + content = ev.get("content") + if content: + parts.append(f'
{_esc(content)}
') + path = ev.get("path") + if path: + parts.append(f'

{_esc(path)}

') + parts.append(f'
') + + # Tested by/date + tested_by = tc.get("tested_by") + tested_date = tc.get("tested_date") + if tested_by or tested_date: + parts.append(f'
') + if tested_by: + parts.append(f'Tested by: {_esc(tested_by)}') + if tested_date: + parts.append(f' on {_format_datetime(tested_date)}') + parts.append(f'
') + + parts.append(f'
') + + return "\n".join(parts) + + +def build_session(record: dict) -> str: + """Build the session context section.""" + ctx = record.get("session_context", {}) + parts = [] + + # Current session + cs = ctx.get("current_session", {}) + parts.append(f'

Current Session

') + parts.append(f'
') + for label, value in [ + ("Session ID", _esc(cs.get("session_id", "—"))), + ("Platform", _esc(cs.get("platform", "—"))), + ("Model", _esc(cs.get("model", "—"))), + ("Started", _format_datetime(cs.get("started"))), + ("Last Active", _format_datetime(cs.get("last_active"))), + ]: + parts.append( + f'
' + f'
{label}
' + f'
{value}
' + f'
' + ) + parts.append(f'
') + + # Handoff data + handoff = ctx.get("handoff", {}) + has_handoff = any([ + handoff.get("progress_summary"), + handoff.get("next_steps"), + handoff.get("blockers"), + handoff.get("key_context"), + handoff.get("files_in_progress"), + handoff.get("decisions_made"), + ]) + + if has_handoff: + parts.append(f'
') + parts.append(f'

Handoff Data

') + + if handoff.get("progress_summary"): + parts.append(f'
Progress Summary
') + parts.append(f'

{_esc(handoff["progress_summary"])}

') + + if handoff.get("next_steps"): + parts.append(f'
Next Steps
') + parts.append(f'
    ') + for step in handoff["next_steps"]: + parts.append(f'
  • {_esc(step)}
  • ') + parts.append(f'
') + + if handoff.get("blockers"): + parts.append(f'
Blockers
') + parts.append(f'
    ') + for b in handoff["blockers"]: + parts.append(f'
  • {_esc(b)}
  • ') + parts.append(f'
') + + if handoff.get("key_context"): + parts.append(f'
Key Context
') + parts.append(f'
    ') + for c in handoff["key_context"]: + parts.append(f'
  • {_esc(c)}
  • ') + parts.append(f'
') + + if handoff.get("files_in_progress"): + parts.append(f'
Files In Progress
') + parts.append(f'
') + parts.append(f'') + parts.append(f'') + for fip in handoff["files_in_progress"]: + state = fip.get("state", "") + parts.append( + f'' + f'' + f'' + f'' + f'' + ) + parts.append(f'
FileStateNotes
{_esc(fip.get("path", ""))}{_esc(state)}{_esc(fip.get("notes", ""))}
') + + if handoff.get("decisions_made"): + parts.append(f'
Decisions Made
') + for d in handoff["decisions_made"]: + parts.append(f'
') + parts.append(f'{_esc(d.get("decision", ""))}') + parts.append(f'
{_esc(d.get("rationale", ""))}') + parts.append(f'
{_format_datetime(d.get("timestamp"))}') + parts.append(f'
') + + parts.append(f'
') + + # Session history + history = ctx.get("session_history", []) + if history: + parts.append(f'

Session History

') + for sess in reversed(history): + parts.append(f'
') + parts.append( + f'
' + f'{_esc(sess.get("platform", ""))} — {_esc(sess.get("model", ""))}' + f'{_format_datetime(sess.get("started"))} - {_format_datetime(sess.get("ended"))}' + f'
' + ) + parts.append(f'

{_esc(sess.get("summary", ""))}

') + changes = sess.get("changes_made", []) + if changes: + parts.append(f'
    ') + for ch in changes: + parts.append(f'
  • {_esc(ch)}
  • ') + parts.append(f'
') + parts.append(f'
') + + return "\n".join(parts) + + +def build_approval(record: dict) -> str: + """Build the approval section.""" + appr = record.get("approval", {}) + approved = appr.get("approved", False) + coverage = appr.get("test_coverage_status", "none") + + cls = "approved" if approved else "not-approved" + icon = "✓" if approved else "✗" + label = "Approved" if approved else "Not Yet Approved" + + parts = [f'
'] + parts.append(f'{icon}') + parts.append(f'
') + parts.append(f'{label}') + + if approved: + parts.append(f'
By: {_esc(appr.get("approved_by", ""))} on {_format_datetime(appr.get("approved_date"))}') + + parts.append(f'
Test Coverage: {_esc(coverage).upper()}') + + if appr.get("approval_notes"): + parts.append(f'
{_esc(appr["approval_notes"])}') + + parts.append(f'
') + return "\n".join(parts) + + +# --------------------------------------------------------------------------- +# Main Generator +# --------------------------------------------------------------------------- + +def generate_tc_html(record: dict, css: str) -> str: + """Generate the complete HTML document for a TC record.""" + tc_id = record.get("tc_id", "Unknown") + title = record.get("title", "Unknown") + project = record.get("project", "Unknown") + status = record.get("status", "planned") + scope = record.get("description", {}).get("scope", "feature") + priority = record.get("priority", "medium") + + now_str = datetime.now(timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M %Z").strip() + + # Read the template + template_path = _SKILL_ROOT / "templates" / "tc_record_template.html" + if template_path.exists(): + template = template_path.read_text(encoding="utf-8") + else: + # Minimal fallback + template = ( + '' + '' + '{{TC_ID}}' + '
' + '{{OVERVIEW_CONTENT}}{{FILES_CONTENT}}{{REVISIONS_CONTENT}}' + '{{TESTS_CONTENT}}{{SESSION_CONTENT}}{{APPROVAL_CONTENT}}' + '
' + ) + + # Build all sections + replacements = { + "{{CSS}}": css, + "{{TC_ID}}": _esc(tc_id), + "{{TITLE}}": _esc(title), + "{{PROJECT}}": _esc(project), + "{{STATUS}}": _esc(status), + "{{STATUS_DISPLAY}}": _status_display(status), + "{{SCOPE}}": _esc(scope), + "{{PRIORITY}}": _esc(priority), + "{{STATS_GRID}}": build_stats_grid(record), + "{{OVERVIEW_CONTENT}}": build_overview(record), + "{{FILES_CONTENT}}": build_files(record), + "{{REVISIONS_CONTENT}}": build_revisions(record), + "{{SUB_TCS_CONTENT}}": build_sub_tcs(record), + "{{TESTS_CONTENT}}": build_tests(record), + "{{SESSION_CONTENT}}": build_session(record), + "{{APPROVAL_CONTENT}}": build_approval(record), + "{{GENERATED_DATE}}": now_str, + } + + html = template + for placeholder, value in replacements.items(): + html = html.replace(placeholder, value) + + return html + + +# --------------------------------------------------------------------------- +# CLI Entry Point +# --------------------------------------------------------------------------- + +def main() -> int: + """CLI entry point.""" + if len(sys.argv) < 2: + print("Usage: python generate_tc_html.py [--output ]") + return 2 + + input_path = Path(sys.argv[1]) + output_path = None + + if "--output" in sys.argv: + idx = sys.argv.index("--output") + if idx + 1 < len(sys.argv): + output_path = Path(sys.argv[idx + 1]) + + if not input_path.exists(): + print(f"ERROR: File not found: {input_path}") + return 2 + + try: + with open(input_path, "r", encoding="utf-8") as f: + record = json.load(f) + except json.JSONDecodeError as e: + print(f"ERROR: Invalid JSON: {e}") + return 2 + + # Validate + errors = validate_tc_record(record) + if errors: + print(f"VALIDATION ERRORS ({len(errors)}):") + for i, err in enumerate(errors, 1): + print(f" {i}. 
{err}") + return 1 + + # Generate + css = _load_css() + html = generate_tc_html(record, css) + + # Write output + if output_path is None: + output_path = input_path.parent / "tc_record.html" + + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w", encoding="utf-8") as f: + f.write(html) + + print(f"Generated: {output_path}") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/skills/technical-change/scripts/validate_tc.py b/skills/technical-change/scripts/validate_tc.py new file mode 100644 index 000000000..1c6c83c99 --- /dev/null +++ b/skills/technical-change/scripts/validate_tc.py @@ -0,0 +1,566 @@ +#!/usr/bin/env python3 +"""TC Record Validator — Schema validation + state machine enforcement. + +Validates TC records against the canonical schema, enforces state machine +transitions, and provides utilities for TC ID generation. + +Usage: + python validate_tc.py + python validate_tc.py --registry + +Exit codes: + 0 = VALID + 1 = VALIDATION ERRORS (printed to stdout) + 2 = FILE NOT FOUND or JSON PARSE ERROR +""" + +from __future__ import annotations + +import json +import re +import sys +from datetime import datetime, timezone +from pathlib import Path + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +VALID_STATUSES = ("planned", "in_progress", "blocked", "implemented", "tested", "deployed") + +VALID_TRANSITIONS: dict[str, list[str]] = { + "planned": ["in_progress", "blocked"], + "in_progress": ["blocked", "implemented"], + "blocked": ["in_progress", "planned"], + "implemented": ["tested", "in_progress"], + "tested": ["deployed", "in_progress"], + "deployed": ["in_progress"], +} + +VALID_SCOPES = ("feature", "bugfix", "refactor", "infrastructure", "documentation", "hotfix", "enhancement") +VALID_PRIORITIES = ("critical", "high", "medium", "low") +VALID_FILE_ACTIONS = ("created", 
"modified", "deleted", "renamed") +VALID_TEST_STATUSES = ("pending", "pass", "fail", "skip", "blocked") +VALID_EVIDENCE_TYPES = ("log_snippet", "screenshot", "file_reference", "command_output") +VALID_FIELD_CHANGE_ACTIONS = ("set", "changed", "added", "removed") +VALID_PLATFORMS = ("claude_code", "claude_web", "api", "other") +VALID_EFFORTS = ("trivial", "small", "medium", "large", "epic", None) +VALID_COVERAGE = ("none", "partial", "full") +VALID_FILE_IN_PROGRESS_STATES = ("editing", "needs_review", "partially_done", "ready") + +TC_ID_PATTERN = re.compile(r"^TC-\d{3}-\d{2}-\d{2}-\d{2}-[a-z0-9]+(-[a-z0-9]+)*$") +SUB_TC_PATTERN = re.compile(r"^TC-\d{3}\.[A-Z](\.\d+)?$") +REVISION_ID_PATTERN = re.compile(r"^R(\d+)$") +TEST_ID_PATTERN = re.compile(r"^T(\d+)$") + + +# --------------------------------------------------------------------------- +# Validation Functions +# --------------------------------------------------------------------------- + +def validate_tc_id(tc_id: str) -> list[str]: + """Validate a TC identifier against the naming convention. + + Returns empty list if valid, list of error strings otherwise. 
+ """ + errors: list[str] = [] + if not isinstance(tc_id, str): + errors.append(f"tc_id must be a string, got {type(tc_id).__name__}") + return errors + if not TC_ID_PATTERN.match(tc_id): + errors.append( + f"tc_id '{tc_id}' does not match pattern TC-NNN-MM-DD-YY-slug " + f"(e.g., TC-001-04-03-26-user-authentication)" + ) + return errors + + +def validate_sub_tc_id(sub_id: str) -> list[str]: + """Validate a sub-TC identifier.""" + errors: list[str] = [] + if not isinstance(sub_id, str): + errors.append(f"sub_id must be a string, got {type(sub_id).__name__}") + return errors + if not SUB_TC_PATTERN.match(sub_id): + errors.append( + f"sub_id '{sub_id}' does not match pattern TC-NNN.A or TC-NNN.A.N " + f"(e.g., TC-001.A or TC-001.A.1)" + ) + return errors + + +def validate_state_transition(current_status: str, new_status: str) -> list[str]: + """Validate a state machine transition. + + Same-status transitions (no-ops) are allowed. + Returns empty list if valid, list of error strings otherwise. + """ + errors: list[str] = [] + if current_status not in VALID_STATUSES: + errors.append(f"Current status '{current_status}' is not valid. Must be one of: {', '.join(VALID_STATUSES)}") + if new_status not in VALID_STATUSES: + errors.append(f"New status '{new_status}' is not valid. Must be one of: {', '.join(VALID_STATUSES)}") + if errors: + return errors + + # Same-status is a no-op, always valid + if current_status == new_status: + return [] + + allowed = VALID_TRANSITIONS.get(current_status, []) + if new_status not in allowed: + errors.append( + f"Invalid state transition: '{current_status}' -> '{new_status}'. 
" + f"Allowed transitions from '{current_status}': {', '.join(allowed) if allowed else 'none'}" + ) + return errors + + +def _check_required_fields(record: dict, required: list[str], prefix: str = "") -> list[str]: + """Check that all required fields are present in a dict.""" + errors: list[str] = [] + for field in required: + if field not in record: + path = f"{prefix}.{field}" if prefix else field + errors.append(f"Missing required field: '{path}'") + return errors + + +def _check_enum(value, valid_values: tuple, field_name: str) -> list[str]: + """Check that a value is one of the valid enum values.""" + if value not in valid_values: + return [f"Field '{field_name}' has invalid value '{value}'. Must be one of: {', '.join(str(v) for v in valid_values)}"] + return [] + + +def _check_string(value, field_name: str, min_length: int = 0, max_length: int | None = None) -> list[str]: + """Check that a value is a string with optional length constraints.""" + errors: list[str] = [] + if not isinstance(value, str): + errors.append(f"Field '{field_name}' must be a string, got {type(value).__name__}") + return errors + if len(value) < min_length: + errors.append(f"Field '{field_name}' must be at least {min_length} characters, got {len(value)}") + if max_length is not None and len(value) > max_length: + errors.append(f"Field '{field_name}' must be at most {max_length} characters, got {len(value)}") + return errors + + +def _check_iso_datetime(value, field_name: str) -> list[str]: + """Check that a value is a valid ISO 8601 datetime string.""" + if value is None: + return [] + if not isinstance(value, str): + return [f"Field '{field_name}' must be an ISO 8601 datetime string, got {type(value).__name__}"] + try: + datetime.fromisoformat(value) + except ValueError: + return [f"Field '{field_name}' is not a valid ISO 8601 datetime: '{value}'"] + return [] + + +def _check_array(value, field_name: str) -> list[str]: + """Check that a value is a list.""" + if not isinstance(value, 
def _check_array(value, field_name: str) -> list[str]:
    """Return a one-element error list if *value* is not a JSON array.

    NOTE(review): the original ``def`` line falls outside this view; the
    signature is reconstructed from the call sites below, which all pass
    (value, field_name) positionally — confirm against the full file.
    """
    if not isinstance(value, list):
        return [f"Field '{field_name}' must be an array, got {type(value).__name__}"]
    return []


def validate_tc_record(record: dict) -> list[str]:
    """Full validation of a TC record against the schema.

    Returns empty list if valid, list of error strings otherwise.
    """
    errors: list[str] = []

    if not isinstance(record, dict):
        return [f"TC record must be a JSON object, got {type(record).__name__}"]

    # --- Top-level required fields ---
    top_required = [
        "tc_id", "title", "status", "priority", "created", "updated",
        "created_by", "project", "description", "files_affected",
        "revision_history", "test_cases", "approval", "session_context",
        "tags", "related_tcs", "notes", "metadata",
    ]
    errors.extend(_check_required_fields(record, top_required))

    # If critical fields are missing, we can't validate further.
    # (Matches the message format emitted by _check_required_fields.)
    if any(f"Missing required field: '{f}'" in e for e in errors for f in ["tc_id", "status"]):
        return errors

    # --- tc_id ---
    if "tc_id" in record:
        errors.extend(validate_tc_id(record["tc_id"]))

    # --- title ---
    if "title" in record:
        errors.extend(_check_string(record["title"], "title", min_length=5, max_length=120))

    # --- status ---
    if "status" in record:
        errors.extend(_check_enum(record["status"], VALID_STATUSES, "status"))

    # --- priority ---
    if "priority" in record:
        errors.extend(_check_enum(record["priority"], VALID_PRIORITIES, "priority"))

    # --- timestamps ---
    for ts_field in ("created", "updated"):
        if ts_field in record:
            errors.extend(_check_iso_datetime(record[ts_field], ts_field))

    # --- created_by ---
    if "created_by" in record:
        errors.extend(_check_string(record["created_by"], "created_by", min_length=1))

    # --- project ---
    if "project" in record:
        errors.extend(_check_string(record["project"], "project", min_length=1))

    # --- description ---
    if "description" in record:
        desc = record["description"]
        if not isinstance(desc, dict):
            errors.append("Field 'description' must be an object")
        else:
            errors.extend(_check_required_fields(desc, ["summary", "motivation", "scope"], "description"))
            if "summary" in desc:
                errors.extend(_check_string(desc["summary"], "description.summary", min_length=10))
            if "motivation" in desc:
                errors.extend(_check_string(desc["motivation"], "description.motivation", min_length=1))
            if "scope" in desc:
                errors.extend(_check_enum(desc["scope"], VALID_SCOPES, "description.scope"))
            if "breaking_changes" in desc:
                errors.extend(_check_array(desc["breaking_changes"], "description.breaking_changes"))
            if "dependencies" in desc:
                errors.extend(_check_array(desc["dependencies"], "description.dependencies"))

    # --- files_affected ---
    if "files_affected" in record:
        files = record["files_affected"]
        errors.extend(_check_array(files, "files_affected"))
        if isinstance(files, list):
            for i, f in enumerate(files):
                prefix = f"files_affected[{i}]"
                if not isinstance(f, dict):
                    errors.append(f"{prefix} must be an object")
                    continue
                errors.extend(_check_required_fields(f, ["path", "action"], prefix))
                if "path" in f:
                    errors.extend(_check_string(f["path"], f"{prefix}.path", min_length=1))
                if "action" in f:
                    errors.extend(_check_enum(f["action"], VALID_FILE_ACTIONS, f"{prefix}.action"))

    # --- revision_history ---
    if "revision_history" in record:
        revs = record["revision_history"]
        errors.extend(_check_array(revs, "revision_history"))
        if isinstance(revs, list):
            if len(revs) < 1:
                errors.append("revision_history must have at least 1 entry (the creation event)")
            for i, rev in enumerate(revs):
                prefix = f"revision_history[{i}]"
                if not isinstance(rev, dict):
                    errors.append(f"{prefix} must be an object")
                    continue
                errors.extend(_check_required_fields(rev, ["revision_id", "timestamp", "author", "summary"], prefix))

                # Check sequential IDs: R1, R2, ... must match list position.
                if "revision_id" in rev:
                    rid = rev["revision_id"]
                    match = REVISION_ID_PATTERN.match(rid) if isinstance(rid, str) else None
                    if not match:
                        errors.append(f"{prefix}.revision_id '{rid}' must match pattern R1, R2, R3...")
                    elif int(match.group(1)) != i + 1:
                        errors.append(
                            f"{prefix}.revision_id is '{rid}' but expected 'R{i + 1}' "
                            f"(revision IDs must be sequential)"
                        )

                if "timestamp" in rev:
                    errors.extend(_check_iso_datetime(rev["timestamp"], f"{prefix}.timestamp"))

                # Validate field_changes if present (optional array of objects).
                if "field_changes" in rev and isinstance(rev["field_changes"], list):
                    for j, fc in enumerate(rev["field_changes"]):
                        fc_prefix = f"{prefix}.field_changes[{j}]"
                        if not isinstance(fc, dict):
                            errors.append(f"{fc_prefix} must be an object")
                            continue
                        errors.extend(_check_required_fields(fc, ["field", "action"], fc_prefix))
                        if "action" in fc:
                            errors.extend(_check_enum(fc["action"], VALID_FIELD_CHANGE_ACTIONS, f"{fc_prefix}.action"))

    # --- sub_tcs ---
    # NOTE(review): unlike the other array fields, a non-list sub_tcs is
    # silently ignored here (no _check_array call) — preserved as-is.
    if "sub_tcs" in record:
        subs = record["sub_tcs"]
        if isinstance(subs, list):
            for i, sub in enumerate(subs):
                prefix = f"sub_tcs[{i}]"
                if not isinstance(sub, dict):
                    errors.append(f"{prefix} must be an object")
                    continue
                errors.extend(_check_required_fields(sub, ["sub_id", "title", "status"], prefix))
                if "sub_id" in sub:
                    errors.extend(validate_sub_tc_id(sub["sub_id"]))
                if "status" in sub:
                    errors.extend(_check_enum(sub["status"], VALID_STATUSES, f"{prefix}.status"))

    # --- test_cases ---
    if "test_cases" in record:
        tests = record["test_cases"]
        errors.extend(_check_array(tests, "test_cases"))
        if isinstance(tests, list):
            for i, tc in enumerate(tests):
                prefix = f"test_cases[{i}]"
                if not isinstance(tc, dict):
                    errors.append(f"{prefix} must be an object")
                    continue
                errors.extend(_check_required_fields(
                    tc, ["test_id", "title", "procedure", "expected_result", "status"], prefix
                ))

                # Check sequential test IDs: T1, T2, ... must match position.
                if "test_id" in tc:
                    tid = tc["test_id"]
                    match = TEST_ID_PATTERN.match(tid) if isinstance(tid, str) else None
                    if not match:
                        errors.append(f"{prefix}.test_id '{tid}' must match pattern T1, T2, T3...")
                    elif int(match.group(1)) != i + 1:
                        errors.append(
                            f"{prefix}.test_id is '{tid}' but expected 'T{i + 1}' "
                            f"(test IDs must be sequential)"
                        )

                if "status" in tc:
                    errors.extend(_check_enum(tc["status"], VALID_TEST_STATUSES, f"{prefix}.status"))

                if "procedure" in tc:
                    proc = tc["procedure"]
                    errors.extend(_check_array(proc, f"{prefix}.procedure"))
                    if isinstance(proc, list) and len(proc) < 1:
                        errors.append(f"{prefix}.procedure must have at least 1 step")

                # Validate evidence (optional array of objects).
                if "evidence" in tc and isinstance(tc["evidence"], list):
                    for j, ev in enumerate(tc["evidence"]):
                        ev_prefix = f"{prefix}.evidence[{j}]"
                        if not isinstance(ev, dict):
                            errors.append(f"{ev_prefix} must be an object")
                            continue
                        errors.extend(_check_required_fields(ev, ["type", "description"], ev_prefix))
                        if "type" in ev:
                            errors.extend(_check_enum(ev["type"], VALID_EVIDENCE_TYPES, f"{ev_prefix}.type"))

    # --- approval ---
    if "approval" in record:
        appr = record["approval"]
        if not isinstance(appr, dict):
            errors.append("Field 'approval' must be an object")
        else:
            errors.extend(_check_required_fields(appr, ["approved", "test_coverage_status"], "approval"))
            # An approved record must say who approved it and when.
            if "approved" in appr and appr["approved"] is True:
                if not appr.get("approved_by"):
                    errors.append("approval.approved_by is required when approval.approved is true")
                if not appr.get("approved_date"):
                    errors.append("approval.approved_date is required when approval.approved is true")
            if "test_coverage_status" in appr:
                errors.extend(_check_enum(appr["test_coverage_status"], VALID_COVERAGE, "approval.test_coverage_status"))

    # --- session_context ---
    if "session_context" in record:
        ctx = record["session_context"]
        if not isinstance(ctx, dict):
            errors.append("Field 'session_context' must be an object")
        else:
            errors.extend(_check_required_fields(ctx, ["current_session"], "session_context"))
            if "current_session" in ctx:
                cs = ctx["current_session"]
                if not isinstance(cs, dict):
                    errors.append("session_context.current_session must be an object")
                else:
                    errors.extend(_check_required_fields(
                        cs, ["session_id", "platform", "model", "started"],
                        "session_context.current_session"
                    ))
                    if "platform" in cs:
                        errors.extend(_check_enum(cs["platform"], VALID_PLATFORMS, "session_context.current_session.platform"))
                    if "started" in cs:
                        errors.extend(_check_iso_datetime(cs["started"], "session_context.current_session.started"))

            # Validate handoff (optional; used for AI session resume).
            if "handoff" in ctx and isinstance(ctx["handoff"], dict):
                handoff = ctx["handoff"]
                if "files_in_progress" in handoff and isinstance(handoff["files_in_progress"], list):
                    for i, fip in enumerate(handoff["files_in_progress"]):
                        fip_prefix = f"session_context.handoff.files_in_progress[{i}]"
                        if isinstance(fip, dict):
                            errors.extend(_check_required_fields(fip, ["path", "state"], fip_prefix))
                            if "state" in fip:
                                errors.extend(_check_enum(fip["state"], VALID_FILE_IN_PROGRESS_STATES, f"{fip_prefix}.state"))

    # --- metadata ---
    if "metadata" in record:
        meta = record["metadata"]
        if not isinstance(meta, dict):
            errors.append("Field 'metadata' must be an object")
        else:
            errors.extend(_check_required_fields(
                meta, ["project", "created_by", "last_modified_by", "last_modified"],
                "metadata"
            ))
            if "last_modified" in meta:
                errors.extend(_check_iso_datetime(meta["last_modified"], "metadata.last_modified"))
            if "estimated_effort" in meta:
                errors.extend(_check_enum(meta["estimated_effort"], VALID_EFFORTS, "metadata.estimated_effort"))

    return errors


def validate_registry(registry: dict) -> list[str]:
    """Validate a TC registry against its schema.

    Returns empty list if valid, list of error strings otherwise.
    """
    errors: list[str] = []

    if not isinstance(registry, dict):
        return [f"TC registry must be a JSON object, got {type(registry).__name__}"]

    required = ["project_name", "created", "updated", "next_tc_number", "records", "statistics"]
    errors.extend(_check_required_fields(registry, required))

    if "project_name" in registry:
        errors.extend(_check_string(registry["project_name"], "project_name", min_length=1))

    for ts_field in ("created", "updated"):
        if ts_field in registry:
            errors.extend(_check_iso_datetime(registry[ts_field], ts_field))

    if "next_tc_number" in registry:
        val = registry["next_tc_number"]
        if not isinstance(val, int) or val < 1:
            errors.append(f"next_tc_number must be a positive integer, got {val}")

    if "records" in registry:
        records = registry["records"]
        errors.extend(_check_array(records, "records"))
        if isinstance(records, list):
            for i, rec in enumerate(records):
                prefix = f"records[{i}]"
                if not isinstance(rec, dict):
                    errors.append(f"{prefix} must be an object")
                    continue
                errors.extend(_check_required_fields(
                    rec, ["tc_id", "title", "status", "scope", "priority", "created", "updated", "path"],
                    prefix
                ))
                if "status" in rec:
                    errors.extend(_check_enum(rec["status"], VALID_STATUSES, f"{prefix}.status"))
                if "scope" in rec:
                    errors.extend(_check_enum(rec["scope"], VALID_SCOPES, f"{prefix}.scope"))
                if "priority" in rec:
                    errors.extend(_check_enum(rec["priority"], VALID_PRIORITIES, f"{prefix}.priority"))

    if "statistics" in registry:
        stats = registry["statistics"]
        if not isinstance(stats, dict):
            errors.append("statistics must be an object")
        else:
            errors.extend(_check_required_fields(stats, ["total", "by_status", "by_scope", "by_priority"], "statistics"))

    return errors


# ---------------------------------------------------------------------------
# Utility Functions
# ---------------------------------------------------------------------------

def generate_next_tc_id(registry: dict, date_str: str, name_slug: str) -> str:
    """Generate the next TC ID from the registry.

    Args:
        registry: The tc_registry.json data
        date_str: Date in MM-DD-YY format
        name_slug: Kebab-case functionality name (e.g., 'user-authentication')

    Returns:
        TC ID string (e.g., 'TC-001-04-03-26-user-authentication')
    """
    # Defaults to 1 for a brand-new registry with no counter yet.
    num = registry.get("next_tc_number", 1)
    return f"TC-{num:03d}-{date_str}-{name_slug}"


def compute_registry_statistics(records: list[dict]) -> dict:
    """Recompute registry statistics from the records array.

    Call this every time the registry is modified. Unknown status/scope/
    priority values are silently excluded from the per-bucket counts (they
    would already be flagged by validate_registry).
    """
    stats = {
        "total": len(records),
        "by_status": {s: 0 for s in VALID_STATUSES},
        "by_scope": {s: 0 for s in VALID_SCOPES},
        "by_priority": {p: 0 for p in VALID_PRIORITIES},
    }
    for rec in records:
        status = rec.get("status", "")
        if status in stats["by_status"]:
            stats["by_status"][status] += 1
        scope = rec.get("scope", "")
        if scope in stats["by_scope"]:
            stats["by_scope"][scope] += 1
        priority = rec.get("priority", "")
        if priority in stats["by_priority"]:
            stats["by_priority"][priority] += 1
    return stats


def slugify(text: str) -> str:
    """Convert text to a URL/filename-safe kebab-case slug."""
    text = text.lower().strip()
    # BUG FIX: keep '_' through this pass so the next step can turn it into
    # a hyphen. Previously the class was [^a-z0-9\s-], which stripped
    # underscores first and made the '_' in [\s_]+ dead code
    # ("a_b" -> "ab" instead of the intended "a-b").
    text = re.sub(r"[^a-z0-9\s_-]", "", text)
    text = re.sub(r"[\s_]+", "-", text)
    text = re.sub(r"-+", "-", text)
    return text.strip("-")


# ---------------------------------------------------------------------------
# CLI Entry Point
# ---------------------------------------------------------------------------

def main() -> int:
    """CLI entry point. Validates a TC record or registry JSON file.

    Exit codes: 0 = valid, 1 = validation errors, 2 = usage/IO/JSON error.
    """
    usage_lines = [
        "Usage:",
        "  python validate_tc.py <tc_record.json>",
        "  python validate_tc.py --registry <tc_registry.json>",
    ]
    if len(sys.argv) < 2:
        print("\n".join(usage_lines))
        return 2

    is_registry = sys.argv[1] == "--registry"
    # BUG FIX: '--registry' with no path argument previously raised
    # IndexError on sys.argv[2]; print usage instead.
    if is_registry and len(sys.argv) < 3:
        print("\n".join(usage_lines))
        return 2
    file_path = sys.argv[2] if is_registry else sys.argv[1]

    path = Path(file_path)
    if not path.exists():
        print(f"ERROR: File not found: {path}")
        return 2

    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except json.JSONDecodeError as e:
        print(f"ERROR: Invalid JSON in {path}: {e}")
        return 2

    if is_registry:
        errors = validate_registry(data)
    else:
        errors = validate_tc_record(data)

    if errors:
        print(f"VALIDATION ERRORS ({len(errors)}):")
        for i, err in enumerate(errors, 1):
            print(f"  {i}. {err}")
        return 1

    print("VALID")
    return 0


if __name__ == "__main__":
    sys.exit(main())