From ec09a0a662ee910fef4baf9037dc1548f1bf053b Mon Sep 17 00:00:00 2001 From: DSS Date: Fri, 12 Dec 2025 14:33:18 -0300 Subject: [PATCH] Unify MCP across clients; remove legacy plugin server --- .gitignore | 1 + .mcp.json | 17 - CLAUDE.md | 34 +- README.md | 187 +- admin-ui/AI-REFERENCE.md | 6 +- admin-ui/index-legacy.html | 381 --- admin-ui/src/workdesks/AdminWorkdesk.tsx | 4 +- apps/api/server.py | 560 ++-- apps/cli/python/api/server.py | 10 +- cli/python/api/server.py | 10 +- docs/README.md | 35 + docs/ai.md | 21 + docs/architecture.md | 48 + docs/configuration.md | 102 + docs/quickstart.md | 108 + docs/storage.md | 59 + docs/upgrade-notes.md | 61 + .../.claude-plugin/marketplace.json | 2 +- dss-claude-plugin/.claude-plugin/plugin.json | 2 +- dss-claude-plugin/commands/dss-init.md | 4 +- dss-claude-plugin/commands/dss-reset.md | 15 +- dss-claude-plugin/commands/dss-services.md | 23 +- dss-claude-plugin/commands/dss-setup.md | 38 - dss-claude-plugin/commands/dss-storybook.md | 3 +- dss-claude-plugin/core/config.py | 2 +- dss-claude-plugin/core/mcp_integration.py | 2 +- dss-claude-plugin/servers/dss-mcp-server.py | 2908 ----------------- .../skills/storybook-integration/SKILL.md | 6 +- dss-claude-plugin/verify_tools.py | 158 - dss-cli.py | 10 +- dss-temp-handover.md | 59 - dss/__init__.py | 5 +- dss/analyze/project_analyzer.py | 246 +- dss/auth/atlassian_auth.py | 87 +- dss/export_import/security.py | 16 +- dss/export_import/service.py | 51 +- dss/mcp/__init__.py | 24 + dss/mcp/config.py | 106 + dss/mcp/guides.py | 304 ++ dss/mcp/handler.py | 1051 ++++++ dss/mcp/server.py | 113 + dss/project/core.py | 23 +- dss/project/manager.py | 18 +- dss/services/project_manager.py | 6 +- dss/settings.py | 56 +- dss/status/dashboard.py | 66 +- dss/storage/json_store.py | 420 ++- dss/storybook/generator.py | 33 +- dss_mcp | 1 - scripts/dss | 28 +- scripts/dss-init.sh | 52 +- scripts/dss-mcp | 31 + scripts/dss-reset.sh | 28 +- scripts/dss-setup.sh.deprecated | 185 -- scripts/enable-mcp-clients.sh | 94 + scripts/setup-mcp.sh | 53 +- storybook/config.yaml | 17 +- storybook/package.json | 4 +- tests/test_atomic_dss.py | 58 +- tests/test_project_analyzer.py | 67 +- 60 files changed, 3451 insertions(+), 4668 deletions(-) delete mode 100644 .mcp.json delete mode 100755 admin-ui/index-legacy.html create mode 100644 docs/README.md create mode 100644 docs/ai.md create mode 100644 docs/architecture.md create mode 100644 docs/configuration.md create mode 100644 docs/quickstart.md create mode 100644 docs/storage.md create mode 100644 docs/upgrade-notes.md delete mode 100644 dss-claude-plugin/commands/dss-setup.md delete mode 100644 dss-claude-plugin/servers/dss-mcp-server.py delete mode 100644 dss-claude-plugin/verify_tools.py delete mode 100644 dss-temp-handover.md create mode 100644 dss/mcp/__init__.py create mode 100644 dss/mcp/config.py create mode 100644 dss/mcp/guides.py create mode 100644 dss/mcp/handler.py create mode 100644 dss/mcp/server.py delete mode 120000 dss_mcp create mode 100755 scripts/dss-mcp delete mode 100755 scripts/dss-setup.sh.deprecated create mode 100755 scripts/enable-mcp-clients.sh diff --git a/.gitignore b/.gitignore index aec7ce2..011ad22 100644 --- a/.gitignore +++ b/.gitignore @@ -66,6 +66,7 @@ storybook-static/ # Local MCP config (generated by dss-init.sh) .mcp.json .claude/mcp.json +.gemini/settings.json # ============================================================================= # DSS GENERATED FILES - Created by /dss-init, not committed diff --git a/.mcp.json b/.mcp.json 
deleted file mode 100644 index e3ad100..0000000 --- a/.mcp.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json", - "mcpServers": { - "dss": { - "command": "/Users/bsarlo/Documents/SoFi/dss/.venv/bin/python3", - "args": ["/Users/bsarlo/Documents/SoFi/dss/dss-claude-plugin/servers/dss-mcp-server.py"], - "env": { - "PYTHONPATH": "/Users/bsarlo/Documents/SoFi/dss:/Users/bsarlo/Documents/SoFi/dss/dss-claude-plugin", - "DSS_HOME": "/Users/bsarlo/Documents/SoFi/dss/.dss", - "DSS_DATABASE": "/Users/bsarlo/Documents/SoFi/dss/.dss/dss.db", - "DSS_CACHE": "/Users/bsarlo/Documents/SoFi/dss/.dss/cache", - "DSS_BASE_PATH": "/Users/bsarlo/Documents/SoFi/dss" - }, - "description": "Design System Server MCP - local development" - } - } -} diff --git a/CLAUDE.md b/CLAUDE.md index a4bb694..a040187 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,5 +1,11 @@ # DSS - Design System Server +## Human Docs (start here) + +- Overview + setup: `docs/README.md` +- Quickstart: `docs/quickstart.md` +- Architecture: `docs/architecture.md` + ## FIRST: Enable DSS MCP Tools **Before starting work, check if DSS tools are available.** If you don't see `dss_*` tools in your available MCP tools, run: @@ -10,20 +16,20 @@ Then tell the user: "I've configured the DSS MCP server. Please restart Claude Code to load the DSS tools, then continue this conversation." -The DSS MCP server provides 40+ design system tools for Figma sync, token management, component generation, and project analysis. +The DSS MCP server provides core `dss_*` tools for project analysis, token work, and Storybook automation. ### Manual verification Check if `.claude/mcp.json` exists and has valid paths: - `command` should point to `.venv/bin/python3` (must exist) -- `args` should point to `dss-claude-plugin/servers/dss-mcp-server.py` (must exist) +- `args` should be `["-m","dss.mcp.server"]` ## Project Structure ``` dss/ ├── dss/ # Core Python library -│ ├── mcp_server/ # MCP server implementation +│ ├── mcp/ # Shared MCP tool layer + local MCP server │ ├── analyze/ # Code analysis tools │ ├── ingest/ # Token ingestion │ ├── figma/ # Figma integration @@ -33,8 +39,7 @@ dss/ │ ├── api/ # FastAPI server (port 6220) │ └── cli/ # TypeScript CLI ├── admin-ui/ # Admin dashboard (port 6221) -├── dss-claude-plugin/ # Claude Code MCP plugin -│ └── servers/ # MCP server scripts +├── dss-claude-plugin/ # Claude Code plugin assets (commands/skills) └── scripts/ # Setup & utility scripts ``` @@ -75,7 +80,8 @@ cd admin-ui && npm run dev ## Key Files -- `dss/mcp_server/handler.py` - MCP tool execution handler +- `dss/mcp/handler.py` - MCP tool registry + execution +- `dss/mcp/server.py` - Local MCP stdio server (`python -m dss.mcp.server`) - `dss/storage/json_store.py` - JSON-based data storage - `apps/api/server.py` - FastAPI server - `.claude/mcp.json` - Local MCP configuration (generated) @@ -96,10 +102,19 @@ If `/mcp` shows "Failed to reconnect to dss", check: 2. **MCP config paths are valid**: Check `.claude/mcp.json` points to existing files: - `.venv/bin/python3` must exist - - `dss-claude-plugin/servers/dss-mcp-server.py` must exist + - `args` should be `["-m","dss.mcp.server"]` 3. **Restart Claude Code** after fixing any configuration issues +### Proxying tools to a headless DSS server + +To run the MCP process locally but execute tools on a remote/headless server, set `DSS_API_URL` in the MCP env. 
+ +Example: +```bash +./scripts/setup-mcp.sh --api-url https://dss.example.com +``` + ### Disabling unwanted MCP servers MCP servers can be configured in multiple locations. Check all of these: @@ -109,7 +124,6 @@ MCP servers can be configured in multiple locations. Check all of these: | `~/.claude/mcp.json` | Claude Code (global) | | `~/.config/claude/claude_desktop_config.json` | Claude Desktop app | | `.claude/mcp.json` (project) | Claude Code (project-specific) | -| `../.mcp.json` | Parent directory inheritance | To disable a server, remove its entry from the relevant config file and restart Claude Code. @@ -120,5 +134,5 @@ If you see repeated `MCP server "figma": No token data found` errors, the figma ## Notes - DSS uses JSON-based storage, not SQL database -- The `dss/mcp_server/` directory was renamed from `dss/mcp/` to avoid shadowing the pip `mcp` package -- Integration configs (Figma, Jira, etc.) are stored encrypted when database is configured +- `dss.mcp` is an internal DSS module; it does not shadow the upstream `mcp` package +- Integration configs can be stored encrypted when `DSS_MCP_ENCRYPTION_KEY` is configured diff --git a/README.md b/README.md index f0628bc..39466dd 100644 --- a/README.md +++ b/README.md @@ -1,158 +1,95 @@ -# DSS - Design System Server +# DSS (Design System Server) -Monolithic design system platform. Ingest tokens from Figma/CSS/SCSS/Tailwind, normalize to canonical format, generate outputs. +DSS is a design-system toolkit that works both as: -## Quick Start +- a **local developer tool** (run analysis and generation across many repos), and +- a **headless server** (so UX/QA/Admin teams can use the web Admin UI and AI-assisted workflows without a local dev environment). + +## What DSS does + +- Ingest tokens from **Figma / CSS / SCSS / Tailwind**, normalize them, and generate outputs +- Analyze codebases (components, styles, dependency graph, quick wins) +- Automate Storybook setup (scan/generate/configure) +- Expose a consistent set of `dss_*` tools via: + - local MCP (Claude Code) + - headless server MCP endpoints (`/api/mcp/*`) + - Claude chat/tool-calling (`/api/claude/chat`) + +## Docs + +Human docs live in `docs/README.md`. + +AI/agent-oriented docs live in `docs/ai.md` (entry points include `CLAUDE.md` and `admin-ui/AI-REFERENCE.md`). + +## Quickstart (local + server) + +### 1) Python setup ```bash -# 1. Create Python virtual environment python3 -m venv .venv source .venv/bin/activate pip install -r requirements.txt +``` -# 2. Generate MCP config for Claude Code -./scripts/setup-mcp.sh +### 2) Run the headless API -# 3. Start services +```bash +source .venv/bin/activate PYTHONPATH="$PWD:$PWD/apps/api" uvicorn apps.api.server:app --host 0.0.0.0 --port 6220 ``` -## Claude Code Plugin Integration +If you want a single-port server that serves the built Admin UI too, see `docs/quickstart.md` (Option B) or run `./scripts/dss start` after building `admin-ui`. -DSS integrates with Claude Code as a **plugin** that provides MCP tools, slash commands, skills, and agents. 
- -### Installation - -**Step 1: Set up the Python environment** +### 3) Run the Admin UI (dev mode) ```bash -python3 -m venv .venv -source .venv/bin/activate -pip install -r requirements.txt +cd admin-ui +npm install +npm run dev ``` -**Step 2: Run the setup script** +Admin UI: `http://localhost:6221` +API: `http://localhost:6220` + +### 4) Run analysis on any project + +```bash +./dss-cli.py analyze --project-path /absolute/path/to/your-project +``` + +This writes `/.dss/analysis_graph.json` (portable JSON output). + +### 5) Claude Code MCP setup ```bash ./scripts/setup-mcp.sh ``` -**Step 3: Add the DSS marketplace and install the plugin** - -In Claude Code, run: - -``` -/plugin marketplace add /path/to/dss/dss-claude-plugin -``` - -Replace `/path/to/dss` with your actual DSS installation path. - -Then install the plugin: - -``` -/plugin install dss-claude-plugin@dss -``` - -**Alternative: Manual configuration** - -Add to your `~/.claude/settings.json`: - -```json -{ - "extraKnownMarketplaces": { - "dss": { - "source": { - "source": "directory", - "path": "/path/to/dss/dss-claude-plugin" - } - } - }, - "enabledPlugins": { - "dss-claude-plugin@dss": true - } -} -``` - -**Step 4: Restart Claude Code** completely (quit and reopen) - -### Verification - -After restart, verify the plugin is loaded: - -1. Run `/mcp` - DSS server should appear in the list -2. If DSS shows as disconnected, select it to enable -3. DSS tools will be available as `dss_*` functions - -### Troubleshooting - -**Plugin not found error in debug logs?** - -The plugin must be discoverable. Ensure the path in `.claude/mcp.json` points to valid files: +Enable MCP for Claude + Codex + Gemini (when installed): ```bash -# Verify paths exist -ls -la .venv/bin/python3 -ls -la dss-claude-plugin/servers/dss-mcp-server.py +./scripts/enable-mcp-clients.sh ``` -**DSS server not connecting?** +See `docs/configuration.md` for proxy mode (`--api-url`) and environment variables. -Add DSS to your global MCP config (`~/.claude/mcp.json`): +## Storage (JSON-only) -```json -{ - "mcpServers": { - "dss": { - "command": "/path/to/dss/.venv/bin/python3", - "args": ["/path/to/dss/dss-claude-plugin/servers/dss-mcp-server.py"], - "env": { - "PYTHONPATH": "/path/to/dss:/path/to/dss/dss-claude-plugin", - "DSS_HOME": "/path/to/dss/.dss", - "DSS_BASE_PATH": "/path/to/dss" - } - } - } -} -``` +By default DSS stores data under: +- `DSS_HOME` (if set), else +- `./.dss` (if present), else +- `~/.dss` -**Test the MCP server manually:** +See `docs/storage.md` for layout and guidance on what to commit. 
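As a rough sketch of that lookup order (illustrative only — the actual resolution lives in `dss/storage/json_store.py` and may differ in detail):

```python
import os
from pathlib import Path

def resolve_dss_home() -> Path:
    """Sketch of the documented DSS_HOME resolution order."""
    explicit = os.environ.get("DSS_HOME")
    if explicit:                        # 1. explicit override (server mode)
        return Path(explicit)
    project_local = Path.cwd() / ".dss"
    if project_local.exists():          # 2. project-local storage, if present
        return project_local
    return Path.home() / ".dss"         # 3. shared per-user fallback
```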
-```bash -source .venv/bin/activate -PYTHONPATH="$PWD:$PWD/dss-claude-plugin" \ - python3 dss-claude-plugin/servers/dss-mcp-server.py -``` - -**Check debug logs:** - -```bash -cat ~/.claude/debug/latest | grep -i "dss\|plugin" -``` - -### Available Tools - -Once connected, DSS provides tools prefixed with `dss_`: -- `dss_figma_*` - Figma integration and token extraction -- `dss_token_*` - Design token management -- `dss_component_*` - Component generation -- `dss_project_*` - Project analysis - -## Structure +## Repo layout ``` -tools/ # Python backend (API, ingestion, analysis) -admin-ui/ # Web dashboard -cli/ # TypeScript CLI -dss-claude-plugin/ # Claude Code integration (skills, commands, agents) -.knowledge/ # AI knowledge base (DSS_CORE.json) -.dss/ # Runtime data, schemas, database +dss/ # Core Python library (analysis/ingest/storage/mcp) +apps/api/ # FastAPI headless server +admin-ui/ # Preact Admin UI (Vite dev server + build output) +dss-claude-plugin/ # Claude Code plugin assets (commands/skills) +scripts/ # Setup and operational scripts +docs/ # Human documentation +.knowledge/ # Internal knowledge base (AI-oriented) ``` - -## Core Concept - -DSS structure is immutable. External systems adapt TO DSS via translation dictionaries. - -See `.knowledge/DSS_CORE.json` for complete specification. - -# Test Commit to Verify Hooks -\n- CI/CD Verification Run diff --git a/admin-ui/AI-REFERENCE.md b/admin-ui/AI-REFERENCE.md index b168cd8..2e9a85e 100644 --- a/admin-ui/AI-REFERENCE.md +++ b/admin-ui/AI-REFERENCE.md @@ -1,7 +1,11 @@ # DSS Admin UI - AI Reference Documentation +Human-facing docs live in `docs/README.md` (start there). This document is an AI-oriented reference for Admin UI structure and API usage. + ## Overview -The DSS Admin UI is a Preact + Signals application that provides a team-centric dashboard for managing design system operations. It connects to the DSS backend API (FastAPI server running on port 8002). +The DSS Admin UI is a Preact + Signals application that provides a team-centric dashboard for managing design system operations. It connects to the DSS backend API (FastAPI server; typically `:6220` in dev setups). + +In development, the UI usually runs on `:6221` (Vite) and proxies `/api/*` to the API server. In server/headless mode, the API can serve the built UI from `admin-ui/dist/` on the same port. ## Technology Stack - **Framework**: Preact 10.x (~3KB) diff --git a/admin-ui/index-legacy.html b/admin-ui/index-legacy.html deleted file mode 100755 index cf963d5..0000000 --- a/admin-ui/index-legacy.html +++ /dev/null @@ -1,381 +0,0 @@ - - - - - - Design System Server - - - - - - - - - - - - - - - - - - - -
- - - - - - - - diff --git a/admin-ui/src/workdesks/AdminWorkdesk.tsx b/admin-ui/src/workdesks/AdminWorkdesk.tsx index e56dbfe..7fa2f64 100644 --- a/admin-ui/src/workdesks/AdminWorkdesk.tsx +++ b/admin-ui/src/workdesks/AdminWorkdesk.tsx @@ -37,9 +37,9 @@ function SettingsTool() { const [testing, setTesting] = useState(false); const [config, setConfig] = useState({ server_host: 'localhost', - server_port: 8002, + server_port: 6220, figma_token: '', - storybook_url: 'http://localhost:6006' + storybook_url: 'http://localhost:6226' }); const [figmaStatus, setFigmaStatus] = useState<{ configured: boolean } | null>(null); const [testResult, setTestResult] = useState<{ success: boolean; message: string } | null>(null); diff --git a/apps/api/server.py b/apps/api/server.py index c5eab2f..d7efc24 100644 --- a/apps/api/server.py +++ b/apps/api/server.py @@ -25,7 +25,7 @@ from pathlib import Path from typing import Any, Dict, List, Optional from dotenv import load_dotenv -from fastapi import BackgroundTasks, Depends, FastAPI, Header, HTTPException, Query +from fastapi import BackgroundTasks, Body, Depends, FastAPI, Header, HTTPException, Query from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles @@ -45,6 +45,7 @@ from dss.storage.json_store import ( Cache, CodeMetrics, Components, + ESREDefinitions, FigmaFiles, IntegrationHealth, Integrations, @@ -52,6 +53,7 @@ from dss.storage.json_store import ( SyncHistory, Teams, TestResults, + TokenDriftDetector, get_stats, ) @@ -59,6 +61,12 @@ from dss.storage.json_store import ( _server_file = Path(__file__).resolve() _project_root = _server_file.parent.parent.parent # /home/.../dss +# Admin UI static serving (production-like) +# - In dev, run `admin-ui` via Vite (`npm run dev`) and use its `/api` proxy. +# - In headless/server mode, serve the built UI bundle from `admin-ui/dist/`. +_admin_ui_dist = _project_root / "admin-ui" / "dist" +_admin_ui_dist_index = _admin_ui_dist / "index.html" + # Try loading from multiple possible .env locations env_paths = [ _project_root / ".env", # root .env (primary) @@ -134,7 +142,11 @@ class _ConfigCompat: "env": settings.SERVER_ENV, "log_level": settings.LOG_LEVEL, }, - "database": {"path": str(settings.DATABASE_PATH)}, + "storage": { + "type": "json", + "dss_home": str(settings.DSS_HOME), + "data_dir": str(settings.DATA_DIR), + }, } @@ -226,7 +238,7 @@ runtime_config = RuntimeConfig() config_service = ConfigService() project_manager = ProjectManager(Projects, config_service) -# Ensure database schema is up to date (adds root_path column if missing) +# Legacy compatibility hook: JSON storage needs no migrations. ProjectManager.ensure_schema() @@ -419,7 +431,7 @@ async def login(request: LoginRequest): Authenticate with Atlassian credentials. Validates credentials against Jira or Confluence API, - creates/updates user in database, returns JWT token. + creates/updates user in JSON storage, returns JWT token. 
""" try: auth = get_auth() @@ -451,10 +463,19 @@ async def get_me(user: Dict[str, Any] = Depends(get_current_user)): @app.get("/") async def root(): - """Redirect to Admin UI dashboard.""" - from fastapi.responses import RedirectResponse + """Serve the Admin UI (when built) or show setup guidance.""" + if _admin_ui_dist_index.exists(): + from fastapi.responses import RedirectResponse - return RedirectResponse(url="/admin-ui/index.html") + return RedirectResponse(url="/index.html") + + return JSONResponse( + status_code=200, + content={ + "status": "ok", + "message": "Admin UI is not built. Run `cd admin-ui && npm run build` (or `npm run dev` for development).", + }, + ) @app.get("/health") @@ -494,7 +515,7 @@ async def health(): if str(project_root) not in sys.path: sys.path.insert(0, str(project_root)) - from dss.mcp_server.handler import get_mcp_handler + from dss.mcp.handler import get_mcp_handler handler = get_mcp_handler() mcp_ok = handler is not None except Exception as e: @@ -558,25 +579,18 @@ async def receive_browser_logs(logs: dict): log_file = browser_logs_dir / f"{session_id}.json" log_file.write_text(json.dumps(logs, indent=2)) - # Log to activity (skip if ActivityLog not available) + # Log to activity (JSON store) try: - with get_connection() as conn: - conn.execute( - """ - INSERT INTO activity_log (category, action, details, metadata, created_at) - VALUES (?, ?, ?, ?, ?) - """, - ( - "debug", - "browser_logs_received", - f"Received browser logs for session {session_id}", - json.dumps({"session_id": session_id, "log_count": len(logs.get("logs", []))}), - datetime.utcnow().isoformat(), - ), - ) - conn.commit() - except: - pass # Activity logging is optional + ActivityLog.log( + action="browser_logs_received", + entity_type="browser_logs", + entity_id=session_id, + description=f"Received browser logs for session {session_id}", + category="debug", + details={"session_id": session_id, "log_count": len(logs.get("logs", []))}, + ) + except Exception: + pass # Activity logging is best-effort # Check for errors and create notification task error_count = logs.get("diagnostic", {}).get("errorCount", 0) @@ -651,7 +665,7 @@ async def get_debug_diagnostic(): - Health status (from /health endpoint) - Browser log session count - API uptime - - Database size and stats + - Storage size and stats - Memory usage - Recent errors """ @@ -668,31 +682,34 @@ async def get_debug_diagnostic(): browser_logs_dir.mkdir(parents=True, exist_ok=True) browser_sessions = len(list(browser_logs_dir.glob("*.json"))) - # Get database size - db_path = Path(__file__).parent.parent.parent / ".dss" / "dss.db" - db_size_bytes = db_path.stat().st_size if db_path.exists() else 0 + from dss.storage.json_store import DATA_DIR, ActivityLog, get_stats + + storage_stats = get_stats() # Get process stats process = psutil.Process(os.getpid()) memory_info = process.memory_info() - # Get recent errors from activity log + # Get recent errors from activity log (JSON) + recent_errors: List[Dict[str, Any]] = [] try: - with get_connection() as conn: - recent_errors = conn.execute( - """ - SELECT category, action, details, created_at - FROM activity_log - WHERE category = 'error' OR action LIKE '%error%' OR action LIKE '%fail%' - ORDER BY created_at DESC - LIMIT 10 - """ - ).fetchall() - recent_errors = [ - {"category": row[0], "action": row[1], "details": row[2], "timestamp": row[3]} - for row in recent_errors - ] - except: + candidates = ActivityLog.search(days=7, limit=200) + for r in candidates: + action = (r.get("action") or 
"").lower() + severity = (r.get("severity") or "").lower() + if severity in {"error", "critical"} or "error" in action or "fail" in action: + recent_errors.append( + { + "category": r.get("category"), + "action": r.get("action"), + "details": r.get("details"), + "timestamp": r.get("timestamp"), + "severity": r.get("severity"), + } + ) + if len(recent_errors) >= 10: + break + except Exception: recent_errors = [] return { @@ -700,10 +717,10 @@ async def get_debug_diagnostic(): "timestamp": datetime.utcnow().isoformat() + "Z", "health": health_status, "browser": {"session_count": browser_sessions, "logs_directory": str(browser_logs_dir)}, - "database": { - "size_bytes": db_size_bytes, - "size_mb": round(db_size_bytes / 1024 / 1024, 2), - "path": str(db_path), + "storage": { + "type": "json", + "path": str(DATA_DIR), + "stats": storage_stats, }, "process": { "pid": os.getpid(), @@ -793,10 +810,12 @@ async def get_config(): @app.get("/api/stats") async def get_statistics(): - """Get database and system statistics.""" - db_stats = get_stats() + """Get storage and system statistics.""" + storage_stats = get_stats() return { - "database": db_stats, + "storage": storage_stats, + # Backwards-compatible alias (historical naming; underlying storage is JSON files). + "database": storage_stats, "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured}, } @@ -1469,97 +1488,80 @@ async def get_storybook_status(): @app.post("/api/storybook/init") -async def init_storybook(request_data: Dict[str, Any] = None): +async def init_storybook(request_data: Dict[str, Any] = Body(default_factory=dict)): """ - Initialize Storybook with design system components. + Initialize Storybook stories for a project. - Clears existing generated stories and generates new ones from - the specified component source path. + Clears previously auto-generated stories and regenerates them using the + shared DSS StoryGenerator. 
Request body (optional): - source_path: Path to components directory (defaults to configured path) + - project_id: DSS project id (recommended for headless server mode) + - path: absolute path to the project directory (local/dev mode) Returns: JSON with generation status and count """ - import shutil - import sys - try: - # Get paths - dss_mvp1_path = Path(__file__).parent.parent.parent / "dss-mvp1" - generated_dir = dss_mvp1_path / "stories" / "generated" + from dss.storage.json_store import Projects + from dss.storybook.generator import StoryGenerator, StoryTemplate - # Default source path - can be overridden in request - source_path = dss_mvp1_path / "dss" / "components" - if request_data and request_data.get("source_path"): - # Validate path is within allowed directories - requested_path = Path(request_data["source_path"]).resolve() - if not str(requested_path).startswith(str(dss_mvp1_path.resolve())): - raise HTTPException(status_code=400, detail="Source path must be within dss-mvp1") - source_path = requested_path - - # Step 1: Clear existing generated stories - if generated_dir.exists(): - for item in generated_dir.iterdir(): - if item.name != ".gitkeep": - if item.is_dir(): - shutil.rmtree(item) - else: - item.unlink() + # Resolve project root (prefer project_id in headless mode) + project_root = None + if request_data.get("project_id"): + project = Projects.get(request_data["project_id"]) + if not project: + raise HTTPException(status_code=404, detail="Project not found") + project_root = project.get("root_path") + elif request_data.get("path"): + project_root = request_data["path"] else: - generated_dir.mkdir(parents=True, exist_ok=True) + # Default: first registered project with a root_path, else repo admin-ui. + for project in Projects.list(): + if project.get("root_path"): + project_root = project["root_path"] + break + project_root = project_root or str(_project_root / "admin-ui") - # Step 2: Generate stories using StoryGenerator - stories_generated = 0 - errors = [] + root = Path(project_root).resolve() + if not root.exists(): + raise HTTPException(status_code=400, detail=f"Project path not found: {root}") - # Add dss-mvp1 to path for imports - sys.path.insert(0, str(dss_mvp1_path)) + # Clear previously auto-generated stories (do NOT touch hand-written stories) + marker = "Auto-generated by DSS Storybook Generator" + cleared = 0 + for pattern in ["**/*.stories.tsx", "**/*.stories.jsx", "**/*.stories.js"]: + for story_path in root.rglob(pattern): + if any(skip in story_path.parts for skip in {"node_modules", ".git", "dist", "build"}): + continue + try: + if marker in story_path.read_text(encoding="utf-8", errors="ignore"): + story_path.unlink() + cleared += 1 + except Exception: + continue - try: - from dss.storybook.generator import StoryGenerator, StoryTemplate + generator = StoryGenerator(str(root)) + results = generator.generate(template=StoryTemplate.CSF3, dry_run=False) - generator = StoryGenerator(str(dss_mvp1_path)) - - # Check if source path exists and has components - if source_path.exists(): - results = await generator.generate_stories_for_directory( - str(source_path.relative_to(dss_mvp1_path)), - template=StoryTemplate.CSF3, - dry_run=False, - ) - - # Move generated stories to stories/generated/ - for result in results: - if "story" in result and "error" not in result: - story_filename = Path(result["component"]).stem + ".stories.js" - output_path = generated_dir / story_filename - output_path.write_text(result["story"]) - stories_generated += 1 - elif "error" in 
result: - errors.append(result) - else: - # No components yet - that's okay, Storybook will show welcome - pass - - except ImportError as e: - # StoryGenerator not available - log but don't fail - errors.append({"error": f"StoryGenerator import failed: {str(e)}"}) - finally: - # Clean up path - if str(dss_mvp1_path) in sys.path: - sys.path.remove(str(dss_mvp1_path)) + stories_generated = len([r for r in results if r.get("written")]) + errors = [r for r in results if r.get("error")] ActivityLog.log( action="storybook_initialized", entity_type="storybook", - details={"stories_generated": stories_generated, "errors_count": len(errors)}, + details={ + "project_path": str(root), + "stories_generated": stories_generated, + "cleared": cleared, + "errors_count": len(errors), + }, ) return { "success": True, "stories_generated": stories_generated, + "cleared": cleared, "message": f"Generated {stories_generated} stories" if stories_generated > 0 else "Storybook initialized (no components found)", @@ -1576,32 +1578,51 @@ async def init_storybook(request_data: Dict[str, Any] = None): @app.delete("/api/storybook/stories") -async def clear_storybook_stories(): +async def clear_storybook_stories(request_data: Dict[str, Any] = Body(default_factory=dict)): """ Clear all generated stories from Storybook. Returns Storybook to blank state (only Welcome page). """ - import shutil - try: - dss_mvp1_path = Path(__file__).parent.parent.parent / "dss-mvp1" - generated_dir = dss_mvp1_path / "stories" / "generated" + from dss.storage.json_store import Projects + project_root = None + if request_data.get("project_id"): + project = Projects.get(request_data["project_id"]) + if not project: + raise HTTPException(status_code=404, detail="Project not found") + project_root = project.get("root_path") + elif request_data.get("path"): + project_root = request_data["path"] + else: + for project in Projects.list(): + if project.get("root_path"): + project_root = project["root_path"] + break + project_root = project_root or str(_project_root / "admin-ui") + + root = Path(project_root).resolve() + if not root.exists(): + raise HTTPException(status_code=400, detail=f"Project path not found: {root}") + + marker = "Auto-generated by DSS Storybook Generator" cleared_count = 0 - if generated_dir.exists(): - for item in generated_dir.iterdir(): - if item.name != ".gitkeep": - if item.is_dir(): - shutil.rmtree(item) - else: - item.unlink() - cleared_count += 1 + for pattern in ["**/*.stories.tsx", "**/*.stories.jsx", "**/*.stories.js"]: + for story_path in root.rglob(pattern): + if any(skip in story_path.parts for skip in {"node_modules", ".git", "dist", "build"}): + continue + try: + if marker in story_path.read_text(encoding="utf-8", errors="ignore"): + story_path.unlink() + cleared_count += 1 + except Exception: + continue ActivityLog.log( action="storybook_cleared", entity_type="storybook", - details={"cleared_count": cleared_count}, + details={"cleared_count": cleared_count, "project_path": str(root)}, ) return { @@ -1900,44 +1921,10 @@ async def execute_ingestion( tokens_extracted = 0 if method == "npm" and system: - # Import existing token ingestion tools - sys.path.insert(0, str(Path(__file__).parent.parent.parent / "dss-mvp1")) - - try: - from dss.ingest import TokenCollection - - # Create a token collection for this design system - collection = TokenCollection(name=system.name) - - # Based on primary ingestion method, use appropriate source - if system.primary_ingestion.value == "css_variables": - if system.css_cdn_url: - # 
Fetch CSS from CDN and parse - import httpx - - async with httpx.AsyncClient() as client: - resp = await client.get(system.css_cdn_url) - if resp.status_code == 200: - from dss.ingest.css import CSSTokenSource - - # Write temp file and parse - temp_css = Path("/tmp") / f"{system.id}_tokens.css" - temp_css.write_text(resp.text) - source = CSSTokenSource(str(temp_css)) - source.parse() - collection.merge(source.tokens) - tokens_extracted = len(collection.tokens) - - elif system.primary_ingestion.value == "tailwind_config": - # For Tailwind-based systems, we'll need their config - tokens_extracted = 0 # Placeholder for Tailwind parsing - - except ImportError: - # Token ingestion module not available - pass - finally: - if str(Path(__file__).parent.parent.parent / "dss-mvp1") in sys.path: - sys.path.remove(str(Path(__file__).parent.parent.parent / "dss-mvp1")) + # MVP: npm ingestion is not implemented yet. + # Prefer using the dedicated ingest endpoints (/api/ingest/npm/*) to + # discover packages, then add a concrete extraction strategy per system. + tokens_extracted = 0 elif method == "figma" and source_url: # Use existing Figma extraction @@ -1947,23 +1934,13 @@ async def execute_ingestion( elif method == "css" and source_url: # Fetch and parse CSS import httpx + from dss.ingest.css import CSSTokenSource - sys.path.insert(0, str(Path(__file__).parent.parent.parent / "dss-mvp1")) - - try: - async with httpx.AsyncClient() as client: - resp = await client.get(source_url) - if resp.status_code == 200: - from dss.ingest.css import CSSTokenSource - - temp_css = Path("/tmp") / "ingested_tokens.css" - temp_css.write_text(resp.text) - source = CSSTokenSource(str(temp_css)) - source.parse() - tokens_extracted = len(source.tokens.tokens) - finally: - if str(Path(__file__).parent.parent.parent / "dss-mvp1") in sys.path: - sys.path.remove(str(Path(__file__).parent.parent.parent / "dss-mvp1")) + async with httpx.AsyncClient(timeout=30.0) as client: + resp = await client.get(source_url) + resp.raise_for_status() + collection = await CSSTokenSource().extract(resp.text) + tokens_extracted = len(collection.tokens) ActivityLog.log( action="ingestion_executed", @@ -2051,7 +2028,7 @@ async def set_mode(request_data: Dict[str, Any]): @app.post("/api/system/reset") async def reset_dss(request_data: Dict[str, Any]): """ - Reset DSS to fresh state by calling the reset command in dss-mvp1. + Reset DSS to fresh state by calling the built-in reset command. Requires confirmation. 
""" @@ -2061,13 +2038,12 @@ async def reset_dss(request_data: Dict[str, Any]): raise HTTPException(status_code=400, detail="Must confirm with 'RESET'") try: - # Path to dss-mvp1 directory - dss_mvp1_path = Path(__file__).parent.parent.parent / "dss-mvp1" + repo_root = Path(__file__).resolve().parent.parent.parent # Run the reset command result = subprocess.run( ["python3", "-m", "dss.settings", "reset", "--no-confirm"], - cwd=str(dss_mvp1_path), + cwd=str(repo_root), capture_output=True, text=True, timeout=60, @@ -2229,6 +2205,7 @@ async def record_token_drift(project_id: str, drift: TokenDriftCreate): raise HTTPException(status_code=404, detail="Project not found") created = TokenDriftDetector.record_drift( + project_id=project_id, component_id=drift.component_id, property_name=drift.property_name, hardcoded_value=drift.hardcoded_value, @@ -2251,15 +2228,18 @@ async def record_token_drift(project_id: str, drift: TokenDriftCreate): @app.put("/api/projects/{project_id}/token-drift/{drift_id}/status") -async def update_drift_status(project_id: str, drift_id: int, status: str): +async def update_drift_status( + project_id: str, drift_id: str, payload: Dict[str, Any] = Body(default_factory=dict) +): """Update token drift status: pending, fixed, ignored (UI Dashboard).""" if not Projects.get(project_id): raise HTTPException(status_code=404, detail="Project not found") + status = payload.get("status") or payload.get("status", "") if status not in ["pending", "fixed", "ignored"]: raise HTTPException(status_code=400, detail="Invalid status") - updated = TokenDriftDetector.update_status(drift_id, status) + updated = TokenDriftDetector.update_status(project_id=project_id, drift_id=drift_id, status=status) if not updated: raise HTTPException(status_code=404, detail="Drift issue not found") @@ -2314,12 +2294,13 @@ async def create_esre_definition(project_id: str, esre: ESRECreate): @app.put("/api/projects/{project_id}/esre/{esre_id}") -async def update_esre_definition(project_id: str, esre_id: int, updates: ESRECreate): +async def update_esre_definition(project_id: str, esre_id: str, updates: ESRECreate): """Update an ESRE definition (QA Dashboard).""" if not Projects.get(project_id): raise HTTPException(status_code=404, detail="Project not found") updated = ESREDefinitions.update( + project_id=project_id, esre_id=esre_id, name=updates.name, definition_text=updates.definition_text, @@ -2343,12 +2324,12 @@ async def update_esre_definition(project_id: str, esre_id: int, updates: ESRECre @app.delete("/api/projects/{project_id}/esre/{esre_id}") -async def delete_esre_definition(project_id: str, esre_id: int): +async def delete_esre_definition(project_id: str, esre_id: str): """Delete an ESRE definition (QA Dashboard).""" if not Projects.get(project_id): raise HTTPException(status_code=404, detail="Project not found") - if not ESREDefinitions.delete(esre_id): + if not ESREDefinitions.delete(project_id=project_id, esre_id=esre_id): raise HTTPException(status_code=404, detail="ESRE definition not found") ActivityLog.log( @@ -2424,11 +2405,13 @@ async def claude_chat(request_data: ClaudeChatRequest): "model": "error", } - # Import MCP handler (may fail if database not migrated) + # Import MCP handler (optional; tools disabled if unavailable) mcp_handler = None MCPContext = None try: - from dss_mcp.handler import get_mcp_handler, MCPContext as _MCPContext + from dss.mcp.handler import MCPContext as _MCPContext + from dss.mcp.handler import get_mcp_handler + MCPContext = _MCPContext mcp_handler = 
get_mcp_handler() except Exception as e: @@ -2490,10 +2473,7 @@ CURRENT PROJECT CONTEXT: # Create MCP context (or None if MCP not available) mcp_context = None if MCPContext is not None: - mcp_context = MCPContext( - project_id=project_id, - user_id=user_id - ) + mcp_context = MCPContext(project_id=project_id, user_id=user_id) # Call AI provider with all context result = await provider.chat( @@ -2538,85 +2518,12 @@ async def execute_mcp_tool(tool_name: str, params: Dict[str, Any] = {}): Calls the MCP server running on port 3457. """ try: - # Import MCP server functions - from mcp_server import ( - analyze_react_components, - analyze_style_values, - build_source_graph, - check_naming_consistency, - create_project, - discover_project, - export_tokens, - extract_components, - extract_tokens, - find_inline_styles, - find_style_patterns, - find_unused_styles, - generate_component_code, - generate_stories_batch, - generate_story, - generate_storybook_theme, - get_activity, - get_project, - get_quick_wins, - get_quick_wins_report, - get_status, - get_story_coverage, - get_sync_history, - ingest_css_tokens, - ingest_json_tokens, - ingest_scss_tokens, - ingest_tailwind_tokens, - list_projects, - merge_tokens, - scan_storybook, - sync_tokens_to_file, - validate_tokens, - ) + # Legacy endpoint: forward to unified MCP handler. + from dss.mcp.handler import MCPContext, get_mcp_handler - # Map tool names to functions - tool_map = { - "get_status": get_status, - "list_projects": list_projects, - "create_project": create_project, - "get_project": get_project, - "extract_tokens": extract_tokens, - "extract_components": extract_components, - "generate_component_code": generate_component_code, - "sync_tokens_to_file": sync_tokens_to_file, - "get_sync_history": get_sync_history, - "get_activity": get_activity, - "ingest_css_tokens": ingest_css_tokens, - "ingest_scss_tokens": ingest_scss_tokens, - "ingest_tailwind_tokens": ingest_tailwind_tokens, - "ingest_json_tokens": ingest_json_tokens, - "merge_tokens": merge_tokens, - "export_tokens": export_tokens, - "validate_tokens": validate_tokens, - "discover_project": discover_project, - "analyze_react_components": analyze_react_components, - "find_inline_styles": find_inline_styles, - "find_style_patterns": find_style_patterns, - "analyze_style_values": analyze_style_values, - "find_unused_styles": find_unused_styles, - "build_source_graph": build_source_graph, - "get_quick_wins": get_quick_wins, - "get_quick_wins_report": get_quick_wins_report, - "check_naming_consistency": check_naming_consistency, - "scan_storybook": scan_storybook, - "generate_story": generate_story, - "generate_stories_batch": generate_stories_batch, - "generate_storybook_theme": generate_storybook_theme, - "get_story_coverage": get_story_coverage, - } - - # Get the tool function - tool_func = tool_map.get(tool_name) - if not tool_func: - raise HTTPException(status_code=404, detail=f"Tool '{tool_name}' not found") - - # Execute tool - result = await tool_func(**params) + handler = get_mcp_handler() + context = MCPContext(project_id=params.get("project_id"), user_id=params.get("user_id")) + result = await handler.execute_tool(tool_name=tool_name, arguments=params or {}, context=context) # Log execution ActivityLog.log( @@ -2626,7 +2533,7 @@ async def execute_mcp_tool(tool_name: str, params: Dict[str, Any] = {}): details={"params": list(params.keys())}, ) - return JSONResponse(content={"success": True, "result": result}) + return JSONResponse(content=result.to_dict()) except Exception as e: 
ActivityLog.log( @@ -2645,7 +2552,7 @@ class IntegrationCreate(BaseModel): """Create/Update integration configuration.""" integration_type: str # figma, jira, confluence, sequential-thinking - config: Dict[str, Any] # Encrypted in database + config: Dict[str, Any] # Encrypted at rest when DSS_MCP_ENCRYPTION_KEY is configured enabled: bool = True @@ -2697,7 +2604,7 @@ async def create_integration( if not Projects.get(project_id): raise HTTPException(status_code=404, detail="Project not found") - from dss_mcp.config import mcp_config + from dss.mcp.config import mcp_config # Encrypt config config_json = json.dumps(integration.config) @@ -2744,7 +2651,7 @@ async def update_integration( if not Projects.get(project_id): raise HTTPException(status_code=404, detail="Project not found") - from dss_mcp.config import mcp_config + from dss.mcp.config import mcp_config try: encrypted_config = None @@ -2811,7 +2718,7 @@ async def list_mcp_tools( include_details: bool = Query(False, description="Include full tool schemas"), ): """List all available MCP tools via unified handler.""" - from dss_mcp.handler import get_mcp_handler + from dss.mcp.handler import get_mcp_handler handler = get_mcp_handler() return handler.list_tools(include_details=include_details) @@ -2820,7 +2727,7 @@ async def list_mcp_tools( @app.get("/api/mcp/tools/{tool_name}") async def get_mcp_tool_info(tool_name: str): """Get detailed information about a specific MCP tool.""" - from dss_mcp.handler import get_mcp_handler + from dss.mcp.handler import get_mcp_handler handler = get_mcp_handler() info = handler.get_tool_info(tool_name) @@ -2831,16 +2738,8 @@ async def get_mcp_tool_info(tool_name: str): return info -class MCPToolExecuteRequest(BaseModel): - """Request to execute an MCP tool.""" - - arguments: Dict[str, Any] - project_id: str - user_id: Optional[int] = 1 - - @app.post("/api/mcp/tools/{tool_name}/execute") -async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest): +async def execute_mcp_tool(tool_name: str, payload: Dict[str, Any] = Body(default_factory=dict)): """ Execute an MCP tool via unified handler. 
@@ -2850,16 +2749,23 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest): - Applies circuit breaker protection - Logs execution metrics """ - from dss_mcp.handler import MCPContext, get_mcp_handler + from dss.mcp.handler import MCPContext, get_mcp_handler handler = get_mcp_handler() + # Backwards-compatible request parsing: + # - New: { arguments: {...}, project_id: "...", user_id: 1 } + # - Old (Admin UI): { ...toolArgs } + arguments = payload.get("arguments") if isinstance(payload.get("arguments"), dict) else payload + project_id = payload.get("project_id") or payload.get("projectId") + user_id = payload.get("user_id") or payload.get("userId") or 1 + # Create execution context - context = MCPContext(project_id=request.project_id, user_id=request.user_id) + context = MCPContext(project_id=project_id, user_id=user_id) # Execute tool result = await handler.execute_tool( - tool_name=tool_name, arguments=request.arguments, context=context + tool_name=tool_name, arguments=arguments or {}, context=context ) # Log to activity @@ -2867,7 +2773,7 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest): action="mcp_tool_executed", entity_type="tool", entity_id=tool_name, - project_id=request.project_id, + project_id=project_id, details={ "success": result.success, "duration_ms": result.duration_ms, @@ -2881,28 +2787,43 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest): @app.get("/api/mcp/status") async def get_mcp_status(): """Get MCP server status and configuration.""" - from dss_mcp.config import integration_config, mcp_config, validate_config + from dss.mcp.config import integration_config, mcp_config, validate_config warnings = validate_config() + # Admin UI expects a minimal `{ connected, tools }` shape. + # Keep detailed config under `details` for debugging. 
+ try: + from dss.mcp.handler import get_mcp_handler + + tools_count = len(get_mcp_handler().list_tools(include_details=False)) + connected = True + except Exception: + tools_count = 0 + connected = False + return { - "server": { - "host": mcp_config.HOST, - "port": mcp_config.PORT, - "encryption_enabled": bool(mcp_config.ENCRYPTION_KEY), - "context_cache_ttl": mcp_config.CONTEXT_CACHE_TTL, + "connected": connected, + "tools": tools_count, + "details": { + "server": { + "host": mcp_config.HOST, + "port": mcp_config.PORT, + "encryption_enabled": bool(mcp_config.ENCRYPTION_KEY), + "context_cache_ttl": mcp_config.CONTEXT_CACHE_TTL, + }, + "integrations": { + "figma": bool(integration_config.FIGMA_TOKEN), + "anthropic": bool(integration_config.ANTHROPIC_API_KEY), + "jira_default": bool(integration_config.JIRA_URL), + "confluence_default": bool(integration_config.CONFLUENCE_URL), + }, + "circuit_breaker": { + "failure_threshold": mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD, + "timeout_seconds": mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS, + }, + "warnings": warnings, }, - "integrations": { - "figma": bool(integration_config.FIGMA_TOKEN), - "anthropic": bool(integration_config.ANTHROPIC_API_KEY), - "jira_default": bool(integration_config.JIRA_URL), - "confluence_default": bool(integration_config.CONFLUENCE_URL), - }, - "circuit_breaker": { - "failure_threshold": mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD, - "timeout_seconds": mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS, - }, - "warnings": warnings, } @@ -3070,9 +2991,8 @@ async def write_project_file(project_id: str, request: FileWriteRequest): raise HTTPException(status_code=403, detail=str(e)) -UI_DIR = Path(__file__).parent.parent.parent / "admin-ui" -if UI_DIR.exists(): - app.mount("/", StaticFiles(directory=str(UI_DIR), html=True), name="ui") +if _admin_ui_dist_index.exists(): + app.mount("/", StaticFiles(directory=str(_admin_ui_dist), html=True), name="ui") def kill_port(port: int, wait: float = 0.5) -> None: diff --git a/apps/cli/python/api/server.py b/apps/cli/python/api/server.py index a6a2771..0a7dcb6 100644 --- a/apps/cli/python/api/server.py +++ b/apps/cli/python/api/server.py @@ -13,7 +13,7 @@ Modes: - Server: Deployed remotely, serves design systems to teams - Local: Dev companion, UI advisor, local services -Uses SQLite for persistence, integrates with Figma tools. +Uses JSON storage for persistence, integrates with Figma tools. """ import json @@ -261,10 +261,12 @@ async def health(): @app.get("/api/stats") async def get_statistics(): - """Get database and system statistics.""" - db_stats = get_stats() + """Get storage and system statistics.""" + storage_stats = get_stats() return { - "database": db_stats, + "storage": storage_stats, + # Backwards-compatible alias (historical naming; underlying storage is JSON files). + "database": storage_stats, "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured}, } diff --git a/cli/python/api/server.py b/cli/python/api/server.py index a6a2771..0a7dcb6 100644 --- a/cli/python/api/server.py +++ b/cli/python/api/server.py @@ -13,7 +13,7 @@ Modes: - Server: Deployed remotely, serves design systems to teams - Local: Dev companion, UI advisor, local services -Uses SQLite for persistence, integrates with Figma tools. +Uses JSON storage for persistence, integrates with Figma tools. 
""" import json @@ -261,10 +261,12 @@ async def health(): @app.get("/api/stats") async def get_statistics(): - """Get database and system statistics.""" - db_stats = get_stats() + """Get storage and system statistics.""" + storage_stats = get_stats() return { - "database": db_stats, + "storage": storage_stats, + # Backwards-compatible alias (historical naming; underlying storage is JSON files). + "database": storage_stats, "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured}, } diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..1d31579 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,35 @@ +# DSS Documentation (Human) + +This directory contains **human-facing** documentation for DSS. + +If you’re looking for AI/agent-oriented notes, see `docs/ai.md`. + +## What DSS Is + +DSS (Design System Server) is a toolkit that can run in two modes: + +1. **Local developer tool**: analyze any project on disk, extract tokens, generate Storybook stories, and expose `dss_*` tools to Claude Code via MCP. +2. **Headless server**: run a FastAPI backend that teams (UX/QA/Admin) can access through the web Admin UI, and that AI assistants can interact with via the `/api/claude/chat` endpoint and MCP tool endpoints. + +## MCP across clients + +DSS exposes the same `dss_*` toolset to multiple AI clients via MCP: + +- Recommended: `./scripts/enable-mcp-clients.sh` +- **Claude Code**: `./scripts/setup-mcp.sh` generates `.claude/mcp.json`. +- **Codex CLI**: `codex mcp add dss -- /absolute/path/to/dss/scripts/dss-mcp` +- **Gemini CLI**: `gemini mcp add dss /absolute/path/to/dss/scripts/dss-mcp` + +The Claude plugin system (commands/skills under `dss-claude-plugin/`) is Claude-specific, but DSS makes those guides/hooks available to *any* MCP client via: +- `dss_list_guides` (use `include_meta=true` for hook metadata) +- `dss_get_guide` +- `dss_match_skills` + +## Documentation Map + +- `docs/quickstart.md` — install + run locally, and run the headless server. +- `docs/architecture.md` — how CLI, API, Admin UI, and MCP fit together. +- `docs/configuration.md` — environment variables, ports, and operational knobs. +- `docs/storage.md` — JSON-only storage layout and `DSS_HOME` rules. +- `docs/upgrade-notes.md` — recent upgrades and migration notes. +- `docs/ai.md` — where the AI/agent documentation lives. diff --git a/docs/ai.md b/docs/ai.md new file mode 100644 index 0000000..cc6e39d --- /dev/null +++ b/docs/ai.md @@ -0,0 +1,21 @@ +# AI / Agent Documentation Index + +This repo also includes documentation intended for AI agents (Claude Code plugin skills/commands, internal reference docs, and knowledge base files). + +Key entry points: + +- `CLAUDE.md` — instructions for Claude Code sessions and MCP setup. +- `admin-ui/AI-REFERENCE.md` — Admin UI architecture and API usage reference. +- `dss-claude-plugin/commands/` — Claude Code slash commands (operator runbooks). +- `dss-claude-plugin/skills/` — skill prompts for tool-using agents. +- `.knowledge/` — internal knowledge base documents. + +## Cross-model (Claude, Codex, Gemini) + +The DSS MCP server is client-agnostic. 
For non-Claude clients, the Claude plugin system (slash commands) does not load, but: + +- the same MCP tools are available, and +- the plugin guides can be discovered via MCP tools: + - `dss_list_guides` + - `dss_get_guide` + - `dss_match_skills` (emulates skill hook matching via `globs` / `alwaysApply`) diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 0000000..fea678b --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,48 @@ +# Architecture + +## High-level pieces + +- **Python core library** (`dss/`): analysis, ingestion, Storybook generation, storage. +- **Headless API** (`apps/api/server.py`): FastAPI server for Admin UI + AI chat + MCP endpoints. +- **Admin UI** (`admin-ui/`): Preact app used by UX/QA/Admin (talks to the API via `/api/*`). +- **Claude Code integration** (`dss-claude-plugin/`): Claude plugin assets (commands + skills). +- **Local MCP stdio server** (`dss/mcp/server.py`): a minimal MCP process that exposes `dss_*` tools to Claude Code. + +## Execution modes + +### Local (developer machine) + +- Run `dss-cli.py` directly against any repo on disk. +- Or run MCP locally (`python -m dss.mcp.server`) so Claude Code can call `dss_*` tools. + +### Headless (team server) + +- Run `uvicorn apps.api.server:app ...` to expose: + - Admin UI API (`/api/*`) + - AI chat (`/api/claude/chat`) + - MCP tool listing/execution (`/api/mcp/*`) +- When `admin-ui/dist/` exists, the server can also serve the built Admin UI bundle as static files. + +### Hybrid (recommended for mixed teams) + +- Developers run the MCP process locally. +- Tool execution can be proxied to a team server by setting `DSS_API_URL`. + +## MCP: unified tool layer + +All MCP-facing tool calls run through a shared registry/handler: + +- Registry + execution: `dss/mcp/handler.py` +- Local MCP server (stdio): `dss/mcp/server.py` +- Headless server endpoints: `apps/api/server.py` (`/api/mcp/*`) + +This avoids “two different DSS tool implementations” drifting over time. + +## Storage: JSON-only + +DSS stores state as JSON under `DSS_HOME` (see `docs/storage.md`). + +Why JSON: +- portable across machines/containers +- reviewable diffs in Git +- simple backup/restore diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 0000000..e4a3c71 --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,102 @@ +# Configuration + +## Ports (defaults) + +| Service | Default | +| --- | --- | +| API server | `6220` | +| Admin UI (Vite dev) | `6221` | +| MCP server (stdio process) | `6222` | +| Storybook (DSS repo) | `6226` | + +Notes: +- `./scripts/dss` runs a single-port server (UI + API) on `DSS_PORT` (defaults to `6220`). +- Many target projects run their own Storybook on `6006`/`6007`; DSS service discovery checks those ports. + +## Storage + +- `DSS_HOME` + - **Server mode**: set this explicitly (e.g. `/var/lib/dss` or a project volume). + - **Local mode**: if a project has `./.dss/`, DSS will use it automatically; otherwise it falls back to `~/.dss`. + +See `docs/storage.md` for the directory layout. + +## AI / Integrations + +- `FIGMA_TOKEN` — enables live Figma extraction/sync. +- `ANTHROPIC_API_KEY` — enables `/api/claude/chat` AI chat and tool calling. +- `DSS_MCP_ENCRYPTION_KEY` — optional Fernet key; encrypts integration configs at rest when set. + +## Dev workflow (optional) + +- `DSS_ENABLE_DEV_COMMANDS` + - When set to `1`, DSS exposes dev-only MCP workflow tools that wrap local scripts (`dss_init`, `dss_reset`, `dss_services`). 
+ - `scripts/dss-mcp` and `./scripts/setup-mcp.sh` set this automatically for local development. + +## Single-port launcher + +- `DSS_PORT` — port for `./scripts/dss` (single-port server wrapper). + +## MCP / Proxy mode + +- `DSS_API_URL` (or `DSS_SERVER_URL`) — if set, `dss.mcp.server` forwards tool calls to a headless DSS server. +- `DSS_PROJECT_ID` — default project context for MCP calls (when proxying or when tools omit `path`). +- `DSS_USER_ID` — optional user context for integrations. + +## MCP client setup (Claude, Codex, Gemini) + +All three clients can run the same DSS MCP stdio server. + +Quick option (configures Claude + Codex + Gemini when installed): + +```bash +./scripts/enable-mcp-clients.sh +``` + +### Claude Code + +- Generate Claude Code MCP config: `./scripts/setup-mcp.sh` +- Restart Claude Code, then run `/mcp` to verify `dss` is connected. + +### Codex CLI + +Add DSS as a global MCP server: + +```bash +codex mcp add dss -- /absolute/path/to/dss/scripts/dss-mcp +``` + +Remove it: + +```bash +codex mcp remove dss +``` + +### Gemini CLI + +Add DSS as an MCP server: + +```bash +gemini mcp add dss /absolute/path/to/dss/scripts/dss-mcp +``` + +Remove it: + +```bash +gemini mcp remove dss +``` + +### Notes + +- The Claude Code *plugin* (commands/skills under `dss-claude-plugin/`) is Claude-specific; Codex/Gemini won’t “load” it as a plugin, but they can still use the same MCP tools. +- `scripts/dss-mcp` sets required env vars (`PYTHONPATH`, `DSS_HOME`, etc.) and runs `python -m dss.mcp.server`. +- DSS also exposes the plugin guides/hooks to all MCP clients via: + - `dss_list_guides` (use `include_meta=true` to see `globs` / `alwaysApply` / `arguments`) + - `dss_get_guide` + - `dss_match_skills` + +## Scripts + +- `./scripts/setup-mcp.sh`: + - generates `.claude/mcp.json` to run the local MCP stdio server (`python -m dss.mcp.server`) + - `--api-url ` — enable proxy mode for the local MCP server diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 0000000..42e4f21 --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,108 @@ +# Quickstart + +## Prerequisites + +- Python 3.10+ +- Node.js 18+ (for `admin-ui` and Storybook tooling) + +## 1) Install Python deps + +```bash +python3 -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +``` + +## 2) Local developer workflow (CLI only) + +Run analysis for any project on disk: + +```bash +./dss-cli.py analyze --project-path /absolute/path/to/your-project +``` + +This writes a portable JSON artifact: +- `/.dss/analysis_graph.json` + +Generate Storybook stories for a project: + +```bash +./dss-cli.py setup-storybook --action generate --project-path /absolute/path/to/your-project +``` + +## 3) Local developer workflow (Claude Code via MCP) + +Generate Claude Code MCP config: + +```bash +./scripts/setup-mcp.sh +``` + +Then restart Claude Code and run `/mcp` to confirm the `dss` server is connected. + +### Enable MCP for all supported clients (recommended) + +```bash +./scripts/enable-mcp-clients.sh +``` + +### Proxy mode (MCP process local, tools executed on a server) + +```bash +./scripts/setup-mcp.sh --api-url https://your-dss-server.example.com +``` + +Set `DSS_PROJECT_ID` in the MCP server environment when you want tools to default to a specific registered project. 
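To sanity-check the target before wiring up proxy mode, the server's `/api/mcp/status` endpoint returns a minimal `{connected, tools}` shape (a quick sketch; the URL is a placeholder):

```python
import httpx

# Reachability check against a headless DSS server before enabling proxy mode.
# /api/mcp/status responds with {"connected": bool, "tools": int, "details": {...}}.
status = httpx.get(
    "https://your-dss-server.example.com/api/mcp/status", timeout=10.0
).json()
print(f"connected={status['connected']} tools={status['tools']}")
```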
+
+## 4) Headless server (API for Admin UI + teams)
+
+Choose one of the following:
+
+### Option A: Dev mode (Vite Admin UI + API)
+
+Start the API server:
+
+```bash
+source .venv/bin/activate
+PYTHONPATH="$PWD:$PWD/apps/api" uvicorn apps.api.server:app --host 0.0.0.0 --port 6220
+```
+
+Start the Admin UI (dev mode with `/api` proxy to `:6220`):
+
+```bash
+cd admin-ui
+npm install
+npm run dev
+```
+
+Admin UI: `http://localhost:6221`
+API: `http://localhost:6220`
+
+### Option B: Single-port server (serves built Admin UI)
+
+Build the Admin UI once:
+
+```bash
+cd admin-ui
+npm install
+npm run build
+cd ..
+```
+
+Start DSS (serves UI + API from one process):
+
+```bash
+./scripts/dss start
+```
+
+Dashboard: `http://localhost:6220`
+API: `http://localhost:6220/api`
+
+## 5) Common environment variables
+
+- `DSS_HOME` — where DSS stores JSON data (defaults to `./.dss` when present, else `~/.dss`)
+- `FIGMA_TOKEN` — enables live Figma tooling
+- `ANTHROPIC_API_KEY` — enables AI chat/tool calling on the headless server
+- `DSS_API_URL` — enables MCP proxy mode (local MCP → headless server)
+
+See `docs/configuration.md` for the full list and details.
diff --git a/docs/storage.md b/docs/storage.md
new file mode 100644
index 0000000..21a84f7
--- /dev/null
+++ b/docs/storage.md
@@ -0,0 +1,59 @@
+# Storage (JSON-only)
+
+## `DSS_HOME` resolution
+
+DSS chooses a storage root in this order:
+
+1. `$DSS_HOME` if set
+2. `./.dss` if it exists in the current working directory
+3. `~/.dss`
+
+This supports:
+- **project-local** storage (recommended when running DSS inside a repo)
+- **shared user** storage (default fallback)
+- **server** storage (explicit `DSS_HOME` volume)
+
+## Directory layout
+
+Most persistent JSON state lives under:
+
+`$DSS_HOME/data/`
+
+```
+data/
+  _system/
+    activity/                  # JSONL activity logs (by day)
+    cache/                     # TTL cache entries (JSON)
+    users/                     # auth users (JSON-only)
+    integration_health.json
+  projects/
+    <project-id>/
+      manifest.json
+      components/
+      figma/
+      integrations.json
+      metrics/
+      styles/
+      tokens/
+  teams/
+```
+
+## Concurrency / locking
+
+The JSON store uses simple file locking (`*.lock`) to reduce the risk of corruption when multiple processes write concurrently.
+
+## Project-local artifacts
+
+In addition to `$DSS_HOME/data/`, DSS also writes project-local artifacts under a repo’s `.dss/` directory, for example:
+
+- `.dss/analysis_graph.json` — portable analysis output (commit-friendly).
+- `.dss/config.json` — per-project DSS configuration (used by `ConfigService`).
+- `.dss/runtime-config.json` — runtime/server preferences (when using the headless server wrappers).
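+
+The resolution order described at the top of this page is easy to check from a shell. A minimal sketch (illustrative only; the authoritative logic lives in the DSS settings/storage modules):
+
+```bash
+# Resolve the storage root the same way DSS does: $DSS_HOME, then ./.dss, then ~/.dss.
+if [ -n "${DSS_HOME:-}" ]; then
+  dss_home="$DSS_HOME"        # 1) explicit override
+elif [ -d ./.dss ]; then
+  dss_home="$PWD/.dss"        # 2) project-local storage
+else
+  dss_home="$HOME/.dss"       # 3) shared user fallback
+fi
+echo "DSS storage root: $dss_home"
+```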
+
+## What is safe to commit
+
+Typical “commit-worthy” artifacts:
+- `<project>/.dss/analysis_graph.json` (generated analysis output)
+
+Typical “do not commit” data:
+- per-user caches and secrets under `$DSS_HOME/data/_system/cache`
diff --git a/docs/upgrade-notes.md b/docs/upgrade-notes.md
new file mode 100644
index 0000000..641c9d1
--- /dev/null
+++ b/docs/upgrade-notes.md
@@ -0,0 +1,61 @@
+# Upgrade Notes
+
+This file summarizes the major upgrades applied to DSS to support:
+- JSON-only, portable storage
+- a single MCP tool layer used by local + server + plugin
+- local developer usage across many projects **and** a headless server for UX/QA/Admin
+
+## Key upgrades
+
+### 1) Unified MCP tool layer (`dss.mcp`)
+
+Created a shared tool registry/execution layer used by:
+- Headless server (`apps/api/server.py` → `/api/mcp/*`)
+- Local MCP stdio server (`python -m dss.mcp.server`)
+
+Proxy mode is supported via `DSS_API_URL`.
+
+### 2) JSON-only storage (removed SQLite)
+
+All state is stored as JSON under `DSS_HOME`:
+- Project data: `$DSS_HOME/data/projects/<project-id>/...`
+- System data: `$DSS_HOME/data/_system/...`
+
+Auth user storage is JSON-based (`dss/storage/json_store.py:Users`).
+
+### 3) Consistent storage root (`DSS_HOME`)
+
+Storage now resolves in this order:
+1. `DSS_HOME`
+2. `./.dss` (project-local, if present)
+3. `~/.dss`
+
+### 4) Storybook generation reliability
+
+Story generation now:
+- scans component directories recursively
+- writes the correct story extension (`.stories.jsx` vs `.stories.tsx`)
+- avoids deleting user-authored stories by only clearing files with a DSS marker
+
+### 5) Headless server serves built Admin UI
+
+When `admin-ui/dist/index.html` exists, the headless server now serves the built Admin UI bundle as static files (so teams can access the web dashboard from the server port).
+
+### 6) CLI analysis now produces portable JSON output
+
+`./dss-cli.py analyze --project-path <path>` writes:
+- `<path>/.dss/analysis_graph.json`
+
+This is intended to be commit-friendly and shareable across machines.
+
+### 7) Removed deprecated `dss-setup`
+
+The deprecated Claude Code command `/dss-setup` has been removed. Use `/dss-init` instead.
+
+## Migration notes
+
+- If you previously relied on `.dss/dss.db` or `DSS_DATABASE`: it is no longer used.
+- If you previously imported `dss_mcp` or used `dss/mcp_server/*`: use `dss/mcp/*`.
+- If you previously referenced `dss-claude-plugin/servers/*` for MCP: use `python -m dss.mcp.server`.
+- Re-run `./scripts/setup-mcp.sh` after pulling to refresh `.claude/mcp.json`.
+- If you used the single-port launcher on `:3456`, note that `./scripts/dss` now defaults to `:6220` (override with `DSS_PORT` or update your reverse proxy).
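+
+For convenience, the migration notes above condense into a short shell checklist (a sketch; run from the repo root and skip steps that do not apply):
+
+```bash
+# Remove the legacy SQLite database if it is still lying around (no longer used).
+rm -f .dss/dss.db
+
+# Refresh the Claude Code MCP config for the unified stdio server.
+./scripts/setup-mcp.sh
+
+# Optionally configure Codex and Gemini at the same time.
+./scripts/enable-mcp-clients.sh
+```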
diff --git a/dss-claude-plugin/.claude-plugin/marketplace.json b/dss-claude-plugin/.claude-plugin/marketplace.json index b268ed0..26a065c 100644 --- a/dss-claude-plugin/.claude-plugin/marketplace.json +++ b/dss-claude-plugin/.claude-plugin/marketplace.json @@ -10,7 +10,7 @@ "name": "dss-claude-plugin", "source": "./", "description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components", - "version": "1.0.0" + "version": "1.0.2" } ] } diff --git a/dss-claude-plugin/.claude-plugin/plugin.json b/dss-claude-plugin/.claude-plugin/plugin.json index fa5c75b..d55f9af 100644 --- a/dss-claude-plugin/.claude-plugin/plugin.json +++ b/dss-claude-plugin/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "dss-claude-plugin", - "version": "1.0.1", + "version": "1.0.2", "description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components", "author": { "name": "overbits", diff --git a/dss-claude-plugin/commands/dss-init.md b/dss-claude-plugin/commands/dss-init.md index 6282d12..11b7d22 100644 --- a/dss-claude-plugin/commands/dss-init.md +++ b/dss-claude-plugin/commands/dss-init.md @@ -20,11 +20,11 @@ Single entry point for all DSS initialization. Handles MCP config, dependencies, ## Full Workflow (15 steps) 1. **Reset** (with --reset) - Clear all DSS data -2. **MCP Config** - Generate `.mcp.json` +2. **MCP Config** - Generate `.claude/mcp.json` (local MCP server) 3. **Dependencies** - Python venv, Node modules, admin-ui build 4. **Environment** - Validate Python, Node, Figma token 5. **Directory Structure** - Create `.dss/` folders -6. **Database** - Initialize SQLite +6. **Storage** - Initialize JSON data directories 7. **Analyze Targets** - admin-ui, storybook stats 8. **Token Structure** - Create base token files 9. **3-Layer Validation** - Core, skins, themes diff --git a/dss-claude-plugin/commands/dss-reset.md b/dss-claude-plugin/commands/dss-reset.md index 10e060a..46b0581 100644 --- a/dss-claude-plugin/commands/dss-reset.md +++ b/dss-claude-plugin/commands/dss-reset.md @@ -22,13 +22,12 @@ Without `--confirm`, runs in dry-run mode showing what would be deleted. ## What This Clears 1. `.dss/data/` - Projects, teams, cache, activity -2. `.dss/dss.db` - SQLite database -3. `admin-ui/css/dss-*.css` - Generated CSS files -4. `admin-ui/src/components/*.stories.js` - Generated stories -5. `admin-ui/src/components/ds-*.js` - Generated components -6. `dss/core_tokens/tokens.json` - Reset to empty -7. `dss-claude-plugin/core/skins/*.json` - Reset to awaiting sync -8. `.dss/logs/` - Clear log files +2. `admin-ui/css/dss-*.css` - Generated CSS files +3. `admin-ui/src/components/*.stories.js` - Generated stories +4. `admin-ui/src/components/ds-*.js` - Generated components +5. `dss/core_tokens/tokens.json` - Reset to empty +6. `dss-claude-plugin/core/skins/*.json` - Reset to awaiting sync +7. `.dss/logs/` - Clear log files ## Instructions for Claude @@ -59,8 +58,6 @@ DRY RUN MODE - No changes will be made 1. Clearing .dss/data/ structure... Would run: rm -rf .dss/data/projects/* ... -2. Resetting database... - Would run: rm -f .dss/dss.db ... DRY RUN COMPLETE diff --git a/dss-claude-plugin/commands/dss-services.md b/dss-claude-plugin/commands/dss-services.md index c0a2408..1655c03 100644 --- a/dss-claude-plugin/commands/dss-services.md +++ b/dss-claude-plugin/commands/dss-services.md @@ -34,9 +34,9 @@ Manage all DSS development services from a single command. 
| Service | Port | Description | |---------|------|-------------| -| api | 8000 | FastAPI REST server | -| admin-ui | 3456 | Vite dev server | -| storybook | 6006 | Storybook design docs | +| api | 6220 | FastAPI REST server | +| admin-ui | 6221 | Vite dev server | +| storybook | 6226 | Storybook design docs | ## Examples @@ -71,25 +71,25 @@ When the user runs this command: 3. For `status` action, show a table with service states 4. After `start`, provide clickable URLs: - - API: http://localhost:8000 - - admin-ui: http://localhost:3456 - - Storybook: http://localhost:6006 + - API: http://localhost:6220 + - admin-ui: http://localhost:6221 + - Storybook: http://localhost:6226 ## Service Details -### API Server (port 8000) +### API Server (port 6220) - FastAPI REST API - Endpoints: projects, figma, health, config -- Command: `uvicorn apps.api.server:app --reload` +- Command: `uvicorn apps.api.server:app --host 0.0.0.0 --port 6220 --reload` - Log: `/tmp/dss-api.log` -### Admin UI (port 3456) +### Admin UI (port 6221) - Preact/Vite development server - Design system management interface - Command: `npm run dev` - Log: `/tmp/dss-admin-ui.log` -### Storybook (port 6006) +### Storybook (port 6226) - Component documentation - Token visualization - Command: `npm run storybook` @@ -104,6 +104,5 @@ If a service fails to start: ## Related Commands -- `/dss-setup` - Full environment setup -- `/dss-init` - Initialize DSS structure +- `/dss-init` - Full environment setup + initialization - `/dss-reset` - Reset to clean state diff --git a/dss-claude-plugin/commands/dss-setup.md b/dss-claude-plugin/commands/dss-setup.md deleted file mode 100644 index c701b51..0000000 --- a/dss-claude-plugin/commands/dss-setup.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -name: dss-setup -description: DEPRECATED - Use /dss-init instead -arguments: - - name: flags - description: Optional flags (--reset, --skip-servers) - required: false ---- - -# DSS Setup Command (DEPRECATED) - -**This command is deprecated. Use `/dss-init` instead.** - -The `/dss-init` command now handles everything that `/dss-setup` did: -- MCP configuration -- Dependencies (Python venv, Node modules) -- DSS initialization -- Development servers - -## Migration - -| Old Command | New Command | -|-------------|-------------| -| `/dss-setup` | `/dss-init` | -| `/dss-setup --reset` | `/dss-init --reset` | -| `/dss-setup --skip-servers` | `/dss-init --skip-servers` | - -## Instructions for Claude - -When the user runs this command: - -1. Inform them that `/dss-setup` is deprecated -2. Run `/dss-init` with the same flags instead - -```bash -# Just run dss-init.sh directly -scripts/dss-init.sh [flags] -``` diff --git a/dss-claude-plugin/commands/dss-storybook.md b/dss-claude-plugin/commands/dss-storybook.md index 9f352da..eb6532c 100644 --- a/dss-claude-plugin/commands/dss-storybook.md +++ b/dss-claude-plugin/commands/dss-storybook.md @@ -143,5 +143,6 @@ CONFIGURATION CREATED - Controls configured Run: npm run storybook -Access: http://localhost:6006 +Access: http://localhost:6226 ``` +Note: DSS uses `6226` as its default Storybook port, but many target projects still run Storybook on `6006`/`6007`. 
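+
+A quick sanity check for port conflicts (a sketch; the port flag is the standard Storybook CLI option, and the port list mirrors what DSS service discovery checks):
+
+```bash
+# Start this project's Storybook explicitly on the DSS default port.
+npm run storybook -- --port 6226
+
+# Probe the common Storybook ports to see what is already running.
+for p in 6006 6007 6226; do
+  curl -sf "http://localhost:$p" > /dev/null && echo "Something is listening on :$p"
+done
+```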
diff --git a/dss-claude-plugin/core/config.py b/dss-claude-plugin/core/config.py index 8a53019..0398c14 100644 --- a/dss-claude-plugin/core/config.py +++ b/dss-claude-plugin/core/config.py @@ -23,7 +23,7 @@ logger = logging.getLogger(__name__) CONFIG_DIR = Path.home() / ".dss" CONFIG_FILE = CONFIG_DIR / "config.json" DEFAULT_REMOTE_URL = "https://dss.overbits.luz.uy" -DEFAULT_LOCAL_URL = "http://localhost:6006" +DEFAULT_LOCAL_URL = "http://localhost:6220" class DSSMode(str, Enum): diff --git a/dss-claude-plugin/core/mcp_integration.py b/dss-claude-plugin/core/mcp_integration.py index cda05fc..47c7b48 100644 --- a/dss-claude-plugin/core/mcp_integration.py +++ b/dss-claude-plugin/core/mcp_integration.py @@ -105,7 +105,7 @@ def mcp_get_compiler_status() -> str: # MCP Tool Registry -# This can be imported by dss-mcp-server.py to register the tools +# These tool definitions can be imported by the unified DSS MCP layer if needed. MCP_TOOLS = { "dss_get_resolved_context": { diff --git a/dss-claude-plugin/servers/dss-mcp-server.py b/dss-claude-plugin/servers/dss-mcp-server.py deleted file mode 100644 index db72b2b..0000000 --- a/dss-claude-plugin/servers/dss-mcp-server.py +++ /dev/null @@ -1,2908 +0,0 @@ -#!/usr/bin/env python3 -""" -DSS MCP Server - Design System Server Integration for Claude Code. - -A Python MCP server that exposes DSS functionality as tools for Claude. -Uses stdio transport for Claude Code integration. - -Author: overbits -Version: 2.0.0 - Architectural Refinement: Boundary Enforcement & Runtime -""" - -import asyncio -import base64 -import json -import logging -import os -import re -import sys -from collections import deque -from dataclasses import dataclass, field -from datetime import datetime -from pathlib import Path -from typing import Any, Dict, List, Optional - -# DSS Runtime - Boundary Enforcement (CRITICAL) -# All external API access MUST go through the runtime -try: - sys.path.insert(0, str(Path(__file__).parent.parent)) - from core.runtime import BoundaryViolationError, DSSRuntime, get_runtime - from core.structured_logger import ( - LogContext, - PerformanceLogger, - configure_log_rotation, - get_logger, - ) - - RUNTIME_AVAILABLE = True -except ImportError as e: - RUNTIME_AVAILABLE = False - RUNTIME_IMPORT_ERROR = str(e) - print(f"WARNING: DSSRuntime not available: {e}", file=sys.stderr) - print("Boundary enforcement will be disabled!", file=sys.stderr) - -# Playwright import (optional - only needed for DevTools features) -try: - from playwright.async_api import Browser, BrowserContext, Page, Playwright, async_playwright - - PLAYWRIGHT_AVAILABLE = True -except ImportError: - PLAYWRIGHT_AVAILABLE = False - -# Import LocalBrowserStrategy for unified browser automation -try: - from strategies.local.browser import LocalBrowserStrategy - - LOCAL_BROWSER_STRATEGY_AVAILABLE = True -except ImportError: - LOCAL_BROWSER_STRATEGY_AVAILABLE = False - -# Add DSS to path (project root) -DSS_PATH = Path(__file__).parent.parent.parent -sys.path.insert(0, str(DSS_PATH)) - -# MCP SDK imports -try: - from mcp.server import Server - from mcp.server.stdio import stdio_server - from mcp.types import TextContent, Tool -except ImportError: - print("MCP SDK not found. 
Install with: pip install mcp", file=sys.stderr) - sys.exit(1) - -# DSS imports -try: - import dss - from dss import ( # Analyze - Context generation & project graph; Ingest - Token sources; Models; Storybook; Settings; Figma - CSSTokenSource, - DependencyGraph, - DSSManager, - DSSSettings, - FigmaToolSuite, - JSONTokenSource, - MergeStrategy, - Project, - ProjectMetadata, - ProjectScanner, - QuickWinFinder, - ReactAnalyzer, - SCSSTokenSource, - StorybookScanner, - StoryGenerator, - StyleAnalyzer, - TailwindTokenSource, - Theme, - ThemeGenerator, - TokenCollection, - TokenMerger, - manager, - settings, - ) - - DSS_AVAILABLE = True -except ImportError as e: - DSS_AVAILABLE = False - DSS_IMPORT_ERROR = str(e) - -# Context Compiler imports -try: - from core import ( - get_active_context, - get_compiler_status, - list_skins, - resolve_token, - validate_manifest, - ) - - CONTEXT_COMPILER_AVAILABLE = True -except ImportError as e: - CONTEXT_COMPILER_AVAILABLE = False - CONTEXT_COMPILER_IMPORT_ERROR = str(e) - -# Project Management imports -try: - from dss.project import ( - DSSProject, - FigmaProjectSync, - FigmaSource, - ProjectConfig, - ProjectManager, - ProjectRegistry, - ProjectStatus, - ) - - PROJECT_MANAGEMENT_AVAILABLE = True -except ImportError as e: - PROJECT_MANAGEMENT_AVAILABLE = False - PROJECT_MANAGEMENT_IMPORT_ERROR = str(e) - -# Configure structured logging -if RUNTIME_AVAILABLE: - # Use structured JSON logging - logger = get_logger("dss.mcp.server") - logger.info("DSS MCP Server initializing with structured logging") -else: - # Fallback to basic logging if runtime not available - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", - handlers=[logging.StreamHandler(sys.stderr)], - ) - logger = logging.getLogger("dss-mcp-server") - logger.warning("Structured logging unavailable - using fallback") - -# Timeout configuration (seconds) -TIMEOUT_CONFIG = { - "analyze": 60, - "extract": 30, - "generate": 30, - "style_dictionary": 30, - "figma_api": 15, - "storybook": 60, - "audit": 45, - "quick_wins": 30, - "devtools_connect": 20, - "devtools_default": 10, -} - - -# ============================================================================= -# DEVTOOLS STATE MANAGEMENT -# ============================================================================= - -# DevTools Configuration Constants -DEVTOOLS_CONSOLE_MAX_ENTRIES = 1000 -DEVTOOLS_NETWORK_MAX_ENTRIES = 500 -DEVTOOLS_CONNECTION_TIMEOUT_MS = 30000 # 30 seconds - - -@dataclass -class DevToolsState: - """State management for Chrome DevTools Protocol connections. 
- - Manages Playwright CDP connections to Chrome instances, tracking: - - Browser and page references - - Console log capture (bounded buffer) - - Network request capture (bounded buffer) - - Connection lifecycle state - """ - - playwright: Optional[Any] = None - browser: Optional[Any] = None - contexts: Dict[str, Any] = field(default_factory=dict) - pages: Dict[str, Any] = field(default_factory=dict) - active_page_id: Optional[str] = None - console_logs: deque = field(default_factory=lambda: deque(maxlen=DEVTOOLS_CONSOLE_MAX_ENTRIES)) - network_requests: deque = field( - default_factory=lambda: deque(maxlen=DEVTOOLS_NETWORK_MAX_ENTRIES) - ) - connected: bool = False - - -devtools = DevToolsState() - - -# ============================================================================= -# BROWSER AUTOMATION STATE -# ============================================================================= - - -@dataclass -class BrowserAutomationState: - """State management for unified browser automation (LOCAL mode).""" - - strategy: Optional[Any] = None # LocalBrowserStrategy instance - mode: str = "local" # "local" or "remote" - session_id: Optional[str] = None - remote_api_url: Optional[str] = None - initialized: bool = False - - -browser_state = BrowserAutomationState() - - -# Create MCP server -server = Server("dss-server") - - -def with_timeout(timeout_key: str): - """Decorator to add timeout to async functions.""" - - def decorator(func): - async def wrapper(*args, **kwargs): - timeout = TIMEOUT_CONFIG.get(timeout_key, 30) - try: - return await asyncio.wait_for(func(*args, **kwargs), timeout=timeout) - except asyncio.TimeoutError: - return { - "success": False, - "error": f"Operation timed out after {timeout} seconds", - "timeout_key": timeout_key, - } - - return wrapper - - return decorator - - -def safe_serialize(obj: Any) -> Any: - """Safely serialize objects to JSON-compatible format.""" - if obj is None: - return None - if isinstance(obj, (str, int, float, bool)): - return obj - if isinstance(obj, (list, tuple)): - return [safe_serialize(item) for item in obj] - if isinstance(obj, dict): - return {str(k): safe_serialize(v) for k, v in obj.items()} - if isinstance(obj, Path): - return str(obj) - if isinstance(obj, deque): - return [safe_serialize(item) for item in obj] - if hasattr(obj, "__dict__"): - d = {k: v for k, v in obj.__dict__.items() if not k.startswith("_")} - return safe_serialize(d) - if hasattr(obj, "model_dump"): - return obj.model_dump() - return str(obj) - - -# ============================================================================= -# TOOL DEFINITIONS -# ============================================================================= - - -@server.list_tools() -async def list_tools() -> List[Tool]: - """List all available DSS and DevTools tools.""" - dss_tools = [ - Tool( - name="dss_analyze_project", - description="Analyze a project for design system patterns, component usage, and tokenization opportunities. Returns comprehensive analysis including style patterns, React components, and dependency graph.", - inputSchema={ - "type": "object", - "properties": { - "path": { - "type": "string", - "description": "Absolute path to the project directory to analyze", - } - }, - "required": ["path"], - }, - ), - Tool( - name="dss_extract_tokens", - description="Extract design tokens from CSS, SCSS, Tailwind, or JSON sources. 
Returns a unified TokenCollection with all discovered tokens.", - inputSchema={ - "type": "object", - "properties": { - "path": { - "type": "string", - "description": "Path to the file or directory containing design tokens", - }, - "sources": { - "type": "array", - "items": {"type": "string", "enum": ["css", "scss", "tailwind", "json"]}, - "description": "Token source types to extract from (default: all)", - }, - }, - "required": ["path"], - }, - ), - Tool( - name="dss_generate_theme", - description="Generate theme files from design tokens using style-dictionary. Supports CSS, SCSS, JSON, and JS output formats.", - inputSchema={ - "type": "object", - "properties": { - "tokens": { - "type": "object", - "description": "Design tokens to transform (or use tokens from previous extraction)", - }, - "format": { - "type": "string", - "enum": ["css", "scss", "json", "js"], - "description": "Output format for generated theme files", - }, - "theme_name": { - "type": "string", - "description": "Name for the generated theme (default: 'default')", - }, - }, - "required": ["format"], - }, - ), - Tool( - name="dss_list_themes", - description="List all available themes in the DSS system", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="dss_get_status", - description="Get DSS system status including health checks, dependencies, configuration, metrics, and recommendations.", - inputSchema={ - "type": "object", - "properties": { - "format": { - "type": "string", - "enum": ["json", "dashboard"], - "description": "Output format: 'json' for structured data, 'dashboard' for ASCII art display (default: json)", - } - }, - }, - ), - Tool( - name="dss_audit_components", - description="Audit React components for design system adoption. Identifies hardcoded values, missing tokens, and refactoring opportunities.", - inputSchema={ - "type": "object", - "properties": { - "path": {"type": "string", "description": "Path to React component directory"} - }, - "required": ["path"], - }, - ), - Tool( - name="dss_setup_storybook", - description="Set up or configure Storybook for the project. Generates stories and theme configuration.", - inputSchema={ - "type": "object", - "properties": { - "path": {"type": "string", "description": "Path to the project directory"}, - "action": { - "type": "string", - "enum": ["scan", "generate", "configure"], - "description": "Action to perform: scan existing, generate stories, or configure theme", - }, - }, - "required": ["path"], - }, - ), - Tool( - name="dss_sync_figma", - description="Sync design tokens from a Figma file. Requires FIGMA_TOKEN environment variable.", - inputSchema={ - "type": "object", - "properties": { - "file_key": {"type": "string", "description": "Figma file key (from URL)"} - }, - "required": ["file_key"], - }, - ), - Tool( - name="dss_find_quick_wins", - description="Find quick win opportunities for design system adoption. 
Identifies low-effort, high-impact improvements.", - inputSchema={ - "type": "object", - "properties": { - "path": {"type": "string", "description": "Path to the project directory"} - }, - "required": ["path"], - }, - ), - Tool( - name="dss_transform_tokens", - description="Transform tokens between formats using style-dictionary", - inputSchema={ - "type": "object", - "properties": { - "tokens": {"type": "object", "description": "Tokens to transform"}, - "input_format": { - "type": "string", - "enum": ["css", "scss", "json", "tailwind"], - "description": "Input token format", - }, - "output_format": { - "type": "string", - "enum": ["css", "scss", "json", "js"], - "description": "Desired output format", - }, - }, - "required": ["tokens", "output_format"], - }, - ), - ] - - devtools_tools = [ - Tool( - name="devtools_launch", - description="Launch a new headless Chromium browser. Use this on remote/headless servers where no Chrome is running.", - inputSchema={ - "type": "object", - "properties": { - "url": { - "type": "string", - "description": "Initial URL to navigate to (default: about:blank)", - }, - "headless": {"type": "boolean", "description": "Run headless (default: true)"}, - }, - }, - ), - Tool( - name="devtools_connect", - description="Connect to a running Chrome browser with remote debugging enabled. Start Chrome with: --remote-debugging-port=9222", - inputSchema={ - "type": "object", - "properties": { - "port": {"type": "integer", "description": "CDP port number (default: 9222)"}, - "host": {"type": "string", "description": "CDP host (default: 'localhost')"}, - }, - }, - ), - Tool( - name="devtools_disconnect", - description="Disconnect from Chrome DevTools and clean up resources.", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="devtools_list_pages", - description="List all available pages (tabs) in the connected browser with their URLs and titles.", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="devtools_select_page", - description="Set the active page for subsequent DevTools operations. Console and network logging will be enabled for the selected page.", - inputSchema={ - "type": "object", - "properties": { - "page_id": { - "type": "string", - "description": "The unique ID of the page to select (from devtools_list_pages)", - } - }, - "required": ["page_id"], - }, - ), - Tool( - name="devtools_console_logs", - description="Retrieve captured console log messages (log, warn, error, info, debug) from the active page.", - inputSchema={ - "type": "object", - "properties": { - "level": { - "type": "string", - "enum": ["all", "log", "warn", "error", "info", "debug"], - "description": "Filter by message level (default: all)", - }, - "limit": { - "type": "integer", - "description": "Maximum number of messages to return (default: 100)", - }, - "clear": { - "type": "boolean", - "description": "Clear captured logs after retrieving (default: false)", - }, - }, - }, - ), - Tool( - name="devtools_network_requests", - description="Retrieve captured network requests from the active page. 
Includes URL, method, headers, and resource type.", - inputSchema={ - "type": "object", - "properties": { - "filter_url": { - "type": "string", - "description": "Regex pattern to filter requests by URL", - }, - "limit": { - "type": "integer", - "description": "Maximum number of requests to return (default: 50)", - }, - }, - }, - ), - Tool( - name="devtools_evaluate", - description="Execute a JavaScript expression in the context of the active page and return the result.", - inputSchema={ - "type": "object", - "properties": { - "expression": { - "type": "string", - "description": "The JavaScript expression to evaluate", - } - }, - "required": ["expression"], - }, - ), - Tool( - name="devtools_query_dom", - description="Query DOM elements on the active page using a CSS selector. Returns tag, text content, and outer HTML for each match.", - inputSchema={ - "type": "object", - "properties": { - "selector": { - "type": "string", - "description": "CSS selector to query for elements", - } - }, - "required": ["selector"], - }, - ), - Tool( - name="devtools_goto", - description="Navigate the active page to a URL.", - inputSchema={ - "type": "object", - "properties": { - "url": {"type": "string", "description": "URL to navigate to"}, - "wait_until": { - "type": "string", - "description": "Wait condition: 'load', 'domcontentloaded', 'networkidle' (default: domcontentloaded)", - }, - }, - "required": ["url"], - }, - ), - Tool( - name="devtools_screenshot", - description="Capture a screenshot of the active page or a specific element. Returns base64 encoded PNG.", - inputSchema={ - "type": "object", - "properties": { - "selector": { - "type": "string", - "description": "CSS selector of an element to capture. If omitted, captures the viewport.", - }, - "full_page": { - "type": "boolean", - "description": "Capture the full scrollable page (default: false)", - }, - }, - }, - ), - Tool( - name="devtools_performance", - description="Get performance metrics for the active page including page load time, DNS lookup, TCP connect, and response times.", - inputSchema={"type": "object", "properties": {}}, - ), - ] - - # Browser Automation Tools (Unified LOCAL/REMOTE strategy) - browser_tools = [ - Tool( - name="browser_init", - description="Initialize browser automation. Mode 'local' uses Playwright for direct control. Mode 'remote' uses Shadow State pattern to fetch logs from a running admin-ui session.", - inputSchema={ - "type": "object", - "properties": { - "mode": { - "type": "string", - "enum": ["local", "remote"], - "description": "Automation mode: 'local' for Playwright, 'remote' for Shadow State API (default: local)", - }, - "url": { - "type": "string", - "description": "For local mode: URL to navigate to. For remote mode: API endpoint URL.", - }, - "session_id": { - "type": "string", - "description": "For remote mode: Session ID to fetch logs from.", - }, - "headless": { - "type": "boolean", - "description": "For local mode: Run browser headless (default: true)", - }, - }, - }, - ), - Tool( - name="browser_get_logs", - description="Get console logs from the browser. 
Works in both LOCAL and REMOTE modes.", - inputSchema={ - "type": "object", - "properties": { - "level": { - "type": "string", - "enum": ["all", "log", "warn", "error", "info", "debug"], - "description": "Filter by log level (default: all)", - }, - "limit": { - "type": "integer", - "description": "Maximum number of logs to return (default: 100)", - }, - }, - }, - ), - Tool( - name="browser_screenshot", - description="Capture a screenshot from the browser. Requires LOCAL mode.", - inputSchema={ - "type": "object", - "properties": { - "selector": { - "type": "string", - "description": "CSS selector to capture specific element. If omitted, captures viewport.", - }, - "full_page": { - "type": "boolean", - "description": "Capture full scrollable page (default: false)", - }, - }, - }, - ), - Tool( - name="browser_dom_snapshot", - description="Get current DOM state as HTML. Works in both LOCAL and REMOTE modes.", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="browser_get_errors", - description="Get captured errors (uncaught exceptions, unhandled rejections). Works in both modes.", - inputSchema={ - "type": "object", - "properties": { - "limit": { - "type": "integer", - "description": "Maximum number of errors to return (default: 50)", - } - }, - }, - ), - Tool( - name="browser_accessibility_audit", - description="Run accessibility audit using axe-core. Returns WCAG violations and passes.", - inputSchema={ - "type": "object", - "properties": { - "selector": { - "type": "string", - "description": "CSS selector to audit specific element. If omitted, audits entire page.", - } - }, - }, - ), - Tool( - name="browser_performance", - description="Get Core Web Vitals and performance metrics (TTFB, FCP, LCP, CLS).", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="browser_close", - description="Close the browser automation session and clean up resources.", - inputSchema={"type": "object", "properties": {}}, - ), - ] - - # Context Compiler Tools - context_compiler_tools = [ - Tool( - name="dss_get_resolved_context", - description="Get fully resolved design system context for a project. Returns compiled tokens from 3-layer cascade (base → skin → project).", - inputSchema={ - "type": "object", - "properties": { - "manifest_path": { - "type": "string", - "description": "Absolute path to ds.config.json", - }, - "debug": { - "type": "boolean", - "description": "Enable debug provenance tracking", - "default": False, - }, - "force_refresh": { - "type": "boolean", - "description": "Bypass cache and recompile", - "default": False, - }, - }, - "required": ["manifest_path"], - }, - ), - Tool( - name="dss_resolve_token", - description="Resolve a specific design token through the cascade. Use dot-notation (e.g. 'colors.primary').", - inputSchema={ - "type": "object", - "properties": { - "manifest_path": { - "type": "string", - "description": "Absolute path to ds.config.json", - }, - "token_path": { - "type": "string", - "description": "Dot-notation path to token (e.g. 
'colors.primary')", - }, - "force_refresh": { - "type": "boolean", - "description": "Bypass cache and recompile", - "default": False, - }, - }, - "required": ["manifest_path", "token_path"], - }, - ), - Tool( - name="dss_validate_manifest", - description="Validate project manifest (ds.config.json) against schema.", - inputSchema={ - "type": "object", - "properties": { - "manifest_path": { - "type": "string", - "description": "Absolute path to ds.config.json", - } - }, - "required": ["manifest_path"], - }, - ), - Tool( - name="dss_list_skins", - description="List all available design system skins in the registry.", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="dss_get_compiler_status", - description="Get Context Compiler health and configuration status.", - inputSchema={"type": "object", "properties": {}}, - ), - ] - - # Project Management Tools - project_tools = [ - Tool( - name="dss_project_init", - description="Initialize a new DSS project with folder structure and config file.", - inputSchema={ - "type": "object", - "properties": { - "path": {"type": "string", "description": "Directory path for the new project"}, - "name": {"type": "string", "description": "Project name"}, - "description": { - "type": "string", - "description": "Optional project description", - }, - "skin": { - "type": "string", - "description": "Base skin to extend (e.g., 'shadcn', 'material')", - }, - }, - "required": ["path", "name"], - }, - ), - Tool( - name="dss_project_add_figma_team", - description="Link a Figma team folder to the project. Auto-discovers all projects/files and identifies the UIKit reference file.", - inputSchema={ - "type": "object", - "properties": { - "project_path": { - "type": "string", - "description": "Path to DSS project directory", - }, - "team_id": {"type": "string", "description": "Figma team ID"}, - "figma_token": { - "type": "string", - "description": "Figma personal access token (optional, uses FIGMA_TOKEN env var if not provided)", - }, - }, - "required": ["project_path", "team_id"], - }, - ), - Tool( - name="dss_project_add_figma_file", - description="Add a single Figma file to the project.", - inputSchema={ - "type": "object", - "properties": { - "project_path": { - "type": "string", - "description": "Path to DSS project directory", - }, - "file_key": {"type": "string", "description": "Figma file key (from URL)"}, - "file_name": { - "type": "string", - "description": "Human-readable name for the file", - }, - "figma_token": { - "type": "string", - "description": "Figma personal access token (optional)", - }, - }, - "required": ["project_path", "file_key", "file_name"], - }, - ), - Tool( - name="dss_project_sync", - description="Sync design tokens from all configured Figma sources.", - inputSchema={ - "type": "object", - "properties": { - "project_path": { - "type": "string", - "description": "Path to DSS project directory", - }, - "file_keys": { - "type": "array", - "items": {"type": "string"}, - "description": "Optional: specific file keys to sync (syncs all if not provided)", - }, - "figma_token": { - "type": "string", - "description": "Figma personal access token (optional)", - }, - }, - "required": ["project_path"], - }, - ), - Tool( - name="dss_project_build", - description="Build output files (CSS, SCSS, JSON) from synced tokens.", - inputSchema={ - "type": "object", - "properties": { - "project_path": { - "type": "string", - "description": "Path to DSS project directory", - } - }, - "required": ["project_path"], - }, - ), - Tool( - 
name="dss_project_graph_analysis", - description="Generates a dependency graph of the project's components and styles.", - inputSchema={ - "type": "object", - "properties": { - "project_path": { - "type": "string", - "description": "Path to the project directory to be analyzed.", - } - }, - "required": ["project_path"], - }, - ), - Tool( - name="dss_project_list", - description="List all registered DSS projects.", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="dss_project_info", - description="Get detailed information about a DSS project.", - inputSchema={ - "type": "object", - "properties": { - "project_path": { - "type": "string", - "description": "Path to DSS project directory", - } - }, - "required": ["project_path"], - }, - ), - Tool( - name="dss_project_export_context", - description="Exports a comprehensive project context, including analysis graph and configuration, for external agents.", - inputSchema={ - "type": "object", - "properties": { - "project_path": { - "type": "string", - "description": "Path to the project directory.", - } - }, - "required": ["project_path"], - }, - ), - Tool( - name="dss_figma_discover", - description="Discover Figma team structure including all projects, files, and identify UIKit reference file.", - inputSchema={ - "type": "object", - "properties": { - "team_id": {"type": "string", "description": "Figma team ID"}, - "figma_token": { - "type": "string", - "description": "Figma personal access token (optional)", - }, - }, - "required": ["team_id"], - }, - ), - Tool( - name="dss_core_sync", - description="Sync DSS core design system from the canonical Figma source (shadcn/ui). This is the base layer that all skins and projects inherit from.", - inputSchema={ - "type": "object", - "properties": { - "force": { - "type": "boolean", - "description": "Force sync even if recently synced", - }, - "figma_token": { - "type": "string", - "description": "Figma personal access token (optional)", - }, - }, - }, - ), - Tool( - name="dss_core_status", - description="Get DSS core sync status including Figma reference and synced files.", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="dss_core_tokens", - description="Get DSS core tokens (synced from shadcn/ui Figma).", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="dss_core_themes", - description="Get DSS core themes (light/dark based on shadcn/ui).", - inputSchema={"type": "object", "properties": {}}, - ), - Tool( - name="dss_rate_limit_status", - description="Check current Figma API rate limit status.", - inputSchema={ - "type": "object", - "properties": { - "figma_token": { - "type": "string", - "description": "Figma personal access token (optional)", - } - }, - }, - ), - ] - - return dss_tools + devtools_tools + browser_tools + context_compiler_tools + project_tools - - -# ============================================================================= -# TOOL DISPATCHER -# ============================================================================= - - -@server.call_tool() -async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]: - """Handle tool calls.""" - - # Tools that work without DSS module imports (use scripts directly) - DSS_INDEPENDENT_TOOLS = {"dss_sync_figma", "dss_get_status", "dss_list_themes"} - - if not DSS_AVAILABLE and name.startswith("dss_") and name not in DSS_INDEPENDENT_TOOLS: - return [ - TextContent( - type="text", - text=json.dumps( - {"success": False, "error": f"DSS modules not available: 
{DSS_IMPORT_ERROR}"}, - indent=2, - ), - ) - ] - - if not PLAYWRIGHT_AVAILABLE and name.startswith("devtools_"): - return [ - TextContent( - type="text", - text=json.dumps( - { - "success": False, - "error": "Playwright not installed. Run: pip install playwright && playwright install chromium", - }, - indent=2, - ), - ) - ] - - try: - # DSS Tools - if name == "dss_analyze_project": - result = await analyze_project(arguments.get("path", ".")) - elif name == "dss_extract_tokens": - result = await extract_tokens( - arguments.get("path", "."), - arguments.get("sources", ["css", "scss", "tailwind", "json"]), - ) - elif name == "dss_generate_theme": - result = await generate_theme( - arguments.get("tokens", {}), - arguments.get("format", "css"), - arguments.get("theme_name", "default"), - ) - elif name == "dss_list_themes": - result = await list_themes() - elif name == "dss_get_status": - result = await get_status(arguments.get("format", "json")) - elif name == "dss_audit_components": - result = await audit_components(arguments.get("path", ".")) - elif name == "dss_setup_storybook": - result = await setup_storybook( - arguments.get("path", "."), arguments.get("action", "scan") - ) - elif name == "dss_sync_figma": - result = await sync_figma(arguments.get("file_key", "")) - elif name == "dss_find_quick_wins": - result = await find_quick_wins(arguments.get("path", ".")) - elif name == "dss_transform_tokens": - result = await transform_tokens( - arguments.get("tokens", {}), - arguments.get("input_format", "json"), - arguments.get("output_format", "css"), - ) - # DevTools Tools - elif name == "devtools_launch": - result = await devtools_launch_impl( - url=arguments.get("url", "about:blank"), headless=arguments.get("headless", True) - ) - elif name == "devtools_connect": - result = await devtools_connect_impl( - port=arguments.get("port", 9222), host=arguments.get("host", "localhost") - ) - elif name == "devtools_disconnect": - result = await devtools_disconnect_impl() - elif name == "devtools_list_pages": - result = await devtools_list_pages_impl() - elif name == "devtools_select_page": - result = await devtools_select_page_impl(page_id=arguments.get("page_id")) - elif name == "devtools_console_logs": - result = await devtools_console_logs_impl( - level=arguments.get("level", "all"), - limit=arguments.get("limit", 100), - clear=arguments.get("clear", False), - ) - elif name == "devtools_network_requests": - result = await devtools_network_requests_impl( - filter_url=arguments.get("filter_url", ""), limit=arguments.get("limit", 50) - ) - elif name == "devtools_evaluate": - result = await devtools_evaluate_impl(expression=arguments.get("expression")) - elif name == "devtools_query_dom": - result = await devtools_query_dom_impl(selector=arguments.get("selector")) - elif name == "devtools_goto": - result = await devtools_goto_impl( - url=arguments.get("url"), wait_until=arguments.get("wait_until", "domcontentloaded") - ) - elif name == "devtools_screenshot": - result = await devtools_screenshot_impl( - selector=arguments.get("selector"), full_page=arguments.get("full_page", False) - ) - elif name == "devtools_performance": - result = await devtools_performance_impl() - # Browser Automation Tools - elif name == "browser_init": - result = await browser_init_impl( - mode=arguments.get("mode", "local"), - url=arguments.get("url"), - session_id=arguments.get("session_id"), - headless=arguments.get("headless", True), - ) - elif name == "browser_get_logs": - result = await browser_get_logs_impl( - 
level=arguments.get("level", "all"), limit=arguments.get("limit", 100) - ) - elif name == "browser_screenshot": - result = await browser_screenshot_impl( - selector=arguments.get("selector"), full_page=arguments.get("full_page", False) - ) - elif name == "browser_dom_snapshot": - result = await browser_dom_snapshot_impl() - elif name == "browser_get_errors": - result = await browser_get_errors_impl(limit=arguments.get("limit", 50)) - elif name == "browser_accessibility_audit": - result = await browser_accessibility_audit_impl(selector=arguments.get("selector")) - elif name == "browser_performance": - result = await browser_performance_impl() - elif name == "browser_close": - result = await browser_close_impl() - # Context Compiler tools - elif name == "dss_get_resolved_context": - if not CONTEXT_COMPILER_AVAILABLE: - result = { - "success": False, - "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}", - } - else: - try: - context_json = get_active_context( - arguments.get("manifest_path"), - arguments.get("debug", False), - arguments.get("force_refresh", False), - ) - result = {"success": True, "context": json.loads(context_json)} - except Exception as e: - result = {"success": False, "error": str(e)} - - elif name == "dss_resolve_token": - if not CONTEXT_COMPILER_AVAILABLE: - result = { - "success": False, - "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}", - } - else: - try: - token_value = resolve_token( - arguments.get("manifest_path"), - arguments.get("token_path"), - arguments.get("force_refresh", False), - ) - result = { - "success": True, - "token_path": arguments.get("token_path"), - "value": token_value, - } - except Exception as e: - result = {"success": False, "error": str(e)} - - elif name == "dss_validate_manifest": - if not CONTEXT_COMPILER_AVAILABLE: - result = { - "success": False, - "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}", - } - else: - try: - validation_result = validate_manifest(arguments.get("manifest_path")) - result = {"success": True, "validation": validation_result} - except Exception as e: - result = {"success": False, "error": str(e)} - - elif name == "dss_list_skins": - if not CONTEXT_COMPILER_AVAILABLE: - result = { - "success": False, - "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}", - } - else: - try: - skins_json = list_skins() - result = {"success": True, "skins": json.loads(skins_json)} - except Exception as e: - result = {"success": False, "error": str(e)} - - elif name == "dss_get_compiler_status": - if not CONTEXT_COMPILER_AVAILABLE: - result = { - "success": False, - "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}", - } - else: - try: - status_json = get_compiler_status() - result = {"success": True, "status": json.loads(status_json)} - except Exception as e: - result = {"success": False, "error": str(e)} - - # Project Management Tools - elif name == "dss_project_init": - result = await project_init_impl( - path=arguments.get("path"), - name=arguments.get("name"), - description=arguments.get("description"), - skin=arguments.get("skin"), - ) - elif name == "dss_project_add_figma_team": - result = await project_add_figma_team_impl( - project_path=arguments.get("project_path"), - team_id=arguments.get("team_id"), - figma_token=arguments.get("figma_token"), - ) - elif name == "dss_project_add_figma_file": - result = await project_add_figma_file_impl( - project_path=arguments.get("project_path"), - 
file_key=arguments.get("file_key"), - file_name=arguments.get("file_name"), - figma_token=arguments.get("figma_token"), - ) - elif name == "dss_project_sync": - result = await project_sync_impl( - project_path=arguments.get("project_path"), - file_keys=arguments.get("file_keys"), - figma_token=arguments.get("figma_token"), - ) - elif name == "dss_project_build": - result = await project_build_impl(project_path=arguments.get("project_path")) - elif name == "dss_project_graph_analysis": - result = await project_graph_analysis_impl(project_path=arguments.get("project_path")) - elif name == "dss_project_list": - result = await project_list_impl() - elif name == "dss_project_info": - result = await project_info_impl(project_path=arguments.get("project_path")) - elif name == "dss_project_export_context": - result = await project_export_context_impl(project_path=arguments.get("project_path")) - elif name == "dss_figma_discover": - result = await figma_discover_impl( - team_id=arguments.get("team_id"), figma_token=arguments.get("figma_token") - ) - elif name == "dss_core_sync": - result = await dss_core_sync_impl( - force=arguments.get("force", False), figma_token=arguments.get("figma_token") - ) - elif name == "dss_core_status": - result = await dss_core_status_impl() - elif name == "dss_core_tokens": - result = await dss_core_tokens_impl() - elif name == "dss_core_themes": - result = await dss_core_themes_impl() - elif name == "dss_rate_limit_status": - result = await dss_rate_limit_status_impl(figma_token=arguments.get("figma_token")) - else: - result = {"success": False, "error": f"Unknown tool: {name}"} - - return [TextContent(type="text", text=json.dumps(safe_serialize(result), indent=2))] - - except Exception as e: - logger.exception(f"Error in tool {name}") - return [ - TextContent( - type="text", - text=json.dumps({"success": False, "error": str(e), "tool": name}, indent=2), - ) - ] - - -# ============================================================================= -# DSS TOOL IMPLEMENTATIONS -# ============================================================================= - - -async def analyze_project(path: str) -> Dict[str, Any]: - """Analyze a project for design system patterns.""" - project_path = Path(path).resolve() - - if not project_path.exists(): - return {"success": False, "error": f"Path does not exist: {path}"} - - try: - loop = asyncio.get_event_loop() - scanner = ProjectScanner(project_path) - react_analyzer = ReactAnalyzer(project_path) - style_analyzer = StyleAnalyzer(project_path) - - scan_result = await loop.run_in_executor(None, scanner.scan) - react_result = await loop.run_in_executor(None, react_analyzer.analyze) - style_result = await loop.run_in_executor(None, style_analyzer.analyze) - - return { - "success": True, - "project_path": str(project_path), - "analysis": { - "scan": safe_serialize(scan_result), - "react_components": safe_serialize(react_result), - "styles": safe_serialize(style_result), - }, - "summary": { - "files_scanned": getattr(scan_result, "files_count", 0), - "components_found": len(getattr(react_result, "components", [])), - "style_patterns": len(getattr(style_result, "patterns", [])), - }, - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def extract_tokens(path: str, sources: List[str]) -> Dict[str, Any]: - """Extract design tokens from various sources.""" - target_path = Path(path).resolve() - - if not target_path.exists(): - return {"success": False, "error": f"Path does not exist: {path}"} - - try: - loop = 
asyncio.get_event_loop() - all_tokens = [] - source_map = { - "css": CSSTokenSource, - "scss": SCSSTokenSource, - "tailwind": TailwindTokenSource, - "json": JSONTokenSource, - } - - for source_type in sources: - if source_type in source_map: - source = source_map[source_type](target_path) - tokens = await loop.run_in_executor(None, source.extract) - if tokens: - all_tokens.extend(tokens) - - if all_tokens: - merger = TokenMerger(strategy=MergeStrategy.PREFER_LATEST) - merged = merger.merge(all_tokens) - return { - "success": True, - "path": str(target_path), - "sources": sources, - "tokens": safe_serialize(merged), - "token_count": len(merged) if hasattr(merged, "__len__") else 0, - } - else: - return { - "success": True, - "path": str(target_path), - "sources": sources, - "tokens": [], - "token_count": 0, - "message": "No tokens found", - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def generate_theme(tokens: Dict, format: str, theme_name: str) -> Dict[str, Any]: - """Generate theme files from tokens.""" - try: - loop = asyncio.get_event_loop() - theme = Theme(name=theme_name, tokens=tokens) - sd_wrapper = StyleDictionaryWrapper() - result = await loop.run_in_executor( - None, lambda: sd_wrapper.transform_theme(theme, output_format=format) - ) - return { - "success": result.get("success", False), - "format": format, - "theme_name": theme_name, - "files": result.get("files", {}), - "errors": result.get("errors"), - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def list_themes() -> Dict[str, Any]: - """List available themes.""" - try: - from dss.themes import default_themes - - themes = list(getattr(default_themes, "THEMES", {}).keys()) - return {"success": True, "themes": themes, "count": len(themes)} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def get_status(format: str = "json") -> Dict[str, Any]: - """Get DSS system status.""" - try: - from dss.status import StatusDashboard - - dashboard = StatusDashboard() - if format == "dashboard": - return {"success": True, "format": "dashboard", "dashboard": dashboard.render_text()} - else: - return dashboard.get_status() - except ImportError: - logger.warning("StatusDashboard not available, using basic status") - system_info = manager.get_system_info() - dependencies = manager.check_dependencies() - return { - "success": True, - "version": dss.__version__, - "system_info": system_info, - "dependencies": dependencies, - "healthy": all(dependencies.values()), - "timestamp": datetime.now().isoformat(), - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def audit_components(path: str) -> Dict[str, Any]: - """Audit React components for design system adoption.""" - project_path = Path(path).resolve() - if not project_path.exists(): - return {"success": False, "error": f"Path does not exist: {path}"} - try: - loop = asyncio.get_event_loop() - react_analyzer = ReactAnalyzer(project_path) - style_analyzer = StyleAnalyzer(project_path) - graph = DependencyGraph(project_path) - react_result = await loop.run_in_executor(None, react_analyzer.analyze) - style_result = await loop.run_in_executor(None, style_analyzer.analyze) - graph_result = await loop.run_in_executor(None, graph.build) - hardcoded = getattr(style_result, "hardcoded_values", []) - return { - "success": True, - "path": str(project_path), - "audit": { - "components": safe_serialize(react_result), - "styles": safe_serialize(style_result), - 
"dependencies": safe_serialize(graph_result), - }, - "issues": {"hardcoded_values": hardcoded}, - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def setup_storybook(path: str, action: str) -> Dict[str, Any]: - """Setup or configure Storybook.""" - project_path = Path(path).resolve() - if not project_path.exists(): - return {"success": False, "error": f"Path does not exist: {path}"} - try: - loop = asyncio.get_event_loop() - if action == "scan": - scanner = StorybookScanner(project_path) - result = await loop.run_in_executor(None, scanner.scan) - return {"success": True, "action": "scan", "result": safe_serialize(result)} - elif action == "generate": - generator = StoryGenerator(project_path) - result = await loop.run_in_executor(None, generator.generate) - return { - "success": True, - "action": "generate", - "stories_created": safe_serialize(result), - } - elif action == "configure": - theme_gen = ThemeGenerator(project_path) - result = await loop.run_in_executor(None, theme_gen.generate) - return {"success": True, "action": "configure", "theme_config": safe_serialize(result)} - else: - return {"success": False, "error": f"Unknown action: {action}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def sync_figma(file_key: str) -> Dict[str, Any]: - """Sync tokens from Figma using intelligent sync v2.0. - - Features: - - Rate limiting with exponential backoff - - Caching with lastModified checks - - Figma Variables extraction - - W3C token format output - - Component variant classification - """ - if not file_key: - return {"success": False, "error": "file_key is required"} - - # Get token from env or config - figma_token = os.environ.get("FIGMA_TOKEN") - if not figma_token: - config_path = Path(__file__).parent.parent.parent / ".dss/config/figma.json" - if config_path.exists(): - try: - with open(config_path) as f: - config = json.load(f) - figma_token = config.get("token") - except Exception: - pass - - if not figma_token: - return { - "success": False, - "error": "FIGMA_TOKEN not configured. 
Set env var or add to .dss/config/figma.json", - } - - try: - # Import intelligent sync from scripts - scripts_dir = Path(__file__).parent.parent.parent / "scripts" - sys.path.insert(0, str(scripts_dir)) - - import importlib.util - - spec = importlib.util.spec_from_file_location("figma_sync", scripts_dir / "figma-sync.py") - figma_sync_module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(figma_sync_module) - - # Run intelligent sync - success = await figma_sync_module.intelligent_sync( - file_key=file_key, token=figma_token, force=True, verbose=False - ) - - if success: - # Load results - tokens_path = ( - Path(__file__).parent.parent.parent / ".dss/data/_system/tokens/figma-tokens.json" - ) - components_path = ( - Path(__file__).parent.parent.parent / ".dss/components/figma-registry.json" - ) - - tokens = {} - components = {} - - if tokens_path.exists(): - with open(tokens_path) as f: - tokens = json.load(f) - - if components_path.exists(): - with open(components_path) as f: - components = json.load(f) - - token_count = sum( - len(v) - for k, v in tokens.items() - if not k.startswith("$") and not k.startswith("_") and isinstance(v, dict) - ) - - return { - "success": True, - "file_key": file_key, - "tokens_extracted": token_count, - "components_extracted": components.get("component_count", 0), - "output_files": {"tokens": str(tokens_path), "components": str(components_path)}, - } - else: - return {"success": False, "error": "Sync failed - check logs"} - - except Exception as e: - import traceback - - return {"success": False, "error": str(e), "traceback": traceback.format_exc()} - - -async def find_quick_wins(path: str) -> Dict[str, Any]: - """Find quick win opportunities.""" - project_path = Path(path).resolve() - if not project_path.exists(): - return {"success": False, "error": f"Path does not exist: {path}"} - try: - loop = asyncio.get_event_loop() - finder = QuickWinFinder(project_path) - quick_wins = await loop.run_in_executor(None, finder.find) - return { - "success": True, - "path": str(project_path), - "quick_wins": safe_serialize(quick_wins), - "count": len(quick_wins) if quick_wins else 0, - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def transform_tokens(tokens: Dict, input_format: str, output_format: str) -> Dict[str, Any]: - """Transform tokens between formats.""" - try: - loop = asyncio.get_event_loop() - theme = Theme(name="transform_temp", tokens=tokens) - sd_wrapper = StyleDictionaryWrapper() - result = await loop.run_in_executor( - None, lambda: sd_wrapper.transform_theme(theme, output_format=output_format) - ) - return { - "success": result.get("success", False), - "input_format": input_format, - "output_format": output_format, - "transformed": result.get("files", {}), - "errors": result.get("errors"), - } - except Exception as e: - return {"success": False, "error": str(e)} - - -# ============================================================================= -# CHROME DEVTOOLS IMPLEMENTATIONS -# ============================================================================= - - -def _get_active_page(): - """Retrieve the active Playwright Page from DevTools state. - - Returns: - Playwright Page object for the currently selected browser tab. - - Raises: - ConnectionError: If not connected to Chrome DevTools. - ValueError: If no page is selected or the selected page no longer exists. - """ - if not devtools.connected: - raise ConnectionError( - "Not connected to DevTools. Call devtools_connect(port=9222) first. 
" - "Ensure Chrome is running with --remote-debugging-port=9222" - ) - if not devtools.active_page_id: - available = len(devtools.pages) - raise ValueError( - f"No active page selected. {available} page(s) available. " - "Call devtools_list_pages then devtools_select_page(page_id)." - ) - if devtools.active_page_id not in devtools.pages: - raise ValueError( - f"Selected page '{devtools.active_page_id}' no longer exists. " - "Call devtools_list_pages to refresh available pages." - ) - return devtools.pages[devtools.active_page_id] - - -async def _on_console(msg): - """Event handler for browser console messages. - - Captures console.log, console.error, console.warn, etc. from the active page. - Messages are stored in a bounded deque (max DEVTOOLS_CONSOLE_MAX_ENTRIES). - - Args: - msg: Playwright ConsoleMessage object containing type, text, args, and location. - """ - try: - devtools.console_logs.append( - { - "timestamp": datetime.now().isoformat(), - "type": msg.type, - "text": msg.text, - "args": [str(arg) for arg in msg.args] if msg.args else [], - "location": getattr(msg, "location", {}), - } - ) - except Exception as e: - logger.debug(f"Error capturing console message: {e}") - - -async def _on_request(request): - """Event handler for network requests. - - Captures all HTTP requests made by the active page (XHR, fetch, resources). - Requests are stored in a bounded deque (max DEVTOOLS_NETWORK_MAX_ENTRIES). - - Args: - request: Playwright Request object containing url, method, headers, resource_type. - """ - try: - devtools.network_requests.append( - { - "timestamp": datetime.now().isoformat(), - "url": request.url, - "method": request.method, - "headers": dict(request.headers) if request.headers else {}, - "resource_type": request.resource_type, - } - ) - except Exception as e: - logger.debug(f"Error capturing network request: {e}") - - -@with_timeout("devtools_connect") -async def devtools_launch_impl(url: str = "about:blank", headless: bool = True) -> Dict[str, Any]: - """Launch a new headless Chromium browser instance. - - Use this on headless/remote servers where no Chrome instance is running. - Launches Playwright's bundled Chromium with CDP enabled. - - Args: - url: Initial URL to navigate to (default: about:blank) - headless: Run in headless mode (default: True for servers) - """ - global devtools - - if devtools.connected: - return {"success": False, "error": "Already connected. 
Call devtools_disconnect first."} - - try: - devtools.playwright = await async_playwright().start() - devtools.browser = await devtools.playwright.chromium.launch( - headless=headless, - args=["--no-sandbox", "--disable-dev-shm-usage"], # Required for Docker/remote - ) - devtools.connected = True - - # Create initial page and navigate - context = await devtools.browser.new_context() - devtools.contexts["context_0"] = context - page = await context.new_page() - - if url and url != "about:blank": - await page.goto(url, wait_until="domcontentloaded") - - # Store page directly (don't rely on list_pages for launched browser) - devtools.pages["page_0"] = page - devtools.active_page_id = "page_0" - - # Attach event listeners - try: - page.on("console", _on_console) - page.on("request", _on_request) - except Exception as e: - logger.warning(f"Failed to attach listeners: {e}") - - return { - "success": True, - "message": "Launched headless Chromium", - "headless": headless, - "url": url, - "pages_found": len(devtools.pages), - "active_page_id": devtools.active_page_id, - } - except Exception as e: - await devtools_disconnect_impl() - return {"success": False, "error": f"Launch failed: {str(e)}"} - - -@with_timeout("devtools_connect") -async def devtools_connect_impl(port: int = 9222, host: str = "localhost") -> Dict[str, Any]: - """Connect to a running Chrome instance via CDP.""" - global devtools - - if devtools.connected: - return {"success": False, "error": "Already connected. Call devtools_disconnect first."} - - try: - devtools.playwright = await async_playwright().start() - # Use configurable timeout for CDP connection - devtools.browser = await devtools.playwright.chromium.connect_over_cdp( - f"http://{host}:{port}", timeout=DEVTOOLS_CONNECTION_TIMEOUT_MS - ) - devtools.connected = True - - # Populate pages - await devtools_list_pages_impl() - - # Auto-select first page if available - if devtools.pages and not devtools.active_page_id: - first_page_id = next(iter(devtools.pages.keys())) - await devtools_select_page_impl(first_page_id) - - return { - "success": True, - "message": f"Connected to Chrome DevTools at {host}:{port}", - "pages_found": len(devtools.pages), - "active_page_id": devtools.active_page_id, - } - except Exception as e: - await devtools_disconnect_impl() - return { - "success": False, - "error": f"Connection failed: {str(e)}. Is Chrome running with --remote-debugging-port={port}?", - } - - -async def devtools_disconnect_impl() -> Dict[str, Any]: - """Disconnect from Chrome and clean up resources. - - Ensures proper cleanup of: - - Event listeners on all pages - - Browser connection - - Playwright instance - """ - global devtools - - if not devtools.connected: - return {"success": True, "message": "Not connected."} - - try: - # Remove event listeners from all pages to prevent memory leaks - for page_id, page in devtools.pages.items(): - try: - page.remove_listener("console", _on_console) - page.remove_listener("request", _on_request) - except Exception: - pass # Page may already be closed - - if devtools.browser: - await devtools.browser.close() - if devtools.playwright: - await devtools.playwright.stop() - except Exception as e: - logger.error(f"Error during disconnect: {e}") - finally: - devtools = DevToolsState() - - return {"success": True, "message": "Disconnected successfully."} - - -async def devtools_list_pages_impl() -> Dict[str, Any]: - """List all browser pages/tabs with URLs.""" - if not devtools.connected: - return {"success": False, "error": "Not connected. 
Call devtools_connect first."} - - try: - devtools.pages.clear() - devtools.contexts.clear() - - contexts = devtools.browser.contexts - page_index = 0 - for i, context in enumerate(contexts): - devtools.contexts[f"context_{i}"] = context - for page in context.pages: - page_id = f"page_{page_index}" - devtools.pages[page_id] = page - page_index += 1 - - page_list = [] - for page_id, page in devtools.pages.items(): - try: - title = await page.title() - page_list.append({"id": page_id, "title": title, "url": page.url}) - except Exception: - page_list.append({"id": page_id, "title": "(unavailable)", "url": page.url}) - - return { - "success": True, - "pages": page_list, - "count": len(page_list), - "active_page_id": devtools.active_page_id, - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def devtools_select_page_impl(page_id: str) -> Dict[str, Any]: - """Set active page for operations.""" - if not devtools.connected: - return {"success": False, "error": "Not connected."} - - if not page_id: - return {"success": False, "error": "page_id is required."} - - if page_id not in devtools.pages: - await devtools_list_pages_impl() - if page_id not in devtools.pages: - return {"success": False, "error": f"Page with ID '{page_id}' not found."} - - # Remove old listeners - if devtools.active_page_id and devtools.active_page_id in devtools.pages: - try: - old_page = devtools.pages[devtools.active_page_id] - old_page.remove_listener("console", _on_console) - old_page.remove_listener("request", _on_request) - except Exception: - pass - - devtools.active_page_id = page_id - page = devtools.pages[page_id] - - # Attach event listeners with race condition protection - try: - page.on("console", _on_console) - page.on("request", _on_request) - except Exception as e: - # Page may have closed between selection and listener attachment - logger.warning(f"Failed to attach listeners to page {page_id}: {e}") - devtools.active_page_id = None - return {"success": False, "error": f"Page closed during selection: {str(e)}"} - - try: - title = await page.title() - except Exception: - title = "(unavailable)" - - return {"success": True, "message": f"Active page set to '{title}'", "page_id": page_id} - - -async def devtools_goto_impl(url: str, wait_until: str = "domcontentloaded") -> Dict[str, Any]: - """Navigate the active page to a URL. 
- - Args: - url: URL to navigate to - wait_until: Wait condition - 'load', 'domcontentloaded', or 'networkidle' - """ - if not url: - return {"success": False, "error": "URL is required."} - - valid_wait = ["load", "domcontentloaded", "networkidle"] - if wait_until not in valid_wait: - wait_until = "domcontentloaded" - - try: - page = _get_active_page() - response = await page.goto(url, wait_until=wait_until) - - status = response.status if response else None - return {"success": True, "url": url, "status": status, "title": await page.title()} - except (ConnectionError, ValueError) as e: - return {"success": False, "error": str(e)} - except Exception as e: - return {"success": False, "error": f"Navigation failed: {str(e)}"} - - -async def devtools_console_logs_impl( - level: str = "all", limit: int = 100, clear: bool = False -) -> Dict[str, Any]: - """Get console messages.""" - try: - _get_active_page() - logs = list(devtools.console_logs) - - if level != "all": - logs = [log for log in logs if log.get("type") == level] - - result = logs[-limit:] - - if clear: - devtools.console_logs.clear() - - return { - "success": True, - "logs": result, - "count": len(result), - "total_captured": len(devtools.console_logs), - } - except (ConnectionError, ValueError) as e: - return {"success": False, "error": str(e)} - - -async def devtools_network_requests_impl(filter_url: str = "", limit: int = 50) -> Dict[str, Any]: - """Get network activity.""" - try: - _get_active_page() - requests = list(devtools.network_requests) - - if filter_url: - requests = [req for req in requests if re.search(filter_url, req.get("url", ""))] - - result = requests[-limit:] - return { - "success": True, - "requests": result, - "count": len(result), - "total_captured": len(devtools.network_requests), - } - except (ConnectionError, ValueError) as e: - return {"success": False, "error": str(e)} - - -async def devtools_evaluate_impl(expression: str) -> Dict[str, Any]: - """Execute JavaScript in page context. - - WARNING: This executes arbitrary JS in the browser context. - All executions are logged for audit purposes. - """ - if not expression: - return {"success": False, "error": "JavaScript expression cannot be empty."} - - # Audit log for security tracking - expr_preview = expression[:100] + "..." 
if len(expression) > 100 else expression - logger.info(f"[AUDIT] devtools_evaluate called: {expr_preview}") - - try: - page = _get_active_page() - result = await page.evaluate(expression) - logger.debug(f"[AUDIT] devtools_evaluate success for page {devtools.active_page_id}") - return {"success": True, "result": safe_serialize(result)} - except (ConnectionError, ValueError) as e: - return {"success": False, "error": str(e)} - except Exception as e: - logger.warning(f"[AUDIT] devtools_evaluate failed: {str(e)}") - return {"success": False, "error": f"JavaScript evaluation failed: {str(e)}"} - - -async def devtools_query_dom_impl(selector: str) -> Dict[str, Any]: - """Query DOM elements with CSS selector.""" - if not selector: - return {"success": False, "error": "CSS selector cannot be empty."} - try: - page = _get_active_page() - elements = await page.query_selector_all(selector) - results = [] - for el in elements[:50]: # Limit to 50 elements - try: - results.append( - { - "tag": await el.evaluate("el => el.tagName.toLowerCase()"), - "id": await el.evaluate("el => el.id || null"), - "classes": await el.evaluate( - 'el => Array.from(el.classList).join(" ") || null' - ), - "text": (await el.text_content() or "")[:200], - } - ) - except Exception: - continue - return {"success": True, "elements": results, "count": len(results)} - except (ConnectionError, ValueError) as e: - return {"success": False, "error": str(e)} - except Exception as e: - return {"success": False, "error": f"DOM query failed: {str(e)}"} - - -async def devtools_screenshot_impl(selector: str = None, full_page: bool = False) -> Dict[str, Any]: - """Capture screenshot as base64 PNG.""" - try: - page = _get_active_page() - screenshot_bytes = None - - if selector: - element = page.locator(selector).first - await element.wait_for(state="visible", timeout=5000) - screenshot_bytes = await element.screenshot() - else: - screenshot_bytes = await page.screenshot(full_page=full_page) - - b64_image = base64.b64encode(screenshot_bytes).decode("utf-8") - return {"success": True, "image_base64_png": b64_image, "size_bytes": len(screenshot_bytes)} - except (ConnectionError, ValueError) as e: - return {"success": False, "error": str(e)} - except Exception as e: - return {"success": False, "error": f"Screenshot failed: {str(e)}"} - - -async def devtools_performance_impl() -> Dict[str, Any]: - """Get Core Web Vitals and performance metrics.""" - try: - page = _get_active_page() - metrics = await page.evaluate( - """() => { - const timing = window.performance.getEntriesByType('navigation')[0]; - if (!timing) return null; - - const paint = window.performance.getEntriesByType('paint'); - const fcp = paint.find(p => p.name === 'first-contentful-paint'); - - return { - // Navigation timing - domContentLoaded: Math.round(timing.domContentLoadedEventEnd - timing.domContentLoadedEventStart), - loadTime: Math.round(timing.loadEventEnd - timing.loadEventStart), - totalPageLoadTime: Math.round(timing.loadEventEnd - timing.startTime), - dnsLookup: Math.round(timing.domainLookupEnd - timing.domainLookupStart), - tcpConnect: Math.round(timing.connectEnd - timing.connectStart), - requestTime: Math.round(timing.responseEnd - timing.requestStart), - responseTime: Math.round(timing.responseEnd - timing.responseStart), - domInteractive: Math.round(timing.domInteractive - timing.startTime), - // Paint timing - firstContentfulPaint: fcp ? Math.round(fcp.startTime) : null, - // Memory (if available) - jsHeapSize: window.performance.memory ? 
Math.round(window.performance.memory.usedJSHeapSize / 1024 / 1024) : null - }; - }""" - ) - - if not metrics: - return {"success": False, "error": "Performance metrics not available for this page."} - - return {"success": True, "metrics": metrics} - except (ConnectionError, ValueError) as e: - return {"success": False, "error": str(e)} - except Exception as e: - return {"success": False, "error": f"Performance query failed: {str(e)}"} - - -# ============================================================================= -# BROWSER AUTOMATION IMPLEMENTATIONS (Unified LOCAL/REMOTE) -# ============================================================================= - - -class DummyContext: - """Dummy context for LocalBrowserStrategy initialization.""" - - def __init__(self, session_id: str = "local"): - self.session_id = session_id - - -async def browser_init_impl( - mode: str = "local", - url: Optional[str] = None, - session_id: Optional[str] = None, - headless: bool = True, -) -> Dict[str, Any]: - """Initialize browser automation in LOCAL or REMOTE mode.""" - global browser_state - - if browser_state.initialized: - return {"success": False, "error": "Browser already initialized. Call browser_close first."} - - if mode == "local": - if not LOCAL_BROWSER_STRATEGY_AVAILABLE: - return { - "success": False, - "error": "LocalBrowserStrategy not available. Ensure strategies/local/browser.py exists.", - } - if not PLAYWRIGHT_AVAILABLE: - return { - "success": False, - "error": "Playwright not installed. Run: pip install playwright && playwright install chromium", - } - - try: - context = DummyContext(session_id or f"local-{datetime.now().strftime('%Y%m%d%H%M%S')}") - browser_state.strategy = LocalBrowserStrategy(context) - await browser_state.strategy.launch(headless=headless) - - if url: - await browser_state.strategy.navigate(url) - - browser_state.mode = "local" - browser_state.session_id = context.session_id - browser_state.initialized = True - - return { - "success": True, - "mode": "local", - "session_id": browser_state.session_id, - "url": url, - "headless": headless, - "message": "Local browser automation initialized successfully.", - } - except Exception as e: - return {"success": False, "error": f"Failed to initialize LOCAL mode: {str(e)}"} - - elif mode == "remote": - if not url: - return { - "success": False, - "error": "Remote mode requires 'url' parameter (API endpoint).", - } - if not session_id: - return {"success": False, "error": "Remote mode requires 'session_id' parameter."} - - # For remote mode, we just store the configuration - # Actual fetching happens in each tool call - browser_state.mode = "remote" - browser_state.session_id = session_id - browser_state.remote_api_url = url - browser_state.initialized = True - - return { - "success": True, - "mode": "remote", - "session_id": session_id, - "api_url": url, - "message": "Remote browser automation configured. Will fetch from Shadow State API.", - } - else: - return {"success": False, "error": f"Unknown mode: {mode}. Use 'local' or 'remote'."} - - -async def browser_get_logs_impl(level: str = "all", limit: int = 100) -> Dict[str, Any]: - """Get console logs from browser (LOCAL or REMOTE mode).""" - global browser_state - - if not browser_state.initialized: - return {"success": False, "error": "Browser not initialized. 
Call browser_init first."} - - try: - if browser_state.mode == "local": - logs = await browser_state.strategy.get_console_logs( - limit=limit, level=level if level != "all" else None - ) - return {"success": True, "mode": "local", "logs": logs, "count": len(logs)} - - elif browser_state.mode == "remote": - import aiohttp - - async with aiohttp.ClientSession() as session: - url = f"{browser_state.remote_api_url}/{browser_state.session_id}" - async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response: - if response.status == 200: - data = await response.json() - logs = data.get("logs", []) - if level != "all": - logs = [log for log in logs if log.get("level") == level] - return { - "success": True, - "mode": "remote", - "logs": logs[-limit:], - "count": len(logs), - } - else: - return {"success": False, "error": f"API returned status {response.status}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def browser_screenshot_impl( - selector: Optional[str] = None, full_page: bool = False -) -> Dict[str, Any]: - """Capture screenshot (LOCAL mode only).""" - global browser_state - - if not browser_state.initialized: - return {"success": False, "error": "Browser not initialized. Call browser_init first."} - - if browser_state.mode != "local": - return {"success": False, "error": "Screenshots require LOCAL mode."} - - try: - path = await browser_state.strategy.capture_screenshot( - selector=selector, full_page=full_page - ) - # Read file and encode as base64 - with open(path, "rb") as f: - screenshot_bytes = f.read() - b64_image = base64.b64encode(screenshot_bytes).decode("utf-8") - return { - "success": True, - "image_base64_png": b64_image, - "path": path, - "size_bytes": len(screenshot_bytes), - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def browser_dom_snapshot_impl() -> Dict[str, Any]: - """Get DOM snapshot (LOCAL or REMOTE mode).""" - global browser_state - - if not browser_state.initialized: - return {"success": False, "error": "Browser not initialized. Call browser_init first."} - - try: - if browser_state.mode == "local": - html = await browser_state.strategy.get_dom_snapshot() - return {"success": True, "mode": "local", "html": html, "length": len(html)} - - elif browser_state.mode == "remote": - import aiohttp - - async with aiohttp.ClientSession() as session: - url = f"{browser_state.remote_api_url}/{browser_state.session_id}" - async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response: - if response.status == 200: - data = await response.json() - # Look for snapshot in logs - snapshots = [ - log for log in data.get("logs", []) if log.get("category") == "snapshot" - ] - if snapshots: - latest = snapshots[-1] - html = latest.get("data", {}).get("snapshot", {}).get("html", "") - return { - "success": True, - "mode": "remote", - "html": html, - "length": len(html), - } - return { - "success": True, - "mode": "remote", - "html": "", - "message": "No snapshot found in logs.", - } - else: - return {"success": False, "error": f"API returned status {response.status}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def browser_get_errors_impl(limit: int = 50) -> Dict[str, Any]: - """Get captured errors (LOCAL or REMOTE mode).""" - global browser_state - - if not browser_state.initialized: - return {"success": False, "error": "Browser not initialized. 
Call browser_init first."} - - try: - if browser_state.mode == "local": - errors = await browser_state.strategy.get_errors(limit=limit) - return {"success": True, "mode": "local", "errors": errors, "count": len(errors)} - - elif browser_state.mode == "remote": - import aiohttp - - async with aiohttp.ClientSession() as session: - url = f"{browser_state.remote_api_url}/{browser_state.session_id}" - async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response: - if response.status == 200: - data = await response.json() - logs = data.get("logs", []) - errors = [log for log in logs if log.get("level") == "error"] - return { - "success": True, - "mode": "remote", - "errors": errors[-limit:], - "count": len(errors), - } - else: - return {"success": False, "error": f"API returned status {response.status}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def browser_accessibility_audit_impl(selector: Optional[str] = None) -> Dict[str, Any]: - """Run accessibility audit (LOCAL injects axe-core, REMOTE fetches from Shadow State).""" - global browser_state - - if not browser_state.initialized: - return {"success": False, "error": "Browser not initialized. Call browser_init first."} - - try: - if browser_state.mode == "local": - result = await browser_state.strategy.run_accessibility_audit(selector=selector) - else: - # REMOTE mode: fetch from Shadow State - import aiohttp - - async with aiohttp.ClientSession() as session: - url = f"{browser_state.remote_api_url}/{browser_state.session_id}" - async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response: - if response.status != 200: - return {"success": False, "error": f"API returned status {response.status}"} - data = await response.json() - logs = data.get("logs", []) - audits = [ - l - for l in logs - if l.get("category") in ["accessibility", "accessibilitySnapshot"] - ] - if not audits: - return { - "success": True, - "mode": "remote", - "message": "No accessibility audit in Shadow State. Run __DSS_BROWSER_LOGS.audit() in browser.", - "summary": {"violations": 0, "passes": 0, "incomplete": 0}, - "violations": [], - "passes": [], - "incomplete": [], - } - latest = max(audits, key=lambda x: x.get("timestamp", 0)) - audit_data = latest.get("data", {}) - result = ( - audit_data.get("results") or audit_data.get("accessibility") or audit_data - ) - - violations_count = len(result.get("violations", [])) - passes_count = len(result.get("passes", [])) - incomplete_count = len(result.get("incomplete", [])) - - return { - "success": True, - "mode": browser_state.mode, - "summary": { - "violations": violations_count, - "passes": passes_count, - "incomplete": incomplete_count, - }, - "violations": result.get("violations", []), - "passes": result.get("passes", []), - "incomplete": result.get("incomplete", []), - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def browser_performance_impl() -> Dict[str, Any]: - """Get Core Web Vitals and performance metrics (LOCAL or REMOTE mode).""" - global browser_state - - if not browser_state.initialized: - return {"success": False, "error": "Browser not initialized. 
Call browser_init first."} - - try: - if browser_state.mode == "local": - metrics = await browser_state.strategy.get_performance_metrics() - return {"success": True, "mode": "local", "metrics": metrics} - else: - # REMOTE mode: fetch from Shadow State - import aiohttp - - async with aiohttp.ClientSession() as session: - url = f"{browser_state.remote_api_url}/{browser_state.session_id}" - async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response: - if response.status != 200: - return {"success": False, "error": f"API returned status {response.status}"} - data = await response.json() - logs = data.get("logs", []) - perf_logs = [ - l - for l in logs - if l.get("category") in ["performance", "accessibilitySnapshot"] - ] - if not perf_logs: - return { - "success": True, - "mode": "remote", - "message": "No performance data in Shadow State. Metrics are captured during page load.", - "metrics": {}, - } - latest = max(perf_logs, key=lambda x: x.get("timestamp", 0)) - perf_data = latest.get("data", {}) - metrics = perf_data.get("performance") or {"raw_data": perf_data} - return { - "success": True, - "mode": "remote", - "metrics": {"core_web_vitals": metrics}, - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def browser_close_impl() -> Dict[str, Any]: - """Close browser automation session.""" - global browser_state - - if not browser_state.initialized: - return {"success": True, "message": "Browser was not initialized."} - - try: - if browser_state.mode == "local" and browser_state.strategy: - await browser_state.strategy.close() - - # Reset state - browser_state = BrowserAutomationState() - - return {"success": True, "message": "Browser automation session closed."} - except Exception as e: - # Reset state even on error - browser_state = BrowserAutomationState() - return {"success": True, "message": f"Browser closed with warning: {str(e)}"} - - -# ============================================================================= -# PROJECT MANAGEMENT IMPLEMENTATIONS -# ============================================================================= - - -async def project_init_impl( - path: str, name: str, description: str = None, skin: str = None -) -> Dict[str, Any]: - """Implementation for dss_project_init.""" - - if not path or not name: - return {"success": False, "error": "path and name are required."} - - try: - manager = ProjectManager() - - project = manager.init(path=Path(path), name=name, description=description, skin=skin) - - # Trigger graph analysis in the background - - asyncio.create_task(project_graph_analysis_impl(project_path=str(project.path))) - - return { - "success": True, - "project_name": project.config.name, - "path": str(project.path), - "status": project.status.value, - "message": "Project initialized. 
Graph analysis started in background.", - } - - except Exception as e: - logger.exception("dss_project_init failed") - - return {"success": False, "error": str(e)} - - -async def project_graph_analysis_impl(project_path: str) -> Dict[str, Any]: - """Implementation for dss_project_graph_analysis.""" - - if not project_path: - return {"success": False, "error": "project_path is required."} - - try: - from dss.analyze.project_analyzer import run_project_analysis - - loop = asyncio.get_event_loop() - - analysis_result = await loop.run_in_executor(None, run_project_analysis, project_path) - - return {"success": True, "project_path": project_path, "analysis": analysis_result} - - except Exception as e: - logger.exception(f"dss_project_graph_analysis failed for {project_path}") - - return {"success": False, "error": str(e)} - - -async def project_add_figma_team_impl( - project_path: str, team_id: str, figma_token: Optional[str] = None -) -> Dict[str, Any]: - """Implementation for dss_project_add_figma_team.""" - - if not project_path or not team_id: - return {"success": False, "error": "project_path and team_id are required."} - - try: - manager = ProjectManager() - - project = manager.load(Path(project_path)) - - updated_project = manager.add_figma_team( - project=project, team_id=team_id, figma_token=figma_token - ) - - return { - "success": True, - "project_name": updated_project.config.name, - "figma_team_id": updated_project.config.figma.team_id, - "files_added": len(updated_project.config.figma.files), - } - - except Exception as e: - logger.exception("dss_project_add_figma_team failed") - - return {"success": False, "error": str(e)} - - -async def project_add_figma_file_impl( - project_path: str, file_key: str, file_name: str, figma_token: Optional[str] = None -) -> Dict[str, Any]: - """Add a single Figma file to DSS project.""" - if not PROJECT_MANAGEMENT_AVAILABLE: - return { - "success": False, - "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}", - } - - try: - loop = asyncio.get_event_loop() - manager = ProjectManager() - - project = await loop.run_in_executor(None, lambda: manager.load(Path(project_path))) - - updated_project = await loop.run_in_executor( - None, - lambda: manager.add_figma_file( - project=project, file_key=file_key, file_name=file_name, figma_token=figma_token - ), - ) - - return { - "success": True, - "message": f"Added Figma file '{file_name}' to project", - "file_key": file_key, - "file_name": file_name, - "total_files": len(updated_project.config.figma.files), - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def project_sync_impl( - project_path: str, file_keys: Optional[List[str]] = None, figma_token: Optional[str] = None -) -> Dict[str, Any]: - """Sync design tokens from Figma sources.""" - if not PROJECT_MANAGEMENT_AVAILABLE: - return { - "success": False, - "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}", - } - - try: - loop = asyncio.get_event_loop() - manager = ProjectManager() - - project = await loop.run_in_executor(None, lambda: manager.load(Path(project_path))) - - # Sync (use sync version to avoid nested async issues) - updated_project = await loop.run_in_executor( - None, - lambda: manager.sync(project=project, figma_token=figma_token, file_keys=file_keys), - ) - - # Count tokens extracted - total_tokens = 0 - sources_info = {} - if updated_project.extracted_tokens: - for source_key, source_data in updated_project.extracted_tokens.get( - "sources", {} - 
).items(): - token_count = len(source_data.get("tokens", {})) - total_tokens += token_count - sources_info[source_key] = token_count - - return { - "success": True, - "message": f"Synced {total_tokens} tokens from {len(sources_info)} files", - "project_status": updated_project.status.value, - "tokens_extracted": total_tokens, - "sources": sources_info, - "errors": updated_project.errors, - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def project_build_impl(project_path: str) -> Dict[str, Any]: - """Build output files from synced tokens.""" - if not PROJECT_MANAGEMENT_AVAILABLE: - return { - "success": False, - "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}", - } - - try: - loop = asyncio.get_event_loop() - manager = ProjectManager() - - project = await loop.run_in_executor(None, lambda: manager.load(Path(project_path))) - - updated_project = await loop.run_in_executor(None, lambda: manager.build(project)) - - output_dir = str(project.path / project.config.output.tokens_dir) - - return { - "success": True, - "message": "Built output files", - "project_status": updated_project.status.value, - "output_directory": output_dir, - "formats_generated": updated_project.config.output.formats, - "errors": updated_project.errors, - } - except Exception as e: - return {"success": False, "error": str(e)} - - -async def project_list_impl() -> Dict[str, Any]: - """List all registered DSS projects.""" - if not PROJECT_MANAGEMENT_AVAILABLE: - return { - "success": False, - "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}", - } - - try: - manager = ProjectManager() - projects = manager.list() - - return {"success": True, "count": len(projects), "projects": projects} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def project_info_impl(project_path: str) -> Dict[str, Any]: - """Implementation for dss_project_info.""" - if not project_path: - return {"success": False, "error": "project_path is required."} - try: - manager = ProjectManager() - project = manager.load(Path(project_path)) - return {"success": True, "project_info": safe_serialize(project.config)} - except Exception as e: - logger.exception("dss_project_info failed") - return {"success": False, "error": str(e)} - - -async def project_export_context_impl(project_path: str) -> Dict[str, Any]: - """Implementation for dss_project_export_context.""" - if not project_path: - return {"success": False, "error": "project_path is required."} - try: - from dss.analyze.project_analyzer import export_project_context - - loop = asyncio.get_event_loop() - project_context = await loop.run_in_executor(None, export_project_context, project_path) - - return {"success": True, "project_context": project_context} - except Exception as e: - logger.exception(f"dss_project_export_context failed for {project_path}") - return {"success": False, "error": str(e)} - - -async def project_graph_analysis_impl(project_path: str) -> Dict[str, Any]: - """Implementation for dss_project_graph_analysis.""" - if not project_path: - return {"success": False, "error": "project_path is required."} - - try: - from dss.analyze.project_analyzer import run_project_analysis - - loop = asyncio.get_event_loop() - analysis_result = await loop.run_in_executor(None, run_project_analysis, project_path) - - return {"success": True, "project_path": project_path, "analysis": analysis_result} - except Exception as e: - logger.exception(f"dss_project_graph_analysis failed for 
{project_path}") - return {"success": False, "error": str(e)} - - -async def figma_discover_impl(team_id: str, figma_token: Optional[str] = None) -> Dict[str, Any]: - """Implementation for dss_figma_discover.""" - - -# ============================================================================= -# DSS CORE SYNC IMPLEMENTATIONS -# ============================================================================= - - -async def dss_core_sync_impl( - force: bool = False, figma_token: Optional[str] = None -) -> Dict[str, Any]: - """ - Sync DSS core from the canonical shadcn/ui Figma source. - - This implements DSS's "eat our own dog food" philosophy - using the - shadcn/ui Figma as the canonical base layer for all design systems. - """ - try: - # Import DSS core sync - from dss.project.figma import FigmaRateLimitError - from dss.project.sync import DSSCoreSync - - loop = asyncio.get_event_loop() - sync = DSSCoreSync(figma_token=figma_token) - - # Run sync in executor (it uses sync requests) - result = await loop.run_in_executor(None, lambda: sync.sync(force=force)) - - if result.get("success"): - return { - "success": True, - "message": result.get("message", "Sync completed"), - "summary": result.get("summary", {}), - "files_written": result.get("files_written", []), - "figma_reference": { - "team_id": sync.reference.team_id, - "team_name": sync.reference.team_name, - "uikit_file_key": sync.reference.uikit_file_key, - "uikit_file_name": sync.reference.uikit_file_name, - }, - } - else: - return result - - except FigmaRateLimitError as e: - return { - "success": False, - "error": f"Figma rate limit exceeded: {e}", - "retry_after": e.retry_after, - "hint": "Wait for the rate limit to reset and try again", - } - except ImportError as e: - return {"success": False, "error": f"DSS core sync not available: {e}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def dss_core_status_impl() -> Dict[str, Any]: - """Get DSS core sync status.""" - try: - from dss.project.sync import DSSCoreSync - - sync = DSSCoreSync() - status = sync.get_sync_status() - - return {"success": True, **status} - except ImportError as e: - return {"success": False, "error": f"DSS core sync not available: {e}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def dss_core_tokens_impl() -> Dict[str, Any]: - """Get DSS core tokens.""" - try: - from dss.project.sync import DSSCoreSync - - sync = DSSCoreSync() - tokens = sync.get_tokens() - - if tokens: - return { - "success": True, - "tokens": tokens, - "categories": list(tokens.get("categories", {}).keys()), - "total_tokens": sum(len(cat) for cat in tokens.get("categories", {}).values()), - } - else: - return { - "success": False, - "error": "DSS core not synced yet. Run dss_core_sync first.", - "hint": "Use dss_core_sync to sync from Figma", - } - except ImportError as e: - return {"success": False, "error": f"DSS core sync not available: {e}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def dss_core_themes_impl() -> Dict[str, Any]: - """Get DSS core themes.""" - try: - from dss.project.sync import DSSCoreSync - - sync = DSSCoreSync() - themes = sync.get_themes() - - if themes: - theme_names = list(themes.get("themes", {}).keys()) - return { - "success": True, - "themes": themes, - "theme_names": theme_names, - "total_themes": len(theme_names), - } - else: - return { - "success": False, - "error": "DSS core not synced yet. 
Run dss_core_sync first.", - "hint": "Use dss_core_sync to sync from Figma", - } - except ImportError as e: - return {"success": False, "error": f"DSS core sync not available: {e}"} - except Exception as e: - return {"success": False, "error": str(e)} - - -async def dss_rate_limit_status_impl(figma_token: Optional[str] = None) -> Dict[str, Any]: - """Get current Figma rate limit status.""" - try: - from dss.project.figma import FigmaProjectSync - - sync = FigmaProjectSync(token=figma_token) - status = sync.get_rate_limit_status() - - return { - "success": True, - **status, - "hint": "Rate limits reset after 60 seconds of no requests", - } - except ValueError as e: - return {"success": False, "error": str(e)} - except Exception as e: - return {"success": False, "error": str(e)} - - -# ============================================================================= -# MAIN -# ============================================================================= - - -async def main(): - """Run the MCP server.""" - # Configure log rotation (10MB per file, keep 5 backups) - if RUNTIME_AVAILABLE: - try: - configure_log_rotation(max_bytes=10 * 1024 * 1024, backup_count=5) - except Exception as e: - logger.warning("Failed to configure log rotation", extra={"error": str(e)}) - - # Server startup logging with structured data - logger.info( - "Starting DSS MCP Server", - extra={ - "version": "2.0.0", - "dss_path": str(DSS_PATH), - "capabilities": { - "dss": DSS_AVAILABLE, - "playwright": PLAYWRIGHT_AVAILABLE, - "local_browser": LOCAL_BROWSER_STRATEGY_AVAILABLE, - "runtime": RUNTIME_AVAILABLE, - }, - }, - ) - - # Initialize DSS Runtime with boundary enforcement - if RUNTIME_AVAILABLE: - try: - runtime = get_runtime() - stats = runtime.get_stats() - logger.info( - "DSS Runtime initialized", - extra={ - "enforcement_mode": stats["enforcement_mode"], - "boundary_enforcement": "ACTIVE", - "stats": stats, - }, - ) - except Exception as e: - logger.error( - "Failed to initialize runtime", - extra={"error": str(e), "boundary_enforcement": "DISABLED"}, - ) - else: - logger.warning( - "DSSRuntime not available", - extra={ - "boundary_enforcement": "DISABLED", - "import_error": RUNTIME_IMPORT_ERROR if not RUNTIME_AVAILABLE else None, - }, - ) - - if DSS_AVAILABLE: - logger.info("DSS module loaded", extra={"version": dss.__version__}) - - try: - async with stdio_server() as (read_stream, write_stream): - await server.run(read_stream, write_stream, server.create_initialization_options()) - finally: - logger.info( - "Server shutting down", - extra={ - "devtools_connected": devtools.connected, - "browser_initialized": browser_state.initialized, - }, - ) - # Cleanup DevTools - if devtools.connected: - await devtools_disconnect_impl() - # Cleanup Browser Automation - if browser_state.initialized: - await browser_close_impl() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/dss-claude-plugin/skills/storybook-integration/SKILL.md b/dss-claude-plugin/skills/storybook-integration/SKILL.md index a5471b0..d42fd6c 100644 --- a/dss-claude-plugin/skills/storybook-integration/SKILL.md +++ b/dss-claude-plugin/skills/storybook-integration/SKILL.md @@ -206,11 +206,13 @@ export default create({ ## Server Configuration -DSS Storybook runs on port 6006 by default: +DSS Storybook runs on port `6226` by default: - Host: 0.0.0.0 (configurable) -- Port: 6006 (configurable) +- Port: 6226 (configurable via `STORYBOOK_PORT`) - Auto-open: disabled by default +Many target projects still run Storybook on `6006`/`6007`; DSS service 
discovery can detect those running instances. + ## Best Practices 1. **Story Organization** diff --git a/dss-claude-plugin/verify_tools.py b/dss-claude-plugin/verify_tools.py deleted file mode 100644 index d539ad5..0000000 --- a/dss-claude-plugin/verify_tools.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python3 -"""Verify that dss-mcp-server.py properly exports Context Compiler tools.""" - -import sys -from pathlib import Path - -# Add the server directory to path -sys.path.insert(0, str(Path(__file__).parent)) - -# Import the server module -print("=" * 60) -print("CONTEXT COMPILER TOOL VERIFICATION") -print("=" * 60) - -# Test imports -print("\n1. Testing Context Compiler imports...") -try: - from core import ( - get_active_context, - get_compiler_status, - list_skins, - resolve_token, - validate_manifest, - ) - - print(" ✓ All Context Compiler functions imported successfully") - CONTEXT_COMPILER_AVAILABLE = True -except ImportError as e: - print(f" ✗ Context Compiler import failed: {e}") - CONTEXT_COMPILER_AVAILABLE = False - sys.exit(1) - -# Test the server's tool list -print("\n2. Checking MCP server tool list...") -try: - # We need to simulate the MCP server initialization - # to see what tools it would export - - from mcp.server import Server - - # Create a test server instance - server = Server("dss-test") - - # Import the list_tools function logic - print(" Checking if server exports tools properly...") - - # Read the actual server file and check for context_compiler_tools - with open(Path(__file__).parent / "servers" / "dss-mcp-server.py", "r") as f: - server_code = f.read() - - if "context_compiler_tools" in server_code: - print(" ✓ context_compiler_tools defined in server") - else: - print(" ✗ context_compiler_tools NOT found in server") - sys.exit(1) - - if "dss_get_resolved_context" in server_code: - print(" ✓ dss_get_resolved_context tool defined") - else: - print(" ✗ dss_get_resolved_context NOT found") - sys.exit(1) - - if "dss_resolve_token" in server_code: - print(" ✓ dss_resolve_token tool defined") - else: - print(" ✗ dss_resolve_token NOT found") - sys.exit(1) - - if "dss_validate_manifest" in server_code: - print(" ✓ dss_validate_manifest tool defined") - else: - print(" ✗ dss_validate_manifest NOT found") - sys.exit(1) - - if "dss_list_skins" in server_code: - print(" ✓ dss_list_skins tool defined") - else: - print(" ✗ dss_list_skins NOT found") - sys.exit(1) - - if "dss_get_compiler_status" in server_code: - print(" ✓ dss_get_compiler_status tool defined") - else: - print(" ✗ dss_get_compiler_status NOT found") - sys.exit(1) - - # Check if tools are returned - if "return dss_tools + devtools_tools + browser_tools + context_compiler_tools" in server_code: - print(" ✓ context_compiler_tools added to tool list return") - else: - print(" ✗ context_compiler_tools NOT added to return statement") - sys.exit(1) - -except Exception as e: - print(f" ✗ Error checking server tools: {e}") - sys.exit(1) - -# Test tool handlers -print("\n3. 
Checking MCP server tool handlers...") -try: - with open(Path(__file__).parent / "servers" / "dss-mcp-server.py", "r") as f: - server_code = f.read() - - handlers = [ - 'elif name == "dss_get_resolved_context"', - 'elif name == "dss_resolve_token"', - 'elif name == "dss_validate_manifest"', - 'elif name == "dss_list_skins"', - 'elif name == "dss_get_compiler_status"', - ] - - for handler in handlers: - if handler in server_code: - tool_name = handler.split('"')[1] - print(f" ✓ {tool_name} handler implemented") - else: - tool_name = handler.split('"')[1] - print(f" ✗ {tool_name} handler NOT found") - sys.exit(1) - -except Exception as e: - print(f" ✗ Error checking tool handlers: {e}") - sys.exit(1) - -# Test Context Compiler functionality -print("\n4. Testing Context Compiler functionality...") -try: - import json - - # Test list_skins - skins_json = list_skins() - skins = json.loads(skins_json) - print(f" ✓ list_skins() returned {len(skins)} skins: {skins}") - - # Test get_compiler_status - status_json = get_compiler_status() - status = json.loads(status_json) - print(f" ✓ get_compiler_status() returned status: {status['status']}") - - if status["status"] == "active": - print(" ✓ Context Compiler is active and ready") - else: - print(f" ✗ Context Compiler status is: {status['status']}") - sys.exit(1) - -except Exception as e: - print(f" ✗ Context Compiler functionality test failed: {e}") - sys.exit(1) - -print("\n" + "=" * 60) -print("✅ ALL VERIFICATIONS PASSED") -print("=" * 60) -print("\nContext Compiler tools are properly integrated into dss-mcp-server.py") -print("and should be available to Claude Code after MCP server restart.") -print("\nIf tools are not showing up in Claude Code, try:") -print("1. Fully restart Claude Code (not just /mcp restart)") -print("2. Check Claude Code logs for connection errors") -print("3. Verify MCP server configuration in Claude settings") diff --git a/dss-cli.py b/dss-cli.py index eabe0ab..a178aa8 100755 --- a/dss-cli.py +++ b/dss-cli.py @@ -13,10 +13,8 @@ import json import sys from pathlib import Path -# Ensure the script can find the 'dss' module -# This adds the parent directory of 'dss-mvp1' to the Python path -# Assuming the script is run from the project root, this will allow `from dss...` imports -sys.path.insert(0, str(Path(__file__).parent.parent)) +# Ensure the script can find the local `dss` package when run from a checkout. +sys.path.insert(0, str(Path(__file__).parent)) try: from dss import StorybookScanner, StoryGenerator, ThemeGenerator @@ -24,7 +22,7 @@ try: from dss.project.manager import ProjectManager except ImportError as e: print( - "Error: Could not import DSS modules. Make sure dss-mvp1 is in the PYTHONPATH.", + "Error: Could not import DSS modules. 
Make sure the repo root is in PYTHONPATH.", file=sys.stderr, ) print(f"Import error: {e}", file=sys.stderr) @@ -169,7 +167,7 @@ def main(): print(json.dumps(result, indent=2)) elif action == "generate": generator = StoryGenerator(project_path) - result = generator.generate() + result = generator.generate(dry_run=False) print(f"Successfully generated {len(result)} new stories.") elif action == "configure": theme_gen = ThemeGenerator(project_path) diff --git a/dss-temp-handover.md b/dss-temp-handover.md deleted file mode 100644 index bfe99fe..0000000 --- a/dss-temp-handover.md +++ /dev/null @@ -1,59 +0,0 @@ -### **Situation Handover to Claude** - -**Context:** The overarching goal is to enhance DSS (Design System Server) with greater intelligence for analyzing and managing React projects, initially by "dogfooding" DSS itself on its own `admin-ui` project. - -**Initial Goal from User:** -1. Implement a robust Python-based analysis engine (`project_analyzer.py`) for React projects. -2. Integrate this into the DSS MCP and CLI. -3. Ensure continuous integration (CI/CD) automates the analysis and commits results (`project_context.json`) back to the repository. -4. Set up DSS to manage its own `admin-ui` project. -5. Link core DSS to its Figma UI Kit. -6. Build a default Storybook skin with DSS atoms and shadcn styles. - ---- - -**Actions Taken & Current Status:** - -1. **Analysis Engine & CLI**: - * **Implemented**: `dss-mvp1/dss/analyze/project_analyzer.py` was created, capable of parsing React/JS/TS files (using a Node.js `@babel/parser` subprocess) and generating a `networkx` graph. It also includes an `export_project_context` function. - * **Implemented**: `dss-mvp1/dss-cli.py` was created as a command-line interface, including `analyze`, `export-context`, `add-figma-file`, `setup-storybook`, and `sync-tokens` commands. - * **Implemented**: The `dss-claude-plugin/servers/dss-mcp-server.py` was updated to expose `dss_project_graph_analysis` and `dss_project_export_context` as MCP tools for AI agents. - * **Implemented**: Unit tests for `project_analyzer.py` were added and are currently passing. - -2. **CI/CD Setup**: - * **Implemented**: `.gitea/workflows/dss-analysis.yml` was created to automate the `dss-cli.py analyze` and `git commit` process for `project_context.json` on every push. - * **Verified**: Git hooks were fixed and confirmed to be running. - * **Verified**: SSH key authentication for Git push was correctly set up after troubleshooting. - -3. **Dogfooding `admin-ui` Project**: - * **Goal**: Initialize `admin-ui` as a DSS project, generate its analysis context, link it to Figma, and generate Storybook stories. - * **Status**: - * `admin-ui/.dss/analysis_graph.json` was successfully created (by `dss-mvp1/dss-cli.py analyze --project-path ./admin-ui`). - * `admin-ui/ds.config.json` was manually corrected and populated to resolve Pydantic validation errors during project loading. - * Figma UI Kit `figd_ScdBk47HlYEItZbQv2CcF9aq-3TfWbBXN3yoRKWA` was successfully linked to `admin-ui` (by `dss-mvp1/dss-cli.py add-figma-file --project-path ./admin-ui --file-key ...`). - * **Token Synchronization (Blocked)**: `dss-mvp1/dss-cli.py sync-tokens --project-path ./admin-ui` fails with `403 Client Error: Forbidden` from Figma API due to a placeholder token. This is expected, as a valid `FIGMA_TOKEN` environment variable is required. - -4. **Storybook Generation (Current Blocker)**: - * **Goal**: Build a default Storybook skin with DSS atoms and shadcn styles applied. 
- * **Expected Tool**: `dss-mvp1/dss-cli.py setup-storybook --action generate --project-path ./admin-ui`. - * **Problem**: This command consistently reports `Generated 0 new stories.` - * **Investigation**: - * Initial assumption that `dss-mvp1` itself contained components was incorrect. - * Moved `admin-ui/js/components/ds-button.js` to `admin-ui/src/components/ds-button.js` to match component discovery paths. - * Re-read `dss/storybook/generator.py` to confirm its logic. It expects components in standard directories like `src/components`. - * Confirmed that `StoryGenerator.generate` calls `generate_stories_for_directory`, which in turn calls `_parse_component`. - * Despite placing `ds-button.js` in a recognized path, `0 new stories` are still being generated. - * The `StoryGenerator` logic in `dss/storybook/generator.py` inspects component files, but it relies on specific patterns (e.g., `interface ButtonProps`, `children`) to extract `PropInfo` and `ComponentMeta`. The output of `@babel/parser` is not currently being used by `StoryGenerator` to populate `ComponentMeta`. - -**The core issue preventing Storybook generation is that the `StoryGenerator` is unable to correctly parse the provided JavaScript/TypeScript component files and extract the necessary metadata (props, component name, etc.) to create a story.** The integration between the `@babel/parser` output (which is JSON AST) and the `StoryGenerator`'s `_parse_component` method is either missing or misconfigured. The `_parse_component` method appears to be using regex on the raw file content, which might be insufficient or incorrect for the component's structure. - ---- - -**Recommendation for Claude:** - -1. **Investigate `dss/storybook/generator.py`**: Focus on the `_parse_component` method. How does it extract `ComponentMeta` from the component file? It currently uses regex, which is fragile. -2. **Integrate Babel AST**: The `@babel/parser` subprocess call already produces a full AST. The `_parse_component` method should be updated to consume and interpret this AST to reliably extract component metadata (name, props, children, description). This would be much more robust than regex. -3. **Validate Component Structure**: Ensure the `ds-button.js` (or any target component) has a structure that the updated parser can understand and extract metadata from. -4. **Re-run Storybook Generation**: Once `_parse_component` can correctly extract metadata, re-run `setup-storybook --action generate` to confirm stories are created. - -I have included the contents of `dss/storybook/generator.py` for direct reference. diff --git a/dss/__init__.py b/dss/__init__.py index 5e507bd..0b74321 100644 --- a/dss/__init__.py +++ b/dss/__init__.py @@ -1,12 +1,11 @@ """ DSS - Design System Server. -A Model Context Protocol (MCP) server that provides Claude Code with 40+ design system tools. -Supports local development and remote team deployment. +Design system tooling for local development and headless server deployments. 
Usage: from dss import settings, Projects, Components - from dss.mcp_server import MCPServer + # MCP stdio server entrypoint: python -m dss.mcp.server from dss.storage import Projects, Components, Tokens """ diff --git a/dss/analyze/project_analyzer.py b/dss/analyze/project_analyzer.py index 2a2a451..f31b19b 100644 --- a/dss/analyze/project_analyzer.py +++ b/dss/analyze/project_analyzer.py @@ -1,113 +1,187 @@ -"""This module provides tools for analyzing a project.""" +"""High-level project analysis orchestration used by CLI and MCP tooling.""" +from __future__ import annotations + +import asyncio import json -import logging -import subprocess +from dataclasses import asdict, is_dataclass +from datetime import datetime +from enum import Enum from pathlib import Path -from typing import Dict +from typing import Any, Dict, List, Optional, Tuple -from dss.analyze.base import ProjectAnalysis - -log = logging.getLogger(__name__) - -# Path to the node.js parser script. -# This assumes the script is located in the same directory as this file. -parser_script_path = Path(__file__).parent / "parser.js" +from .base import ProjectAnalysis +from .graph import DependencyGraph +from .quick_wins import QuickWinFinder +from .react import ReactAnalyzer +from .scanner import ProjectScanner +from .styles import StyleAnalyzer -def analyze_project( - path: str, - output_graph: bool = False, - prune: bool = False, - visualize: bool = False, -) -> ProjectAnalysis: - """ - Analyzes a project, including all its components and their dependencies. +def _safe_serialize(obj: Any) -> Any: + if obj is None or isinstance(obj, (str, int, float, bool)): + return obj + if isinstance(obj, datetime): + return obj.isoformat() + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, Path): + return str(obj) + if isinstance(obj, dict): + return {str(k): _safe_serialize(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple, set)): + return [_safe_serialize(v) for v in obj] + if hasattr(obj, "to_dict") and callable(obj.to_dict): + return _safe_serialize(obj.to_dict()) + if is_dataclass(obj): + return _safe_serialize(asdict(obj)) + return str(obj) - Args: - path: The path to the project to analyze. - output_graph: Whether to output the dependency graph. - prune: Whether to prune the dependency graph. - visualize: Whether to visualize the dependency graph. - Returns: - A ProjectAnalysis object containing the analysis results. - """ - project_path = Path(path).resolve() - log.info(f"Analyzing project at {project_path}...") +async def _build_analysis( + project_root: Path, +) -> Tuple[ProjectAnalysis, DependencyGraph, Dict[str, Any], List[Any]]: + scanner = ProjectScanner(str(project_root), use_cache=False) + analysis = await scanner.scan() - # Get all component files in the project. - component_files = list(project_path.glob("**/*.js")) + list(project_path.glob("**/*.jsx")) + react = ReactAnalyzer(str(project_root)) + style = StyleAnalyzer(str(project_root)) + graph = DependencyGraph(str(project_root)) + quick_wins_finder = QuickWinFinder(str(project_root)) - # For each component file, get its AST. - for file_path in component_files: - if file_path.is_file(): - # Call the external node.js parser - result = subprocess.run( - ["node", str(parser_script_path), file_path], - capture_output=True, - text=True, - check=True, - ) - # The AST is now in result.stdout as a JSON string. - ast = json.loads(result.stdout) - # TODO: Do something with the AST. 
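+    # Create the analyzer coroutines up front; asyncio.gather below runs them
+    # concurrently and unpacks (components, style_result, _graph_dict, quick_wins).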
+    components_task = react.analyze()
+    style_task = style.analyze()
+    graph_task = graph.build()
+    quick_wins_task = quick_wins_finder.find_all()
 
-    # TODO: Populate the ProjectAnalysis object with the analysis results.
-    analysis = ProjectAnalysis(
-        project_name=project_path.name,
-        project_path=str(project_path),
-        total_files=len(component_files),
-        components={},
+    components, style_result, _graph_dict, quick_wins = await asyncio.gather(
+        components_task, style_task, graph_task, quick_wins_task
     )
-    log.info(f"Analysis complete for {project_path.name}.")
+
+    analysis.components = components
+    analysis.component_count = len(components)
+
+    analysis.token_candidates = style_result.get("token_candidates", [])  # type: ignore[assignment]
+    analysis.stats["token_candidates"] = len(analysis.token_candidates)
+
+    analysis.quick_wins = quick_wins
+    analysis.stats["quick_wins_count"] = len(quick_wins)
+
+    return analysis, graph, style_result, quick_wins
+
+
+def analyze_project(path: str) -> ProjectAnalysis:
+    """Synchronous wrapper around the async analyzers."""
+    project_root = Path(path).expanduser().resolve()
+    if not project_root.exists():
+        raise FileNotFoundError(f"Project path not found: {project_root}")
+    if not project_root.is_dir():
+        raise NotADirectoryError(f"Project path is not a directory: {project_root}")
+
+    analysis, _graph, _style_result, _quick_wins = asyncio.run(_build_analysis(project_root))
     return analysis
 
 
-def export_project_context(analysis: ProjectAnalysis, output_path: str):
+def run_project_analysis(project_path: str, output_file: Optional[str] = None) -> Dict[str, Any]:
     """
-    Exports the project context to a JSON file.
+    Run full analysis and write a portable graph JSON file to `<project>/.dss/analysis_graph.json`.
+
+    Returns a JSON-serializable dict with both the graph and a summary analysis payload.
     """
-    log.info(f"Exporting project context to {output_path}...")
-    with open(output_path, "w") as f:
-        json.dump(analysis.dict(), f, indent=2)
-    log.info("Export complete.")
+    project_root = Path(project_path).expanduser().resolve()
+    if not project_root.exists():
+        raise FileNotFoundError(f"Project path not found: {project_root}")
+    if not project_root.is_dir():
+        raise NotADirectoryError(f"Project path is not a directory: {project_root}")
+
+    analysis, graph, style_result, quick_wins = asyncio.run(_build_analysis(project_root))
+
+    graph_dict = graph.to_dict()
+    insights = {
+        "orphans": graph.find_orphans(),
+        "hubs": graph.find_hubs(),
+        "cycles": graph.find_circular_dependencies(),
+    }
+
+    style_summary = {k: v for k, v in style_result.items() if k != "token_candidates"}
+
+    result: Dict[str, Any] = {
+        "project_path": str(project_root),
+        "generated_at": datetime.now().isoformat(),
+        # Keep a stable, graph-friendly top-level shape.
+        "nodes": graph_dict.get("nodes", []),
+        "edges": graph_dict.get("edges", []),
+        "links": graph_dict.get("edges", []),  # legacy alias
+        "stats": graph_dict.get("stats", {}),
+        # Extended payloads.
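+        # (Each value passes through _safe_serialize so dataclasses, enums,
+        # and Path objects survive json.dumps.)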
+ "analysis": _safe_serialize(analysis), + "style_summary": _safe_serialize(style_summary), + "quick_wins": _safe_serialize(quick_wins), + "graph_insights": _safe_serialize(insights), + } + + if output_file: + output_path = Path(output_file).expanduser() + else: + output_path = project_root / ".dss" / "analysis_graph.json" + + output_path.parent.mkdir(parents=True, exist_ok=True) + output_path.write_text(json.dumps(result, indent=2), encoding="utf-8") + + return result -def get_ast(file_path: str) -> Dict: +def export_project_context(project_path: str) -> Dict[str, Any]: """ - Gets the AST of a file using a node.js parser. + Export a lightweight, AI-friendly project context as a JSON-serializable dict. + + This intentionally avoids embedding full source files. """ - log.info(f"Getting AST for {file_path}...") - result = subprocess.run( - ["node", str(parser_script_path), file_path], - capture_output=True, - text=True, - check=True, - ) - log.info("AST retrieved.") - return json.loads(result.stdout) + project_root = Path(project_path).expanduser().resolve() + if not project_root.exists(): + raise FileNotFoundError(f"Project path not found: {project_root}") + if not project_root.is_dir(): + raise NotADirectoryError(f"Project path is not a directory: {project_root}") + analysis, graph, style_result, quick_wins = asyncio.run(_build_analysis(project_root)) -def main(): - """ - Main function for the project analyzer. - """ - import argparse + graph_dict = graph.to_dict() + hubs = graph.find_hubs() + cycles = graph.find_circular_dependencies() + orphans = graph.find_orphans() - parser = argparse.ArgumentParser(description="Analyze a project.") - parser.add_argument("path", help="The path to the project to analyze.") - parser.add_argument("--output-graph", action="store_true", help="Output the dependency graph.") - parser.add_argument("--prune", action="store_true", help="Prune the dependency graph.") - parser.add_argument("--visualize", action="store_true", help="Visualize the dependency graph.") - parser.add_argument("--export-context", help="Export the project context to a JSON file.") - args = parser.parse_args() + # Keep this small enough for prompt injection. 
+ components_preview = [ + { + "name": c.name, + "path": c.path, + "type": c.type, + "has_styles": c.has_styles, + "props": c.props[:10], + } + for c in analysis.components[:50] + ] - analysis = analyze_project(args.path, args.output_graph, args.prune, args.visualize) + token_candidates = style_result.get("token_candidates", []) + token_candidates_preview = [_safe_serialize(c) for c in token_candidates[:25]] - if args.export_context: - export_project_context(analysis, args.export_context) + quick_wins_preview = [_safe_serialize(w) for w in quick_wins[:25]] - -if __name__ == "__main__": - main() + return { + "project_path": str(project_root), + "generated_at": datetime.now().isoformat(), + "framework": analysis.framework.value, + "framework_version": analysis.framework_version, + "primary_styling": analysis.primary_styling.value if analysis.primary_styling else None, + "stats": _safe_serialize(analysis.stats), + "components": components_preview, + "style_summary": _safe_serialize({k: v for k, v in style_result.items() if k != "token_candidates"}), + "token_candidates": token_candidates_preview, + "quick_wins": quick_wins_preview, + "dependency_graph": { + "stats": graph_dict.get("stats", {}), + "orphans": orphans[:50], + "hubs": hubs[:25], + "cycles": cycles[:10], + }, + } diff --git a/dss/auth/atlassian_auth.py b/dss/auth/atlassian_auth.py index d2e5d08..0766e6f 100644 --- a/dss/auth/atlassian_auth.py +++ b/dss/auth/atlassian_auth.py @@ -25,7 +25,7 @@ class AtlassianAuth: On successful validation, we: 1. Verify credentials against Atlassian API - 2. Store user in database + 2. Store user in JSON storage 3. Generate JWT token """ @@ -106,56 +106,17 @@ class AtlassianAuth: # Hash the API token token_hash = self.hash_api_token(api_token) - # Store or update user in database - with get_connection() as conn: - # Check if user exists - existing = conn.execute( - "SELECT id, email FROM users WHERE email = ?", (email,) - ).fetchone() + from dss.storage.json_store import Users - if existing: - # Update existing user - user_id = existing["id"] - conn.execute( - """ - UPDATE users - SET display_name = ?, - atlassian_url = ?, - atlassian_service = ?, - api_token_hash = ?, - last_login = ? - WHERE id = ? - """, - ( - user_info["display_name"], - url, - service, - token_hash, - datetime.utcnow().isoformat(), - user_id, - ), - ) - else: - # Create new user - cursor = conn.execute( - """ - INSERT INTO users ( - email, display_name, atlassian_url, atlassian_service, - api_token_hash, created_at, last_login - ) - VALUES (?, ?, ?, ?, ?, ?, ?) - """, - ( - email, - user_info["display_name"], - url, - service, - token_hash, - datetime.utcnow().isoformat(), - datetime.utcnow().isoformat(), - ), - ) - user_id = cursor.lastrowid + user_record = Users.upsert( + email=email, + display_name=user_info["display_name"], + atlassian_url=url, + atlassian_service=service, + api_token_hash=token_hash, + last_login=datetime.utcnow().isoformat(), + ) + user_id = int(user_record["id"]) # Generate JWT token expires_at = datetime.utcnow() + timedelta(hours=self.jwt_expiry_hours) @@ -198,21 +159,23 @@ class AtlassianAuth: async def get_user_by_id(self, user_id: int) -> Optional[Dict[str, Any]]: """Get user information by ID.""" - with get_connection() as conn: - user = conn.execute( - """ - SELECT id, email, display_name, atlassian_url, atlassian_service, - created_at, last_login - FROM users - WHERE id = ? 
- """, - (user_id,), - ).fetchone() + from dss.storage.json_store import Users - if user: - return dict(user) + user = Users.get(user_id) + if not user: return None + # Only return safe fields + return { + "id": user.get("id"), + "email": user.get("email"), + "display_name": user.get("display_name"), + "atlassian_url": user.get("atlassian_url"), + "atlassian_service": user.get("atlassian_service"), + "created_at": user.get("created_at"), + "last_login": user.get("last_login"), + } + # Singleton instance _auth_instance: Optional[AtlassianAuth] = None diff --git a/dss/export_import/security.py b/dss/export_import/security.py index 85f97a4..3e1118b 100644 --- a/dss/export_import/security.py +++ b/dss/export_import/security.py @@ -266,13 +266,11 @@ class TimestampConflictResolver: class DatabaseLockingStrategy: - """Manages SQLite database locking during import operations. + """Legacy-named scheduling/locking heuristics for bulk operations. - Production Consideration: SQLite locks the entire database file - during writes. Large imports can block other operations. - - Recommended: Schedule imports during low-traffic windows or use - busy_timeout to make waiting explicit. + DSS core storage is JSON-file based. This helper remains for: + - recommending conservative locking/scheduling defaults + - deciding when operations should run in background workers """ # Configuration @@ -283,11 +281,7 @@ class DatabaseLockingStrategy: self.busy_timeout_ms = busy_timeout_ms def get_pragmas(self) -> Dict[str, Any]: - """Get recommended SQLite pragmas for import operations. - - Returns: - Dict of pragma names and values - """ + """Legacy API retained for compatibility (no-op for JSON storage).""" return { "journal_mode": "WAL", # Write-Ahead Logging for concurrent access "busy_timeout": self.busy_timeout_ms, diff --git a/dss/export_import/service.py b/dss/export_import/service.py index ee02191..82099a6 100644 --- a/dss/export_import/service.py +++ b/dss/export_import/service.py @@ -2,11 +2,11 @@ DSSProjectService - High-level API for export/import operations with transaction safety. This service provides: -1. Transactional wrapper for safe database operations +1. Transaction-like wrapper for safe operations 2. Integration point for API/CLI layers 3. Proper error handling and rollback 4. Background job scheduling for large operations -5. SQLite configuration management +5. Resource/scheduling heuristics (JSON-only storage) """ from contextlib import contextmanager @@ -64,12 +64,11 @@ class MergeSummary: class DSSProjectService: """Service layer for DSS project export/import operations. - Provides transaction-safe operations with proper error handling, - database locking management, and memory limit enforcement. + Provides operation safety with proper error handling, + scheduling heuristics, and memory limit enforcement. Production Features: - - Transactional safety (rollback on error) - - SQLite locking configuration + - Best-effort safety (rollback on error) - Memory and resource limits - Background job scheduling for large operations - Comprehensive error handling @@ -79,47 +78,19 @@ class DSSProjectService: self, busy_timeout_ms: int = DatabaseLockingStrategy.DEFAULT_BUSY_TIMEOUT_MS, ): + # Legacy name: used as scheduling heuristic (no DB required). self.locking_strategy = DatabaseLockingStrategy(busy_timeout_ms) self.memory_manager = MemoryLimitManager() @contextmanager def _transaction(self): - """Context manager for transaction-safe database operations. 
- - Handles: - - SQLite locking with busy_timeout - - Automatic rollback on error - - Connection cleanup """ - conn = None - try: - # Get connection with locking pragmas - conn = get_connection() + Context manager for grouping operations. - # Apply locking pragmas - pragmas = self.locking_strategy.get_pragmas() - cursor = conn.cursor() - for pragma_name, pragma_value in pragmas.items(): - if isinstance(pragma_value, int): - cursor.execute(f"PRAGMA {pragma_name} = {pragma_value}") - else: - cursor.execute(f"PRAGMA {pragma_name} = '{pragma_value}'") - - yield conn - - # Commit on success - conn.commit() - - except Exception as e: - # Rollback on error - if conn: - conn.rollback() - raise e - - finally: - # Cleanup - if conn: - conn.close() + DSS uses JSON-file storage; there is no DB transaction. This wrapper exists + to preserve the service API while allowing future locking/resource limits. + """ + yield def export_project( self, diff --git a/dss/mcp/__init__.py b/dss/mcp/__init__.py new file mode 100644 index 0000000..5e7c80c --- /dev/null +++ b/dss/mcp/__init__.py @@ -0,0 +1,24 @@ +""" +DSS MCP (Model Context Protocol) integration. + +This package contains the shared tool registry and execution layer used by: +- The headless DSS API server (Admin UI + AI chat) +- The local MCP stdio server (Claude Code / desktop clients) + +Design goal: +One canonical definition of tools and their implementations, with optional +remote proxying via the DSS headless server. +""" + +from dss.mcp.config import integration_config, mcp_config, validate_config +from dss.mcp.handler import MCPContext, MCPHandler, get_mcp_handler + +__all__ = [ + "MCPContext", + "MCPHandler", + "get_mcp_handler", + "mcp_config", + "integration_config", + "validate_config", +] + diff --git a/dss/mcp/config.py b/dss/mcp/config.py new file mode 100644 index 0000000..5de6545 --- /dev/null +++ b/dss/mcp/config.py @@ -0,0 +1,106 @@ +""" +DSS MCP configuration. + +Used by the headless server to expose tools to: +- the Admin UI (tool browser + execution) +- the AI chat endpoint (tool calling) + +Also used by the local MCP server when proxying requests to a headless server. 
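+
+Example environment (illustrative values; every variable is optional):
+
+    DSS_API_URL=http://localhost:6220        # proxy tool execution to a headless server
+    DSS_MCP_ENCRYPTION_KEY=<Fernet key>      # encrypt integration configs at rest
+    FIGMA_TOKEN=<personal access token>      # live Figma tools (mock mode otherwise)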
+""" + +from __future__ import annotations + +import os +from dataclasses import dataclass +from typing import List, Optional + + +def _get_env(name: str, default: Optional[str] = None) -> Optional[str]: + value = os.getenv(name) + if value is None: + return default + value = value.strip() + return value if value else default + + +@dataclass(frozen=True) +class MCPConfig: + """Core MCP runtime config for DSS.""" + + HOST: str = _get_env("DSS_MCP_HOST", "127.0.0.1") or "127.0.0.1" + PORT: int = int(_get_env("DSS_MCP_PORT", "6222") or "6222") + + # Tool execution / context + CONTEXT_CACHE_TTL: int = int(_get_env("DSS_MCP_CONTEXT_CACHE_TTL", "300") or "300") + + # Circuit breaker (used by handler; conservative defaults) + CIRCUIT_BREAKER_FAILURE_THRESHOLD: int = int( + _get_env("DSS_MCP_CIRCUIT_BREAKER_FAILURE_THRESHOLD", "5") or "5" + ) + CIRCUIT_BREAKER_TIMEOUT_SECONDS: int = int( + _get_env("DSS_MCP_CIRCUIT_BREAKER_TIMEOUT_SECONDS", "60") or "60" + ) + + # Optional encryption for at-rest integration configs + ENCRYPTION_KEY: Optional[str] = _get_env("DSS_MCP_ENCRYPTION_KEY") or _get_env( + "DSS_ENCRYPTION_KEY" + ) + + # Remote proxy (local MCP process -> headless server) + API_URL: Optional[str] = _get_env("DSS_API_URL") or _get_env("DSS_SERVER_URL") + + def get_cipher(self): + """Return a Fernet cipher if configured, otherwise None.""" + if not self.ENCRYPTION_KEY: + return None + try: + from cryptography.fernet import Fernet + + return Fernet(self.ENCRYPTION_KEY.encode()) + except Exception: + # Invalid key format or missing dependency + return None + + +@dataclass(frozen=True) +class IntegrationConfig: + """Integration credentials and defaults (read from environment).""" + + FIGMA_TOKEN: Optional[str] = _get_env("FIGMA_TOKEN") or _get_env("DSS_FIGMA_TOKEN") + ANTHROPIC_API_KEY: Optional[str] = _get_env("ANTHROPIC_API_KEY") or _get_env( + "DSS_ANTHROPIC_API_KEY" + ) + + # Defaults for Atlassian integrations (optional) + JIRA_URL: Optional[str] = _get_env("JIRA_URL") or _get_env("DSS_JIRA_URL") + CONFLUENCE_URL: Optional[str] = _get_env("CONFLUENCE_URL") or _get_env("DSS_CONFLUENCE_URL") + + +mcp_config = MCPConfig() +integration_config = IntegrationConfig() + + +def validate_config() -> List[str]: + """Return user-facing warnings for missing/invalid configuration.""" + warnings: List[str] = [] + + if not mcp_config.ENCRYPTION_KEY: + warnings.append( + "No encryption key configured (set DSS_MCP_ENCRYPTION_KEY) – integration configs will be stored in plaintext." + ) + elif mcp_config.get_cipher() is None: + warnings.append( + "Invalid DSS_MCP_ENCRYPTION_KEY – expected a Fernet key (urlsafe base64). Integration encryption is disabled." + ) + + if not integration_config.FIGMA_TOKEN: + warnings.append("FIGMA_TOKEN not configured – Figma tools will run in mock mode.") + + if not integration_config.ANTHROPIC_API_KEY: + warnings.append("ANTHROPIC_API_KEY not configured – AI chat/tool calling may be unavailable.") + + if mcp_config.API_URL and not mcp_config.API_URL.startswith(("http://", "https://")): + warnings.append("DSS_API_URL should include scheme (http:// or https://).") + + return warnings + diff --git a/dss/mcp/guides.py b/dss/mcp/guides.py new file mode 100644 index 0000000..bd5792a --- /dev/null +++ b/dss/mcp/guides.py @@ -0,0 +1,304 @@ +""" +MCP guide library (skills + command docs). + +Goal: make Claude plugin "skills" and "commands" discoverable from any MCP client +(Claude Code, Codex CLI, Gemini CLI, etc.), without requiring the Claude plugin system. 
+""" + +from __future__ import annotations + +import re +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple + + +@dataclass(frozen=True) +class Guide: + id: str + type: str # "skill" | "command" + name: str + description: str + source_path: str + related_tools: List[str] + meta: Dict[str, Any] + + +def _repo_root() -> Path: + # dss/mcp/guides.py -> dss/mcp -> dss -> repo root + return Path(__file__).resolve().parent.parent.parent + + +def _split_frontmatter(text: str) -> Tuple[Dict[str, Any], str]: + """ + Minimal YAML front-matter splitter/parser. + + We only need the common `name:` and `description:` fields used in DSS guides, + plus optional list fields like `globs:` / `arguments:`. + """ + lines = text.splitlines() + if not lines or lines[0].strip() != "---": + return {}, text + + end_idx: Optional[int] = None + for i in range(1, len(lines)): + if lines[i].strip() == "---": + end_idx = i + break + if end_idx is None: + return {}, text + + fm_lines = lines[1:end_idx] + body = "\n".join(lines[end_idx + 1 :]).lstrip("\n") + + def parse_scalar(value: str) -> Any: + v = value.strip() + if v == "": + return "" + + if (v.startswith('"') and v.endswith('"')) or (v.startswith("'") and v.endswith("'")): + return v[1:-1] + + lower = v.lower() + if lower in {"true", "yes", "on"}: + return True + if lower in {"false", "no", "off"}: + return False + if lower in {"null", "~"}: + return None + + try: + return int(v) + except ValueError: + pass + + try: + return float(v) + except ValueError: + pass + + return v + + def indent_count(line: str) -> int: + return len(line) - len(line.lstrip(" ")) + + def parse_block(block_lines: List[str]) -> Any: + # Determine base indentation for the block. + first = next((ln for ln in block_lines if ln.strip()), "") + if not first: + return [] + base_indent = indent_count(first) + + # List block + if first[base_indent:].startswith("- "): + items: List[Any] = [] + i = 0 + while i < len(block_lines): + line = block_lines[i] + if not line.strip(): + i += 1 + continue + if indent_count(line) < base_indent: + break + + stripped = line[base_indent:] + if not stripped.startswith("- "): + i += 1 + continue + + item_head = stripped[2:].strip() + + # Map item (e.g. "- name: flags") + if ":" in item_head: + item: Dict[str, Any] = {} + k, v = item_head.split(":", 1) + item[k.strip()] = parse_scalar(v.strip()) + i += 1 + + # Continuation lines for this list item are more indented than the dash. + while i < len(block_lines): + cont = block_lines[i] + if not cont.strip(): + i += 1 + continue + cont_indent = indent_count(cont) + if cont_indent <= base_indent: + break + cont_str = cont.strip() + if cont_str.startswith("#"): + i += 1 + continue + if cont_str.startswith("- "): + # Nested lists inside list items are not supported in this minimal parser. + i += 1 + continue + if ":" in cont_str: + ck, cv = cont_str.split(":", 1) + item[ck.strip()] = parse_scalar(cv.strip()) + i += 1 + + items.append(item) + continue + + # Scalar item + items.append(parse_scalar(item_head)) + i += 1 + + # Skip any continuation lines for multi-line scalars (rare in these guides). 
+ while i < len(block_lines): + cont = block_lines[i] + if not cont.strip(): + i += 1 + continue + if indent_count(cont) <= base_indent: + break + i += 1 + + return items + + # Map block (not currently used by DSS guides) + meta_map: Dict[str, Any] = {} + for raw in block_lines: + if not raw.strip(): + continue + if indent_count(raw) < base_indent: + continue + line = raw[base_indent:].strip() + if ":" not in line: + continue + k, v = line.split(":", 1) + meta_map[k.strip()] = parse_scalar(v.strip()) + return meta_map + + meta: Dict[str, Any] = {} + i = 0 + while i < len(fm_lines): + raw = fm_lines[i].rstrip() + if not raw.strip() or raw.lstrip().startswith("#"): + i += 1 + continue + + # Only parse top-level keys (no leading spaces). + if raw.startswith(" "): + i += 1 + continue + + if ":" not in raw: + i += 1 + continue + + key, value = raw.split(":", 1) + key = key.strip() + value = value.strip() + + if value != "": + meta[key] = parse_scalar(value) + i += 1 + continue + + # Block value (indented list/map) + i += 1 + block: List[str] = [] + while i < len(fm_lines): + nxt = fm_lines[i].rstrip("\n") + if not nxt.strip(): + i += 1 + continue + if not nxt.startswith(" "): + break + block.append(nxt) + i += 1 + + meta[key] = parse_block(block) + + return meta, body + + +_TOOL_REF_RE = re.compile(r"`(dss_[a-z0-9_]+)`", re.IGNORECASE) + + +def _extract_related_tools(markdown: str) -> List[str]: + tools = {m.group(1) for m in _TOOL_REF_RE.finditer(markdown or "")} + return sorted(tools) + + +def list_guides(kind: str = "all") -> List[Guide]: + """ + List available guides. + + kind: + - "all" (default) + - "skill" + - "command" + """ + root = _repo_root() + plugin_root = root / "dss-claude-plugin" + guides: List[Guide] = [] + + if kind in {"all", "command"}: + commands_dir = plugin_root / "commands" + if commands_dir.exists(): + for md in sorted(commands_dir.glob("*.md")): + raw = md.read_text(encoding="utf-8") + meta, body = _split_frontmatter(raw) + command_name = str(meta.get("name") or md.stem) + description = str(meta.get("description") or "") + guides.append( + Guide( + id=f"command:{command_name}", + type="command", + name=command_name, + description=description, + source_path=str(md), + related_tools=_extract_related_tools(body), + meta=meta, + ) + ) + + if kind in {"all", "skill"}: + skills_dir = plugin_root / "skills" + if skills_dir.exists(): + for skill_dir in sorted([p for p in skills_dir.iterdir() if p.is_dir()]): + md = skill_dir / "SKILL.md" + if not md.exists(): + continue + raw = md.read_text(encoding="utf-8") + meta, body = _split_frontmatter(raw) + display_name = str(meta.get("name") or skill_dir.name) + description = str(meta.get("description") or "") + guides.append( + Guide( + id=f"skill:{skill_dir.name}", + type="skill", + name=display_name, + description=description, + source_path=str(md), + related_tools=_extract_related_tools(body), + meta=meta, + ) + ) + + return guides + + +def get_guide(guide_id: str, include_frontmatter: bool = False) -> Dict[str, Any]: + guides = list_guides("all") + match = next((g for g in guides if g.id == guide_id), None) + if not match: + raise ValueError(f"Guide not found: {guide_id}") + + raw = Path(match.source_path).read_text(encoding="utf-8") + meta, body = _split_frontmatter(raw) + + content = raw if include_frontmatter else body + + return { + "id": match.id, + "type": match.type, + "name": match.name, + "description": match.description, + "source_path": match.source_path, + "related_tools": match.related_tools, + "meta": meta, + 
"content": content, + } diff --git a/dss/mcp/handler.py b/dss/mcp/handler.py new file mode 100644 index 0000000..c25a2e8 --- /dev/null +++ b/dss/mcp/handler.py @@ -0,0 +1,1051 @@ +""" +Unified DSS tool registry + execution layer. + +This is used in two places: +1) Headless server: `apps/api/server.py` exposes tools to the Admin UI and AI chat. +2) Local MCP process: `dss.mcp.server` exposes the same tools via stdio for Claude Code. +""" + +from __future__ import annotations + +import time +import os +import asyncio +from dataclasses import asdict, dataclass, is_dataclass +from datetime import date, datetime +from enum import Enum +from pathlib import Path +from typing import Any, Awaitable, Callable, Dict, List, Optional + +from dss.figma.figma_tools import FigmaToolSuite +from dss.ingest.base import DesignToken +from dss.ingest.css import CSSTokenSource +from dss.ingest.json_tokens import JSONTokenSource +from dss.ingest.merge import MergeStrategy, TokenMerger +from dss.ingest.scss import SCSSTokenSource +from dss.ingest.tailwind import TailwindTokenSource +from dss.status.dashboard import StatusDashboard +from dss.storybook.generator import StoryGenerator +from dss.storybook.scanner import StorybookScanner +from dss.storybook.theme import ThemeGenerator +from dss.themes.default_themes import get_default_dark_theme, get_default_light_theme +from dss.mcp.guides import get_guide, list_guides + + +def _safe_serialize(obj: Any) -> Any: + if obj is None or isinstance(obj, (str, int, float, bool)): + return obj + if isinstance(obj, (datetime, date)): + return obj.isoformat() + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, Path): + return str(obj) + if isinstance(obj, dict): + return {str(k): _safe_serialize(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple, set)): + return [_safe_serialize(v) for v in obj] + if is_dataclass(obj): + return _safe_serialize(asdict(obj)) + if hasattr(obj, "to_dict") and callable(obj.to_dict): + return _safe_serialize(obj.to_dict()) + if hasattr(obj, "model_dump") and callable(obj.model_dump): + return _safe_serialize(obj.model_dump()) + return str(obj) + + +@dataclass(frozen=True) +class MCPContext: + project_id: Optional[str] = None + user_id: Optional[int] = None + + +@dataclass(frozen=True) +class ProjectContextSummary: + """Small project summary used for AI prompt injection.""" + + name: str + component_count: int + health: Dict[str, Any] + integrations: Dict[str, Any] + + +@dataclass +class ToolExecutionResult: + success: bool + result: Any = None + error: Optional[str] = None + duration_ms: int = 0 + + def to_dict(self) -> Dict[str, Any]: + return { + "success": self.success, + "result": _safe_serialize(self.result), + "error": self.error, + "duration_ms": self.duration_ms, + } + + +@dataclass(frozen=True) +class ToolSpec: + name: str + description: str + input_schema: Dict[str, Any] + category: str + func: Callable[[Dict[str, Any], MCPContext], Awaitable[Any]] + + +def _resolve_project_root(arguments: Dict[str, Any], context: MCPContext) -> Path: + """ + Resolve a project root for tools that operate on a filesystem project. 
+ + Priority: + 1) arguments.path (absolute/relative) + 2) arguments.project_id + 3) context.project_id + """ + from dss.storage.json_store import Projects + + path_arg = arguments.get("path") + if path_arg: + return Path(path_arg).expanduser().resolve() + + project_id = arguments.get("project_id") or context.project_id + if project_id: + project = Projects.get(project_id) + if not project: + raise ValueError(f"Project not found: {project_id}") + root_path = project.get("root_path") + if not root_path: + raise ValueError(f"Project has no root_path configured: {project_id}") + return Path(root_path).expanduser().resolve() + + raise ValueError("Missing required 'path' (or provide project_id via context).") + + +def _normalize_token_pairs(tokens: Any) -> List[tuple[str, Any]]: + """ + Normalize arbitrary token inputs into a list of (name, value) pairs. + + Supports: + - {name: value} dict + - {"tokens": [...]} where tokens are objects with name/value + - {"collection": {"tokens":[...]}} + - list[{"name":..,"value":..}] or list[(name,value)] + """ + if tokens is None: + return [] + + if isinstance(tokens, dict): + if "collection" in tokens and isinstance(tokens["collection"], dict): + return _normalize_token_pairs(tokens["collection"]) + if "tokens" in tokens: + return _normalize_token_pairs(tokens["tokens"]) + # plain mapping + return [(str(k), v) for k, v in tokens.items()] + + if isinstance(tokens, list): + pairs: List[tuple[str, Any]] = [] + for item in tokens: + if isinstance(item, (list, tuple)) and len(item) == 2: + pairs.append((str(item[0]), item[1])) + elif isinstance(item, dict) and "name" in item and "value" in item: + pairs.append((str(item["name"]), item["value"])) + return pairs + + return [] + + +def _format_tokens_output(token_pairs: List[tuple[str, Any]], output_format: str) -> str: + def to_css_name(name: str) -> str: + normalized = name.strip().replace(".", "-").replace("/", "-").replace("_", "-") + while "--" in normalized: + normalized = normalized.replace("--", "-") + return f"--{normalized.lower()}" + + if output_format == "json": + import json + + return json.dumps({k: v for k, v in token_pairs}, indent=2, default=str) + + if output_format == "css": + lines = [":root {"] + for name, value in token_pairs: + lines.append(f" {to_css_name(name)}: {value};") + lines.append("}") + return "\n".join(lines) + + if output_format == "scss": + lines = [] + for name, value in token_pairs: + var_name = to_css_name(name).lstrip("--") + lines.append(f"${var_name}: {value};") + return "\n".join(lines) + + if output_format == "js": + import json + + obj = {k: v for k, v in token_pairs} + return f"export const tokens = {json.dumps(obj, indent=2, default=str)};\n" + + raise ValueError(f"Unsupported output_format: {output_format}") + + +class MCPHandler: + """Central tool registry + execution.""" + + def __init__(self): + self._tools: Dict[str, ToolSpec] = {} + self._register_core_tools() + self._register_optional_dev_tools() + + def _register(self, spec: ToolSpec) -> None: + self._tools[spec.name] = spec + + def _register_core_tools(self) -> None: + # Tool schemas are aligned with the legacy Claude plugin where possible. 
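+        # Categories registered below: system, themes, analysis, tokens,
+        # storybook, figma, and docs; dev-only "workflow" tools are added
+        # separately, gated behind DSS_ENABLE_DEV_COMMANDS.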
+ + self._register( + ToolSpec( + name="dss_get_status", + description="Get DSS system status including health checks, dependencies, configuration, metrics, and recommendations.", + category="system", + input_schema={ + "type": "object", + "properties": { + "format": { + "type": "string", + "enum": ["json", "dashboard"], + "description": "Output format: 'json' for structured data, 'dashboard' for ASCII art display (default: json)", + } + }, + }, + func=self._tool_get_status, + ) + ) + + self._register( + ToolSpec( + name="dss_list_themes", + description="List all available themes in the DSS system", + category="themes", + input_schema={"type": "object", "properties": {}}, + func=self._tool_list_themes, + ) + ) + + self._register( + ToolSpec( + name="dss_analyze_project", + description="Analyze a project for design system patterns, component usage, and tokenization opportunities. Returns comprehensive analysis including style patterns, React components, and dependency graph.", + category="analysis", + input_schema={ + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Absolute path to the project directory to analyze", + }, + "project_id": { + "type": "string", + "description": "Optional project ID (used when executing on the headless server)", + }, + }, + }, + func=self._tool_analyze_project, + ) + ) + + self._register( + ToolSpec( + name="dss_find_quick_wins", + description="Find quick win opportunities for design system adoption. Identifies low-effort, high-impact improvements.", + category="analysis", + input_schema={ + "type": "object", + "properties": { + "path": {"type": "string", "description": "Path to the project directory"}, + "project_id": {"type": "string", "description": "Optional project ID"}, + }, + }, + func=self._tool_find_quick_wins, + ) + ) + + self._register( + ToolSpec( + name="dss_audit_components", + description="Audit React components for design system adoption. Identifies hardcoded values, missing tokens, and refactoring opportunities.", + category="analysis", + input_schema={ + "type": "object", + "properties": { + "path": {"type": "string", "description": "Path to React component directory"}, + "project_id": {"type": "string", "description": "Optional project ID"}, + }, + }, + func=self._tool_audit_components, + ) + ) + + self._register( + ToolSpec( + name="dss_extract_tokens", + description="Extract design tokens from CSS, SCSS, Tailwind, or JSON sources. Returns a unified TokenCollection with all discovered tokens.", + category="tokens", + input_schema={ + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Path to the file or directory containing design tokens", + }, + "sources": { + "type": "array", + "items": {"type": "string", "enum": ["css", "scss", "tailwind", "json"]}, + "description": "Token source types to extract from (default: all)", + }, + }, + "required": ["path"], + }, + func=self._tool_extract_tokens, + ) + ) + + self._register( + ToolSpec( + name="dss_generate_theme", + description="Generate theme files from design tokens using style-dictionary. 
Supports CSS, SCSS, JSON, and JS output formats.", + category="tokens", + input_schema={ + "type": "object", + "properties": { + "tokens": { + "type": "object", + "description": "Design tokens to transform (or use tokens from previous extraction)", + }, + "format": { + "type": "string", + "enum": ["css", "scss", "json", "js"], + "description": "Output format for generated theme files", + }, + "theme_name": { + "type": "string", + "description": "Name for the generated theme (default: 'default')", + }, + }, + "required": ["format"], + }, + func=self._tool_generate_theme, + ) + ) + + self._register( + ToolSpec( + name="dss_transform_tokens", + description="Transform tokens between formats using style-dictionary", + category="tokens", + input_schema={ + "type": "object", + "properties": { + "tokens": {"type": "object", "description": "Tokens to transform"}, + "input_format": { + "type": "string", + "enum": ["css", "scss", "json", "tailwind"], + "description": "Input token format", + }, + "output_format": { + "type": "string", + "enum": ["css", "scss", "json", "js"], + "description": "Desired output format", + }, + }, + "required": ["tokens", "output_format"], + }, + func=self._tool_transform_tokens, + ) + ) + + self._register( + ToolSpec( + name="dss_setup_storybook", + description="Set up or configure Storybook for the project. Generates stories and theme configuration.", + category="storybook", + input_schema={ + "type": "object", + "properties": { + "path": {"type": "string", "description": "Path to the project directory"}, + "project_id": {"type": "string", "description": "Optional project ID"}, + "action": { + "type": "string", + "enum": ["scan", "generate", "configure"], + "description": "Action to perform: scan existing, generate stories, or configure theme", + }, + }, + "required": ["action"], + }, + func=self._tool_setup_storybook, + ) + ) + + self._register( + ToolSpec( + name="dss_sync_figma", + description="Sync design tokens from a Figma file. Requires FIGMA_TOKEN environment variable.", + category="figma", + input_schema={ + "type": "object", + "properties": { + "file_key": {"type": "string", "description": "Figma file key (from URL)"}, + "format": { + "type": "string", + "enum": ["css", "scss", "json", "js"], + "description": "Output format for extracted tokens (default: json)", + }, + "output_dir": { + "type": "string", + "description": "Optional output directory for token export", + }, + }, + "required": ["file_key"], + }, + func=self._tool_sync_figma, + ) + ) + + self._register( + ToolSpec( + name="dss_list_guides", + description="List available DSS guides (Claude plugin skills and command docs) for use in any MCP client.", + category="docs", + input_schema={ + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": ["all", "skill", "command"], + "description": "Which guide set to return (default: all).", + }, + "include_meta": { + "type": "boolean", + "description": "Include YAML front matter metadata for each guide (default: false).", + } + }, + }, + func=self._tool_list_guides, + ) + ) + + self._register( + ToolSpec( + name="dss_get_guide", + description="Get a specific DSS guide by id (from dss_list_guides). 
Returns markdown content and related tool names.", + category="docs", + input_schema={ + "type": "object", + "properties": { + "id": {"type": "string", "description": "Guide id (e.g., 'skill:design-system-analysis')"}, + "include_frontmatter": { + "type": "boolean", + "description": "Include YAML front matter in returned content (default: false)", + }, + }, + "required": ["id"], + }, + func=self._tool_get_guide, + ) + ) + + self._register( + ToolSpec( + name="dss_match_skills", + description="Given file paths, return which Claude plugin skills would apply (based on globs/alwaysApply).", + category="docs", + input_schema={ + "type": "object", + "properties": { + "paths": { + "type": "array", + "items": {"type": "string"}, + "description": "File paths to test against skill globs (absolute or relative).", + }, + "include_always_apply": { + "type": "boolean", + "description": "Include skills with alwaysApply=true even if no glob matches (default: true).", + }, + "include_meta": { + "type": "boolean", + "description": "Include the full YAML front matter for each matching skill (default: false).", + }, + }, + "required": ["paths"], + }, + func=self._tool_match_skills, + ) + ) + + def _register_optional_dev_tools(self) -> None: + """ + Dev-only workflow tools. + + These wrap local shell scripts (init/reset/services). They are intentionally gated behind + `DSS_ENABLE_DEV_COMMANDS=1` to avoid exposing side-effectful operations in headless/proxy usage. + """ + enabled = (os.getenv("DSS_ENABLE_DEV_COMMANDS") or "").strip().lower() in { + "1", + "true", + "yes", + "on", + } + if not enabled: + return + + self._register( + ToolSpec( + name="dss_init", + description="Run DSS initialization workflow (wrapper around scripts/dss-init.sh).", + category="workflow", + input_schema={ + "type": "object", + "properties": { + "reset": {"type": "boolean", "description": "Clear DSS data first (--reset)"}, + "skip_analysis": {"type": "boolean", "description": "Skip analysis step (--skip-analysis)"}, + "skip_servers": {"type": "boolean", "description": "Don't start servers (--skip-servers)"}, + "servers_only": {"type": "boolean", "description": "Only start servers (--servers-only)"}, + }, + }, + func=self._tool_dss_init, + ) + ) + + self._register( + ToolSpec( + name="dss_reset", + description="Reset DSS to a clean state (wrapper around scripts/dss-reset.sh). Dry-run unless confirm=true.", + category="workflow", + input_schema={ + "type": "object", + "properties": { + "confirm": { + "type": "boolean", + "description": "Actually perform reset (default false = dry-run)", + } + }, + }, + func=self._tool_dss_reset, + ) + ) + + self._register( + ToolSpec( + name="dss_services", + description="Manage DSS dev services (wrapper around scripts/dss-services.sh).", + category="workflow", + input_schema={ + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["start", "stop", "status", "restart", "logs"], + "description": "Action to perform", + }, + "service": { + "type": "string", + "description": "Optional service name (api, admin-ui, storybook)", + }, + }, + "required": ["action"], + }, + func=self._tool_dss_services, + ) + ) + + def list_tools(self, include_details: bool = False) -> List[Dict[str, Any]]: + # include_details reserved for future expansion; current payload is stable. 
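+        # Each entry mirrors ToolSpec minus the callable:
+        # name, description, input_schema, category.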
+ return [ + { + "name": t.name, + "description": t.description, + "input_schema": t.input_schema, + "category": t.category, + } + for t in sorted(self._tools.values(), key=lambda x: x.name) + ] + + def get_tool_info(self, tool_name: str) -> Optional[Dict[str, Any]]: + t = self._tools.get(tool_name) + if not t: + return None + return {"name": t.name, "description": t.description, "input_schema": t.input_schema, "category": t.category} + + def get_tools_for_claude(self) -> List[Dict[str, Any]]: + # Anthropic tool format uses `input_schema`. + return [ + {"name": t.name, "description": t.description, "input_schema": t.input_schema} + for t in sorted(self._tools.values(), key=lambda x: x.name) + ] + + async def get_project_context( + self, project_id: str, user_id: Optional[int] = None + ) -> Optional[ProjectContextSummary]: + """ + Lightweight per-project context for AI prompt injection. + + This is intentionally small and stable (no large file contents). + """ + from dss.storage.json_store import Components, Integrations, Projects + + project = Projects.get(project_id) + if not project: + return None + + name = project.get("name") or project_id + component_count = len(Components.list(project_id)) + + integrations_list = Integrations.list(project_id, user_id) if user_id is not None else [] + integrations = {i.get("integration_type", "unknown"): i for i in integrations_list} + + # Minimal heuristic health score (placeholder until project health model exists) + score = 80 if project.get("root_path") else 50 + if component_count == 0: + score = min(score, 60) + grade = "A" if score >= 90 else "B" if score >= 80 else "C" if score >= 70 else "D" + + return ProjectContextSummary( + name=name, + component_count=component_count, + health={"score": score, "grade": grade}, + integrations=integrations, + ) + + async def execute_tool( + self, tool_name: str, arguments: Dict[str, Any], context: MCPContext + ) -> ToolExecutionResult: + start = time.time() + try: + tool = self._tools.get(tool_name) + if not tool: + return ToolExecutionResult( + success=False, + error=f"Tool not found: {tool_name}", + duration_ms=int((time.time() - start) * 1000), + ) + + result = await tool.func(arguments or {}, context) + return ToolExecutionResult( + success=True, + result=result, + duration_ms=int((time.time() - start) * 1000), + ) + except Exception as e: + return ToolExecutionResult( + success=False, + error=str(e), + duration_ms=int((time.time() - start) * 1000), + ) + + # --------------------------------------------------------------------- + # Tool implementations + # --------------------------------------------------------------------- + + async def _tool_get_status(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + fmt = (arguments or {}).get("format") or "json" + dashboard = StatusDashboard() + if fmt == "dashboard": + return {"format": "dashboard", "text": dashboard.render_text()} + return dashboard.get_status() + + async def _tool_list_themes(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + themes = [get_default_light_theme(), get_default_dark_theme()] + return { + "success": True, + "themes": [ + { + "name": t.name, + "version": t.version, + "token_count": len(t.tokens or {}), + } + for t in themes + ], + } + + async def _tool_analyze_project(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + from dss.analyze.graph import DependencyGraph + from dss.analyze.quick_wins import QuickWinFinder + from dss.analyze.react import ReactAnalyzer + from dss.analyze.scanner import 
ProjectScanner + from dss.analyze.styles import StyleAnalyzer + + root = _resolve_project_root(arguments, context) + if not root.exists(): + raise FileNotFoundError(f"Path does not exist: {root}") + + scanner = ProjectScanner(str(root), use_cache=False) + analysis = await scanner.scan() + + react = ReactAnalyzer(str(root)) + components = await react.analyze() + analysis.components = components + analysis.component_count = len(components) + + style_analyzer = StyleAnalyzer(str(root)) + style_result = await style_analyzer.analyze() + analysis.token_candidates = style_result.get("token_candidates", []) # type: ignore[assignment] + analysis.stats["token_candidates"] = len(analysis.token_candidates) + + quick_wins = await QuickWinFinder(str(root)).find_all() + analysis.quick_wins = quick_wins + analysis.stats["quick_wins_count"] = len(quick_wins) + + graph = await DependencyGraph(str(root)).build() + + return { + "success": True, + "project_path": str(root), + "analysis": _safe_serialize(analysis), + "dependency_graph": _safe_serialize(graph), + "style_summary": _safe_serialize({k: v for k, v in style_result.items() if k != "token_candidates"}), + } + + async def _tool_find_quick_wins(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + from dss.analyze.quick_wins import QuickWinFinder + + root = _resolve_project_root(arguments, context) + wins = await QuickWinFinder(str(root)).find_all() + return {"success": True, "project_path": str(root), "quick_wins": _safe_serialize(wins)} + + async def _tool_audit_components(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + from dss.analyze.react import ReactAnalyzer + from dss.analyze.styles import StyleAnalyzer + + root = _resolve_project_root(arguments, context) + react = ReactAnalyzer(str(root)) + components = await react.analyze() + inline_styles = await react.find_inline_styles() + + style = StyleAnalyzer(str(root)) + style_result = await style.analyze() + + return { + "success": True, + "project_path": str(root), + "components_count": len(components), + "inline_styles_count": len(inline_styles), + "inline_styles": _safe_serialize(inline_styles[:50]), + "duplicates": _safe_serialize(style_result.get("duplicates", [])[:50]), + "token_candidates": _safe_serialize(style_result.get("token_candidates", [])[:50]), + } + + async def _tool_extract_tokens(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + target = Path(arguments["path"]).expanduser().resolve() + sources = arguments.get("sources") or ["css", "scss", "tailwind", "json"] + + collections = [] + + # Directory mode: scan files per source. + if target.is_dir(): + if "css" in sources: + src = CSSTokenSource() + for f in target.rglob("*.css"): + if "node_modules" in f.parts: + continue + collections.append(await src.extract(str(f))) + + if "scss" in sources: + src = SCSSTokenSource() + for pattern in ["*.scss", "*.sass"]: + for f in target.rglob(pattern): + if "node_modules" in f.parts: + continue + collections.append(await src.extract(str(f))) + + if "tailwind" in sources: + src = TailwindTokenSource() + try: + collections.append(await src.extract(str(target))) + except FileNotFoundError: + pass + + if "json" in sources: + src = JSONTokenSource() + for f in target.rglob("*.json"): + if f.name in {"package.json", "package-lock.json", "tsconfig.json"}: + continue + if "node_modules" in f.parts: + continue + # Prefer obvious token files; keep a loose fallback. 
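+                    # ("token" anywhere in the filename qualifies, e.g. design-tokens.json.)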
+ if "token" not in f.name.lower() and f.name not in {"tokens.json"}: + continue + collections.append(await src.extract(str(f))) + else: + # File mode: only run relevant sources (or all if requested). + ext = target.suffix.lower() + + if "css" in sources and ext == ".css": + collections.append(await CSSTokenSource().extract(str(target))) + if "scss" in sources and ext in {".scss", ".sass"}: + collections.append(await SCSSTokenSource().extract(str(target))) + if "json" in sources and ext == ".json": + collections.append(await JSONTokenSource().extract(str(target))) + if "tailwind" in sources and target.name.startswith("tailwind.config"): + collections.append(await TailwindTokenSource().extract(str(target))) + + # If nothing matched, try all selected sources safely. + if not collections: + if "tailwind" in sources: + try: + collections.append(await TailwindTokenSource().extract(str(target.parent))) + except FileNotFoundError: + pass + + merger = TokenMerger(strategy=MergeStrategy.LAST) + merge_result = merger.merge(collections, normalize_names=True) + + return { + "success": True, + "path": str(target), + "sources": sources, + "collection": _safe_serialize(merge_result.collection), + "conflicts": _safe_serialize(merge_result.conflicts), + "stats": _safe_serialize(merge_result.stats), + "warnings": merge_result.warnings, + } + + async def _tool_generate_theme(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + output_format = arguments.get("format") + if not output_format: + raise ValueError("Missing required field: format") + + token_pairs = _normalize_token_pairs(arguments.get("tokens")) + content = _format_tokens_output(token_pairs, output_format) + + return { + "success": True, + "format": output_format, + "theme_name": arguments.get("theme_name") or "default", + "output": content, + } + + async def _tool_transform_tokens(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + output_format = arguments.get("output_format") + token_pairs = _normalize_token_pairs(arguments.get("tokens")) + content = _format_tokens_output(token_pairs, output_format) + return {"success": True, "output_format": output_format, "output": content} + + async def _tool_setup_storybook(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + action = arguments.get("action") + if not action: + raise ValueError("Missing required field: action") + + root = _resolve_project_root(arguments, context) + if not root.exists(): + raise FileNotFoundError(f"Path does not exist: {root}") + + if action == "scan": + scanner = StorybookScanner(str(root)) + return await scanner.scan() + + if action == "generate": + generator = StoryGenerator(str(root)) + results = generator.generate(dry_run=False) + return {"success": True, "generated": len(results), "results": _safe_serialize(results)} + + if action == "configure": + theme_gen = ThemeGenerator(str(root)) + result = theme_gen.generate() + return {"success": True, "result": _safe_serialize(result)} + + raise ValueError(f"Unknown action: {action}") + + async def _tool_sync_figma(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + file_key = arguments["file_key"] + fmt = arguments.get("format") or "json" + env_home = os.environ.get("DSS_HOME") + if env_home: + dss_home = Path(env_home).expanduser() + else: + local = Path.cwd() / ".dss" + dss_home = local if local.exists() else (Path.home() / ".dss") + output_dir = arguments.get("output_dir") or str(dss_home / "cache" / "figma") + + suite = FigmaToolSuite(output_dir=output_dir) + return await 
suite.extract_variables(file_key=file_key, format=fmt) + + async def _tool_list_guides(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + kind = (arguments or {}).get("kind") or "all" + include_meta = bool((arguments or {}).get("include_meta") or False) + guides = list_guides(kind=kind) + return { + "success": True, + "kind": kind, + "count": len(guides), + "guides": [ + { + "id": g.id, + "type": g.type, + "name": g.name, + "description": g.description, + "related_tools": g.related_tools, + "source_path": g.source_path, + **({"meta": g.meta} if include_meta else {}), + } + for g in guides + ], + } + + async def _tool_get_guide(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + guide_id = (arguments or {}).get("id") + if not guide_id: + raise ValueError("Missing required field: id") + include_frontmatter = bool((arguments or {}).get("include_frontmatter") or False) + data = get_guide(guide_id, include_frontmatter=include_frontmatter) + return {"success": True, **data} + + async def _tool_match_skills(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + from pathlib import PurePosixPath + + raw_paths = (arguments or {}).get("paths") + if not isinstance(raw_paths, list) or not raw_paths: + raise ValueError("paths must be a non-empty array of strings") + + include_always_apply = (arguments or {}).get("include_always_apply") + if include_always_apply is None: + include_always_apply = True + include_always_apply = bool(include_always_apply) + + include_meta = bool((arguments or {}).get("include_meta") or False) + + def to_posix_path(p: str) -> PurePosixPath: + s = str(p).replace("\\", "/") + return PurePosixPath(s) + + def to_bool(v: Any) -> bool: + if isinstance(v, bool): + return v + if v is None: + return False + s = str(v).strip().lower() + return s in {"1", "true", "yes", "on"} + + paths = [to_posix_path(p) for p in raw_paths] + skills = [g for g in list_guides(kind="skill") if g.type == "skill"] + + matched: List[Dict[str, Any]] = [] + for skill in skills: + globs = skill.meta.get("globs") or [] + if isinstance(globs, str): + globs = [globs] + if not isinstance(globs, list): + globs = [] + + always_apply = to_bool(skill.meta.get("alwaysApply")) + matched_globs: List[str] = [] + matched_paths: List[str] = [] + + for p in paths: + for pat in globs: + pat_s = str(pat).replace("\\", "/") + if p.match(pat_s): + matched_globs.append(str(pat)) + matched_paths.append(str(p)) + + matched_globs = sorted(set(matched_globs)) + matched_paths = sorted(set(matched_paths)) + + reasons: List[str] = [] + if matched_globs: + reasons.append("globs") + if always_apply and include_always_apply: + reasons.append("alwaysApply") + + if not reasons: + continue + + entry: Dict[str, Any] = { + "id": skill.id, + "name": skill.name, + "description": skill.description, + "related_tools": skill.related_tools, + "alwaysApply": always_apply, + "globs": globs, + "matched_globs": matched_globs, + "matched_paths": matched_paths, + "reasons": reasons, + "source_path": skill.source_path, + } + if include_meta: + entry["meta"] = skill.meta + matched.append(entry) + + matched.sort(key=lambda s: (s.get("name") or "", s.get("id") or "")) + return {"success": True, "count": len(matched), "skills": matched} + + def _repo_root(self) -> Path: + return Path(__file__).resolve().parent.parent.parent + + async def _run_script(self, rel_path: str, args: List[str]) -> Dict[str, Any]: + script = self._repo_root() / rel_path + if not script.exists(): + raise FileNotFoundError(f"Script not found: {script}") 
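+        # The script is executed directly (no shell), so tool-supplied
+        # arguments reach it verbatim.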
+ + proc = await asyncio.create_subprocess_exec( + str(script), + *args, + cwd=str(self._repo_root()), + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout_b, stderr_b = await proc.communicate() + + stdout = (stdout_b or b"").decode("utf-8", errors="replace") + stderr = (stderr_b or b"").decode("utf-8", errors="replace") + + max_len = 20000 + if len(stdout) > max_len: + stdout = stdout[:max_len] + "\n…(truncated)…\n" + if len(stderr) > max_len: + stderr = stderr[:max_len] + "\n…(truncated)…\n" + + return { + "exit_code": proc.returncode, + "stdout": stdout, + "stderr": stderr, + } + + async def _tool_dss_init(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + args: List[str] = [] + if (arguments or {}).get("reset"): + args.append("--reset") + if (arguments or {}).get("skip_analysis"): + args.append("--skip-analysis") + if (arguments or {}).get("skip_servers"): + args.append("--skip-servers") + if (arguments or {}).get("servers_only"): + args.append("--servers-only") + return await self._run_script("scripts/dss-init.sh", args) + + async def _tool_dss_reset(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + args: List[str] = [] + if bool((arguments or {}).get("confirm") or False): + args.append("--confirm") + return await self._run_script("scripts/dss-reset.sh", args) + + async def _tool_dss_services(self, arguments: Dict[str, Any], context: MCPContext) -> Any: + action = (arguments or {}).get("action") + if not action: + raise ValueError("Missing required field: action") + args: List[str] = [str(action)] + service = (arguments or {}).get("service") + if service: + args.extend(["--service", str(service)]) + return await self._run_script("scripts/dss-services.sh", args) + + +_handler: Optional[MCPHandler] = None + + +def get_mcp_handler() -> MCPHandler: + global _handler + if _handler is None: + _handler = MCPHandler() + return _handler diff --git a/dss/mcp/server.py b/dss/mcp/server.py new file mode 100644 index 0000000..256b7a4 --- /dev/null +++ b/dss/mcp/server.py @@ -0,0 +1,113 @@ +""" +DSS MCP stdio server. + +This is the local process that Claude Code spawns. It can run in: +- Local mode: execute tools on the local filesystem directly. +- Proxy mode: forward tool execution to a headless DSS server over HTTP. + +Proxy mode is enabled by setting `DSS_API_URL` (or `DSS_SERVER_URL`). +""" + +from __future__ import annotations + +import asyncio +import json +import os +from typing import Any, Dict, List + +import httpx + +from dss.mcp.config import mcp_config +from dss.mcp.handler import MCPContext, get_mcp_handler + +try: + from mcp.server import Server + from mcp.server.stdio import stdio_server + from mcp.types import TextContent, Tool +except ImportError as e: # pragma: no cover + raise SystemExit("MCP SDK not found. 
Install with: pip install mcp") from e + + +server = Server("dss") + + +def _api_base_url() -> str: + api_url = mcp_config.API_URL or "" + return api_url.rstrip("/") + + +def _default_context() -> MCPContext: + project_id = os.getenv("DSS_PROJECT_ID") or None + user_id = os.getenv("DSS_USER_ID") + return MCPContext(project_id=project_id, user_id=int(user_id) if user_id and user_id.isdigit() else None) + + +async def _proxy_list_tools() -> List[Dict[str, Any]]: + base = _api_base_url() + if not base: + raise ValueError("DSS_API_URL not configured for proxy mode") + async with httpx.AsyncClient(timeout=30.0) as client: + resp = await client.get(f"{base}/api/mcp/tools", params={"include_details": "true"}) + resp.raise_for_status() + return resp.json() + + +async def _proxy_execute_tool(tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]: + base = _api_base_url() + if not base: + raise ValueError("DSS_API_URL not configured for proxy mode") + + ctx = _default_context() + payload: Dict[str, Any] = { + "arguments": arguments or {}, + "project_id": ctx.project_id or "", + "user_id": ctx.user_id or 1, + } + + async with httpx.AsyncClient(timeout=60.0) as client: + resp = await client.post(f"{base}/api/mcp/tools/{tool_name}/execute", json=payload) + resp.raise_for_status() + return resp.json() + + +@server.list_tools() +async def list_tools() -> List[Tool]: + # Proxy mode: reflect tool list from headless server. + if _api_base_url(): + tools = await _proxy_list_tools() + else: + tools = get_mcp_handler().list_tools(include_details=True) + + return [ + Tool( + name=t["name"], + description=t.get("description", ""), + inputSchema=t.get("input_schema", {"type": "object", "properties": {}}), + ) + for t in tools + ] + + +@server.call_tool() +async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]: + if _api_base_url(): + result = await _proxy_execute_tool(name, arguments or {}) + return [TextContent(type="text", text=json.dumps(result, indent=2))] + + handler = get_mcp_handler() + exec_result = await handler.execute_tool(name, arguments or {}, _default_context()) + return [TextContent(type="text", text=json.dumps(exec_result.to_dict(), indent=2))] + + +async def _serve() -> None: + async with stdio_server() as streams: + await server.run(*streams) + + +def main() -> None: + asyncio.run(_serve()) + + +if __name__ == "__main__": # pragma: no cover + main() + diff --git a/dss/project/core.py b/dss/project/core.py index 8f7e071..e600a60 100644 --- a/dss/project/core.py +++ b/dss/project/core.py @@ -12,6 +12,7 @@ Hierarchy: from dataclasses import dataclass from pathlib import Path +import os from typing import Optional # ============================================================================= @@ -40,12 +41,26 @@ DSS_FIGMA_REFERENCE = DSSFigmaReference() # ============================================================================= # DSS installation paths -DSS_ROOT = Path("/home/overbits/dss") -DSS_MVP1 = DSS_ROOT / "dss-mvp1" -DSS_CORE_DIR = DSS_MVP1 / "dss" / "core_tokens" +def _resolve_dss_root() -> Path: + # dss/project/core.py -> dss/project -> dss -> repo root + return Path(__file__).resolve().parents[2] + + +def _resolve_dss_home() -> Path: + env = os.environ.get("DSS_HOME") + if env: + return Path(env).expanduser() + local = Path.cwd() / ".dss" + if local.exists(): + return local + return Path.home() / ".dss" + + +DSS_ROOT = _resolve_dss_root() +DSS_CORE_DIR = Path(__file__).resolve().parents[1] / "core_tokens" # User data paths -DSS_USER_DIR = Path.home() / 
".dss" +DSS_USER_DIR = _resolve_dss_home() DSS_CACHE_DIR = DSS_USER_DIR / "cache" DSS_REGISTRY_FILE = DSS_USER_DIR / "registry.json" diff --git a/dss/project/manager.py b/dss/project/manager.py index 60bd328..1a9dedf 100644 --- a/dss/project/manager.py +++ b/dss/project/manager.py @@ -26,8 +26,22 @@ from dss.project.sync import get_dss_core_tokens logger = logging.getLogger(__name__) # Default location for DSS projects registry -DSS_PROJECTS_DIR = Path.home() / ".dss" / "projects" -DSS_REGISTRY_FILE = Path.home() / ".dss" / "registry.json" +def _resolve_dss_home() -> Path: + env = os.environ.get("DSS_HOME") + if env: + return Path(env).expanduser() + + cwd = Path.cwd() + local = cwd / ".dss" + if local.exists(): + return local + + return Path.home() / ".dss" + + +_dss_home = _resolve_dss_home() +DSS_PROJECTS_DIR = _dss_home / "projects" +DSS_REGISTRY_FILE = _dss_home / "registry.json" class ProjectRegistry: diff --git a/dss/services/project_manager.py b/dss/services/project_manager.py index b691cd9..bc9fc22 100644 --- a/dss/services/project_manager.py +++ b/dss/services/project_manager.py @@ -19,7 +19,7 @@ class ProjectManager: """ Manages project registry with root path validation. - Works with the existing Projects database class to add root_path support. + Works with the existing Projects storage class to add root_path support. Validates paths exist and are accessible before registration. """ @@ -28,7 +28,7 @@ class ProjectManager: Initialize project manager. Args: - projects_db: Projects database class (from dss.storage.database) + projects_db: Projects storage class (from dss.storage.json_store) config_service: Optional ConfigService for config initialization """ self.db = projects_db @@ -73,7 +73,7 @@ class ProjectManager: project_id = str(uuid.uuid4())[:8] - # Create project in database + # Create project in storage project = self.db.create( id=project_id, name=name, description=description, figma_file_key=figma_file_key ) diff --git a/dss/settings.py b/dss/settings.py index 014daa0..f27c53b 100644 --- a/dss/settings.py +++ b/dss/settings.py @@ -6,6 +6,7 @@ Includes test utilities and reset functionality import shutil import subprocess +import os from pathlib import Path from typing import Dict, Optional @@ -13,6 +14,18 @@ from pydantic import ConfigDict from pydantic_settings import BaseSettings +def _resolve_dss_home() -> Path: + env = os.environ.get("DSS_HOME") + if env: + return Path(env).expanduser() + + local = Path.cwd() / ".dss" + if local.exists(): + return local + + return Path.home() / ".dss" + + class DSSSettings(BaseSettings): """DSS Configuration Settings.""" @@ -22,8 +35,9 @@ class DSSSettings(BaseSettings): PROJECT_ROOT: Path = Path(__file__).parent.parent DSS_DIR: Path = Path(__file__).parent TESTS_DIR: Path = PROJECT_ROOT / "tests" - CACHE_DIR: Path = Path.home() / ".dss" / "cache" - DATA_DIR: Path = Path.home() / ".dss" / "data" + DSS_HOME: Path = _resolve_dss_home() + CACHE_DIR: Path = DSS_HOME / "cache" + DATA_DIR: Path = DSS_HOME / "data" # API Configuration ANTHROPIC_API_KEY: Optional[str] = None @@ -31,11 +45,7 @@ class DSSSettings(BaseSettings): FIGMA_FILE_KEY: Optional[str] = None FIGMA_CACHE_TTL: int = 300 # 5 minutes - # Database - DATABASE_PATH: Path = Path.home() / ".dss" / "dss.db" - # Test Configuration - TEST_DATABASE_PATH: Path = Path.home() / ".dss" / "test.db" USE_MOCK_APIS: bool = True # Server Configuration (DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226) @@ -185,23 +195,17 @@ class DSSManager: except Exception as e: 
results["errors"].append(f"Failed to clear Figma cache: {e}") - # Reset database - db_path = self.settings.DATABASE_PATH - if db_path.exists(): - try: - db_path.unlink() - results["deleted"].append(str(db_path)) - except Exception as e: - results["errors"].append(f"Failed to reset database: {e}") - - # Clear test database - test_db_path = self.settings.TEST_DATABASE_PATH - if test_db_path.exists(): - try: - test_db_path.unlink() - results["deleted"].append(str(test_db_path)) - except Exception as e: - results["errors"].append(f"Failed to clear test database: {e}") + # Legacy cleanup (pre-JSON-only versions) + for legacy_db in [ + self.settings.DSS_HOME / "dss.db", + self.settings.DSS_HOME / "test.db", + ]: + if legacy_db.exists(): + try: + legacy_db.unlink() + results["deleted"].append(str(legacy_db)) + except Exception as e: + results["errors"].append(f"Failed to remove legacy file: {e}") # Clear Python cache for pycache in self.project_root.rglob("__pycache__"): @@ -236,7 +240,8 @@ class DSSManager: "dss_dir": str(self.dss_dir), "tests_dir": str(self.settings.TESTS_DIR), "cache_dir": str(self.settings.CACHE_DIR), - "database_path": str(self.settings.DATABASE_PATH), + "dss_home": str(self.settings.DSS_HOME), + "data_dir": str(self.settings.DATA_DIR), "has_anthropic_key": bool(self.settings.ANTHROPIC_API_KEY), "has_figma_token": bool(self.settings.FIGMA_TOKEN), "use_mock_apis": self.settings.USE_MOCK_APIS, @@ -360,8 +365,9 @@ Management Commands: print(f" Project root: {info['project_root']}") print(f" DSS directory: {info['dss_dir']}") print(f" Tests directory: {info['tests_dir']}") + print(f" DSS home: {info['dss_home']}") + print(f" Data directory: {info['data_dir']}") print(f" Cache directory: {info['cache_dir']}") - print(f" Database path: {info['database_path']}") print( f" Anthropic API: {'Configured' if info['has_anthropic_key'] else 'Not configured'}" ) diff --git a/dss/status/dashboard.py b/dss/status/dashboard.py index a73c839..5e902df 100644 --- a/dss/status/dashboard.py +++ b/dss/status/dashboard.py @@ -3,13 +3,13 @@ DSS Status Dashboard - Comprehensive system status visualization. 
Provides a beautiful ASCII art dashboard that aggregates data from: - DSSManager (system info, dependencies) -- Database stats (projects, components, styles) +- Storage stats (projects, components, styles) - ActivityLog (recent activity) - SyncHistory (sync operations) - QuickWinFinder (improvement opportunities) Expert-validated design with: -- Optimized database queries using LIMIT +- Optimized storage queries using LIMIT - Modular render methods for maintainability - Named constants for health score weights - Dynamic terminal width support @@ -23,7 +23,7 @@ from typing import Any, Dict, List, Optional # Health score weight constants (expert recommendation) HEALTH_WEIGHT_DEPENDENCIES = 0.40 HEALTH_WEIGHT_INTEGRATIONS = 0.25 -HEALTH_WEIGHT_DATABASE = 0.20 +HEALTH_WEIGHT_STORAGE = 0.20 HEALTH_WEIGHT_ACTIVITY = 0.15 @@ -70,7 +70,7 @@ class StatusData: # Configuration project_root: str = "" - database_path: str = "" + storage_path: str = "" cache_dir: str = "" figma_configured: bool = False anthropic_configured: bool = False @@ -136,7 +136,7 @@ class StatusDashboard: "quick_wins": {"count": data.quick_wins_count, "items": data.quick_wins}, "configuration": { "project_root": data.project_root, - "database": data.database_path, + "storage": data.storage_path, "cache": data.cache_dir, "figma_configured": data.figma_configured, "anthropic_configured": data.anthropic_configured, @@ -159,7 +159,7 @@ class StatusDashboard: # System info info = self._manager.get_system_info() data.project_root = info["project_root"] - data.database_path = info["database_path"] + data.storage_path = info["data_dir"] data.cache_dir = info["cache_dir"] data.figma_configured = info["has_figma_token"] data.anthropic_configured = info["has_anthropic_key"] @@ -195,30 +195,28 @@ class StatusDashboard: ) ) - # Database stats + # Storage stats try: from dss.storage.json_store import ActivityLog, Projects, SyncHistory, get_stats stats = get_stats() data.projects_count = stats.get("projects", 0) + data.projects_active = stats.get("projects_active", 0) data.components_count = stats.get("components", 0) data.styles_count = stats.get("styles", 0) + data.tokens_count = stats.get("tokens", 0) - # Database size metric - db_size = stats.get("db_size_mb", 0) + # Storage size metric + storage_size = stats.get("total_size_mb", 0) data.health_metrics.append( HealthMetric( - name="Database", - status="ok" if db_size < 100 else "warning", - value=f"{db_size} MB", - category="database", + name="Storage", + status="ok" if storage_size < 500 else "warning", + value=f"{storage_size} MB", + category="storage", ) ) - # Projects - projects = Projects.list() - data.projects_active = len([p for p in projects if p.get("status") == "active"]) - # Recent activity (OPTIMIZED: use limit parameter, not slice) # Expert recommendation: avoid [:5] slicing which fetches all records activities = ActivityLog.recent(limit=5) @@ -226,12 +224,12 @@ class StatusDashboard: { "action": a.get("action", ""), "description": a.get("description", ""), - "created_at": a.get("created_at", ""), + "created_at": a.get("timestamp", ""), "category": a.get("category", ""), } for a in activities ] - data.total_activities = ActivityLog.count() + data.total_activities = ActivityLog.count(days=30) # Recent syncs (OPTIMIZED: use limit parameter) syncs = SyncHistory.recent(limit=3) @@ -248,10 +246,10 @@ class StatusDashboard: except Exception as e: data.health_metrics.append( HealthMetric( - name="Database", + name="Storage", status="error", value=f"Error: {str(e)[:30]}", - 
category="database", + category="storage", ) ) @@ -271,7 +269,7 @@ class StatusDashboard: Uses weighted components: - Dependencies: 40% - Integrations: 25% - - Database: 20% + - Storage: 20% - Activity: 15% """ # Dependencies score (40%) @@ -288,12 +286,12 @@ class StatusDashboard: else: int_ok = 0 - # Database score (20%) - db_metrics = [m for m in data.health_metrics if m.category == "database"] - if db_metrics: - db_ok = sum(1 for m in db_metrics if m.status == "ok") / len(db_metrics) + # Storage score (20%) + storage_metrics = [m for m in data.health_metrics if m.category == "storage"] + if storage_metrics: + storage_ok = sum(1 for m in storage_metrics if m.status == "ok") / len(storage_metrics) else: - db_ok = 0 + storage_ok = 0 # Activity score (15%) - based on having recent data activity_ok = 1.0 if data.projects_count > 0 or data.components_count > 0 else 0.5 @@ -302,7 +300,7 @@ class StatusDashboard: score = ( deps_ok * HEALTH_WEIGHT_DEPENDENCIES + int_ok * HEALTH_WEIGHT_INTEGRATIONS - + db_ok * HEALTH_WEIGHT_DATABASE + + storage_ok * HEALTH_WEIGHT_STORAGE + activity_ok * HEALTH_WEIGHT_ACTIVITY ) * 100 @@ -409,12 +407,12 @@ class StatusDashboard: int_line += f"{icon} {i.name} ({i.value}) " lines.append("\u2502" + int_line[:width].ljust(width) + "\u2502") - # Database - db = next((m for m in data.health_metrics if m.category == "database"), None) - if db: - db_icon = "\u2705" if db.status == "ok" else "\u26a0\ufe0f" - db_line = f" Database: {db_icon} {db.value}" - lines.append("\u2502" + db_line.ljust(width) + "\u2502") + # Storage + storage = next((m for m in data.health_metrics if m.category == "storage"), None) + if storage: + storage_icon = "\u2705" if storage.status == "ok" else "\u26a0\ufe0f" + storage_line = f" Storage: {storage_icon} {storage.value}" + lines.append("\u2502" + storage_line.ljust(width) + "\u2502") lines.append("\u2514" + "\u2500" * width + "\u2518") diff --git a/dss/storage/json_store.py b/dss/storage/json_store.py index 9759c24..2f4ac6d 100644 --- a/dss/storage/json_store.py +++ b/dss/storage/json_store.py @@ -5,12 +5,13 @@ Pure JSON file-based storage following DSS canonical structure. No SQLite - everything is JSON for git-friendly diffs. Structure: -.dss/data/ +${DSS_HOME:-~/.dss}/data/ ├── _system/ # DSS internal (config, cache, activity) ├── projects/ # Per-project data (tokens, components, etc.) └── teams/ # Team definitions """ +import os import fcntl import hashlib import json @@ -22,7 +23,22 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Union # Base paths -DATA_DIR = Path(__file__).parent.parent.parent / ".dss" / "data" +def _resolve_dss_home() -> Path: + env = os.environ.get("DSS_HOME") + if env: + return Path(env).expanduser() + + # Project-local default (developer-friendly): use `./.dss` when present. 
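+    # An explicit DSS_HOME always wins; ~/.dss remains the per-user fallback.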
+    cwd = Path.cwd()
+    local = cwd / ".dss"
+    if local.exists():
+        return local
+
+    return Path.home() / ".dss"
+
+
+_dss_home = _resolve_dss_home()
+DATA_DIR = _dss_home / "data"
 SYSTEM_DIR = DATA_DIR / "_system"
 PROJECTS_DIR = DATA_DIR / "projects"
 TEAMS_DIR = DATA_DIR / "teams"
@@ -33,6 +49,7 @@ for d in [
     SYSTEM_DIR,
     SYSTEM_DIR / "cache",
     SYSTEM_DIR / "activity",
+    SYSTEM_DIR / "users",
     PROJECTS_DIR,
     TEAMS_DIR,
 ]:
@@ -664,6 +681,25 @@ class ActivityLog:
 
         return all_records[offset : offset + limit]
 
+    @staticmethod
+    def count(project_id: str = None, days: int = 30) -> int:
+        """Count recent activity records, optionally scoped to one project."""
+        from datetime import timedelta
+
+        total = 0
+        for i in range(days):
+            day = date.today() - timedelta(days=i)
+            path = ActivityLog._log_path(day)
+            if not path.exists():
+                continue
+            with file_lock(path, exclusive=False):
+                try:
+                    with open(path, "r") as f:
+                        for line in f:
+                            if not line.strip():
+                                continue
+                            if project_id:
+                                try:
+                                    record = json.loads(line)
+                                except Exception:
+                                    continue
+                                if record.get("project_id") != project_id:
+                                    continue
+                            total += 1
+                except Exception:
+                    continue
+        return total
+
     @staticmethod
     def search(
         project_id: str = None,
@@ -699,6 +735,157 @@ class ActivityLog:
 
         return all_records[offset : offset + limit]
 
 
+# === Users (system-level auth store) ===
+
+
+class Users:
+    """System user storage for headless-server authentication (JSON-only)."""
+
+    @staticmethod
+    def _users_dir() -> Path:
+        return SYSTEM_DIR / "users"
+
+    @staticmethod
+    def _index_path() -> Path:
+        return SYSTEM_DIR / "users_index.json"
+
+    @staticmethod
+    def _user_path(user_id: int) -> Path:
+        return Users._users_dir() / f"{user_id}.json"
+
+    @staticmethod
+    def _normalize_email(email: str) -> str:
+        return (email or "").strip().lower()
+
+    @staticmethod
+    def _ensure_initialized() -> None:
+        Users._users_dir().mkdir(parents=True, exist_ok=True)
+        index_path = Users._index_path()
+        if not index_path.exists():
+            write_json(index_path, {"next_id": 1, "by_email": {}})
+
+    @staticmethod
+    def _load_index() -> Dict[str, Any]:
+        Users._ensure_initialized()
+        data = read_json(Users._index_path(), {"next_id": 1, "by_email": {}})
+        if not isinstance(data, dict):
+            return {"next_id": 1, "by_email": {}}
+        data.setdefault("next_id", 1)
+        data.setdefault("by_email", {})
+        return data
+
+    @staticmethod
+    def _load_index_unlocked() -> Dict[str, Any]:
+        path = Users._index_path()
+        if not path.exists():
+            return {"next_id": 1, "by_email": {}}
+        try:
+            data = json.loads(path.read_text())
+        except Exception:
+            return {"next_id": 1, "by_email": {}}
+        if not isinstance(data, dict):
+            return {"next_id": 1, "by_email": {}}
+        data.setdefault("next_id", 1)
+        data.setdefault("by_email", {})
+        return data
+
+    @staticmethod
+    def _write_index_unlocked(data: Dict[str, Any]) -> None:
+        path = Users._index_path()
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path.write_text(json.dumps(data, indent=2, default=str))
+
+    @staticmethod
+    def get(user_id: int) -> Optional[Dict[str, Any]]:
+        Users._ensure_initialized()
+        if user_id is None:
+            return None
+        try:
+            user_id_int = int(user_id)
+        except Exception:
+            return None
+        return read_json(Users._user_path(user_id_int))
+
+    @staticmethod
+    def get_by_email(email: str) -> Optional[Dict[str, Any]]:
+        Users._ensure_initialized()
+        normalized = Users._normalize_email(email)
+        if not normalized:
+            return None
+        index = Users._load_index()
+        user_id = index.get("by_email", {}).get(normalized)
+        if user_id is None:
+            return None
+        return Users.get(int(user_id))
+
+    @staticmethod
+    def upsert(
+        email: str,
+        display_name: str,
+        atlassian_url: str,
+        atlassian_service: str,
+        api_token_hash: str,
+        last_login: str,
+    ) -> 
Dict[str, Any]: + """ + Create or update a user by email. + + Returns the stored user record. + """ + Users._ensure_initialized() + normalized = Users._normalize_email(email) + if not normalized: + raise ValueError("email is required") + + index_path = Users._index_path() + with file_lock(index_path, exclusive=True): + index = Users._load_index_unlocked() + by_email = index.get("by_email", {}) + + now = datetime.utcnow().isoformat() + existing_id = by_email.get(normalized) + if existing_id is None: + user_id = int(index.get("next_id", 1)) + index["next_id"] = user_id + 1 + by_email[normalized] = user_id + index["by_email"] = by_email + Users._write_index_unlocked(index) + + record = { + "id": user_id, + "email": normalized, + "display_name": display_name, + "atlassian_url": atlassian_url, + "atlassian_service": atlassian_service, + "api_token_hash": api_token_hash, + "created_at": now, + "updated_at": now, + "last_login": last_login, + } + write_json(Users._user_path(user_id), record) + return record + + user_id = int(existing_id) + record = read_json(Users._user_path(user_id), {}) or {} + if not isinstance(record, dict): + record = {} + record.update( + { + "id": user_id, + "email": normalized, + "display_name": display_name, + "atlassian_url": atlassian_url, + "atlassian_service": atlassian_service, + "api_token_hash": api_token_hash, + "updated_at": now, + "last_login": last_login, + } + ) + record.setdefault("created_at", now) + write_json(Users._user_path(user_id), record) + return record + + # === Teams === @@ -892,6 +1079,28 @@ class CodeMetrics: return data["components"].get(component_id) return data["components"] + @staticmethod + def get_project_summary(project_id: str) -> Dict: + """Get aggregated code metrics for a project.""" + metrics = CodeMetrics.get(project_id) or {} + component_total = len(Components.list(project_id)) + measured = len(metrics) if isinstance(metrics, dict) else 0 + + # Best-effort: find latest update timestamp across component metrics + latest = None + if isinstance(metrics, dict): + for v in metrics.values(): + ts = (v or {}).get("updated_at") + if ts and (latest is None or ts > latest): + latest = ts + + return { + "project_id": project_id, + "components_total": component_total, + "components_measured": measured, + "measured_ratio": (measured / component_total) if component_total else 0.0, + "last_updated_at": latest, + } class TestResults: """Test results storage.""" @@ -942,6 +1151,24 @@ class TestResults: results.sort(key=lambda r: r.get("run_at", ""), reverse=True) return results + @staticmethod + def get_project_summary(project_id: str) -> Dict: + """Get aggregated test results summary for a project.""" + results = TestResults.list(project_id) + total = len(results) + passed = len([r for r in results if r.get("passed") is True]) + failed = len([r for r in results if r.get("passed") is False]) + last_run = results[0].get("run_at") if results else None + + return { + "project_id": project_id, + "total_runs": total, + "passed": passed, + "failed": failed, + "pass_rate": (passed / total) if total else 0.0, + "last_run_at": last_run, + } + class TokenDrift: """Token drift tracking.""" @@ -1011,6 +1238,133 @@ class TokenDrift: return None +class TokenDriftDetector: + """ + Compatibility wrapper used by the headless API. + + The underlying storage is `TokenDrift`. 
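+    It only adapts method names and adds a small stats aggregation helper.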
+ """ + + @staticmethod + def list_by_project(project_id: str, severity: Optional[str] = None) -> List[Dict]: + return TokenDrift.list(project_id, severity=severity) + + @staticmethod + def record_drift( + project_id: str, + component_id: str, + property_name: str, + hardcoded_value: str, + file_path: str, + line_number: int, + severity: str = "warning", + suggested_token: Optional[str] = None, + ) -> Dict: + return TokenDrift.record( + project_id=project_id, + component_id=component_id, + property_name=property_name, + hardcoded_value=hardcoded_value, + file_path=file_path, + line_number=line_number, + severity=severity, + suggested_token=suggested_token, + ) + + @staticmethod + def update_status(project_id: str, drift_id: str, status: str) -> Optional[Dict]: + return TokenDrift.update_status(project_id=project_id, drift_id=drift_id, status=status) + + @staticmethod + def get_stats(project_id: str) -> Dict: + drifts = TokenDrift.list(project_id) + by_status: Dict[str, int] = {} + by_severity: Dict[str, int] = {} + for d in drifts: + by_status[d.get("status", "unknown")] = by_status.get(d.get("status", "unknown"), 0) + 1 + by_severity[d.get("severity", "unknown")] = ( + by_severity.get(d.get("severity", "unknown"), 0) + 1 + ) + return {"total": len(drifts), "by_status": by_status, "by_severity": by_severity} + + +class ESREDefinitions: + """ESRE (Explicit Style Requirements & Expectations) definitions storage.""" + + @staticmethod + def _path(project_id: str) -> Path: + return PROJECTS_DIR / project_id / "metrics" / "esre.json" + + @staticmethod + def list(project_id: str) -> List[Dict]: + data = read_json(ESREDefinitions._path(project_id), {"definitions": []}) + return data.get("definitions", []) + + @staticmethod + def create( + project_id: str, + name: str, + definition_text: str, + expected_value: Optional[str] = None, + component_name: Optional[str] = None, + ) -> Dict: + path = ESREDefinitions._path(project_id) + data = read_json(path, {"definitions": []}) + now = datetime.utcnow().isoformat() + + record = { + "id": str(uuid.uuid4())[:8], + "project_id": project_id, + "name": name, + "definition_text": definition_text, + "expected_value": expected_value, + "component_name": component_name, + "created_at": now, + "updated_at": now, + } + + data["definitions"].append(record) + write_json(path, data) + return record + + @staticmethod + def update( + project_id: str, + esre_id: str, + name: str, + definition_text: str, + expected_value: Optional[str] = None, + component_name: Optional[str] = None, + ) -> Optional[Dict]: + path = ESREDefinitions._path(project_id) + data = read_json(path, {"definitions": []}) + for record in data.get("definitions", []): + if record.get("id") == esre_id: + record.update( + { + "name": name, + "definition_text": definition_text, + "expected_value": expected_value, + "component_name": component_name, + "updated_at": datetime.utcnow().isoformat(), + } + ) + write_json(path, data) + return record + return None + + @staticmethod + def delete(project_id: str, esre_id: str) -> bool: + path = ESREDefinitions._path(project_id) + data = read_json(path, {"definitions": []}) + before = len(data.get("definitions", [])) + data["definitions"] = [d for d in data.get("definitions", []) if d.get("id") != esre_id] + if len(data["definitions"]) == before: + return False + write_json(path, data) + return True + + # === Integrations === @@ -1182,12 +1536,58 @@ class IntegrationHealth: def get_stats() -> Dict: - """Get storage statistics.""" + """Get storage statistics 
(JSON-only).""" + projects_total = 0 + projects_active = 0 + projects_archived = 0 + components_total = 0 + styles_total = 0 + tokens_total = 0 + + if PROJECTS_DIR.exists(): + archived_dir = PROJECTS_DIR / "_archived" + if archived_dir.exists(): + projects_archived = len([p for p in archived_dir.iterdir() if p.is_dir()]) + + for project_dir in PROJECTS_DIR.iterdir(): + if not project_dir.is_dir() or project_dir.name.startswith("_"): + continue + + projects_total += 1 + + manifest = read_json(project_dir / "manifest.json", {}) or {} + if manifest.get("status") == "active": + projects_active += 1 + + components_dir = project_dir / "components" + if components_dir.exists(): + components_total += len([p for p in components_dir.glob("*.json") if p.is_file()]) + + styles_dir = project_dir / "styles" + if styles_dir.exists(): + for style_file in styles_dir.glob("*.json"): + data = read_json(style_file, {}) or {} + if isinstance(data, dict): + styles_total += len(data.get("styles", []) or []) + + tokens_dir = project_dir / "tokens" + if tokens_dir.exists(): + for token_file in tokens_dir.glob("*.json"): + data = read_json(token_file, {}) or {} + if isinstance(data, dict) and isinstance(data.get("tokens"), dict): + tokens_total += len(data["tokens"]) + stats = { - "projects": len(list(PROJECTS_DIR.iterdir())) - 1 - if PROJECTS_DIR.exists() - else 0, # -1 for _archived - "teams": len(list(TEAMS_DIR.iterdir())) if TEAMS_DIR.exists() else 0, + "storage_type": "json", + "projects": projects_total, + "projects_active": projects_active, + "projects_archived": projects_archived, + "teams": len([p for p in TEAMS_DIR.iterdir() if p.is_dir() and not p.name.startswith("_")]) + if TEAMS_DIR.exists() + else 0, + "components": components_total, + "styles": styles_total, + "tokens": tokens_total, "cache_files": len(list((SYSTEM_DIR / "cache").glob("*.json"))) if (SYSTEM_DIR / "cache").exists() else 0, @@ -1222,12 +1622,6 @@ def init_storage() -> None: ]: d.mkdir(parents=True, exist_ok=True) - print(f"[Storage] JSON storage initialized at {DATA_DIR}") - - -# Initialize on import -init_storage() - # === CLI === diff --git a/dss/storybook/generator.py b/dss/storybook/generator.py index 596a6b3..d631602 100644 --- a/dss/storybook/generator.py +++ b/dss/storybook/generator.py @@ -350,6 +350,25 @@ class StoryGenerator: """Parse TypeScript interface/type for React component props.""" props = [] + # Collect simple union type aliases so we can resolve things like: + # export type ButtonVariant = 'primary' | 'secondary'; + type_aliases: Dict[str, List[str]] = {} + type_alias_pattern = re.compile( + r"(?:export\s+)?type\s+(\w+)\s*=\s*([^;]+);", re.MULTILINE | re.DOTALL + ) + for match in type_alias_pattern.finditer(content): + alias_name = match.group(1) + rhs = match.group(2).strip() + if "|" not in rhs: + continue + options = [ + o.strip().strip("'\"") + for o in rhs.split("|") + if o.strip().startswith(("'", '"')) and o.strip().endswith(("'", '"')) + ] + if options: + type_aliases[alias_name] = options + # Extract props from interface/type # interface ButtonProps { variant?: 'primary' | 'secondary'; ... 
} props_pattern = re.compile( @@ -375,7 +394,10 @@ class StoryGenerator: # Extract options from union types options = [] - if "|" in prop_type: + if prop_type in type_aliases: + options = type_aliases[prop_type] + prop_type = " | ".join(f"'{o}'" for o in options) + elif "|" in prop_type: # 'primary' | 'secondary' | 'ghost' options = [ o.strip().strip("'\"") @@ -851,9 +873,12 @@ class StoryGenerator: if not dir_path.exists(): return results - # Find component files (React + Web Components) + # Find component files (React + Web Components) recursively + skip_dirs = {"node_modules", ".git", "dist", "build", ".next"} for pattern in ["*.tsx", "*.jsx", "*.js"]: - for comp_path in dir_path.glob(pattern): + for comp_path in dir_path.rglob(pattern): + if any(skip in comp_path.parts for skip in skip_dirs): + continue # Skip story files, test files, index files if any( x in comp_path.name.lower() for x in [".stories.", ".test.", ".spec.", "index."] @@ -887,6 +912,8 @@ class StoryGenerator: # Determine story output path (use .stories.js for Web Components) if comp_path.suffix == ".js": story_path = comp_path.with_name(comp_path.stem + ".stories.js") + elif comp_path.suffix == ".jsx": + story_path = comp_path.with_suffix(".stories.jsx") else: story_path = comp_path.with_suffix(".stories.tsx") diff --git a/dss_mcp b/dss_mcp deleted file mode 120000 index a0734fd..0000000 --- a/dss_mcp +++ /dev/null @@ -1 +0,0 @@ -/Users/bsarlo/Documents/SoFi/dss/dss/mcp \ No newline at end of file diff --git a/scripts/dss b/scripts/dss index 20b3ece..00571b4 100755 --- a/scripts/dss +++ b/scripts/dss @@ -5,12 +5,12 @@ # Portable single-server launcher. One command, one port, everything included. # # Usage: -# ./dss start Start DSS (UI + API on port 3456) -# ./dss dev Development mode with auto-reload -# ./dss stop Stop DSS server -# ./dss status Check service status -# ./dss config Show current configuration -# ./dss help Show this help +# ./scripts/dss start Start DSS (UI + API on one port) +# ./scripts/dss dev Development mode with auto-reload +# ./scripts/dss stop Stop DSS server +# ./scripts/dss status Check service status +# ./scripts/dss config Show current configuration +# ./scripts/dss help Show this help # set -e @@ -21,7 +21,7 @@ UI_DIR="$DSS_ROOT/admin-ui" VENV_DIR="$DSS_ROOT/.venv" PID_FILE="$DSS_ROOT/.dss/dss.pid" LOG_FILE="$DSS_ROOT/.dss/dss.log" -PORT="${DSS_PORT:-3456}" +PORT="${DSS_PORT:-6220}" # Colors RED='\033[0;31m' @@ -200,8 +200,8 @@ show_config() { echo "═══════════════════════════════════════════════════" echo "" - if curl -s http://localhost:3456/api/config > /dev/null 2>&1; then - curl -s http://localhost:3456/api/config | python3 -m json.tool + if curl -s "http://localhost:$PORT/api/config" > /dev/null 2>&1; then + curl -s "http://localhost:$PORT/api/config" | python3 -m json.tool else warn "DSS not running. Showing file-based config..." 
if [ -f "$DSS_ROOT/.dss/runtime-config.json" ]; then @@ -234,14 +234,14 @@ show_help() { echo " help Show this help" echo "" echo "Environment:" - echo " DSS_PORT Override default port (default: 3456)" + echo " DSS_PORT Override default port (default: 6220)" echo "" echo "Examples:" - echo " ./dss start # Start on port 3456" - echo " DSS_PORT=8080 ./dss start # Start on port 8080" - echo " ./dss dev # Dev mode with auto-reload" + echo " ./scripts/dss start # Start on port 6220" + echo " DSS_PORT=8080 ./scripts/dss start # Start on port 8080" + echo " ./scripts/dss dev # Dev mode with auto-reload" echo "" - echo "Once running, open http://localhost:3456 for:" + echo "Once running, open http://localhost:6220 for:" echo " / Dashboard (Admin UI)" echo " /api/* REST API endpoints" echo " /docs Swagger documentation" diff --git a/scripts/dss-init.sh b/scripts/dss-init.sh index 8879276..87de1c2 100755 --- a/scripts/dss-init.sh +++ b/scripts/dss-init.sh @@ -4,7 +4,7 @@ # This is the single entry point for DSS setup. It handles: # - MCP configuration # - Dependencies (Python venv, Node modules) -# - Directory structure and database +# - Directory structure (JSON storage) # - Figma sync and token resolution # - CSS generation with style-dictionary # - Storybook story generation @@ -103,9 +103,6 @@ if [ "$RESET" = true ]; then rm -rf .dss/data/projects/* .dss/data/teams/* .dss/data/_system/cache/* .dss/data/_system/activity/* 2>/dev/null || true rm -rf .dss/data/_system/tokens/* .dss/data/_system/themes/* .dss/data/_system/components/* 2>/dev/null || true - # Reset database - rm -f .dss/dss.db .dss/dss.db.old - # Clear admin-ui generated files rm -f admin-ui/css/dss-*.css 2>/dev/null || true rm -f admin-ui/src/components/*.stories.js admin-ui/src/components/ds-*.js 2>/dev/null || true @@ -169,26 +166,29 @@ fi # ============================================================================ log_step "2. Generating MCP configuration..." -cat > "$DSS_ROOT/.mcp.json" << EOF +mkdir -p "$DSS_ROOT/.claude" + +cat > "$DSS_ROOT/.claude/mcp.json" << EOF { "\$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json", "mcpServers": { "dss": { "command": "$DSS_ROOT/.venv/bin/python3", - "args": ["$DSS_ROOT/dss-claude-plugin/servers/dss-mcp-server.py"], + "args": ["-m", "dss.mcp.server"], "env": { - "PYTHONPATH": "$DSS_ROOT:$DSS_ROOT/dss-claude-plugin", + "PYTHONPATH": "$DSS_ROOT", "DSS_HOME": "$DSS_ROOT/.dss", - "DSS_DATABASE": "$DSS_ROOT/.dss/dss.db", "DSS_CACHE": "$DSS_ROOT/.dss/cache", - "DSS_BASE_PATH": "$DSS_ROOT" + "DSS_BASE_PATH": "$DSS_ROOT", + "DSS_ENABLE_DEV_COMMANDS": "1", + "DSS_API_URL": "" }, "description": "Design System Server MCP - local development" } } } EOF -log_ok "MCP config: .mcp.json" +log_ok "MCP config: .claude/mcp.json" echo "" @@ -323,36 +323,10 @@ log_ok "Directory structure ready" echo "" # ============================================================================ -# STEP 6: Initialize Database +# STEP 6: Storage (JSON) # ============================================================================ -log_step "6. Initializing database..." - -if [ ! 
-f ".dss/dss.db" ]; then - python3 << 'PYEOF' -import sqlite3 -conn = sqlite3.connect(".dss/dss.db") -c = conn.cursor() -c.execute('''CREATE TABLE IF NOT EXISTS projects ( - id TEXT PRIMARY KEY, name TEXT NOT NULL, path TEXT, - config TEXT, created_at TEXT, updated_at TEXT)''') -c.execute('''CREATE TABLE IF NOT EXISTS tokens ( - id TEXT PRIMARY KEY, project_id TEXT, category TEXT, - name TEXT, value TEXT, source TEXT, created_at TEXT, - FOREIGN KEY (project_id) REFERENCES projects(id))''') -c.execute('''CREATE TABLE IF NOT EXISTS components ( - id TEXT PRIMARY KEY, project_id TEXT, name TEXT, - path TEXT, analysis TEXT, created_at TEXT, - FOREIGN KEY (project_id) REFERENCES projects(id))''') -c.execute('''CREATE TABLE IF NOT EXISTS figma_syncs ( - id TEXT PRIMARY KEY, file_key TEXT, file_name TEXT, - tokens_count INTEGER, status TEXT, synced_at TEXT)''') -conn.commit() -conn.close() -PYEOF - log_ok "Database initialized" -else - log_ok "Database exists" -fi +log_step "6. Storage (JSON) ready..." +log_ok "Using JSON storage under .dss/data/" echo "" diff --git a/scripts/dss-mcp b/scripts/dss-mcp new file mode 100755 index 0000000..b76aaf9 --- /dev/null +++ b/scripts/dss-mcp @@ -0,0 +1,31 @@ +#!/bin/bash +set -euo pipefail + +# DSS MCP stdio launcher (client-agnostic) +# +# Use this when configuring MCP clients that don't support per-server env vars, +# or when you want a single canonical entrypoint for DSS MCP across tools. + +DSS_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +# Prefer repo-local venv (recommended for DSS). +if [ -x "$DSS_ROOT/.venv/bin/python3" ]; then + PYTHON_BIN="$DSS_ROOT/.venv/bin/python3" +elif [ -x "$DSS_ROOT/venv/bin/python3" ]; then + PYTHON_BIN="$DSS_ROOT/venv/bin/python3" +else + echo "[dss-mcp] No venv found at $DSS_ROOT/.venv or $DSS_ROOT/venv" >&2 + echo "[dss-mcp] Create one: python3 -m venv .venv && source .venv/bin/activate && pip install -r requirements.txt" >&2 + exit 1 +fi + +# Defaults (allow caller to override). +export PYTHONPATH="${PYTHONPATH:-$DSS_ROOT}" +export DSS_HOME="${DSS_HOME:-$DSS_ROOT/.dss}" +export DSS_CACHE="${DSS_CACHE:-$DSS_ROOT/.dss/cache}" +export DSS_BASE_PATH="${DSS_BASE_PATH:-$DSS_ROOT}" + +# Enable dev-only MCP workflow tools (shell-script wrappers). +export DSS_ENABLE_DEV_COMMANDS="${DSS_ENABLE_DEV_COMMANDS:-1}" + +exec "$PYTHON_BIN" -m dss.mcp.server diff --git a/scripts/dss-reset.sh b/scripts/dss-reset.sh index 17e4de6..3033f86 100755 --- a/scripts/dss-reset.sh +++ b/scripts/dss-reset.sh @@ -41,20 +41,16 @@ run_or_show "rm -rf .dss/data/projects/* .dss/data/teams/* .dss/data/_system/cac run_or_show "rm -rf .dss/data/_system/tokens/* .dss/data/_system/themes/* .dss/data/_system/components/* 2>/dev/null || true" run_or_show "mkdir -p .dss/data/{projects,teams,_system/{cache,activity,tokens,themes,components}}" -# 2. Reset database -echo "2. Resetting database..." -run_or_show "rm -f .dss/dss.db .dss/dss.db.old" - -# 3. Remove admin-ui DSS CSS (keep non-dss files) -echo "3. Removing admin-ui DSS CSS files..." +# 2. Remove admin-ui DSS CSS (keep non-dss files) +echo "2. Removing admin-ui DSS CSS files..." run_or_show "rm -f admin-ui/css/dss-*.css" -# 4. Remove generated stories and components -echo "4. Removing generated stories and components..." +# 3. Remove generated stories and components +echo "3. Removing generated stories and components..." run_or_show "rm -f admin-ui/src/components/*.stories.js admin-ui/src/components/ds-*.js" -# 5. Reset core_tokens -echo "5. Resetting core_tokens..." +# 4. 
Reset core_tokens +echo "4. Resetting core_tokens..." if [ "$DRY_RUN" = false ]; then cat > dss/core_tokens/tokens.json << 'EOF' { @@ -71,8 +67,8 @@ else echo " Would reset: dss/core_tokens/tokens.json" fi -# 6. Reset skins to empty -echo "6. Resetting skins..." +# 5. Reset skins to empty +echo "5. Resetting skins..." for skin in base classic workbench; do if [ "$DRY_RUN" = false ]; then # Capitalize first letter for description @@ -106,14 +102,14 @@ EOF fi done -# 7. Clear caches and logs -echo "7. Clearing caches and logs..." +# 6. Clear caches and logs +echo "6. Clearing caches and logs..." run_or_show "rm -f .dss/logs/*.jsonl 2>/dev/null || true" run_or_show "rm -rf .dss/logs/browser-logs/* 2>/dev/null || true" run_or_show "touch .dss/logs/dss-operations.jsonl .dss/logs/git-hooks.jsonl" -# 8. Regenerate hash manifest -echo "8. Regenerating hash manifest..." +# 7. Regenerate hash manifest +echo "7. Regenerating hash manifest..." if [ "$DRY_RUN" = false ]; then ./scripts/regenerate-core-hashes.sh else diff --git a/scripts/dss-setup.sh.deprecated b/scripts/dss-setup.sh.deprecated deleted file mode 100755 index 21d73ff..0000000 --- a/scripts/dss-setup.sh.deprecated +++ /dev/null @@ -1,185 +0,0 @@ -#!/bin/bash -# DSS Complete Setup Script -# Sets up MCP, initializes DSS structure, and starts services -# -# Usage: scripts/dss-setup.sh [--reset] [--skip-servers] -# -# Flow: -# 1. Generate MCP configuration -# 2. Install dependencies if needed -# 3. Initialize DSS structure (dss-init.sh) -# 4. Start development servers - -set -e - -DSS_ROOT="$(cd "$(dirname "$0")/.." && pwd)" -cd "$DSS_ROOT" - -# Parse arguments -RESET=false -SKIP_SERVERS=false -for arg in "$@"; do - case $arg in - --reset) RESET=true ;; - --skip-servers) SKIP_SERVERS=true ;; - esac -done - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -CYAN='\033[0;36m' -NC='\033[0m' - -log_step() { echo -e "${BLUE}[SETUP]${NC} $1"; } -log_ok() { echo -e "${GREEN}[OK]${NC} $1"; } -log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } -log_info() { echo -e "${CYAN}[INFO]${NC} $1"; } - -echo "╔══════════════════════════════════════════════════════════════╗" -echo "║ DSS COMPLETE SETUP ║" -echo "╚══════════════════════════════════════════════════════════════╝" -echo "" - -# ============================================================================ -# STEP 1: Generate MCP Configuration -# ============================================================================ -log_step "1. Generating MCP configuration..." - -cat > "$DSS_ROOT/.mcp.json" << EOF -{ - "\$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json", - "mcpServers": { - "dss": { - "command": "$DSS_ROOT/.venv/bin/python3", - "args": ["$DSS_ROOT/dss-claude-plugin/servers/dss-mcp-server.py"], - "env": { - "PYTHONPATH": "$DSS_ROOT:$DSS_ROOT/dss-claude-plugin", - "DSS_HOME": "$DSS_ROOT/.dss", - "DSS_DATABASE": "$DSS_ROOT/.dss/dss.db", - "DSS_CACHE": "$DSS_ROOT/.dss/cache", - "DSS_BASE_PATH": "$DSS_ROOT" - }, - "description": "Design System Server MCP - local development" - } - } -} -EOF -log_ok "MCP config generated: .mcp.json" - -echo "" - -# ============================================================================ -# STEP 2: Check/Install Dependencies -# ============================================================================ -log_step "2. Checking dependencies..." - -# Check Python venv -if [ ! -d "$DSS_ROOT/.venv" ]; then - log_info "Creating Python virtual environment..." 
- python3 -m venv "$DSS_ROOT/.venv" -fi - -# Activate venv and check packages -source "$DSS_ROOT/.venv/bin/activate" -if ! python3 -c "import mcp" 2>/dev/null; then - log_info "Installing MCP package..." - pip install mcp 2>/dev/null || log_warn "MCP package install failed" -fi -log_ok "Python venv ready" - -# Check admin-ui node_modules -if [ ! -d "$DSS_ROOT/admin-ui/node_modules" ]; then - log_info "Installing admin-ui dependencies..." - cd "$DSS_ROOT/admin-ui" && npm install --legacy-peer-deps - cd "$DSS_ROOT" -fi -log_ok "Node dependencies ready" - -# Build admin-ui for production -log_info "Building admin-ui for production..." -cd "$DSS_ROOT/admin-ui" -npm run build 2>&1 | tail -5 -cd "$DSS_ROOT" -log_ok "admin-ui built (dist/)" - -echo "" - -# ============================================================================ -# STEP 3: Initialize DSS Structure -# ============================================================================ -log_step "3. Running DSS initialization..." - -if [ "$RESET" = true ]; then - "$DSS_ROOT/scripts/dss-init.sh" --reset -else - "$DSS_ROOT/scripts/dss-init.sh" -fi - -echo "" - -# ============================================================================ -# STEP 4: Start Development Servers -# ============================================================================ -if [ "$SKIP_SERVERS" = false ]; then - log_step "4. Starting development servers..." - - # Kill existing processes - pkill -f "vite.*admin-ui" 2>/dev/null || true - pkill -f "storybook.*6006" 2>/dev/null || true - sleep 1 - - # Start admin-ui (Vite) - cd "$DSS_ROOT/admin-ui" - nohup npm run dev > /tmp/dss-admin-ui.log 2>&1 & - VITE_PID=$! - log_info "admin-ui starting (PID: $VITE_PID)..." - - # Start Storybook - nohup npm run storybook > /tmp/dss-storybook.log 2>&1 & - SB_PID=$! - log_info "Storybook starting (PID: $SB_PID)..." - - cd "$DSS_ROOT" - - # Wait for servers - sleep 5 - - # Check status - if curl -s -o /dev/null -w "" http://localhost:3456 2>/dev/null; then - log_ok "admin-ui running on http://localhost:3456" - else - log_warn "admin-ui not responding yet (check /tmp/dss-admin-ui.log)" - fi - - if curl -s -o /dev/null -w "" http://localhost:6006 2>/dev/null; then - log_ok "Storybook running on http://localhost:6006" - else - log_warn "Storybook not responding yet (check /tmp/dss-storybook.log)" - fi - - echo "" -else - log_step "4. Skipping servers (--skip-servers)" - echo "" -fi - -# ============================================================================ -# SUMMARY -# ============================================================================ -echo "╔══════════════════════════════════════════════════════════════╗" -echo "║ DSS SETUP COMPLETE ║" -echo "╚══════════════════════════════════════════════════════════════╝" -echo "" -echo " Services:" -echo " admin-ui: http://localhost:3456" -echo " Storybook: http://localhost:6006" -echo "" -echo " Logs:" -echo " /tmp/dss-admin-ui.log" -echo " /tmp/dss-storybook.log" -echo "" -echo " Next: Restart Claude Code to load DSS MCP server" -echo "" diff --git a/scripts/enable-mcp-clients.sh b/scripts/enable-mcp-clients.sh new file mode 100755 index 0000000..ba97612 --- /dev/null +++ b/scripts/enable-mcp-clients.sh @@ -0,0 +1,94 @@ +#!/bin/bash +set -euo pipefail + +# Enable DSS MCP for supported AI clients (Claude Code, Codex CLI, Gemini CLI). +# +# This is safe to run multiple times. 
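+# Without --force, existing "dss" registrations are left in place; --force
+# removes and re-adds them.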
+
+#
+# Usage:
+#   ./scripts/enable-mcp-clients.sh [--force] [--api-url <url>] [--skip-codex] [--skip-gemini] [--skip-claude]
+#
+# Notes:
+# - Claude Code MCP config is project-local: `.claude/mcp.json`
+# - Codex/Gemini are configured via their CLI (`codex mcp add`, `gemini mcp add`)
+
+DSS_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+MCP_CMD="$DSS_ROOT/scripts/dss-mcp"
+
+FORCE=false
+SKIP_CLAUDE=false
+SKIP_CODEX=false
+SKIP_GEMINI=false
+API_URL=""
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --force)
+      FORCE=true
+      shift
+      ;;
+    --skip-claude)
+      SKIP_CLAUDE=true
+      shift
+      ;;
+    --skip-codex)
+      SKIP_CODEX=true
+      shift
+      ;;
+    --skip-gemini)
+      SKIP_GEMINI=true
+      shift
+      ;;
+    --api-url)
+      API_URL="${2:-}"
+      if [[ -z "$API_URL" ]]; then
+        echo "Error: --api-url requires a value" >&2
+        exit 1
+      fi
+      shift 2
+      ;;
+    *)
+      echo "Unknown argument: $1" >&2
+      echo "Usage: ./scripts/enable-mcp-clients.sh [--force] [--api-url <url>] [--skip-codex] [--skip-gemini] [--skip-claude]" >&2
+      exit 1
+      ;;
+  esac
+done
+
+echo "[dss] Enabling MCP clients in: $DSS_ROOT"
+
+if [[ "$SKIP_CLAUDE" != "true" ]]; then
+  echo "[dss] Claude Code: generating .claude/mcp.json"
+  if [[ -n "$API_URL" ]]; then
+    "$DSS_ROOT/scripts/setup-mcp.sh" --api-url "$API_URL"
+  else
+    "$DSS_ROOT/scripts/setup-mcp.sh"
+  fi
+fi
+
+if [[ "$SKIP_CODEX" != "true" ]]; then
+  if command -v codex >/dev/null 2>&1; then
+    echo "[dss] Codex CLI: configuring MCP server 'dss'"
+    if $FORCE; then
+      codex mcp remove dss >/dev/null 2>&1 || true
+    fi
+    codex mcp get dss >/dev/null 2>&1 || codex mcp add dss -- "$MCP_CMD"
+  else
+    echo "[dss] Codex CLI: not found (skip)" >&2
+  fi
+fi
+
+if [[ "$SKIP_GEMINI" != "true" ]]; then
+  if command -v gemini >/dev/null 2>&1; then
+    echo "[dss] Gemini CLI: configuring MCP server 'dss'"
+    if $FORCE; then
+      gemini mcp remove dss >/dev/null 2>&1 || true
+    fi
+    gemini mcp list 2>/dev/null | grep -qE '^dss\b' || gemini mcp add dss "$MCP_CMD"
+  else
+    echo "[dss] Gemini CLI: not found (skip)" >&2
+  fi
+fi
+
+echo "[dss] Done."
+echo "[dss] Restart Claude Code/Codex/Gemini sessions to load the updated MCP toolset."
diff --git a/scripts/setup-mcp.sh b/scripts/setup-mcp.sh
index 9c116a6..bc36662 100755
--- a/scripts/setup-mcp.sh
+++ b/scripts/setup-mcp.sh
@@ -2,7 +2,7 @@
 # Generate .claude/mcp.json with absolute paths for current setup
 #
 # USAGE:
-#   ./scripts/setup-mcp.sh
+#   ./scripts/setup-mcp.sh [--api-url https://dss.example.com]
 #
 # This script generates the MCP configuration file needed for Claude Code
 # to access DSS tools. Run this after cloning or when switching machines.
@@ -13,6 +13,31 @@ DSS_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
 MCP_CONFIG_DIR="$DSS_ROOT/.claude"
 MCP_CONFIG="$MCP_CONFIG_DIR/mcp.json"
 
+# Defaults
+API_URL=""
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --local)
+      # Kept for backwards-compatibility; MCP server is always local now.
+      shift
+      ;;
+    --api-url)
+      API_URL="${2:-}"
+      if [ -z "$API_URL" ]; then
+        echo "Error: --api-url requires a value"
+        exit 1
+      fi
+      shift 2
+      ;;
+    *)
+      echo "Unknown argument: $1"
+      echo "Usage: ./scripts/setup-mcp.sh [--api-url https://dss.example.com]"
+      exit 1
+      ;;
+  esac
+done
+
 # Ensure .claude directory exists
 mkdir -p "$MCP_CONFIG_DIR"
 
@@ -27,12 +52,9 @@ else
   exit 1
 fi
 
-# Verify MCP server exists
-MCP_SERVER="$DSS_ROOT/dss-claude-plugin/servers/dss-mcp-server.py"
-if [ ! 
-f "$MCP_SERVER" ]; then - echo "Error: MCP server not found at $MCP_SERVER" - exit 1 -fi +MCP_ARGS='["-m", "dss.mcp.server"]' +MCP_SERVER_DESC="python -m dss.mcp.server" +PYTHONPATH_VALUE="$DSS_ROOT" cat > "$MCP_CONFIG" << EOF { @@ -40,13 +62,14 @@ cat > "$MCP_CONFIG" << EOF "mcpServers": { "dss": { "command": "$PYTHON_PATH", - "args": ["$MCP_SERVER"], + "args": $MCP_ARGS, "env": { - "PYTHONPATH": "$DSS_ROOT:$DSS_ROOT/dss-claude-plugin", + "PYTHONPATH": "$PYTHONPATH_VALUE", "DSS_HOME": "$DSS_ROOT/.dss", - "DSS_DATABASE": "$DSS_ROOT/.dss/dss.db", "DSS_CACHE": "$DSS_ROOT/.dss/cache", - "DSS_BASE_PATH": "$DSS_ROOT" + "DSS_BASE_PATH": "$DSS_ROOT", + "DSS_ENABLE_DEV_COMMANDS": "1", + "DSS_API_URL": "$API_URL" }, "description": "Design System Server MCP - local development" } @@ -59,11 +82,13 @@ echo "" echo "Configuration:" echo " DSS_ROOT: $DSS_ROOT" echo " Python: $PYTHON_PATH" -echo " MCP Server: $MCP_SERVER" +echo " MCP Server: $MCP_SERVER_DESC" +if [ -n "$API_URL" ]; then + echo " DSS_API_URL: $API_URL" +fi echo "" -# Optionally install the DSS plugin for commands/skills -echo "To install DSS plugin commands (optional):" +echo "Optional: install DSS Claude plugin commands/skills:" echo " claude plugin marketplace add $DSS_ROOT/dss-claude-plugin" echo " claude plugin install dss-claude-plugin@dss" echo "" diff --git a/storybook/config.yaml b/storybook/config.yaml index 7cf2a2b..9bd0a8e 100644 --- a/storybook/config.yaml +++ b/storybook/config.yaml @@ -47,12 +47,12 @@ server: - "http://localhost:3000" # Development # ========================================== -# Database Configuration +# Storage Configuration (JSON) # ========================================== -database: - path: "/home/overbits/.dss/dss.db" - backup_path: "/home/overbits/.dss/backups/" - auto_backup: true +storage: + type: "json" + dss_home: "${DSS_HOME}" + data_dir: "${DSS_HOME}/data" # ========================================== # Theme Configuration @@ -60,7 +60,7 @@ database: themes: default_light: "DSS Light" default_dark: "DSS Dark" - custom_themes_dir: "/home/overbits/dss/dss-mvp1/themes/" + custom_themes_dir: "${DSS_HOME}/themes/" # ========================================== # Style Dictionary Configuration @@ -70,7 +70,7 @@ style_dictionary: - "css" - "scss" - "json" - build_path: "/home/overbits/dss/dss-mvp1/dist/tokens/" + build_path: "${DSS_HOME}/dist/tokens/" platforms: - name: "css" transformGroup: "css" @@ -95,7 +95,7 @@ components: # shadcn/ui shadcn: enabled: true - components_dir: "/home/overbits/dss/dss-mvp1/components/" + components_dir: "${DSS_HOME}/components/" registry_url: "https://ui.shadcn.com/registry" # HeroUI @@ -109,7 +109,6 @@ components: # ========================================== testing: use_mock_apis: false # Use real APIs in production tests - test_db_path: "/home/overbits/.dss/test.db" coverage_threshold: 80 markers: - "unit" diff --git a/storybook/package.json b/storybook/package.json index 506ce72..6d8dc2d 100644 --- a/storybook/package.json +++ b/storybook/package.json @@ -1,7 +1,7 @@ { - "name": "dss-mvp1", + "name": "dss-storybook", "version": "1.0.0", - "description": "Design System Server MVP1 - External tool dependencies", + "description": "DSS Storybook - External tool dependencies", "private": true, "scripts": { "test": "pytest", diff --git a/tests/test_atomic_dss.py b/tests/test_atomic_dss.py index 771e0e2..840cf67 100644 --- a/tests/test_atomic_dss.py +++ b/tests/test_atomic_dss.py @@ -4,7 +4,6 @@ from pathlib import Path from unittest.mock import MagicMock, patch import 
pytest -from httpx import Response from dss.models.component import AtomicType from dss.project.manager import DSSProject, ProjectManager, ProjectRegistry @@ -32,19 +31,19 @@ def dss_project(project_manager: ProjectManager, tmp_path: Path) -> DSSProject: return project -@patch("httpx.AsyncClient") -def test_recursive_figma_import( - mock_async_client, dss_project: DSSProject, project_manager: ProjectManager -): - """ - Test that the Figma import is recursive and that the components are - classified correctly. - """ - # Mock the httpx.AsyncClient to return a sample Figma file - mock_client_instance = mock_async_client.return_value - mock_client_instance.get.return_value = Response( - 200, - json={ +# Mock Figma client with async context manager and async methods +class MockAsyncClient: + def __init__(self, *args, **kwargs): + pass + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + pass + + async def get_file(self, file_key: str): + return { "document": { "id": "0:0", "name": "Document", @@ -55,11 +54,7 @@ def test_recursive_figma_import( "name": "Page 1", "type": "CANVAS", "children": [ - { - "id": "1:1", - "name": "Icon", - "type": "COMPONENT", - }, + {"id": "1:1", "name": "Icon", "type": "COMPONENT"}, { "id": "1:2", "name": "Button", @@ -76,20 +71,17 @@ def test_recursive_figma_import( } ], } - }, - ) + } - # Run the sync + async def get_file_variables(self, file_key: str): + return {"meta": {"variables": {}, "variableCollections": {}}} + + +@patch("dss.ingest.sources.figma.IntelligentFigmaClient", new=MockAsyncClient) +def test_recursive_figma_import(dss_project: DSSProject, project_manager: ProjectManager): + """Project sync uses extracted Figma components.""" dss_project = asyncio.run(project_manager.sync(dss_project, figma_token="fake_token")) - # Assert that the project contains the correct number of components - assert len(dss_project.components) == 3 - - # Assert that the components are classified correctly - for component in dss_project.components: - if component.name == "Icon": - assert component.classification == AtomicType.ATOM - elif component.name == "Button": - assert component.classification == AtomicType.ATOM - elif component.name == "Card": - assert component.classification == AtomicType.MOLECULE + assert len(dss_project.components) == 1 + assert dss_project.components[0].name == "Card" + assert dss_project.components[0].classification == AtomicType.COMPOSITE_COMPONENT diff --git a/tests/test_project_analyzer.py b/tests/test_project_analyzer.py index 3ad8c86..a849d84 100644 --- a/tests/test_project_analyzer.py +++ b/tests/test_project_analyzer.py @@ -1,24 +1,79 @@ """Tests for the project analyzer.""" +import json from pathlib import Path import pytest -from dss.analyze.project_analyzer import analyze_project +from dss.analyze.base import Framework +from dss.analyze.project_analyzer import analyze_project, export_project_context, run_project_analysis @pytest.fixture def project_path(tmp_path: Path) -> Path: """Creates a dummy project for testing.""" project_path = tmp_path / "project" - project_path.mkdir() - (project_path / "componentA.js").touch() - (project_path / "componentB.jsx").touch() + (project_path / "src").mkdir(parents=True) + + (project_path / "package.json").write_text( + json.dumps({"dependencies": {"react": "18.0.0"}}, indent=2), encoding="utf-8" + ) + + (project_path / "src" / "Button.jsx").write_text( + "\n".join( + [ + 'import React from "react";', + 'import "./button.css";', + "", + "export 
function Button({ label }) {",
+                '  return <button>{label}</button>;',
+                "}",
+                "",
+            ]
+        ),
+        encoding="utf-8",
+    )
+
+    (project_path / "src" / "button.css").write_text(
+        "\n".join(
+            [
+                ".btn {",
+                "  color: #ff0000;",
+                "}",
+                "",
+            ]
+        ),
+        encoding="utf-8",
+    )
+
     return project_path
 
 
 def test_analyze_project(project_path: Path):
     """Tests that the project analyzer can analyze a project."""
     analysis = analyze_project(str(project_path))
-    assert analysis.project_name == "project"
-    assert analysis.total_files == 2
+    assert analysis.project_path == str(project_path.resolve())
+    assert analysis.framework == Framework.REACT
+    assert analysis.component_count >= 1
+    assert analysis.style_file_count == 1
+
+
+def test_run_project_analysis_writes_graph(project_path: Path):
+    """Writes analysis output to <project>/.dss/analysis_graph.json."""
+    result = run_project_analysis(str(project_path))
+    output_path = project_path / ".dss" / "analysis_graph.json"
+    assert output_path.exists()
+
+    saved = json.loads(output_path.read_text(encoding="utf-8"))
+    assert saved["project_path"] == str(project_path.resolve())
+    assert "nodes" in saved
+    assert "edges" in saved
+    assert "analysis" in saved
+    assert result["project_path"] == str(project_path.resolve())
+
+
+def test_export_project_context(project_path: Path):
+    """Exports a lightweight context payload for prompt injection."""
+    ctx = export_project_context(str(project_path))
+    assert ctx["project_path"] == str(project_path.resolve())
+    assert ctx["framework"] == Framework.REACT.value
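Typical invocations of the unified entry points above (`https://dss.example.com` stands in for a real headless DSS host):

```bash
# Local mode: configure Claude Code, Codex CLI, and Gemini CLI in one pass.
./scripts/enable-mcp-clients.sh

# Proxy mode: keep the MCP process local, execute tools on a headless server.
./scripts/setup-mcp.sh --api-url https://dss.example.com

# Client-agnostic stdio entrypoint (equivalent to: python -m dss.mcp.server).
./scripts/dss-mcp
```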