diff --git a/admin-ui/theme.json b/admin-ui/theme.json
index 3ee700f..fb1a148 100644
--- a/admin-ui/theme.json
+++ b/admin-ui/theme.json
@@ -5,7 +5,7 @@
"author": "DSS Team",
"license": "MIT",
"homepage": "https://github.com/anthropics/dss",
-
+
"metadata": {
"type": "design-system",
"architecture": "layered-css",
diff --git a/apps/api/ai_providers.py b/apps/api/ai_providers.py
index 7ae497b..cf62185 100644
--- a/apps/api/ai_providers.py
+++ b/apps/api/ai_providers.py
@@ -1,17 +1,18 @@
"""
-AI Provider abstraction for Claude and Gemini
+AI Provider abstraction for Claude and Gemini.
+
Handles model-specific API calls and tool execution
"""
-import os
-import json
import asyncio
-from typing import List, Dict, Any, Optional
+import json
+import os
from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional
class AIProvider(ABC):
- """Abstract base class for AI providers"""
+ """Abstract base class for AI providers."""
@abstractmethod
async def chat(
@@ -20,7 +21,7 @@ class AIProvider(ABC):
system_prompt: str,
history: List[Dict[str, Any]],
tools: Optional[List[Dict[str, Any]]] = None,
- temperature: float = 0.7
+ temperature: float = 0.7,
) -> Dict[str, Any]:
"""
Send a chat message and get response
@@ -36,16 +37,17 @@ class AIProvider(ABC):
class ClaudeProvider(AIProvider):
- """Anthropic Claude provider"""
+ """Anthropic Claude provider."""
def __init__(self):
self.api_key = os.getenv("ANTHROPIC_API_KEY")
self.default_model = "claude-sonnet-4-5-20250929"
def is_available(self) -> bool:
- """Check if Claude is available"""
+ """Check if Claude is available."""
try:
from anthropic import Anthropic
+
return bool(self.api_key)
except ImportError:
return False
@@ -58,9 +60,9 @@ class ClaudeProvider(AIProvider):
tools: Optional[List[Dict[str, Any]]] = None,
temperature: float = 0.7,
mcp_handler=None,
- mcp_context=None
+ mcp_context=None,
) -> Dict[str, Any]:
- """Chat with Claude"""
+ """Chat with Claude."""
if not self.is_available():
return {
@@ -68,7 +70,7 @@ class ClaudeProvider(AIProvider):
"response": "Claude not available. Install anthropic SDK or set ANTHROPIC_API_KEY.",
"model": "error",
"tools_used": [],
- "stop_reason": "error"
+ "stop_reason": "error",
}
from anthropic import Anthropic
@@ -91,17 +93,14 @@ class ClaudeProvider(AIProvider):
"max_tokens": 4096,
"temperature": temperature,
"system": system_prompt,
- "messages": messages
+ "messages": messages,
}
if tools:
api_params["tools"] = tools
# Initial call
- response = await asyncio.to_thread(
- client.messages.create,
- **api_params
- )
+ response = await asyncio.to_thread(client.messages.create, **api_params)
# Handle tool use loop
tools_used = []
@@ -120,16 +119,16 @@ class ClaudeProvider(AIProvider):
# Execute tool via MCP handler
result = await mcp_handler.execute_tool(
- tool_name=tool_name,
- arguments=tool_input,
- context=mcp_context
+ tool_name=tool_name, arguments=tool_input, context=mcp_context
)
- tools_used.append({
- "tool": tool_name,
- "success": result.success,
- "duration_ms": result.duration_ms
- })
+ tools_used.append(
+ {
+ "tool": tool_name,
+ "success": result.success,
+ "duration_ms": result.duration_ms,
+ }
+ )
# Format result
if result.success:
@@ -137,19 +136,20 @@ class ClaudeProvider(AIProvider):
else:
tool_result_content = json.dumps({"error": result.error})
- tool_results.append({
- "type": "tool_result",
- "tool_use_id": tool_use_id,
- "content": tool_result_content
- })
+ tool_results.append(
+ {
+ "type": "tool_result",
+ "tool_use_id": tool_use_id,
+ "content": tool_result_content,
+ }
+ )
# Continue conversation with tool results
messages.append({"role": "assistant", "content": response.content})
messages.append({"role": "user", "content": tool_results})
response = await asyncio.to_thread(
- client.messages.create,
- **{**api_params, "messages": messages}
+ client.messages.create, **{**api_params, "messages": messages}
)
# Extract final response
@@ -163,27 +163,30 @@ class ClaudeProvider(AIProvider):
"response": response_text,
"model": response.model,
"tools_used": tools_used,
- "stop_reason": response.stop_reason
+ "stop_reason": response.stop_reason,
}
class GeminiProvider(AIProvider):
- """Google Gemini provider"""
+ """Google Gemini provider."""
def __init__(self):
self.api_key = os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
self.default_model = "gemini-2.0-flash-exp"
def is_available(self) -> bool:
- """Check if Gemini is available"""
+ """Check if Gemini is available."""
try:
import google.generativeai as genai
+
return bool(self.api_key)
except ImportError:
return False
- def _convert_tools_to_gemini_format(self, claude_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """Convert Claude tool format to Gemini function declarations"""
+ def _convert_tools_to_gemini_format(
+ self, claude_tools: List[Dict[str, Any]]
+ ) -> List[Dict[str, Any]]:
+ """Convert Claude tool format to Gemini function declarations."""
gemini_tools = []
for tool in claude_tools:
@@ -191,11 +194,7 @@ class GeminiProvider(AIProvider):
function_declaration = {
"name": tool.get("name"),
"description": tool.get("description", ""),
- "parameters": {
- "type": "object",
- "properties": {},
- "required": []
- }
+ "parameters": {"type": "object", "properties": {}, "required": []},
}
# Convert input schema
@@ -218,9 +217,9 @@ class GeminiProvider(AIProvider):
tools: Optional[List[Dict[str, Any]]] = None,
temperature: float = 0.7,
mcp_handler=None,
- mcp_context=None
+ mcp_context=None,
) -> Dict[str, Any]:
- """Chat with Gemini"""
+ """Chat with Gemini."""
if not self.is_available():
return {
@@ -228,7 +227,7 @@ class GeminiProvider(AIProvider):
"response": "Gemini not available. Install google-generativeai SDK or set GOOGLE_API_KEY/GEMINI_API_KEY.",
"model": "error",
"tools_used": [],
- "stop_reason": "error"
+ "stop_reason": "error",
}
import google.generativeai as genai
@@ -241,10 +240,9 @@ class GeminiProvider(AIProvider):
role = msg.get("role", "user")
content = msg.get("content", "")
if content and role in ["user", "assistant"]:
- gemini_history.append({
- "role": "user" if role == "user" else "model",
- "parts": [content]
- })
+ gemini_history.append(
+ {"role": "user" if role == "user" else "model", "parts": [content]}
+ )
# Create model with tools if available
model_kwargs = {
@@ -253,7 +251,7 @@ class GeminiProvider(AIProvider):
"temperature": temperature,
"max_output_tokens": 4096,
},
- "system_instruction": system_prompt
+ "system_instruction": system_prompt,
}
# Convert and add tools if available
@@ -282,7 +280,7 @@ class GeminiProvider(AIProvider):
has_function_call = False
for part in response.candidates[0].content.parts:
- if hasattr(part, 'function_call') and part.function_call:
+ if hasattr(part, "function_call") and part.function_call:
has_function_call = True
func_call = part.function_call
tool_name = func_call.name
@@ -290,31 +288,34 @@ class GeminiProvider(AIProvider):
# Execute tool
result = await mcp_handler.execute_tool(
- tool_name=tool_name,
- arguments=tool_args,
- context=mcp_context
+ tool_name=tool_name, arguments=tool_args, context=mcp_context
)
- tools_used.append({
- "tool": tool_name,
- "success": result.success,
- "duration_ms": result.duration_ms
- })
+ tools_used.append(
+ {
+ "tool": tool_name,
+ "success": result.success,
+ "duration_ms": result.duration_ms,
+ }
+ )
# Format result for Gemini
function_response = {
"name": tool_name,
- "response": result.result if result.success else {"error": result.error}
+ "response": result.result
+ if result.success
+ else {"error": result.error},
}
# Send function response back
current_message = genai.protos.Content(
- parts=[genai.protos.Part(
- function_response=genai.protos.FunctionResponse(
- name=tool_name,
- response=function_response
+ parts=[
+ genai.protos.Part(
+ function_response=genai.protos.FunctionResponse(
+ name=tool_name, response=function_response
+ )
)
- )]
+ ]
)
break
@@ -328,7 +329,7 @@ class GeminiProvider(AIProvider):
response_text = ""
if response.candidates and response.candidates[0].content.parts:
for part in response.candidates[0].content.parts:
- if hasattr(part, 'text'):
+ if hasattr(part, "text"):
response_text += part.text
return {
@@ -336,13 +337,13 @@ class GeminiProvider(AIProvider):
"response": response_text,
"model": self.default_model,
"tools_used": tools_used,
- "stop_reason": "stop" if response.candidates else "error"
+ "stop_reason": "stop" if response.candidates else "error",
}
# Factory function
def get_ai_provider(model_name: str) -> AIProvider:
- """Get AI provider by name"""
+ """Get AI provider by name."""
if model_name.lower() in ["gemini", "google"]:
return GeminiProvider()
else:
diff --git a/apps/api/browser_logger.py b/apps/api/browser_logger.py
index 7330bd2..10cd216 100644
--- a/apps/api/browser_logger.py
+++ b/apps/api/browser_logger.py
@@ -1,9 +1,10 @@
-import os
import logging
+import os
from logging.handlers import RotatingFileHandler
+from typing import Any, List, Optional
+
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
-from typing import List, Any, Optional
# --- Configuration ---
# Use project-local logs directory to avoid permission issues
@@ -21,30 +22,29 @@ browser_logger = logging.getLogger("browser_logger")
browser_logger.setLevel(logging.INFO)
# Rotating file handler: 10MB max size, keep last 5 backups
-handler = RotatingFileHandler(LOG_FILE, maxBytes=10*1024*1024, backupCount=5)
-formatter = logging.Formatter(
- '%(asctime)s [%(levelname)s] [BROWSER] %(message)s'
-)
+handler = RotatingFileHandler(LOG_FILE, maxBytes=10 * 1024 * 1024, backupCount=5)
+formatter = logging.Formatter("%(asctime)s [%(levelname)s] [BROWSER] %(message)s")
handler.setFormatter(formatter)
browser_logger.addHandler(handler)
# --- API Router ---
router = APIRouter()
+
class LogEntry(BaseModel):
level: str
timestamp: str
message: str
data: Optional[List[Any]] = None
+
class LogBatch(BaseModel):
logs: List[LogEntry]
+
@router.post("/api/logs/browser")
async def receive_browser_logs(batch: LogBatch):
- """
- Receives a batch of logs from the browser and writes them to the log file.
- """
+ """Receives a batch of logs from the browser and writes them to the log file."""
try:
for log in batch.logs:
# Map browser levels to python logging levels
@@ -52,11 +52,11 @@ async def receive_browser_logs(batch: LogBatch):
log_message = f"[{log.timestamp}] {log.message}"
- if level == 'error':
+ if level == "error":
browser_logger.error(log_message)
- elif level == 'warn':
+ elif level == "warn":
browser_logger.warning(log_message)
- elif level == 'debug':
+ elif level == "debug":
browser_logger.debug(log_message)
else:
browser_logger.info(log_message)
diff --git a/apps/api/server.py b/apps/api/server.py
index 33f8ce6..6d03925 100644
--- a/apps/api/server.py
+++ b/apps/api/server.py
@@ -1,5 +1,5 @@
"""
-DSS API Server
+DSS API Server.
REST API for design system operations.
@@ -16,10 +16,44 @@ Modes:
- Local: Development companion
"""
-# Load environment variables from .env file FIRST (before any other imports)
+import json
import os
+import subprocess
+import sys
+from datetime import datetime
from pathlib import Path
+from typing import Any, Dict, List, Optional
+
from dotenv import load_dotenv
+from fastapi import BackgroundTasks, Depends, FastAPI, Header, HTTPException, Query
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import JSONResponse
+from fastapi.staticfiles import StaticFiles
+from pydantic import BaseModel
+
+from apps.api.browser_logger import router as browser_log_router
+from dss import settings
+
+# Load environment variables from .env file FIRST (before any other imports)
+from dss.auth.atlassian_auth import get_auth
+from dss.figma.figma_tools import FigmaToolSuite
+from dss.services.config_service import ConfigService
+from dss.services.project_manager import ProjectManager
+from dss.services.sandboxed_fs import SandboxedFS
+from dss.storage.json_store import (
+ ActivityLog,
+ Cache,
+ CodeMetrics,
+ Components,
+ FigmaFiles,
+ IntegrationHealth,
+ Integrations,
+ Projects,
+ SyncHistory,
+ Teams,
+ TestResults,
+ get_stats,
+)
# Get project root - apps/api/server.py -> apps/api -> apps -> project_root
_server_file = Path(__file__).resolve()
@@ -27,46 +61,22 @@ _project_root = _server_file.parent.parent.parent # /home/.../dss
# Try loading from multiple possible .env locations
env_paths = [
- _project_root / ".env", # root .env (primary)
+ _project_root / ".env", # root .env (primary)
_project_root / "storybook" / ".env", # storybook/.env
- _server_file.parent / ".env", # apps/api/.env
+ _server_file.parent / ".env", # apps/api/.env
]
for env_path in env_paths:
if env_path.exists():
load_dotenv(env_path, override=True)
break
-import asyncio
-import subprocess
-import json
-from typing import Optional, List, Dict, Any
-from datetime import datetime
-from fastapi import FastAPI, HTTPException, Query, BackgroundTasks, Depends, Header
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from fastapi.staticfiles import StaticFiles
-from pydantic import BaseModel
-from typing import Optional
-
-import sys
# Add project root to path for dss package
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
# Import browser logger router (local import from same directory)
-from apps.api.browser_logger import router as browser_log_router
# DSS package imports - unified package
-from dss import settings
-from dss.storage.json_store import (
- Projects, Components, SyncHistory, ActivityLog, Teams, Cache, get_stats,
- FigmaFiles, CodeMetrics, TestResults, TokenDrift, Tokens, Styles,
- Integrations, IntegrationHealth
-)
-from dss.figma.figma_tools import FigmaToolSuite
-from dss.services.project_manager import ProjectManager
-from dss.services.config_service import ConfigService, DSSConfig
-from dss.services.sandboxed_fs import SandboxedFS
# Additional DSS imports available:
# from dss import DesignToken, TokenSource, ProjectScanner
@@ -81,52 +91,69 @@ class _FigmaConfigCompat:
@property
def is_configured(self):
return settings.figma_configured
+
@property
def token(self):
return settings.FIGMA_TOKEN
+
@property
def cache_ttl(self):
return settings.FIGMA_CACHE_TTL
+
class _ServerConfigCompat:
@property
def env(self):
return settings.SERVER_ENV
+
@property
def port(self):
return settings.SERVER_PORT
+
@property
def host(self):
return settings.SERVER_HOST
+
@property
def is_production(self):
return settings.is_production
+
class _ConfigCompat:
figma = _FigmaConfigCompat()
server = _ServerConfigCompat()
def summary(self):
return {
- "figma": {"configured": settings.figma_configured, "cache_ttl": settings.FIGMA_CACHE_TTL},
- "server": {"port": settings.SERVER_PORT, "env": settings.SERVER_ENV, "log_level": settings.LOG_LEVEL},
+ "figma": {
+ "configured": settings.figma_configured,
+ "cache_ttl": settings.FIGMA_CACHE_TTL,
+ },
+ "server": {
+ "port": settings.SERVER_PORT,
+ "env": settings.SERVER_ENV,
+ "log_level": settings.LOG_LEVEL,
+ },
"database": {"path": str(settings.DATABASE_PATH)},
}
+
config = _ConfigCompat()
# === Runtime Configuration ===
+
class RuntimeConfig:
"""
- ⚙️ ENDOCRINE HORMONE STORAGE - Runtime configuration system
+ ⚙️ ENDOCRINE HORMONE STORAGE - Runtime configuration system.
The endocrine system regulates behavior through hormones. This configuration
manager stores the component's behavioral preferences and adaptation state.
Persists to .dss/runtime-config.json so the component remembers its preferences
even after sleep (shutdown).
"""
+
def __init__(self):
self.config_path = Path(__file__).parent.parent.parent / ".dss" / "runtime-config.json"
self.config_path.parent.mkdir(parents=True, exist_ok=True)
@@ -136,7 +163,7 @@ class RuntimeConfig:
if self.config_path.exists():
try:
return json.loads(self.config_path.read_text())
- except (json.JSONDecodeError, IOError) as e:
+ except (json.JSONDecodeError, IOError):
# Config file corrupted or unreadable, use defaults
pass
return {
@@ -152,7 +179,7 @@ class RuntimeConfig:
"token_sync": True,
"code_gen": True,
"ai_advisor": False,
- }
+ },
}
def _save(self):
@@ -205,9 +232,11 @@ ProjectManager.ensure_schema()
# === Service Discovery ===
+
class ServiceDiscovery:
"""
Service discovery for companion services (Storybook, Chromatic, dev servers).
+
Checks known ports to discover running services.
"""
@@ -230,13 +259,13 @@ class ServiceDiscovery:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
- result = sock.connect_ex(('127.0.0.1', port))
+ result = sock.connect_ex(("127.0.0.1", port))
sock.close()
if result == 0:
discovered[service] = {
"running": True,
"port": port,
- "url": f"http://localhost:{port}"
+ "url": f"http://localhost:{port}",
}
break
except (OSError, socket.error):
@@ -259,11 +288,7 @@ class ServiceDiscovery:
try:
async with httpx.AsyncClient(timeout=2.0) as client:
resp = await client.get(url)
- return {
- "running": resp.status_code == 200,
- "url": url,
- "port": port
- }
+ return {"running": resp.status_code == 200, "url": url, "port": port}
except (httpx.ConnectError, httpx.TimeoutException, httpx.HTTPError):
# Storybook not running or unreachable
return {"running": False, "url": url, "port": port}
@@ -274,7 +299,7 @@ class ServiceDiscovery:
app = FastAPI(
title="Design System Server (DSS)",
description="API for design system management and Figma integration",
- version="1.0.0"
+ version="1.0.0",
)
app.add_middleware(
@@ -298,18 +323,20 @@ figma_config = runtime_config.get("figma")
figma_token_at_startup = figma_config.get("token") if figma_config else None
figma_suite = FigmaToolSuite(
token=figma_token_at_startup,
- output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output")
+ output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output"),
)
# === Request/Response Models ===
+
class ProjectCreate(BaseModel):
name: str
description: str = ""
figma_file_key: str = ""
root_path: str = "" # MVP1: Project root directory path
+
class ProjectUpdate(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
@@ -317,30 +344,36 @@ class ProjectUpdate(BaseModel):
status: Optional[str] = None
root_path: Optional[str] = None # MVP1: Update project root path
+
class FigmaExtractRequest(BaseModel):
file_key: str
format: str = "css"
+
class FigmaSyncRequest(BaseModel):
file_key: str
target_path: str
format: str = "css"
+
class TeamCreate(BaseModel):
name: str
description: str = ""
+
class FigmaFileCreate(BaseModel):
figma_url: str
file_name: str
file_key: str
+
class ESRECreate(BaseModel):
name: str
definition_text: str
expected_value: Optional[str] = None
component_name: Optional[str] = None
+
class TokenDriftCreate(BaseModel):
component_id: str
property_name: str
@@ -353,11 +386,11 @@ class TokenDriftCreate(BaseModel):
# === Authentication ===
-from dss.auth.atlassian_auth import get_auth
async def get_current_user(authorization: Optional[str] = Header(None)) -> Dict[str, Any]:
"""
Dependency to get current authenticated user from JWT token.
+
Usage: user = Depends(get_current_user)
"""
if not authorization or not authorization.startswith("Bearer "):
@@ -372,12 +405,14 @@ async def get_current_user(authorization: Optional[str] = Header(None)) -> Dict[
return user_data
+
class LoginRequest(BaseModel):
url: str # Atlassian URL
email: str
api_token: str
service: str = "jira" # "jira" or "confluence"
+
@app.post("/api/auth/login")
async def login(request: LoginRequest):
"""
@@ -392,7 +427,7 @@ async def login(request: LoginRequest):
url=request.url,
email=request.email,
api_token=request.api_token,
- service=request.service
+ service=request.service,
)
return result
except ValueError as e:
@@ -400,21 +435,25 @@ async def login(request: LoginRequest):
except Exception as e:
raise HTTPException(status_code=500, detail=f"Login failed: {str(e)}")
+
@app.get("/api/auth/me")
async def get_me(user: Dict[str, Any] = Depends(get_current_user)):
- """Get current authenticated user info"""
+ """Get current authenticated user info."""
auth = get_auth()
user_data = await auth.get_user_by_id(user["user_id"])
if not user_data:
raise HTTPException(status_code=404, detail="User not found")
return user_data
+
# === Root & Health ===
+
@app.get("/")
async def root():
"""Redirect to Admin UI dashboard."""
from fastapi.responses import RedirectResponse
+
return RedirectResponse(url="/admin-ui/index.html")
@@ -432,16 +471,17 @@ async def health():
- Figma - Is the Figma integration configured?
"""
import os
- import psutil
from pathlib import Path
+ import psutil
+
# Check storage connectivity
storage_ok = False
try:
from dss.storage.json_store import DATA_DIR
+
storage_ok = DATA_DIR.exists()
except Exception as e:
- import traceback
print(f"[Health] Storage check error: {type(e).__name__}: {e}", flush=True)
# Check MCP handler functionality
@@ -449,21 +489,24 @@ async def health():
try:
import sys
from pathlib import Path
+
project_root = Path(__file__).parent.parent.parent
if str(project_root) not in sys.path:
sys.path.insert(0, str(project_root))
from dss.mcp.handler import get_mcp_handler
+
handler = get_mcp_handler()
mcp_ok = handler is not None
except Exception as e:
- import traceback
print(f"[Health] MCP handler check error: {type(e).__name__}: {e}", flush=True)
# Get uptime
try:
process = psutil.Process(os.getpid())
- uptime_seconds = int((datetime.now() - datetime.fromtimestamp(process.create_time())).total_seconds())
+ uptime_seconds = int(
+ (datetime.now() - datetime.fromtimestamp(process.create_time())).total_seconds()
+ )
except:
uptime_seconds = 0
@@ -478,17 +521,18 @@ async def health():
"services": {
"storage": "ok" if storage_ok else "error",
"mcp": "ok" if mcp_ok else "error",
- "figma": "connected" if config.figma.is_configured else "not configured"
- }
+ "figma": "connected" if config.figma.is_configured else "not configured",
+ },
}
# === DEBUG ENDPOINTS ===
+
@app.post("/api/browser-logs")
async def receive_browser_logs(logs: dict):
"""
- 📋 BROWSER LOG COLLECTION ENDPOINT
+ 📋 BROWSER LOG COLLECTION ENDPOINT.
Receives browser logs from the dashboard and stores them for debugging.
Browser logger (browser-logger.js) POSTs logs here automatically or on demand.
@@ -501,8 +545,8 @@ async def receive_browser_logs(logs: dict):
"diagnostic": {...}
}
"""
- from pathlib import Path
import time
+ from pathlib import Path
# Create browser logs directory if doesn't exist
browser_logs_dir = Path(__file__).parent.parent.parent / ".dss" / "browser-logs"
@@ -518,13 +562,19 @@ async def receive_browser_logs(logs: dict):
# Log to activity (skip if ActivityLog not available)
try:
with get_connection() as conn:
- conn.execute("""
+ conn.execute(
+ """
INSERT INTO activity_log (category, action, details, metadata, created_at)
VALUES (?, ?, ?, ?, ?)
- """, ("debug", "browser_logs_received",
- f"Received browser logs for session {session_id}",
- json.dumps({"session_id": session_id, "log_count": len(logs.get("logs", []))}),
- datetime.utcnow().isoformat()))
+ """,
+ (
+ "debug",
+ "browser_logs_received",
+ f"Received browser logs for session {session_id}",
+ json.dumps({"session_id": session_id, "log_count": len(logs.get("logs", []))}),
+ datetime.utcnow().isoformat(),
+ ),
+ )
conn.commit()
except:
pass # Activity logging is optional
@@ -537,25 +587,25 @@ async def receive_browser_logs(logs: dict):
# Create task for Claude to investigate
try:
import httpx
+
task_data = {
"title": f"Browser errors detected in session {session_id[:20]}...",
"description": f"Detected {error_count} errors and {warn_count} warnings in browser session. Use dss_get_browser_errors('{session_id}') to investigate.",
"priority": 3 if error_count > 0 else 5,
"project": "dss-debug",
- "visibility": "public"
+ "visibility": "public",
}
# Create task via task-queue MCP HTTP endpoint (if available)
# This runs async - don't block browser log storage
import asyncio
+
async def create_task():
try:
async with httpx.AsyncClient() as client:
# Task queue typically runs on same server
await client.post(
- "http://localhost:8765/tasks",
- json=task_data,
- timeout=2.0
+ "http://localhost:8765/tasks", json=task_data, timeout=2.0
)
except:
pass # Task creation is best-effort
@@ -570,14 +620,14 @@ async def receive_browser_logs(logs: dict):
"sessionId": session_id,
"logCount": len(logs.get("logs", [])),
"storedAt": datetime.utcnow().isoformat() + "Z",
- "errorsDetected": error_count > 0 or warn_count > 0
+ "errorsDetected": error_count > 0 or warn_count > 0,
}
@app.get("/api/browser-logs/{session_id}")
async def get_browser_logs(session_id: str):
"""
- 📋 RETRIEVE BROWSER LOGS
+ 📋 RETRIEVE BROWSER LOGS.
Retrieves stored browser logs by session ID.
"""
@@ -596,7 +646,7 @@ async def get_browser_logs(session_id: str):
@app.get("/api/debug/diagnostic")
async def get_debug_diagnostic():
"""
- 🔍 COMPREHENSIVE SYSTEM DIAGNOSTIC
+ 🔍 COMPREHENSIVE SYSTEM DIAGNOSTIC.
Returns detailed system diagnostic including:
- Health status (from /health endpoint)
@@ -607,9 +657,10 @@ async def get_debug_diagnostic():
- Recent errors
"""
import os
- import psutil
from pathlib import Path
+ import psutil
+
# Get health status
health_status = await health()
@@ -629,20 +680,17 @@ async def get_debug_diagnostic():
# Get recent errors from activity log
try:
with get_connection() as conn:
- recent_errors = conn.execute("""
+ recent_errors = conn.execute(
+ """
SELECT category, action, details, created_at
FROM activity_log
WHERE category = 'error' OR action LIKE '%error%' OR action LIKE '%fail%'
ORDER BY created_at DESC
LIMIT 10
- """).fetchall()
+ """
+ ).fetchall()
recent_errors = [
- {
- "category": row[0],
- "action": row[1],
- "details": row[2],
- "timestamp": row[3]
- }
+ {"category": row[0], "action": row[1], "details": row[2], "timestamp": row[3]}
for row in recent_errors
]
except:
@@ -652,29 +700,26 @@ async def get_debug_diagnostic():
"status": health_status["status"],
"timestamp": datetime.utcnow().isoformat() + "Z",
"health": health_status,
- "browser": {
- "session_count": browser_sessions,
- "logs_directory": str(browser_logs_dir)
- },
+ "browser": {"session_count": browser_sessions, "logs_directory": str(browser_logs_dir)},
"database": {
"size_bytes": db_size_bytes,
"size_mb": round(db_size_bytes / 1024 / 1024, 2),
- "path": str(db_path)
+ "path": str(db_path),
},
"process": {
"pid": os.getpid(),
"memory_rss_mb": round(memory_info.rss / 1024 / 1024, 2),
"memory_vms_mb": round(memory_info.vms / 1024 / 1024, 2),
- "threads": process.num_threads()
+ "threads": process.num_threads(),
},
- "recent_errors": recent_errors
+ "recent_errors": recent_errors,
}
@app.get("/api/debug/workflows")
async def list_workflows():
"""
- 📋 LIST AVAILABLE DEBUG WORKFLOWS
+ 📋 LIST AVAILABLE DEBUG WORKFLOWS.
Returns list of available workflows from .dss/WORKFLOWS/ directory.
Each workflow is a markdown file with step-by-step debugging procedures.
@@ -709,19 +754,17 @@ async def list_workflows():
purpose = line.replace("**Purpose**:", "").strip()
break
- workflows.append({
- "id": workflow_file.stem,
- "title": title,
- "purpose": purpose,
- "file": workflow_file.name,
- "path": str(workflow_file)
- })
+ workflows.append(
+ {
+ "id": workflow_file.stem,
+ "title": title,
+ "purpose": purpose,
+ "file": workflow_file.name,
+ "path": str(workflow_file),
+ }
+ )
- return {
- "workflows": workflows,
- "count": len(workflows),
- "directory": str(workflows_dir)
- }
+ return {"workflows": workflows, "count": len(workflows), "directory": str(workflows_dir)}
@app.get("/api/config")
@@ -738,6 +781,7 @@ async def get_config():
# Import here to avoid circular imports
try:
from config import get_public_config
+
return get_public_config()
except ImportError:
# Fallback for legacy deployments
@@ -747,27 +791,27 @@ async def get_config():
"storybookPort": 6006,
}
+
@app.get("/api/stats")
async def get_statistics():
"""Get database and system statistics."""
db_stats = get_stats()
return {
"database": db_stats,
- "figma": {
- "mode": figma_suite.mode,
- "configured": config.figma.is_configured
- }
+ "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured},
}
# === Projects ===
+
@app.get("/api/projects")
async def list_projects(status: Optional[str] = None):
"""List all projects."""
projects = Projects.list(status=status)
return projects
+
@app.get("/api/projects/{project_id}")
async def get_project(project_id: str):
"""Get a specific project."""
@@ -776,6 +820,7 @@ async def get_project(project_id: str):
raise HTTPException(status_code=404, detail="Project not found")
return project
+
@app.post("/api/projects")
async def create_project(project: ProjectCreate):
"""Create a new project."""
@@ -784,17 +829,18 @@ async def create_project(project: ProjectCreate):
id=project_id,
name=project.name,
description=project.description,
- figma_file_key=project.figma_file_key
+ figma_file_key=project.figma_file_key,
)
ActivityLog.log(
action="project_created",
entity_type="project",
entity_id=project_id,
project_id=project_id,
- details={"name": project.name}
+ details={"name": project.name},
)
return created
+
@app.put("/api/projects/{project_id}")
async def update_project(project_id: str, update: ProjectUpdate):
"""Update a project."""
@@ -812,25 +858,23 @@ async def update_project(project_id: str, update: ProjectUpdate):
entity_type="project",
entity_id=project_id,
project_id=project_id,
- details=update_data
+ details=update_data,
)
return updated
+
@app.delete("/api/projects/{project_id}")
async def delete_project(project_id: str):
"""Delete a project."""
if not Projects.delete(project_id):
raise HTTPException(status_code=404, detail="Project not found")
- ActivityLog.log(
- action="project_deleted",
- entity_type="project",
- entity_id=project_id
- )
+ ActivityLog.log(action="project_deleted", entity_type="project", entity_id=project_id)
return {"success": True}
# === Components ===
+
@app.get("/api/projects/{project_id}/components")
async def list_components(project_id: str):
"""List components for a project."""
@@ -841,6 +885,7 @@ async def list_components(project_id: str):
# === Figma Integration ===
+
@app.post("/api/figma/extract-variables")
async def extract_variables(request: FigmaExtractRequest, background_tasks: BackgroundTasks):
"""Extract design tokens from Figma variables."""
@@ -849,12 +894,17 @@ async def extract_variables(request: FigmaExtractRequest, background_tasks: Back
ActivityLog.log(
action="figma_extract_variables",
entity_type="figma",
- details={"file_key": request.file_key, "format": request.format, "tokens_count": result.get("tokens_count")}
+ details={
+ "file_key": request.file_key,
+ "format": request.format,
+ "tokens_count": result.get("tokens_count"),
+ },
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=f"Token extraction failed: {str(e)}")
+
@app.post("/api/figma/extract-components")
async def extract_components(request: FigmaExtractRequest):
"""Extract component definitions from Figma."""
@@ -863,12 +913,13 @@ async def extract_components(request: FigmaExtractRequest):
ActivityLog.log(
action="figma_extract_components",
entity_type="figma",
- details={"file_key": request.file_key, "count": result.get("components_count")}
+ details={"file_key": request.file_key, "count": result.get("components_count")},
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/extract-styles")
async def extract_styles(request: FigmaExtractRequest):
"""Extract style definitions from Figma."""
@@ -878,20 +929,28 @@ async def extract_styles(request: FigmaExtractRequest):
except Exception as e:
raise HTTPException(status_code=500, detail=f"Style extraction failed: {str(e)}")
+
@app.post("/api/figma/sync-tokens")
async def sync_tokens(request: FigmaSyncRequest):
"""Sync tokens from Figma to target file."""
try:
- result = await figma_suite.sync_tokens(request.file_key, request.target_path, request.format)
+ result = await figma_suite.sync_tokens(
+ request.file_key, request.target_path, request.format
+ )
ActivityLog.log(
action="figma_sync_tokens",
entity_type="figma",
- details={"file_key": request.file_key, "target": request.target_path, "tokens_synced": result.get("tokens_synced")}
+ details={
+ "file_key": request.file_key,
+ "target": request.target_path,
+ "tokens_synced": result.get("tokens_synced"),
+ },
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=f"Token sync failed: {str(e)}")
+
@app.post("/api/figma/validate")
async def validate_components(request: FigmaExtractRequest):
"""Validate component definitions against design system rules."""
@@ -901,6 +960,7 @@ async def validate_components(request: FigmaExtractRequest):
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/generate-code")
async def generate_code(file_key: str, component_name: str, framework: str = "webcomponent"):
"""Generate component code from Figma."""
@@ -910,19 +970,23 @@ async def generate_code(file_key: str, component_name: str, framework: str = "we
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.get("/api/figma/health")
async def figma_health():
"""Check Figma connection status."""
- is_live = figma_suite.mode == 'live'
+ is_live = figma_suite.mode == "live"
return {
"status": "ok" if is_live else "degraded",
"mode": figma_suite.mode,
- "message": "Figma connected" if is_live else "Running in mock mode. Configure FIGMA_TOKEN for live API."
+ "message": "Figma connected"
+ if is_live
+ else "Running in mock mode. Configure FIGMA_TOKEN for live API.",
}
# === Discovery ===
+
@app.get("/api/discovery")
async def run_discovery(path: str = "."):
"""Run project discovery."""
@@ -930,10 +994,7 @@ async def run_discovery(path: str = "."):
try:
result = subprocess.run(
- [str(script_path), path],
- capture_output=True,
- text=True,
- timeout=30
+ [str(script_path), path], capture_output=True, text=True, timeout=30
)
if result.returncode == 0:
return json.loads(result.stdout)
@@ -944,10 +1005,12 @@ async def run_discovery(path: str = "."):
except json.JSONDecodeError:
return {"raw_output": result.stdout}
+
class DiscoveryScanRequest(BaseModel):
path: str = "."
full_scan: bool = False
+
@app.post("/api/discovery/scan")
async def scan_project(request: DiscoveryScanRequest):
"""Run project discovery scan."""
@@ -955,17 +1018,14 @@ async def scan_project(request: DiscoveryScanRequest):
try:
result = subprocess.run(
- [str(script_path), request.path],
- capture_output=True,
- text=True,
- timeout=30
+ [str(script_path), request.path], capture_output=True, text=True, timeout=30
)
if result.returncode == 0:
data = json.loads(result.stdout)
ActivityLog.log(
action="discovery_scan",
entity_type="project",
- details={"path": request.path, "full_scan": request.full_scan}
+ details={"path": request.path, "full_scan": request.full_scan},
)
return data
else:
@@ -975,6 +1035,7 @@ async def scan_project(request: DiscoveryScanRequest):
except json.JSONDecodeError:
return {"raw_output": result.stdout}
+
@app.get("/api/discovery/stats")
async def get_discovery_stats():
"""Get project statistics."""
@@ -987,34 +1048,30 @@ async def get_discovery_stats():
"today": 0,
"this_week": 0,
"total": db_stats.get("syncs", {}).get("total", 0),
- "last_sync": None
+ "last_sync": None,
},
- "stories": {
- "total": 0
- }
+ "stories": {"total": 0},
}
+
@app.get("/api/discovery/activity")
async def get_discovery_activity(limit: int = Query(default=10, le=50)):
"""Get recent discovery activity."""
return ActivityLog.recent(limit=limit)
+
@app.get("/api/discovery/ports")
async def discover_ports():
"""Discover listening ports and services."""
script_path = Path(__file__).parent.parent / "discovery" / "discover-ports.sh"
try:
- result = subprocess.run(
- [str(script_path)],
- capture_output=True,
- text=True,
- timeout=10
- )
+ result = subprocess.run([str(script_path)], capture_output=True, text=True, timeout=10)
return json.loads(result.stdout)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.get("/api/discovery/env")
async def discover_env(path: str = "."):
"""Analyze environment configuration."""
@@ -1022,10 +1079,7 @@ async def discover_env(path: str = "."):
try:
result = subprocess.run(
- [str(script_path), path],
- capture_output=True,
- text=True,
- timeout=10
+ [str(script_path), path], capture_output=True, text=True, timeout=10
)
return json.loads(result.stdout)
except Exception as e:
@@ -1034,19 +1088,24 @@ async def discover_env(path: str = "."):
# === Activity & Sync History ===
+
@app.get("/api/activity")
async def get_activity(limit: int = Query(default=50, le=100)):
"""Get recent activity log."""
return ActivityLog.recent(limit=limit)
+
@app.get("/api/sync-history")
-async def get_sync_history(project_id: Optional[str] = None, limit: int = Query(default=20, le=100)):
+async def get_sync_history(
+ project_id: Optional[str] = None, limit: int = Query(default=20, le=100)
+):
"""Get sync history."""
return SyncHistory.recent(project_id=project_id, limit=limit)
# === Audit Log (Enhanced) ===
+
@app.get("/api/audit")
async def get_audit_log(
project_id: Optional[str] = None,
@@ -1058,7 +1117,7 @@ async def get_audit_log(
start_date: Optional[str] = None,
end_date: Optional[str] = None,
limit: int = Query(default=50, le=200),
- offset: int = Query(default=0, ge=0)
+ offset: int = Query(default=0, ge=0),
):
"""
Get audit log with advanced filtering.
@@ -1085,14 +1144,11 @@ async def get_audit_log(
start_date=start_date,
end_date=end_date,
limit=limit,
- offset=offset
+ offset=offset,
)
total = ActivityLog.count(
- project_id=project_id,
- user_id=user_id,
- action=action,
- category=category
+ project_id=project_id, user_id=user_id, action=action, category=category
)
return {
@@ -1100,28 +1156,32 @@ async def get_audit_log(
"total": total,
"limit": limit,
"offset": offset,
- "has_more": (offset + limit) < total
+ "has_more": (offset + limit) < total,
}
+
@app.get("/api/audit/stats")
async def get_audit_stats():
"""Get audit log statistics."""
return {
"by_category": ActivityLog.get_stats_by_category(),
"by_user": ActivityLog.get_stats_by_user(),
- "total_count": ActivityLog.count()
+ "total_count": ActivityLog.count(),
}
+
@app.get("/api/audit/categories")
async def get_audit_categories():
"""Get list of all activity categories."""
return ActivityLog.get_categories()
+
@app.get("/api/audit/actions")
async def get_audit_actions():
"""Get list of all activity actions."""
return ActivityLog.get_actions()
+
class AuditLogRequest(BaseModel):
action: str
entity_type: Optional[str] = None
@@ -1133,18 +1193,20 @@ class AuditLogRequest(BaseModel):
team_context: Optional[str] = None
description: Optional[str] = None
category: Optional[str] = None
- severity: str = 'info'
+ severity: str = "info"
details: Optional[Dict[str, Any]] = None
+
@app.post("/api/audit")
async def create_audit_entry(entry: AuditLogRequest, request: Any):
"""
Create a new audit log entry.
+
Automatically captures IP and user agent from request.
"""
# Extract IP and user agent from request
- ip_address = request.client.host if hasattr(request, 'client') else None
- user_agent = request.headers.get('user-agent') if hasattr(request, 'headers') else None
+ ip_address = request.client.host if hasattr(request, "client") else None
+ user_agent = request.headers.get("user-agent") if hasattr(request, "headers") else None
ActivityLog.log(
action=entry.action,
@@ -1160,39 +1222,49 @@ async def create_audit_entry(entry: AuditLogRequest, request: Any):
severity=entry.severity,
details=entry.details,
ip_address=ip_address,
- user_agent=user_agent
+ user_agent=user_agent,
)
return {"success": True, "message": "Audit entry created"}
+
@app.get("/api/audit/export")
async def export_audit_log(
project_id: Optional[str] = None,
category: Optional[str] = None,
start_date: Optional[str] = None,
end_date: Optional[str] = None,
- format: str = Query(default="json", regex="^(json|csv)$")
+ format: str = Query(default="json", regex="^(json|csv)$"),
):
- """
- Export audit log in JSON or CSV format.
- """
+ """Export audit log in JSON or CSV format."""
activities = ActivityLog.search(
project_id=project_id,
category=category,
start_date=start_date,
end_date=end_date,
- limit=10000 # Max export limit
+ limit=10000, # Max export limit
)
if format == "csv":
import csv
import io
+
from fastapi.responses import StreamingResponse
output = io.StringIO()
if activities:
- fieldnames = ['created_at', 'user_name', 'action', 'category', 'description', 'project_id', 'entity_type', 'entity_name', 'severity']
- writer = csv.DictWriter(output, fieldnames=fieldnames, extrasaction='ignore')
+ fieldnames = [
+ "created_at",
+ "user_name",
+ "action",
+ "category",
+ "description",
+ "project_id",
+ "entity_type",
+ "entity_name",
+ "severity",
+ ]
+ writer = csv.DictWriter(output, fieldnames=fieldnames, extrasaction="ignore")
writer.writeheader()
writer.writerows(activities)
@@ -1200,24 +1272,28 @@ async def export_audit_log(
return StreamingResponse(
iter([output.getvalue()]),
media_type="text/csv",
- headers={"Content-Disposition": f"attachment; filename=audit_log_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}.csv"}
+ headers={
+ "Content-Disposition": f"attachment; filename=audit_log_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}.csv"
+ },
)
else:
# JSON format
return {
"activities": activities,
"total": len(activities),
- "exported_at": datetime.utcnow().isoformat() + "Z"
+ "exported_at": datetime.utcnow().isoformat() + "Z",
}
# === Teams ===
+
@app.get("/api/teams")
async def list_teams():
"""List all teams."""
return Teams.list()
+
@app.post("/api/teams")
async def create_team(team: TeamCreate):
"""Create a new team."""
@@ -1225,6 +1301,7 @@ async def create_team(team: TeamCreate):
created = Teams.create(team_id, team.name, team.description)
return created
+
@app.get("/api/teams/{team_id}")
async def get_team(team_id: str):
"""Get a specific team."""
@@ -1236,12 +1313,14 @@ async def get_team(team_id: str):
# === Cache Management ===
+
@app.post("/api/cache/clear")
async def clear_cache():
"""Clear expired cache entries."""
count = Cache.clear_expired()
return {"cleared": count}
+
@app.delete("/api/cache")
async def purge_cache():
"""Purge all cache entries."""
@@ -1251,6 +1330,7 @@ async def purge_cache():
# === Configuration Management ===
+
class ConfigUpdate(BaseModel):
mode: Optional[str] = None
figma_token: Optional[str] = None
@@ -1264,7 +1344,7 @@ async def get_config():
return {
"config": runtime_config.get(),
"env": config.summary(),
- "mode": runtime_config.get("mode")
+ "mode": runtime_config.get("mode"),
}
@@ -1282,12 +1362,12 @@ async def update_config(update: ConfigUpdate):
global figma_suite
figma_suite = FigmaToolSuite(
token=update.figma_token,
- output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output")
+ output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output"),
)
ActivityLog.log(
action="figma_token_updated",
entity_type="config",
- details={"configured": bool(update.figma_token)}
+ details={"configured": bool(update.figma_token)},
)
if update.services:
@@ -1299,9 +1379,7 @@ async def update_config(update: ConfigUpdate):
if updates:
runtime_config.update(updates)
ActivityLog.log(
- action="config_updated",
- entity_type="config",
- details={"keys": list(updates.keys())}
+ action="config_updated", entity_type="config", details={"keys": list(updates.keys())}
)
return runtime_config.get()
@@ -1321,7 +1399,7 @@ async def get_figma_config():
"sync_tokens": True,
"validate": True,
"generate_code": True,
- }
+ },
}
@@ -1335,18 +1413,16 @@ async def test_figma_connection():
# Test with a minimal API call
import httpx
+
token = runtime_config._data["figma"]["token"]
async with httpx.AsyncClient() as client:
- resp = await client.get(
- "https://api.figma.com/v1/me",
- headers={"X-Figma-Token": token}
- )
+ resp = await client.get("https://api.figma.com/v1/me", headers={"X-Figma-Token": token})
if resp.status_code == 200:
user = resp.json()
return {
"success": True,
"user": user.get("email", "connected"),
- "handle": user.get("handle")
+ "handle": user.get("handle"),
}
else:
return {"success": False, "error": f"API returned {resp.status_code}"}
@@ -1356,6 +1432,7 @@ async def test_figma_connection():
# === Service Discovery ===
+
@app.get("/api/services")
async def list_services():
"""List configured and discovered services."""
@@ -1365,7 +1442,7 @@ async def list_services():
return {
"configured": configured,
"discovered": discovered,
- "storybook": await ServiceDiscovery.check_storybook()
+ "storybook": await ServiceDiscovery.check_storybook(),
}
@@ -1380,7 +1457,7 @@ async def configure_service(service_name: str, config_data: Dict[str, Any]):
action="service_configured",
entity_type="service",
entity_id=service_name,
- details={"keys": list(config_data.keys())}
+ details={"keys": list(config_data.keys())},
)
return services[service_name]
@@ -1451,7 +1528,7 @@ async def init_storybook(request_data: Dict[str, Any] = None):
results = await generator.generate_stories_for_directory(
str(source_path.relative_to(dss_mvp1_path)),
template=StoryTemplate.CSF3,
- dry_run=False
+ dry_run=False,
)
# Move generated stories to stories/generated/
@@ -1478,26 +1555,23 @@ async def init_storybook(request_data: Dict[str, Any] = None):
ActivityLog.log(
action="storybook_initialized",
entity_type="storybook",
- details={
- "stories_generated": stories_generated,
- "errors_count": len(errors)
- }
+ details={"stories_generated": stories_generated, "errors_count": len(errors)},
)
return {
"success": True,
"stories_generated": stories_generated,
- "message": f"Generated {stories_generated} stories" if stories_generated > 0 else "Storybook initialized (no components found)",
- "errors": errors if errors else None
+ "message": f"Generated {stories_generated} stories"
+ if stories_generated > 0
+ else "Storybook initialized (no components found)",
+ "errors": errors if errors else None,
}
except HTTPException:
raise
except Exception as e:
ActivityLog.log(
- action="storybook_init_failed",
- entity_type="storybook",
- details={"error": str(e)}
+ action="storybook_init_failed", entity_type="storybook", details={"error": str(e)}
)
raise HTTPException(status_code=500, detail=f"Storybook initialization failed: {str(e)}")
@@ -1506,6 +1580,7 @@ async def init_storybook(request_data: Dict[str, Any] = None):
async def clear_storybook_stories():
"""
Clear all generated stories from Storybook.
+
Returns Storybook to blank state (only Welcome page).
"""
import shutil
@@ -1527,13 +1602,13 @@ async def clear_storybook_stories():
ActivityLog.log(
action="storybook_cleared",
entity_type="storybook",
- details={"cleared_count": cleared_count}
+ details={"cleared_count": cleared_count},
)
return {
"success": True,
"cleared_count": cleared_count,
- "message": "Storybook stories cleared"
+ "message": "Storybook stories cleared",
}
except Exception as e:
@@ -1542,13 +1617,17 @@ async def clear_storybook_stories():
# === Design System Ingestion ===
+
class IngestionRequest(BaseModel):
"""Request for design system ingestion via natural language."""
+
prompt: str
project_id: Optional[str] = None
+
class IngestionConfirmRequest(BaseModel):
"""Confirm ingestion of a specific design system."""
+
system_id: str
method: str = "npm" # npm, figma, css, manual
source_url: Optional[str] = None
@@ -1580,8 +1659,8 @@ async def parse_ingestion_prompt(request: IngestionRequest):
details={
"prompt": request.prompt[:100],
"intent": result.get("intent"),
- "sources_found": len(result.get("sources", []))
- }
+ "sources_found": len(result.get("sources", [])),
+ },
)
return result
@@ -1592,9 +1671,7 @@ async def parse_ingestion_prompt(request: IngestionRequest):
@app.get("/api/ingest/systems")
async def list_known_systems(
- category: Optional[str] = None,
- framework: Optional[str] = None,
- search: Optional[str] = None
+ category: Optional[str] = None, framework: Optional[str] = None, search: Optional[str] = None
):
"""
List known design systems from the registry.
@@ -1609,7 +1686,7 @@ async def list_known_systems(
get_all_systems,
get_systems_by_category,
get_systems_by_framework,
- search_design_systems
+ search_design_systems,
)
if search:
@@ -1624,11 +1701,7 @@ async def list_known_systems(
return {
"systems": [s.to_dict() for s in systems],
"count": len(systems),
- "filters": {
- "category": category,
- "framework": framework,
- "search": search
- }
+ "filters": {"category": category, "framework": framework, "search": search},
}
except Exception as e:
@@ -1637,9 +1710,7 @@ async def list_known_systems(
@app.get("/api/ingest/systems/{system_id}")
async def get_system_info(system_id: str):
- """
- Get detailed information about a specific design system.
- """
+ """Get detailed information about a specific design system."""
try:
from design_system_registry import find_design_system, get_alternative_ingestion_options
@@ -1650,10 +1721,7 @@ async def get_system_info(system_id: str):
alternatives = get_alternative_ingestion_options(system)
- return {
- "system": system.to_dict(),
- "alternatives": alternatives
- }
+ return {"system": system.to_dict(), "alternatives": alternatives}
except HTTPException:
raise
@@ -1663,9 +1731,7 @@ async def get_system_info(system_id: str):
@app.get("/api/ingest/npm/search")
async def search_npm_packages(
- query: str,
- limit: int = Query(default=10, le=50),
- design_systems_only: bool = True
+ query: str, limit: int = Query(default=10, le=50), design_systems_only: bool = True
):
"""
Search npm registry for design system packages.
@@ -1681,7 +1747,7 @@ async def search_npm_packages(
"packages": [r.to_dict() for r in results],
"count": len(results),
"query": query,
- "design_systems_only": design_systems_only
+ "design_systems_only": design_systems_only,
}
except Exception as e:
@@ -1733,12 +1799,12 @@ async def confirm_ingestion(request: IngestionConfirmRequest):
if not system:
# Try to find via npm
from npm_search import get_package_info
+
npm_info = await get_package_info(request.system_id)
if not npm_info:
raise HTTPException(
- status_code=404,
- detail=f"Design system not found: {request.system_id}"
+ status_code=404, detail=f"Design system not found: {request.system_id}"
)
# Execute ingestion based on method
@@ -1746,7 +1812,7 @@ async def confirm_ingestion(request: IngestionConfirmRequest):
"success": True,
"system_id": request.system_id,
"method": request.method,
- "status": "queued"
+ "status": "queued",
}
if request.method == "npm":
@@ -1758,22 +1824,19 @@ async def confirm_ingestion(request: IngestionConfirmRequest):
"Install npm packages",
"Extract design tokens",
"Generate Storybook stories",
- "Update token configuration"
+ "Update token configuration",
]
elif request.method == "figma":
if not request.source_url:
- raise HTTPException(
- status_code=400,
- detail="Figma URL required for figma method"
- )
+ raise HTTPException(status_code=400, detail="Figma URL required for figma method")
result["figma_url"] = request.source_url
result["message"] = "Will extract tokens from Figma"
result["next_steps"] = [
"Authenticate with Figma",
"Extract design tokens",
"Map to CSS variables",
- "Generate component stories"
+ "Generate component stories",
]
elif request.method == "css":
@@ -1782,17 +1845,14 @@ async def confirm_ingestion(request: IngestionConfirmRequest):
if system and system.css_cdn_url:
request.source_url = system.css_cdn_url
else:
- raise HTTPException(
- status_code=400,
- detail="CSS URL required for css method"
- )
+ raise HTTPException(status_code=400, detail="CSS URL required for css method")
result["css_url"] = request.source_url
result["message"] = "Will parse CSS for design tokens"
result["next_steps"] = [
"Fetch CSS file",
"Parse CSS variables",
"Extract color/spacing/typography tokens",
- "Create token collection"
+ "Create token collection",
]
elif request.method == "manual":
@@ -1801,17 +1861,14 @@ async def confirm_ingestion(request: IngestionConfirmRequest):
"Enter color tokens",
"Enter typography tokens",
"Enter spacing tokens",
- "Review and confirm"
+ "Review and confirm",
]
ActivityLog.log(
action="ingestion_confirmed",
entity_type="ingestion",
entity_id=request.system_id,
- details={
- "method": request.method,
- "status": "queued"
- }
+ details={"method": request.method, "status": "queued"},
)
return result
@@ -1827,7 +1884,7 @@ async def execute_ingestion(
system_id: str,
method: str = "npm",
source_url: Optional[str] = None,
- project_id: Optional[str] = None
+ project_id: Optional[str] = None,
):
"""
Execute the actual ingestion process.
@@ -1858,10 +1915,12 @@ async def execute_ingestion(
if system.css_cdn_url:
# Fetch CSS from CDN and parse
import httpx
+
async with httpx.AsyncClient() as client:
resp = await client.get(system.css_cdn_url)
if resp.status_code == 200:
from dss.ingest.css import CSSTokenSource
+
# Write temp file and parse
temp_css = Path("/tmp") / f"{system.id}_tokens.css"
temp_css.write_text(resp.text)
@@ -1874,7 +1933,7 @@ async def execute_ingestion(
# For Tailwind-based systems, we'll need their config
tokens_extracted = 0 # Placeholder for Tailwind parsing
- except ImportError as e:
+ except ImportError:
# Token ingestion module not available
pass
finally:
@@ -1889,6 +1948,7 @@ async def execute_ingestion(
elif method == "css" and source_url:
# Fetch and parse CSS
import httpx
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "dss-mvp1"))
try:
@@ -1896,6 +1956,7 @@ async def execute_ingestion(
resp = await client.get(source_url)
if resp.status_code == 200:
from dss.ingest.css import CSSTokenSource
+
temp_css = Path("/tmp") / "ingested_tokens.css"
temp_css.write_text(resp.text)
source = CSSTokenSource(str(temp_css))
@@ -1910,10 +1971,7 @@ async def execute_ingestion(
entity_type="ingestion",
entity_id=system_id,
project_id=project_id,
- details={
- "method": method,
- "tokens_extracted": tokens_extracted
- }
+ details={"method": method, "tokens_extracted": tokens_extracted},
)
return {
@@ -1921,7 +1979,7 @@ async def execute_ingestion(
"system_id": system_id,
"method": method,
"tokens_extracted": tokens_extracted,
- "message": f"Extracted {tokens_extracted} tokens from {system.name if system else system_id}"
+ "message": f"Extracted {tokens_extracted} tokens from {system.name if system else system_id}",
}
except Exception as e:
@@ -1929,7 +1987,7 @@ async def execute_ingestion(
action="ingestion_failed",
entity_type="ingestion",
entity_id=system_id,
- details={"error": str(e)}
+ details={"error": str(e)},
)
raise HTTPException(status_code=500, detail=str(e))
@@ -1957,6 +2015,7 @@ async def get_ingestion_alternatives(system_id: Optional[str] = None):
# === DSS Mode ===
+
@app.get("/api/mode")
async def get_mode():
"""Get current DSS mode."""
@@ -1964,7 +2023,7 @@ async def get_mode():
return {
"mode": mode,
"description": "Local dev companion" if mode == "local" else "Remote design system server",
- "features": runtime_config.get("features")
+ "features": runtime_config.get("features"),
}
@@ -1976,11 +2035,7 @@ async def set_mode(request_data: Dict[str, Any]):
raise HTTPException(status_code=400, detail="Mode must be 'local' or 'server'")
runtime_config.set("mode", mode)
- ActivityLog.log(
- action="mode_changed",
- entity_type="config",
- details={"mode": mode}
- )
+ ActivityLog.log(action="mode_changed", entity_type="config", details={"mode": mode})
return {"mode": mode, "success": True}
@@ -1993,10 +2048,12 @@ async def set_mode(request_data: Dict[str, Any]):
# === System Administration ===
+
@app.post("/api/system/reset")
async def reset_dss(request_data: Dict[str, Any]):
"""
Reset DSS to fresh state by calling the reset command in dss-mvp1.
+
Requires confirmation.
"""
confirm = request_data.get("confirm", "")
@@ -2014,41 +2071,35 @@ async def reset_dss(request_data: Dict[str, Any]):
cwd=str(dss_mvp1_path),
capture_output=True,
text=True,
- timeout=60
+ timeout=60,
)
if result.returncode != 0:
raise Exception(f"Reset failed: {result.stderr}")
- ActivityLog.log(
- action="dss_reset",
- entity_type="system",
- details={"status": "success"}
- )
+ ActivityLog.log(action="dss_reset", entity_type="system", details={"status": "success"})
return {
"success": True,
"message": "DSS has been reset to fresh state",
- "output": result.stdout
+ "output": result.stdout,
}
except subprocess.TimeoutExpired:
raise HTTPException(status_code=504, detail="Reset operation timed out")
except Exception as e:
- ActivityLog.log(
- action="dss_reset_failed",
- entity_type="system",
- details={"error": str(e)}
- )
+ ActivityLog.log(action="dss_reset_failed", entity_type="system", details={"error": str(e)})
raise HTTPException(status_code=500, detail=str(e))
# === Team Dashboards ===
+
@app.get("/api/projects/{project_id}/dashboard/summary")
async def get_dashboard_summary(project_id: str):
"""
Get dashboard summary for all teams (thin slice).
+
Provides overview of UX, UI, and QA metrics.
"""
if not Projects.get(project_id):
@@ -2069,21 +2120,16 @@ async def get_dashboard_summary(project_id: str):
"project_id": project_id,
"ux": {
"figma_files_count": len(figma_files),
- "figma_files": figma_files[:5] # Show first 5
+ "figma_files": figma_files[:5], # Show first 5
},
- "ui": {
- "token_drift": drift_stats,
- "code_metrics": code_summary
- },
- "qa": {
- "esre_count": len(esre_list),
- "test_summary": test_summary
- }
+ "ui": {"token_drift": drift_stats, "code_metrics": code_summary},
+ "qa": {"esre_count": len(esre_list), "test_summary": test_summary},
}
# === UX Dashboard: Figma File Management ===
+
@app.get("/api/projects/{project_id}/figma-files")
async def list_figma_files(project_id: str):
"""List all Figma files for a project (UX Dashboard)."""
@@ -2102,17 +2148,17 @@ async def create_figma_file(project_id: str, figma_file: FigmaFileCreate):
project_id=project_id,
figma_url=figma_file.figma_url,
file_name=figma_file.file_name,
- file_key=figma_file.file_key
+ file_key=figma_file.file_key,
)
ActivityLog.log(
action="figma_file_added",
entity_type="figma_file",
- entity_id=str(created['id']),
+ entity_id=str(created["id"]),
entity_name=figma_file.file_name,
project_id=project_id,
team_context="ux",
- details={"file_key": figma_file.file_key}
+ details={"file_key": figma_file.file_key},
)
return created
@@ -2125,9 +2171,7 @@ async def update_figma_file_sync(project_id: str, file_id: int, status: str = "s
raise HTTPException(status_code=404, detail="Project not found")
updated = FigmaFiles.update_sync_status(
- file_id=file_id,
- status=status,
- last_synced=datetime.utcnow().isoformat()
+ file_id=file_id, status=status, last_synced=datetime.utcnow().isoformat()
)
if not updated:
@@ -2138,7 +2182,7 @@ async def update_figma_file_sync(project_id: str, file_id: int, status: str = "s
entity_type="figma_file",
entity_id=str(file_id),
project_id=project_id,
- team_context="ux"
+ team_context="ux",
)
return updated
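
One note on the `last_synced` stamp above: `datetime.utcnow()` is deprecated as of Python 3.12 and produces a naive datetime. A timezone-aware drop-in:

```python
from datetime import datetime, timezone

last_synced = datetime.now(timezone.utc).isoformat()  # e.g. "2025-01-01T12:00:00+00:00"
```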
@@ -2158,7 +2202,7 @@ async def delete_figma_file(project_id: str, file_id: int):
entity_type="figma_file",
entity_id=str(file_id),
project_id=project_id,
- team_context="ux"
+ team_context="ux",
)
return {"success": True}
@@ -2166,6 +2210,7 @@ async def delete_figma_file(project_id: str, file_id: int):
# === UI Dashboard: Token Drift Detection ===
+
@app.get("/api/projects/{project_id}/token-drift")
async def list_token_drift(project_id: str, severity: Optional[str] = None):
"""List token drift issues for a project (UI Dashboard)."""
@@ -2175,10 +2220,7 @@ async def list_token_drift(project_id: str, severity: Optional[str] = None):
drifts = TokenDriftDetector.list_by_project(project_id, severity)
stats = TokenDriftDetector.get_stats(project_id)
- return {
- "drifts": drifts,
- "stats": stats
- }
+ return {"drifts": drifts, "stats": stats}
@app.post("/api/projects/{project_id}/token-drift")
@@ -2194,19 +2236,16 @@ async def record_token_drift(project_id: str, drift: TokenDriftCreate):
file_path=drift.file_path,
line_number=drift.line_number,
severity=drift.severity,
- suggested_token=drift.suggested_token
+ suggested_token=drift.suggested_token,
)
ActivityLog.log(
action="token_drift_detected",
entity_type="token_drift",
- entity_id=str(created['id']),
+ entity_id=str(created["id"]),
project_id=project_id,
team_context="ui",
- details={
- "severity": drift.severity,
- "component_id": drift.component_id
- }
+ details={"severity": drift.severity, "component_id": drift.component_id},
)
return created
@@ -2232,7 +2271,7 @@ async def update_drift_status(project_id: str, drift_id: int, status: str):
entity_id=str(drift_id),
project_id=project_id,
team_context="ui",
- details={"status": status}
+ details={"status": status},
)
return updated
@@ -2240,6 +2279,7 @@ async def update_drift_status(project_id: str, drift_id: int, status: str):
# === QA Dashboard: ESRE Definitions ===
+
@app.get("/api/projects/{project_id}/esre")
async def list_esre_definitions(project_id: str):
"""List all ESRE definitions for a project (QA Dashboard)."""
@@ -2259,16 +2299,16 @@ async def create_esre_definition(project_id: str, esre: ESRECreate):
name=esre.name,
definition_text=esre.definition_text,
expected_value=esre.expected_value,
- component_name=esre.component_name
+ component_name=esre.component_name,
)
ActivityLog.log(
action="esre_created",
entity_type="esre",
- entity_id=str(created['id']),
+ entity_id=str(created["id"]),
entity_name=esre.name,
project_id=project_id,
- team_context="qa"
+ team_context="qa",
)
return created
@@ -2285,7 +2325,7 @@ async def update_esre_definition(project_id: str, esre_id: int, updates: ESRECre
name=updates.name,
definition_text=updates.definition_text,
expected_value=updates.expected_value,
- component_name=updates.component_name
+ component_name=updates.component_name,
)
if not updated:
@@ -2297,7 +2337,7 @@ async def update_esre_definition(project_id: str, esre_id: int, updates: ESRECre
entity_id=str(esre_id),
entity_name=updates.name,
project_id=project_id,
- team_context="qa"
+ team_context="qa",
)
return updated
@@ -2317,7 +2357,7 @@ async def delete_esre_definition(project_id: str, esre_id: int):
entity_type="esre",
entity_id=str(esre_id),
project_id=project_id,
- team_context="qa"
+ team_context="qa",
)
return {"success": True}
@@ -2325,8 +2365,10 @@ async def delete_esre_definition(project_id: str, esre_id: int):
# === Claude Chat API with MCP Tool Integration ===
+
class ClaudeChatRequest(BaseModel):
- """AI chat request model (supports Claude and Gemini)"""
+ """AI chat request model (supports Claude and Gemini)."""
+
message: str
context: Optional[Dict[str, Any]] = {}
history: Optional[List[Dict[str, Any]]] = []
@@ -2362,7 +2404,11 @@ async def claude_chat(request_data: ClaudeChatRequest):
action="ai_chat",
entity_type="chat",
entity_id=model_name,
- details={"message_length": len(message), "tools_enabled": enable_tools, "model": model_name}
+ details={
+ "message_length": len(message),
+ "tools_enabled": enable_tools,
+ "model": model_name,
+ },
)
try:
@@ -2376,11 +2422,11 @@ async def claude_chat(request_data: ClaudeChatRequest):
return {
"success": False,
"response": f"{model_name.title()} is not available. Check API keys and SDK installation.",
- "model": "error"
+ "model": "error",
}
# Import MCP handler
- from dss_mcp.handler import get_mcp_handler, MCPContext
+ from dss_mcp.handler import MCPContext, get_mcp_handler
mcp_handler = get_mcp_handler()
@@ -2427,7 +2473,7 @@ CURRENT PROJECT CONTEXT:
if "component" in context:
context_parts.append(f"Component: {context['component']}")
if context_parts:
- system_prompt += f"\n\nUser context:\n" + "\n".join(context_parts)
+ system_prompt += "\n\nUser context:\n" + "\n".join(context_parts)
# Get tools if enabled
tools = None
@@ -2435,10 +2481,7 @@ CURRENT PROJECT CONTEXT:
tools = mcp_handler.get_tools_for_claude()
# Create MCP context
- mcp_context = MCPContext(
- project_id=project_id,
- user_id=user_id
- )
+ mcp_context = MCPContext(project_id=project_id, user_id=user_id)
# Call AI provider with all context
result = await provider.chat(
@@ -2448,7 +2491,7 @@ CURRENT PROJECT CONTEXT:
tools=tools,
temperature=0.7,
mcp_handler=mcp_handler,
- mcp_context=mcp_context
+ mcp_context=mcp_context,
)
# Log tool usage
@@ -2458,7 +2501,7 @@ CURRENT PROJECT CONTEXT:
entity_type="chat",
entity_id=model_name,
project_id=project_id,
- details={"tools": result["tools_used"], "model": model_name}
+ details={"tools": result["tools_used"], "model": model_name},
)
return result
@@ -2468,67 +2511,91 @@ CURRENT PROJECT CONTEXT:
return {
"success": False,
"response": f"Error connecting to {model_name.title()}: {error_msg}\n\nMake sure your API key is valid and you have API access.",
- "model": "error"
+ "model": "error",
}
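
End to end, the handler above accepts a message plus optional history and context, selects a provider, wires in MCP tools, and returns the provider result. A hedged client sketch (the route path and port are assumptions; field names follow ClaudeChatRequest):

```python
import asyncio
import httpx

async def ask(message: str) -> None:
    payload = {
        "message": message,
        "context": {"project_id": "demo"},  # hypothetical project id
        "history": [],  # prior turns, in the provider's message format
    }
    async with httpx.AsyncClient(timeout=120.0) as client:
        # Route is assumed; substitute the actual chat endpoint path.
        resp = await client.post("http://localhost:8000/api/ai/chat", json=payload)
        data = resp.json()
        print(data.get("model"), data.get("tools_used"), data.get("response"))

asyncio.run(ask("Which design tokens drifted this week?"))
```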
# === MCP Tools Proxy ===
+
@app.post("/api/mcp/{tool_name}")
async def execute_mcp_tool(tool_name: str, params: Dict[str, Any] = {}):
"""
Proxy MCP tool execution.
+
Calls the MCP server running on port 3457.
"""
try:
# Import MCP server functions
from mcp_server import (
- get_status, list_projects, create_project, get_project,
- extract_tokens, extract_components, generate_component_code,
- sync_tokens_to_file, get_sync_history, get_activity,
- ingest_css_tokens, ingest_scss_tokens, ingest_tailwind_tokens,
- ingest_json_tokens, merge_tokens, export_tokens, validate_tokens,
- discover_project, analyze_react_components, find_inline_styles,
- find_style_patterns, analyze_style_values, find_unused_styles,
- build_source_graph, get_quick_wins, get_quick_wins_report,
- check_naming_consistency, scan_storybook, generate_story,
- generate_stories_batch, generate_storybook_theme, get_story_coverage
+ analyze_react_components,
+ analyze_style_values,
+ build_source_graph,
+ check_naming_consistency,
+ create_project,
+ discover_project,
+ export_tokens,
+ extract_components,
+ extract_tokens,
+ find_inline_styles,
+ find_style_patterns,
+ find_unused_styles,
+ generate_component_code,
+ generate_stories_batch,
+ generate_story,
+ generate_storybook_theme,
+ get_activity,
+ get_project,
+ get_quick_wins,
+ get_quick_wins_report,
+ get_status,
+ get_story_coverage,
+ get_sync_history,
+ ingest_css_tokens,
+ ingest_json_tokens,
+ ingest_scss_tokens,
+ ingest_tailwind_tokens,
+ list_projects,
+ merge_tokens,
+ scan_storybook,
+ sync_tokens_to_file,
+ validate_tokens,
)
# Map tool names to functions
tool_map = {
- 'get_status': get_status,
- 'list_projects': list_projects,
- 'create_project': create_project,
- 'get_project': get_project,
- 'extract_tokens': extract_tokens,
- 'extract_components': extract_components,
- 'generate_component_code': generate_component_code,
- 'sync_tokens_to_file': sync_tokens_to_file,
- 'get_sync_history': get_sync_history,
- 'get_activity': get_activity,
- 'ingest_css_tokens': ingest_css_tokens,
- 'ingest_scss_tokens': ingest_scss_tokens,
- 'ingest_tailwind_tokens': ingest_tailwind_tokens,
- 'ingest_json_tokens': ingest_json_tokens,
- 'merge_tokens': merge_tokens,
- 'export_tokens': export_tokens,
- 'validate_tokens': validate_tokens,
- 'discover_project': discover_project,
- 'analyze_react_components': analyze_react_components,
- 'find_inline_styles': find_inline_styles,
- 'find_style_patterns': find_style_patterns,
- 'analyze_style_values': analyze_style_values,
- 'find_unused_styles': find_unused_styles,
- 'build_source_graph': build_source_graph,
- 'get_quick_wins': get_quick_wins,
- 'get_quick_wins_report': get_quick_wins_report,
- 'check_naming_consistency': check_naming_consistency,
- 'scan_storybook': scan_storybook,
- 'generate_story': generate_story,
- 'generate_stories_batch': generate_stories_batch,
- 'generate_storybook_theme': generate_storybook_theme,
- 'get_story_coverage': get_story_coverage,
+ "get_status": get_status,
+ "list_projects": list_projects,
+ "create_project": create_project,
+ "get_project": get_project,
+ "extract_tokens": extract_tokens,
+ "extract_components": extract_components,
+ "generate_component_code": generate_component_code,
+ "sync_tokens_to_file": sync_tokens_to_file,
+ "get_sync_history": get_sync_history,
+ "get_activity": get_activity,
+ "ingest_css_tokens": ingest_css_tokens,
+ "ingest_scss_tokens": ingest_scss_tokens,
+ "ingest_tailwind_tokens": ingest_tailwind_tokens,
+ "ingest_json_tokens": ingest_json_tokens,
+ "merge_tokens": merge_tokens,
+ "export_tokens": export_tokens,
+ "validate_tokens": validate_tokens,
+ "discover_project": discover_project,
+ "analyze_react_components": analyze_react_components,
+ "find_inline_styles": find_inline_styles,
+ "find_style_patterns": find_style_patterns,
+ "analyze_style_values": analyze_style_values,
+ "find_unused_styles": find_unused_styles,
+ "build_source_graph": build_source_graph,
+ "get_quick_wins": get_quick_wins,
+ "get_quick_wins_report": get_quick_wins_report,
+ "check_naming_consistency": check_naming_consistency,
+ "scan_storybook": scan_storybook,
+ "generate_story": generate_story,
+ "generate_stories_batch": generate_stories_batch,
+ "generate_storybook_theme": generate_storybook_theme,
+ "get_story_coverage": get_story_coverage,
}
# Get the tool function
@@ -2544,7 +2611,7 @@ async def execute_mcp_tool(tool_name: str, params: Dict[str, Any] = {}):
action="mcp_tool_executed",
entity_type="tool",
entity_id=tool_name,
- details={"params": list(params.keys())}
+ details={"params": list(params.keys())},
)
return JSONResponse(content={"success": True, "result": result})
@@ -2554,22 +2621,25 @@ async def execute_mcp_tool(tool_name: str, params: Dict[str, Any] = {}):
action="mcp_tool_failed",
entity_type="tool",
entity_id=tool_name,
- details={"error": str(e)}
+ details={"error": str(e)},
)
raise HTTPException(status_code=500, detail=str(e))
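
The proxy above is a plain dict dispatch: resolve the tool name, call the function with the request params, and await the result when the tool is a coroutine. Reduced to its essentials:

```python
import inspect
from typing import Any, Callable, Dict

async def dispatch(
    tool_map: Dict[str, Callable[..., Any]], tool_name: str, params: Dict[str, Any]
) -> Any:
    fn = tool_map.get(tool_name)
    if fn is None:
        raise KeyError(f"Unknown tool: {tool_name}")  # surfaced as an HTTP error above
    result = fn(**params)
    if inspect.isawaitable(result):  # tools may be sync or async
        result = await result
    return result
```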
# === MCP Integration Endpoints ===
+
class IntegrationCreate(BaseModel):
- """Create/Update integration configuration"""
+ """Create/Update integration configuration."""
+
integration_type: str # figma, jira, confluence, sequential-thinking
config: Dict[str, Any] # Encrypted in database
enabled: bool = True
class IntegrationUpdate(BaseModel):
- """Update integration"""
+ """Update integration."""
+
config: Optional[Dict[str, Any]] = None
enabled: Optional[bool] = None
@@ -2586,7 +2656,7 @@ async def list_all_integrations():
{"integration_type": "figma", "is_healthy": True, "failure_count": 0},
{"integration_type": "jira", "is_healthy": True, "failure_count": 0},
{"integration_type": "confluence", "is_healthy": True, "failure_count": 0},
- {"integration_type": "sequential-thinking", "is_healthy": True, "failure_count": 0}
+ {"integration_type": "sequential-thinking", "is_healthy": True, "failure_count": 0},
]
}
@@ -2595,8 +2665,7 @@ async def list_all_integrations():
@app.get("/api/projects/{project_id}/integrations")
async def list_project_integrations(
- project_id: str,
- user_id: Optional[int] = Query(None, description="Filter by user ID")
+ project_id: str, user_id: Optional[int] = Query(None, description="Filter by user ID")
):
"""List integrations configured for a project."""
if not Projects.get(project_id):
@@ -2610,7 +2679,7 @@ async def list_project_integrations(
async def create_integration(
project_id: str,
integration: IntegrationCreate,
- user_id: int = Query(..., description="User ID for user-scoped integration")
+ user_id: int = Query(..., description="User ID for user-scoped integration"),
):
"""Create or update integration for a project (user-scoped)."""
if not Projects.get(project_id):
@@ -2632,7 +2701,7 @@ async def create_integration(
user_id=user_id,
integration_type=integration.integration_type,
config=encrypted_config,
- enabled=integration.enabled
+ enabled=integration.enabled,
)
ActivityLog.log(
@@ -2640,13 +2709,13 @@ async def create_integration(
entity_type="integration",
entity_id=integration.integration_type,
project_id=project_id,
- details={"user_id": user_id, "enabled": integration.enabled}
+ details={"user_id": user_id, "enabled": integration.enabled},
)
return {
"success": True,
"integration_type": integration.integration_type,
- "enabled": integration.enabled
+ "enabled": integration.enabled,
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@@ -2657,7 +2726,7 @@ async def update_integration(
project_id: str,
integration_type: str,
update: IntegrationUpdate,
- user_id: int = Query(..., description="User ID")
+ user_id: int = Query(..., description="User ID"),
):
"""Update an existing integration."""
if not Projects.get(project_id):
@@ -2683,7 +2752,7 @@ async def update_integration(
user_id=user_id,
integration_type=integration_type,
config=encrypted_config,
- enabled=update.enabled
+ enabled=update.enabled,
)
if not result:
@@ -2698,9 +2767,7 @@ async def update_integration(
@app.delete("/api/projects/{project_id}/integrations/{integration_type}")
async def delete_integration(
- project_id: str,
- integration_type: str,
- user_id: int = Query(..., description="User ID")
+ project_id: str, integration_type: str, user_id: int = Query(..., description="User ID")
):
"""Delete an integration configuration."""
if not Projects.get(project_id):
@@ -2717,7 +2784,7 @@ async def delete_integration(
entity_type="integration",
entity_id=integration_type,
project_id=project_id,
- details={"user_id": user_id}
+ details={"user_id": user_id},
)
return {"success": True}
@@ -2728,7 +2795,9 @@ async def delete_integration(
@app.get("/api/mcp/tools")
-async def list_mcp_tools(include_details: bool = Query(False, description="Include full tool schemas")):
+async def list_mcp_tools(
+ include_details: bool = Query(False, description="Include full tool schemas"),
+):
"""List all available MCP tools via unified handler."""
from dss_mcp.handler import get_mcp_handler
@@ -2751,7 +2820,8 @@ async def get_mcp_tool_info(tool_name: str):
class MCPToolExecuteRequest(BaseModel):
- """Request to execute an MCP tool"""
+ """Request to execute an MCP tool."""
+
arguments: Dict[str, Any]
project_id: str
user_id: Optional[int] = 1
@@ -2768,21 +2838,16 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest):
- Applies circuit breaker protection
- Logs execution metrics
"""
- from dss_mcp.handler import get_mcp_handler, MCPContext
+ from dss_mcp.handler import MCPContext, get_mcp_handler
handler = get_mcp_handler()
# Create execution context
- context = MCPContext(
- project_id=request.project_id,
- user_id=request.user_id
- )
+ context = MCPContext(project_id=request.project_id, user_id=request.user_id)
# Execute tool
result = await handler.execute_tool(
- tool_name=tool_name,
- arguments=request.arguments,
- context=context
+ tool_name=tool_name, arguments=request.arguments, context=context
)
# Log to activity
@@ -2794,8 +2859,8 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest):
details={
"success": result.success,
"duration_ms": result.duration_ms,
- "error": result.error
- }
+ "error": result.error,
+ },
)
return result.to_dict()
@@ -2804,7 +2869,7 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest):
@app.get("/api/mcp/status")
async def get_mcp_status():
"""Get MCP server status and configuration."""
- from dss_mcp.config import mcp_config, integration_config, validate_config
+ from dss_mcp.config import integration_config, mcp_config, validate_config
warnings = validate_config()
@@ -2813,24 +2878,25 @@ async def get_mcp_status():
"host": mcp_config.HOST,
"port": mcp_config.PORT,
"encryption_enabled": bool(mcp_config.ENCRYPTION_KEY),
- "context_cache_ttl": mcp_config.CONTEXT_CACHE_TTL
+ "context_cache_ttl": mcp_config.CONTEXT_CACHE_TTL,
},
"integrations": {
"figma": bool(integration_config.FIGMA_TOKEN),
"anthropic": bool(integration_config.ANTHROPIC_API_KEY),
"jira_default": bool(integration_config.JIRA_URL),
- "confluence_default": bool(integration_config.CONFLUENCE_URL)
+ "confluence_default": bool(integration_config.CONFLUENCE_URL),
},
"circuit_breaker": {
"failure_threshold": mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD,
- "timeout_seconds": mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS
+ "timeout_seconds": mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS,
},
- "warnings": warnings
+ "warnings": warnings,
}
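
The `circuit_breaker` block above exposes two knobs: how many consecutive failures open the circuit, and how long it stays open before a trial call is allowed. An illustrative minimal breaker with those semantics (a sketch, not the handler's actual implementation):

```python
import time
from typing import Optional

class CircuitBreaker:
    def __init__(self, failure_threshold: int = 5, timeout_seconds: float = 30.0):
        self.failure_threshold = failure_threshold
        self.timeout_seconds = timeout_seconds
        self.failures = 0
        self.opened_at: Optional[float] = None

    def allow(self) -> bool:
        if self.opened_at is None:
            return True  # closed: traffic flows
        if time.monotonic() - self.opened_at >= self.timeout_seconds:
            self.opened_at = None  # half-open: let a trial call through
            self.failures = 0
            return True
        return False  # open: fail fast without hitting the integration

    def record(self, success: bool) -> None:
        if success:
            self.failures = 0
            return
        self.failures += 1
        if self.failures >= self.failure_threshold:
            self.opened_at = time.monotonic()
```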
# === MVP1: Project Configuration & Sandboxed File System ===
+
@app.get("/api/projects/{project_id}/config")
async def get_project_config(project_id: str):
"""Get project configuration from .dss/config.json."""
@@ -2838,7 +2904,7 @@ async def get_project_config(project_id: str):
if not project:
raise HTTPException(status_code=404, detail="Project not found")
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if not root_path:
raise HTTPException(status_code=400, detail="Project has no root_path configured")
@@ -2853,7 +2919,7 @@ async def update_project_config(project_id: str, updates: Dict[str, Any]):
if not project:
raise HTTPException(status_code=404, detail="Project not found")
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if not root_path:
raise HTTPException(status_code=400, detail="Project has no root_path configured")
@@ -2872,7 +2938,7 @@ async def get_project_context(project_id: str):
if not project:
raise HTTPException(status_code=404, detail="Project not found")
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if not root_path:
raise HTTPException(status_code=400, detail="Project has no root_path configured")
@@ -2891,14 +2957,10 @@ async def get_project_context(project_id: str):
pass
return {
- "project": {
- "id": project['id'],
- "name": project['name'],
- "root_path": root_path
- },
+ "project": {"id": project["id"], "name": project["name"], "root_path": root_path},
"config": config.dict(),
"file_tree": fs.get_file_tree(max_depth=2),
- "context_files": context_files
+ "context_files": context_files,
}
@@ -2909,7 +2971,7 @@ async def list_project_files(project_id: str, path: str = "."):
if not project:
raise HTTPException(status_code=404, detail="Project not found")
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if not root_path:
raise HTTPException(status_code=400, detail="Project has no root_path configured")
@@ -2929,7 +2991,7 @@ async def get_project_file_tree(project_id: str, max_depth: int = 3):
if not project:
raise HTTPException(status_code=404, detail="Project not found")
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if not root_path:
raise HTTPException(status_code=400, detail="Project has no root_path configured")
@@ -2944,7 +3006,7 @@ async def read_project_file(project_id: str, path: str):
if not project:
raise HTTPException(status_code=404, detail="Project not found")
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if not root_path:
raise HTTPException(status_code=400, detail="Project has no root_path configured")
@@ -2972,7 +3034,7 @@ async def write_project_file(project_id: str, request: FileWriteRequest):
if not project:
raise HTTPException(status_code=404, detail="Project not found")
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if not root_path:
raise HTTPException(status_code=400, detail="Project has no root_path configured")
@@ -2989,7 +3051,7 @@ async def write_project_file(project_id: str, request: FileWriteRequest):
entity_type="file",
entity_id=request.path,
project_id=project_id,
- details={"path": request.path, "size": len(request.content)}
+ details={"path": request.path, "size": len(request.content)},
)
return {"status": "ok", "path": request.path}
except PermissionError as e:
@@ -3005,13 +3067,11 @@ def kill_port(port: int, wait: float = 0.5) -> None:
"""Kill any process using the specified port."""
import subprocess
import time
+
try:
# Get PIDs using the port
- result = subprocess.run(
- ["lsof", "-ti", f":{port}"],
- capture_output=True, text=True
- )
- pids = result.stdout.strip().split('\n')
+ result = subprocess.run(["lsof", "-ti", f":{port}"], capture_output=True, text=True)
+ pids = result.stdout.strip().split("\n")
killed = False
for pid in pids:
if pid:
@@ -3035,7 +3095,8 @@ if __name__ == "__main__":
kill_port(port, wait=0.5)
url = f"http://{host}:{port}"
- print(f"""
+ print(
+ f"""
╔═══════════════════════════════════════════════════════════════╗
║ Design System Server (DSS) - Portable Server ║
╠═══════════════════════════════════════════════════════════════╣
@@ -3045,11 +3106,7 @@ if __name__ == "__main__":
║ Environment: {config.server.env:^47}║
║ Figma Mode: {figma_suite.mode:^47}║
╚═══════════════════════════════════════════════════════════════╝
-""")
-
- uvicorn.run(
- "server:app",
- host=host,
- port=port,
- reload=config.server.env == "development"
+"""
)
+
+ uvicorn.run("server:app", host=host, port=port, reload=config.server.env == "development")
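
The hunk above elides the body of `kill_port`'s kill loop. A hedged completion of the same lsof-based pattern (POSIX-only, requires `lsof` on PATH; the real code may prefer SIGTERM or retries):

```python
import os
import signal
import subprocess
import time

def kill_port(port: int, wait: float = 0.5) -> None:
    """Kill any process listening on the given port."""
    result = subprocess.run(["lsof", "-ti", f":{port}"], capture_output=True, text=True)
    for pid in result.stdout.split():
        try:
            os.kill(int(pid), signal.SIGKILL)  # assumption: signal choice not shown
        except ProcessLookupError:
            pass  # raced with a process that already exited
    time.sleep(wait)  # give the OS a beat to release the socket
```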
diff --git a/apps/cli/python/api/server.py b/apps/cli/python/api/server.py
index c13bffb..a6a2771 100644
--- a/apps/cli/python/api/server.py
+++ b/apps/cli/python/api/server.py
@@ -1,5 +1,5 @@
"""
-Design System Server (DSS) - FastAPI Server
+Design System Server (DSS) - FastAPI Server.
Portable API server providing:
- Project management (CRUD)
@@ -16,37 +16,43 @@ Modes:
Uses SQLite for persistence, integrates with Figma tools.
"""
-import asyncio
-import subprocess
import json
import os
-from pathlib import Path
-from typing import Optional, List, Dict, Any
-from datetime import datetime
-
-from fastapi import FastAPI, HTTPException, Query, BackgroundTasks
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from fastapi.staticfiles import StaticFiles
-from pydantic import BaseModel
-
+import subprocess
 import sys
-sys.path.insert(0, str(Path(__file__).parent.parent))
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+# First-party modules (config, figma, storage) live one directory up; extend
+# sys.path before importing them so the server starts from any CWD.
+sys.path.insert(0, str(Path(__file__).parent.parent))  # noqa: E402
 from config import config
-from storage.json_store import (
-    Projects, Components, SyncHistory, ActivityLog, Teams, Cache, get_stats
-)
+from fastapi import BackgroundTasks, FastAPI, HTTPException, Query
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
 from figma.figma_tools import FigmaToolSuite
+from pydantic import BaseModel
+from storage.json_store import (
+    ActivityLog,
+    Cache,
+    Components,
+    Projects,
+    SyncHistory,
+    Teams,
+    get_stats,
+)
# === Runtime Configuration ===
+
class RuntimeConfig:
"""
Runtime configuration that can be modified from the dashboard.
+
Persists to .dss/runtime-config.json for portability.
"""
+
def __init__(self):
self.config_path = Path(__file__).parent.parent.parent / ".dss" / "runtime-config.json"
self.config_path.parent.mkdir(parents=True, exist_ok=True)
@@ -71,7 +77,7 @@ class RuntimeConfig:
"token_sync": True,
"code_gen": True,
"ai_advisor": False,
- }
+ },
}
def _save(self):
@@ -114,6 +120,7 @@ runtime_config = RuntimeConfig()
# === Service Discovery ===
+
class ServiceDiscovery:
"""Discovers and manages companion services."""
@@ -136,13 +143,13 @@ class ServiceDiscovery:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
- result = sock.connect_ex(('127.0.0.1', port))
+ result = sock.connect_ex(("127.0.0.1", port))
sock.close()
if result == 0:
discovered[service] = {
"running": True,
"port": port,
- "url": f"http://localhost:{port}"
+ "url": f"http://localhost:{port}",
}
break
except:
@@ -164,11 +171,7 @@ class ServiceDiscovery:
try:
async with httpx.AsyncClient(timeout=2.0) as client:
resp = await client.get(url)
- return {
- "running": resp.status_code == 200,
- "url": url,
- "port": port
- }
+ return {"running": resp.status_code == 200, "url": url, "port": port}
except:
return {"running": False, "url": url, "port": port}
@@ -178,7 +181,7 @@ class ServiceDiscovery:
app = FastAPI(
title="Design System Server (DSS)",
description="API for design system management and Figma integration",
- version="1.0.0"
+ version="1.0.0",
)
app.add_middleware(
@@ -195,31 +198,38 @@ if UI_DIR.exists():
app.mount("/admin-ui", StaticFiles(directory=str(UI_DIR), html=True), name="admin-ui")
# Initialize Figma tools
-figma_suite = FigmaToolSuite(output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output"))
+figma_suite = FigmaToolSuite(
+ output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output")
+)
# === Request/Response Models ===
+
class ProjectCreate(BaseModel):
name: str
description: str = ""
figma_file_key: str = ""
+
class ProjectUpdate(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
figma_file_key: Optional[str] = None
status: Optional[str] = None
+
class FigmaExtractRequest(BaseModel):
file_key: str
format: str = "css"
+
class FigmaSyncRequest(BaseModel):
file_key: str
target_path: str
format: str = "css"
+
class TeamCreate(BaseModel):
name: str
description: str = ""
@@ -227,10 +237,12 @@ class TeamCreate(BaseModel):
# === Root & Health ===
+
@app.get("/")
async def root():
"""Redirect to Admin UI dashboard."""
from fastapi.responses import RedirectResponse
+
return RedirectResponse(url="/admin-ui/index.html")
@@ -243,30 +255,30 @@ async def health():
"version": "1.0.0",
"timestamp": datetime.utcnow().isoformat() + "Z",
"figma_mode": figma_suite.mode,
- "config": config.summary()
+ "config": config.summary(),
}
+
@app.get("/api/stats")
async def get_statistics():
"""Get database and system statistics."""
db_stats = get_stats()
return {
"database": db_stats,
- "figma": {
- "mode": figma_suite.mode,
- "configured": config.figma.is_configured
- }
+ "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured},
}
# === Projects ===
+
@app.get("/api/projects")
async def list_projects(status: Optional[str] = None):
"""List all projects."""
projects = Projects.list(status=status)
return projects
+
@app.get("/api/projects/{project_id}")
async def get_project(project_id: str):
"""Get a specific project."""
@@ -275,6 +287,7 @@ async def get_project(project_id: str):
raise HTTPException(status_code=404, detail="Project not found")
return project
+
@app.post("/api/projects")
async def create_project(project: ProjectCreate):
"""Create a new project."""
@@ -283,17 +296,18 @@ async def create_project(project: ProjectCreate):
id=project_id,
name=project.name,
description=project.description,
- figma_file_key=project.figma_file_key
+ figma_file_key=project.figma_file_key,
)
ActivityLog.log(
action="project_created",
entity_type="project",
entity_id=project_id,
project_id=project_id,
- details={"name": project.name}
+ details={"name": project.name},
)
return created
+
@app.put("/api/projects/{project_id}")
async def update_project(project_id: str, update: ProjectUpdate):
"""Update a project."""
@@ -311,25 +325,23 @@ async def update_project(project_id: str, update: ProjectUpdate):
entity_type="project",
entity_id=project_id,
project_id=project_id,
- details=update_data
+ details=update_data,
)
return updated
+
@app.delete("/api/projects/{project_id}")
async def delete_project(project_id: str):
"""Delete a project."""
if not Projects.delete(project_id):
raise HTTPException(status_code=404, detail="Project not found")
- ActivityLog.log(
- action="project_deleted",
- entity_type="project",
- entity_id=project_id
- )
+ ActivityLog.log(action="project_deleted", entity_type="project", entity_id=project_id)
return {"success": True}
# === Components ===
+
@app.get("/api/projects/{project_id}/components")
async def list_components(project_id: str):
"""List components for a project."""
@@ -340,6 +352,7 @@ async def list_components(project_id: str):
# === Figma Integration ===
+
@app.post("/api/figma/extract-variables")
async def extract_variables(request: FigmaExtractRequest, background_tasks: BackgroundTasks):
"""Extract design tokens from Figma file."""
@@ -348,12 +361,17 @@ async def extract_variables(request: FigmaExtractRequest, background_tasks: Back
ActivityLog.log(
action="figma_extract_variables",
entity_type="figma",
- details={"file_key": request.file_key, "format": request.format, "count": result.get("tokens_count")}
+ details={
+ "file_key": request.file_key,
+ "format": request.format,
+ "count": result.get("tokens_count"),
+ },
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/extract-components")
async def extract_components(request: FigmaExtractRequest):
"""Extract components from Figma file."""
@@ -362,12 +380,13 @@ async def extract_components(request: FigmaExtractRequest):
ActivityLog.log(
action="figma_extract_components",
entity_type="figma",
- details={"file_key": request.file_key, "count": result.get("components_count")}
+ details={"file_key": request.file_key, "count": result.get("components_count")},
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/extract-styles")
async def extract_styles(request: FigmaExtractRequest):
"""Extract styles from Figma file."""
@@ -377,20 +396,28 @@ async def extract_styles(request: FigmaExtractRequest):
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/sync-tokens")
async def sync_tokens(request: FigmaSyncRequest):
"""Sync tokens from Figma to target path."""
try:
- result = await figma_suite.sync_tokens(request.file_key, request.target_path, request.format)
+ result = await figma_suite.sync_tokens(
+ request.file_key, request.target_path, request.format
+ )
ActivityLog.log(
action="figma_sync_tokens",
entity_type="figma",
- details={"file_key": request.file_key, "target": request.target_path, "synced": result.get("tokens_synced")}
+ details={
+ "file_key": request.file_key,
+ "target": request.target_path,
+ "synced": result.get("tokens_synced"),
+ },
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/validate")
async def validate_components(request: FigmaExtractRequest):
"""Validate components against design system rules."""
@@ -400,6 +427,7 @@ async def validate_components(request: FigmaExtractRequest):
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/generate-code")
async def generate_code(file_key: str, component_name: str, framework: str = "webcomponent"):
"""Generate component code from Figma."""
@@ -412,6 +440,7 @@ async def generate_code(file_key: str, component_name: str, framework: str = "we
# === Discovery ===
+
@app.get("/api/discovery")
async def run_discovery(path: str = "."):
"""Run project discovery."""
@@ -419,10 +448,7 @@ async def run_discovery(path: str = "."):
try:
result = subprocess.run(
- [str(script_path), path],
- capture_output=True,
- text=True,
- timeout=30
+ [str(script_path), path], capture_output=True, text=True, timeout=30
)
if result.returncode == 0:
return json.loads(result.stdout)
@@ -433,22 +459,19 @@ async def run_discovery(path: str = "."):
except json.JSONDecodeError:
return {"raw_output": result.stdout}
+
@app.get("/api/discovery/ports")
async def discover_ports():
"""Discover listening ports and services."""
script_path = Path(__file__).parent.parent / "discovery" / "discover-ports.sh"
try:
- result = subprocess.run(
- [str(script_path)],
- capture_output=True,
- text=True,
- timeout=10
- )
+ result = subprocess.run([str(script_path)], capture_output=True, text=True, timeout=10)
return json.loads(result.stdout)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.get("/api/discovery/env")
async def discover_env(path: str = "."):
"""Analyze environment configuration."""
@@ -456,10 +479,7 @@ async def discover_env(path: str = "."):
try:
result = subprocess.run(
- [str(script_path), path],
- capture_output=True,
- text=True,
- timeout=10
+ [str(script_path), path], capture_output=True, text=True, timeout=10
)
return json.loads(result.stdout)
except Exception as e:
@@ -468,24 +488,30 @@ async def discover_env(path: str = "."):
# === Activity & Sync History ===
+
@app.get("/api/activity")
async def get_activity(limit: int = Query(default=50, le=100)):
"""Get recent activity log."""
return ActivityLog.recent(limit=limit)
+
@app.get("/api/sync-history")
-async def get_sync_history(project_id: Optional[str] = None, limit: int = Query(default=20, le=100)):
+async def get_sync_history(
+ project_id: Optional[str] = None, limit: int = Query(default=20, le=100)
+):
"""Get sync history."""
return SyncHistory.recent(project_id=project_id, limit=limit)
# === Teams ===
+
@app.get("/api/teams")
async def list_teams():
"""List all teams."""
return Teams.list()
+
@app.post("/api/teams")
async def create_team(team: TeamCreate):
"""Create a new team."""
@@ -493,6 +519,7 @@ async def create_team(team: TeamCreate):
created = Teams.create(team_id, team.name, team.description)
return created
+
@app.get("/api/teams/{team_id}")
async def get_team(team_id: str):
"""Get a specific team."""
@@ -504,12 +531,14 @@ async def get_team(team_id: str):
# === Cache Management ===
+
@app.post("/api/cache/clear")
async def clear_cache():
"""Clear expired cache entries."""
count = Cache.clear_expired()
return {"cleared": count}
+
@app.delete("/api/cache")
async def purge_cache():
"""Purge all cache entries."""
@@ -519,6 +548,7 @@ async def purge_cache():
# === Configuration Management ===
+
class ConfigUpdate(BaseModel):
mode: Optional[str] = None
figma_token: Optional[str] = None
@@ -532,7 +562,7 @@ async def get_config():
return {
"config": runtime_config.get(),
"env": config.summary(),
- "mode": runtime_config.get("mode")
+ "mode": runtime_config.get("mode"),
}
@@ -548,11 +578,13 @@ async def update_config(update: ConfigUpdate):
runtime_config.set_figma_token(update.figma_token)
# Reinitialize Figma tools with new token
global figma_suite
- figma_suite = FigmaToolSuite(output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output"))
+ figma_suite = FigmaToolSuite(
+ output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output")
+ )
ActivityLog.log(
action="figma_token_updated",
entity_type="config",
- details={"configured": bool(update.figma_token)}
+ details={"configured": bool(update.figma_token)},
)
if update.services:
@@ -564,9 +596,7 @@ async def update_config(update: ConfigUpdate):
if updates:
runtime_config.update(updates)
ActivityLog.log(
- action="config_updated",
- entity_type="config",
- details={"keys": list(updates.keys())}
+ action="config_updated", entity_type="config", details={"keys": list(updates.keys())}
)
return runtime_config.get()
@@ -586,7 +616,7 @@ async def get_figma_config():
"sync_tokens": True,
"validate": True,
"generate_code": True,
- }
+ },
}
@@ -600,18 +630,16 @@ async def test_figma_connection():
# Test with a minimal API call
import httpx
+
token = runtime_config._data["figma"]["token"]
async with httpx.AsyncClient() as client:
- resp = await client.get(
- "https://api.figma.com/v1/me",
- headers={"X-Figma-Token": token}
- )
+ resp = await client.get("https://api.figma.com/v1/me", headers={"X-Figma-Token": token})
if resp.status_code == 200:
user = resp.json()
return {
"success": True,
"user": user.get("email", "connected"),
- "handle": user.get("handle")
+ "handle": user.get("handle"),
}
else:
return {"success": False, "error": f"API returned {resp.status_code}"}
@@ -621,6 +649,7 @@ async def test_figma_connection():
# === Service Discovery ===
+
@app.get("/api/services")
async def list_services():
"""List configured and discovered services."""
@@ -630,7 +659,7 @@ async def list_services():
return {
"configured": configured,
"discovered": discovered,
- "storybook": await ServiceDiscovery.check_storybook()
+ "storybook": await ServiceDiscovery.check_storybook(),
}
@@ -645,7 +674,7 @@ async def configure_service(service_name: str, config_data: Dict[str, Any]):
action="service_configured",
entity_type="service",
entity_id=service_name,
- details={"keys": list(config_data.keys())}
+ details={"keys": list(config_data.keys())},
)
return services[service_name]
@@ -659,6 +688,7 @@ async def get_storybook_status():
# === DSS Mode ===
+
@app.get("/api/mode")
async def get_mode():
"""Get current DSS mode."""
@@ -666,7 +696,7 @@ async def get_mode():
return {
"mode": mode,
"description": "Local dev companion" if mode == "local" else "Remote design system server",
- "features": runtime_config.get("features")
+ "features": runtime_config.get("features"),
}
@@ -677,11 +707,7 @@ async def set_mode(mode: str):
raise HTTPException(status_code=400, detail="Mode must be 'local' or 'server'")
runtime_config.set("mode", mode)
- ActivityLog.log(
- action="mode_changed",
- entity_type="config",
- details={"mode": mode}
- )
+ ActivityLog.log(action="mode_changed", entity_type="config", details={"mode": mode})
return {"mode": mode, "success": True}
@@ -704,7 +730,8 @@ if __name__ == "__main__":
host = os.getenv("HOST", "0.0.0.0")
url = f"http://{host}:{port}"
- print(f"""
+ print(
+ f"""
╔═══════════════════════════════════════════════════════════════╗
║ Design System Server (DSS) - Portable Server ║
╠═══════════════════════════════════════════════════════════════╣
@@ -714,11 +741,7 @@ if __name__ == "__main__":
║ Environment: {config.server.env:^47}║
║ Figma Mode: {figma_suite.mode:^47}║
╚═══════════════════════════════════════════════════════════════╝
-""")
-
- uvicorn.run(
- "server:app",
- host=host,
- port=port,
- reload=config.server.env == "development"
+"""
)
+
+ uvicorn.run("server:app", host=host, port=port, reload=config.server.env == "development")
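
ServiceDiscovery above probes candidate ports with `connect_ex`, where a return value of 0 means the TCP connect succeeded. Isolated as a helper:

```python
import socket

def port_open(port: int, host: str = "127.0.0.1", timeout: float = 0.5) -> bool:
    """True if something is accepting TCP connections on host:port."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        return sock.connect_ex((host, port)) == 0
    finally:
        sock.close()

print(port_open(6006))  # e.g. True when a local Storybook is running
```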
diff --git a/cli/python/api/server.py b/cli/python/api/server.py
index c13bffb..a6a2771 100644
--- a/cli/python/api/server.py
+++ b/cli/python/api/server.py
@@ -1,5 +1,5 @@
"""
-Design System Server (DSS) - FastAPI Server
+Design System Server (DSS) - FastAPI Server.
Portable API server providing:
- Project management (CRUD)
@@ -16,37 +16,43 @@ Modes:
Uses SQLite for persistence, integrates with Figma tools.
"""
-import asyncio
-import subprocess
import json
import os
-from pathlib import Path
-from typing import Optional, List, Dict, Any
-from datetime import datetime
-
-from fastapi import FastAPI, HTTPException, Query, BackgroundTasks
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from fastapi.staticfiles import StaticFiles
-from pydantic import BaseModel
-
+import subprocess
import sys
-sys.path.insert(0, str(Path(__file__).parent.parent))
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional
from config import config
-from storage.json_store import (
- Projects, Components, SyncHistory, ActivityLog, Teams, Cache, get_stats
-)
+from fastapi import BackgroundTasks, FastAPI, HTTPException, Query
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
from figma.figma_tools import FigmaToolSuite
+from pydantic import BaseModel
+from storage.json_store import (
+ ActivityLog,
+ Cache,
+ Components,
+ Projects,
+ SyncHistory,
+ Teams,
+ get_stats,
+)
+
+sys.path.insert(0, str(Path(__file__).parent.parent))
# === Runtime Configuration ===
+
class RuntimeConfig:
"""
Runtime configuration that can be modified from the dashboard.
+
Persists to .dss/runtime-config.json for portability.
"""
+
def __init__(self):
self.config_path = Path(__file__).parent.parent.parent / ".dss" / "runtime-config.json"
self.config_path.parent.mkdir(parents=True, exist_ok=True)
@@ -71,7 +77,7 @@ class RuntimeConfig:
"token_sync": True,
"code_gen": True,
"ai_advisor": False,
- }
+ },
}
def _save(self):
@@ -114,6 +120,7 @@ runtime_config = RuntimeConfig()
# === Service Discovery ===
+
class ServiceDiscovery:
"""Discovers and manages companion services."""
@@ -136,13 +143,13 @@ class ServiceDiscovery:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
- result = sock.connect_ex(('127.0.0.1', port))
+ result = sock.connect_ex(("127.0.0.1", port))
sock.close()
if result == 0:
discovered[service] = {
"running": True,
"port": port,
- "url": f"http://localhost:{port}"
+ "url": f"http://localhost:{port}",
}
break
except:
@@ -164,11 +171,7 @@ class ServiceDiscovery:
try:
async with httpx.AsyncClient(timeout=2.0) as client:
resp = await client.get(url)
- return {
- "running": resp.status_code == 200,
- "url": url,
- "port": port
- }
+ return {"running": resp.status_code == 200, "url": url, "port": port}
except:
return {"running": False, "url": url, "port": port}
@@ -178,7 +181,7 @@ class ServiceDiscovery:
app = FastAPI(
title="Design System Server (DSS)",
description="API for design system management and Figma integration",
- version="1.0.0"
+ version="1.0.0",
)
app.add_middleware(
@@ -195,31 +198,38 @@ if UI_DIR.exists():
app.mount("/admin-ui", StaticFiles(directory=str(UI_DIR), html=True), name="admin-ui")
# Initialize Figma tools
-figma_suite = FigmaToolSuite(output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output"))
+figma_suite = FigmaToolSuite(
+ output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output")
+)
# === Request/Response Models ===
+
class ProjectCreate(BaseModel):
name: str
description: str = ""
figma_file_key: str = ""
+
class ProjectUpdate(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
figma_file_key: Optional[str] = None
status: Optional[str] = None
+
class FigmaExtractRequest(BaseModel):
file_key: str
format: str = "css"
+
class FigmaSyncRequest(BaseModel):
file_key: str
target_path: str
format: str = "css"
+
class TeamCreate(BaseModel):
name: str
description: str = ""
@@ -227,10 +237,12 @@ class TeamCreate(BaseModel):
# === Root & Health ===
+
@app.get("/")
async def root():
"""Redirect to Admin UI dashboard."""
from fastapi.responses import RedirectResponse
+
return RedirectResponse(url="/admin-ui/index.html")
@@ -243,30 +255,30 @@ async def health():
"version": "1.0.0",
"timestamp": datetime.utcnow().isoformat() + "Z",
"figma_mode": figma_suite.mode,
- "config": config.summary()
+ "config": config.summary(),
}
+
@app.get("/api/stats")
async def get_statistics():
"""Get database and system statistics."""
db_stats = get_stats()
return {
"database": db_stats,
- "figma": {
- "mode": figma_suite.mode,
- "configured": config.figma.is_configured
- }
+ "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured},
}
# === Projects ===
+
@app.get("/api/projects")
async def list_projects(status: Optional[str] = None):
"""List all projects."""
projects = Projects.list(status=status)
return projects
+
@app.get("/api/projects/{project_id}")
async def get_project(project_id: str):
"""Get a specific project."""
@@ -275,6 +287,7 @@ async def get_project(project_id: str):
raise HTTPException(status_code=404, detail="Project not found")
return project
+
@app.post("/api/projects")
async def create_project(project: ProjectCreate):
"""Create a new project."""
@@ -283,17 +296,18 @@ async def create_project(project: ProjectCreate):
id=project_id,
name=project.name,
description=project.description,
- figma_file_key=project.figma_file_key
+ figma_file_key=project.figma_file_key,
)
ActivityLog.log(
action="project_created",
entity_type="project",
entity_id=project_id,
project_id=project_id,
- details={"name": project.name}
+ details={"name": project.name},
)
return created
+
@app.put("/api/projects/{project_id}")
async def update_project(project_id: str, update: ProjectUpdate):
"""Update a project."""
@@ -311,25 +325,23 @@ async def update_project(project_id: str, update: ProjectUpdate):
entity_type="project",
entity_id=project_id,
project_id=project_id,
- details=update_data
+ details=update_data,
)
return updated
+
@app.delete("/api/projects/{project_id}")
async def delete_project(project_id: str):
"""Delete a project."""
if not Projects.delete(project_id):
raise HTTPException(status_code=404, detail="Project not found")
- ActivityLog.log(
- action="project_deleted",
- entity_type="project",
- entity_id=project_id
- )
+ ActivityLog.log(action="project_deleted", entity_type="project", entity_id=project_id)
return {"success": True}
# === Components ===
+
@app.get("/api/projects/{project_id}/components")
async def list_components(project_id: str):
"""List components for a project."""
@@ -340,6 +352,7 @@ async def list_components(project_id: str):
# === Figma Integration ===
+
@app.post("/api/figma/extract-variables")
async def extract_variables(request: FigmaExtractRequest, background_tasks: BackgroundTasks):
"""Extract design tokens from Figma file."""
@@ -348,12 +361,17 @@ async def extract_variables(request: FigmaExtractRequest, background_tasks: Back
ActivityLog.log(
action="figma_extract_variables",
entity_type="figma",
- details={"file_key": request.file_key, "format": request.format, "count": result.get("tokens_count")}
+ details={
+ "file_key": request.file_key,
+ "format": request.format,
+ "count": result.get("tokens_count"),
+ },
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/extract-components")
async def extract_components(request: FigmaExtractRequest):
"""Extract components from Figma file."""
@@ -362,12 +380,13 @@ async def extract_components(request: FigmaExtractRequest):
ActivityLog.log(
action="figma_extract_components",
entity_type="figma",
- details={"file_key": request.file_key, "count": result.get("components_count")}
+ details={"file_key": request.file_key, "count": result.get("components_count")},
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/extract-styles")
async def extract_styles(request: FigmaExtractRequest):
"""Extract styles from Figma file."""
@@ -377,20 +396,28 @@ async def extract_styles(request: FigmaExtractRequest):
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/sync-tokens")
async def sync_tokens(request: FigmaSyncRequest):
"""Sync tokens from Figma to target path."""
try:
- result = await figma_suite.sync_tokens(request.file_key, request.target_path, request.format)
+ result = await figma_suite.sync_tokens(
+ request.file_key, request.target_path, request.format
+ )
ActivityLog.log(
action="figma_sync_tokens",
entity_type="figma",
- details={"file_key": request.file_key, "target": request.target_path, "synced": result.get("tokens_synced")}
+ details={
+ "file_key": request.file_key,
+ "target": request.target_path,
+ "synced": result.get("tokens_synced"),
+ },
)
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/validate")
async def validate_components(request: FigmaExtractRequest):
"""Validate components against design system rules."""
@@ -400,6 +427,7 @@ async def validate_components(request: FigmaExtractRequest):
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.post("/api/figma/generate-code")
async def generate_code(file_key: str, component_name: str, framework: str = "webcomponent"):
"""Generate component code from Figma."""
@@ -412,6 +440,7 @@ async def generate_code(file_key: str, component_name: str, framework: str = "we
# === Discovery ===
+
@app.get("/api/discovery")
async def run_discovery(path: str = "."):
"""Run project discovery."""
@@ -419,10 +448,7 @@ async def run_discovery(path: str = "."):
try:
result = subprocess.run(
- [str(script_path), path],
- capture_output=True,
- text=True,
- timeout=30
+ [str(script_path), path], capture_output=True, text=True, timeout=30
)
if result.returncode == 0:
return json.loads(result.stdout)
@@ -433,22 +459,19 @@ async def run_discovery(path: str = "."):
except json.JSONDecodeError:
return {"raw_output": result.stdout}
+
@app.get("/api/discovery/ports")
async def discover_ports():
"""Discover listening ports and services."""
script_path = Path(__file__).parent.parent / "discovery" / "discover-ports.sh"
try:
- result = subprocess.run(
- [str(script_path)],
- capture_output=True,
- text=True,
- timeout=10
- )
+ result = subprocess.run([str(script_path)], capture_output=True, text=True, timeout=10)
return json.loads(result.stdout)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
+
@app.get("/api/discovery/env")
async def discover_env(path: str = "."):
"""Analyze environment configuration."""
@@ -456,10 +479,7 @@ async def discover_env(path: str = "."):
try:
result = subprocess.run(
- [str(script_path), path],
- capture_output=True,
- text=True,
- timeout=10
+ [str(script_path), path], capture_output=True, text=True, timeout=10
)
return json.loads(result.stdout)
except Exception as e:
@@ -468,24 +488,30 @@ async def discover_env(path: str = "."):
# === Activity & Sync History ===
+
@app.get("/api/activity")
async def get_activity(limit: int = Query(default=50, le=100)):
"""Get recent activity log."""
return ActivityLog.recent(limit=limit)
+
@app.get("/api/sync-history")
-async def get_sync_history(project_id: Optional[str] = None, limit: int = Query(default=20, le=100)):
+async def get_sync_history(
+ project_id: Optional[str] = None, limit: int = Query(default=20, le=100)
+):
"""Get sync history."""
return SyncHistory.recent(project_id=project_id, limit=limit)
# === Teams ===
+
@app.get("/api/teams")
async def list_teams():
"""List all teams."""
return Teams.list()
+
@app.post("/api/teams")
async def create_team(team: TeamCreate):
"""Create a new team."""
@@ -493,6 +519,7 @@ async def create_team(team: TeamCreate):
created = Teams.create(team_id, team.name, team.description)
return created
+
@app.get("/api/teams/{team_id}")
async def get_team(team_id: str):
"""Get a specific team."""
@@ -504,12 +531,14 @@ async def get_team(team_id: str):
# === Cache Management ===
+
@app.post("/api/cache/clear")
async def clear_cache():
"""Clear expired cache entries."""
count = Cache.clear_expired()
return {"cleared": count}
+
@app.delete("/api/cache")
async def purge_cache():
"""Purge all cache entries."""
@@ -519,6 +548,7 @@ async def purge_cache():
# === Configuration Management ===
+
class ConfigUpdate(BaseModel):
mode: Optional[str] = None
figma_token: Optional[str] = None
@@ -532,7 +562,7 @@ async def get_config():
return {
"config": runtime_config.get(),
"env": config.summary(),
- "mode": runtime_config.get("mode")
+ "mode": runtime_config.get("mode"),
}
@@ -548,11 +578,13 @@ async def update_config(update: ConfigUpdate):
runtime_config.set_figma_token(update.figma_token)
# Reinitialize Figma tools with new token
global figma_suite
- figma_suite = FigmaToolSuite(output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output"))
+ figma_suite = FigmaToolSuite(
+ output_dir=str(Path(__file__).parent.parent.parent / ".dss" / "output")
+ )
ActivityLog.log(
action="figma_token_updated",
entity_type="config",
- details={"configured": bool(update.figma_token)}
+ details={"configured": bool(update.figma_token)},
)
if update.services:
@@ -564,9 +596,7 @@ async def update_config(update: ConfigUpdate):
if updates:
runtime_config.update(updates)
ActivityLog.log(
- action="config_updated",
- entity_type="config",
- details={"keys": list(updates.keys())}
+ action="config_updated", entity_type="config", details={"keys": list(updates.keys())}
)
return runtime_config.get()
@@ -586,7 +616,7 @@ async def get_figma_config():
"sync_tokens": True,
"validate": True,
"generate_code": True,
- }
+ },
}
@@ -600,18 +630,16 @@ async def test_figma_connection():
# Test with a minimal API call
import httpx
+
token = runtime_config._data["figma"]["token"]
async with httpx.AsyncClient() as client:
- resp = await client.get(
- "https://api.figma.com/v1/me",
- headers={"X-Figma-Token": token}
- )
+ resp = await client.get("https://api.figma.com/v1/me", headers={"X-Figma-Token": token})
if resp.status_code == 200:
user = resp.json()
return {
"success": True,
"user": user.get("email", "connected"),
- "handle": user.get("handle")
+ "handle": user.get("handle"),
}
else:
return {"success": False, "error": f"API returned {resp.status_code}"}
@@ -621,6 +649,7 @@ async def test_figma_connection():
# === Service Discovery ===
+
@app.get("/api/services")
async def list_services():
"""List configured and discovered services."""
@@ -630,7 +659,7 @@ async def list_services():
return {
"configured": configured,
"discovered": discovered,
- "storybook": await ServiceDiscovery.check_storybook()
+ "storybook": await ServiceDiscovery.check_storybook(),
}
@@ -645,7 +674,7 @@ async def configure_service(service_name: str, config_data: Dict[str, Any]):
action="service_configured",
entity_type="service",
entity_id=service_name,
- details={"keys": list(config_data.keys())}
+ details={"keys": list(config_data.keys())},
)
return services[service_name]
@@ -659,6 +688,7 @@ async def get_storybook_status():
# === DSS Mode ===
+
@app.get("/api/mode")
async def get_mode():
"""Get current DSS mode."""
@@ -666,7 +696,7 @@ async def get_mode():
return {
"mode": mode,
"description": "Local dev companion" if mode == "local" else "Remote design system server",
- "features": runtime_config.get("features")
+ "features": runtime_config.get("features"),
}
@@ -677,11 +707,7 @@ async def set_mode(mode: str):
raise HTTPException(status_code=400, detail="Mode must be 'local' or 'server'")
runtime_config.set("mode", mode)
- ActivityLog.log(
- action="mode_changed",
- entity_type="config",
- details={"mode": mode}
- )
+ ActivityLog.log(action="mode_changed", entity_type="config", details={"mode": mode})
return {"mode": mode, "success": True}
@@ -704,7 +730,8 @@ if __name__ == "__main__":
host = os.getenv("HOST", "0.0.0.0")
url = f"http://{host}:{port}"
- print(f"""
+ print(
+ f"""
╔═══════════════════════════════════════════════════════════════╗
║ Design System Server (DSS) - Portable Server ║
╠═══════════════════════════════════════════════════════════════╣
@@ -714,11 +741,7 @@ if __name__ == "__main__":
║ Environment: {config.server.env:^47}║
║ Figma Mode: {figma_suite.mode:^47}║
╚═══════════════════════════════════════════════════════════════╝
-""")
-
- uvicorn.run(
- "server:app",
- host=host,
- port=port,
- reload=config.server.env == "development"
+"""
)
+
+ uvicorn.run("server:app", host=host, port=port, reload=config.server.env == "development")
diff --git a/dss-claude-plugin/core/__init__.py b/dss-claude-plugin/core/__init__.py
index caf5e92..803753d 100644
--- a/dss-claude-plugin/core/__init__.py
+++ b/dss-claude-plugin/core/__init__.py
@@ -1,19 +1,20 @@
"""
-DSS Core Module - Configuration and Context Management
+DSS Core Module - Configuration and Context Management.
+
Extended with Context Compiler for design system context resolution.
"""
+from .compiler import EMERGENCY_SKIN, ContextCompiler
from .config import DSSConfig, DSSMode
from .context import DSSContext
-from .compiler import ContextCompiler, EMERGENCY_SKIN
from .mcp_extensions import (
+ COMPILER,
get_active_context,
+ get_compiler_status,
+ list_skins,
resolve_token,
validate_manifest,
- list_skins,
- get_compiler_status,
with_context,
- COMPILER
)
__all__ = [
@@ -28,5 +29,5 @@ __all__ = [
"list_skins",
"get_compiler_status",
"with_context",
- "COMPILER"
+ "COMPILER",
]
diff --git a/dss-claude-plugin/core/compiler.py b/dss-claude-plugin/core/compiler.py
index 633e9cc..c61a5c1 100644
--- a/dss-claude-plugin/core/compiler.py
+++ b/dss-claude-plugin/core/compiler.py
@@ -1,16 +1,16 @@
"""
-DSS Context Compiler
+DSS Context Compiler.
+
Resolves project context via 3-layer cascade: Base -> Skin -> Project
Includes Safe Boot Protocol and Debug Provenance.
"""
-import json
-import os
import copy
+import json
import logging
from datetime import datetime, timezone
-from typing import Dict, Any, Optional, List, Union
from pathlib import Path
+from typing import Any, Dict, List
# Setup logging
logging.basicConfig(level=logging.INFO)
@@ -21,25 +21,26 @@ logger = logging.getLogger("DSSCompiler")
EMERGENCY_SKIN = {
"meta": {"id": "emergency", "version": "1.0.0"},
"tokens": {
- "colors": {
- "primary": "#FF0000",
- "background": "#FFFFFF",
- "text": "#000000"
- },
- "spacing": {"base": "4px"}
+ "colors": {"primary": "#FF0000", "background": "#FFFFFF", "text": "#000000"},
+ "spacing": {"base": "4px"},
},
- "status": "emergency_mode"
+ "status": "emergency_mode",
}
+
class ContextCompiler:
def __init__(self, skins_dir: str = "./skins"):
self.skins_dir = Path(skins_dir)
self.cache: Dict[str, Any] = {}
self._manifest_mtimes: Dict[str, float] = {} # Track file modification times
- def compile(self, manifest_path: str, debug: bool = False, force_refresh: bool = False) -> Dict[str, Any]:
+ def compile(
+ self, manifest_path: str, debug: bool = False, force_refresh: bool = False
+ ) -> Dict[str, Any]:
"""
- Main entry point. Compiles context by merging:
+ Main entry point.
+
+ Compiles context by merging:
1. Base Skin (Implicit or Explicit)
2. Extended Skin (defined in manifest)
3. Project Overrides (defined in manifest)
@@ -83,17 +84,17 @@ class ContextCompiler:
# Merge Result + Project Overrides
# Need to wrap project overrides in same structure as skins
- project_overrides_wrapped = {
- "tokens": manifest.get("overrides", {}).get("tokens", {})
- }
- final_context = self._deep_merge(context, project_overrides_wrapped, path="skin->project", debug=debug)
+ project_overrides_wrapped = {"tokens": manifest.get("overrides", {}).get("tokens", {})}
+ final_context = self._deep_merge(
+ context, project_overrides_wrapped, path="skin->project", debug=debug
+ )
# Inject Metadata
final_context["_meta"] = {
"project_id": manifest["project"]["id"],
"compiled_at": datetime.now(timezone.utc).isoformat(),
"debug_enabled": debug,
- "compiler_config": manifest.get("compiler", {})
+ "compiler_config": manifest.get("compiler", {}),
}
if debug:
@@ -138,19 +139,28 @@ class ContextCompiler:
return data
def _load_json(self, path: str) -> Dict[str, Any]:
- with open(path, 'r') as f:
+ with open(path, "r") as f:
return json.load(f)
- def _deep_merge(self, base: Dict, override: Dict, path: str = "", debug: bool = False, provenance: List[Dict] = None) -> Dict:
+ def _deep_merge(
+ self,
+ base: Dict,
+ override: Dict,
+ path: str = "",
+ debug: bool = False,
+ provenance: List[Dict] = None,
+ ) -> Dict:
"""
- Deep merge dictionaries. Replaces arrays.
+ Deep merge dictionaries.
+
+ Replaces arrays.
Populates provenance list if debug is True (thread-safe).
"""
# Thread-safe: use method parameter instead of instance variable
if provenance is None and debug:
provenance = []
# Store reference on first call for later retrieval
- if not hasattr(self, 'provenance_log'):
+ if not hasattr(self, "provenance_log"):
self.provenance_log = provenance
result = copy.deepcopy(base)
@@ -158,16 +168,20 @@ class ContextCompiler:
for key, value in override.items():
if isinstance(value, dict) and key in result and isinstance(result[key], dict):
# Recursive merge - pass provenance down
- result[key] = self._deep_merge(result[key], value, path=f"{path}.{key}", debug=debug, provenance=provenance)
+ result[key] = self._deep_merge(
+ result[key], value, path=f"{path}.{key}", debug=debug, provenance=provenance
+ )
else:
# Direct replacement (Primitive or Array)
if debug and provenance is not None:
- provenance.append({
- "key": key,
- "action": "override",
- "layer": path,
- "value_type": type(value).__name__
- })
+ provenance.append(
+ {
+ "key": key,
+ "action": "override",
+ "layer": path,
+ "value_type": type(value).__name__,
+ }
+ )
result[key] = copy.deepcopy(value)
return result
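
To make the cascade semantics concrete, here is a self-contained sketch of the same merge rules _deep_merge implements above (nested dicts merge recursively; primitives and arrays are replaced wholesale), without the provenance plumbing:

    import copy
    from typing import Any, Dict

    def deep_merge(base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
        """Recursive dict merge; non-dict values replace rather than merge."""
        result = copy.deepcopy(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = deep_merge(result[key], value)
            else:
                result[key] = copy.deepcopy(value)  # replace primitives/arrays
        return result

    base_skin = {"tokens": {"colors": {"primary": "#0000FF", "text": "#000000"}}}
    project = {"tokens": {"colors": {"primary": "#FF6600"}}}
    merged = deep_merge(base_skin, project)
    assert merged["tokens"]["colors"] == {"primary": "#FF6600", "text": "#000000"}
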
diff --git a/dss-claude-plugin/core/config.py b/dss-claude-plugin/core/config.py
index d0aefaf..8a53019 100644
--- a/dss-claude-plugin/core/config.py
+++ b/dss-claude-plugin/core/config.py
@@ -7,17 +7,15 @@ Supports local/remote mode detection, persistent configuration storage, and
environment variable overrides.
"""
-import os
import json
-import uuid
-import asyncio
import logging
+import os
+import uuid
from enum import Enum
from pathlib import Path
-from typing import Optional, Union, Any
import aiohttp
-from pydantic import BaseModel, Field, HttpUrl, ValidationError
+from pydantic import BaseModel, Field, ValidationError
# Configure module-level logger
logger = logging.getLogger(__name__)
@@ -30,6 +28,7 @@ DEFAULT_LOCAL_URL = "http://localhost:6006"
class DSSMode(str, Enum):
"""Operation modes for the DSS plugin."""
+
LOCAL = "local"
REMOTE = "remote"
AUTO = "auto"
@@ -45,10 +44,13 @@ class DSSConfig(BaseModel):
local_url (str): URL for the local DSS API (usually localhost).
session_id (str): Unique identifier for this client instance.
"""
+
mode: DSSMode = Field(default=DSSMode.AUTO, description="Operation mode preference")
remote_url: str = Field(default=DEFAULT_REMOTE_URL, description="Remote API endpoint")
local_url: str = Field(default=DEFAULT_LOCAL_URL, description="Local API endpoint")
- session_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID")
+ session_id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID"
+ )
class Config:
validate_assignment = True
@@ -58,6 +60,7 @@ class DSSConfig(BaseModel):
def load(cls) -> "DSSConfig":
"""
Load configuration from ~/.dss/config.json.
+
Returns a default instance if the file does not exist or is invalid.
"""
if not CONFIG_FILE.exists():
@@ -79,6 +82,7 @@ class DSSConfig(BaseModel):
def save(self) -> None:
"""
Save the current configuration to ~/.dss/config.json.
+
Creates the directory if it does not exist.
"""
try:
@@ -153,9 +157,7 @@ class DSSConfig(BaseModel):
return False
def get_api_url(self, active_mode: DSSMode) -> str:
- """
- Helper to get the correct API URL for the determined mode.
- """
+ """Helper to get the correct API URL for the determined mode."""
if active_mode == DSSMode.LOCAL:
return self.local_url
return self.remote_url
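
A quick usage sketch for the config model above; the import path assumes the plugin package is importable as core:

    from core.config import DSSConfig, DSSMode  # import path is an assumption

    config = DSSConfig.load()    # falls back to defaults if the file is missing
    config.mode = DSSMode.LOCAL  # validated on assignment
    config.save()                # writes ~/.dss/config.json, creating the dir
    print(config.get_api_url(DSSMode.LOCAL))  # http://localhost:6006 by default
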
diff --git a/dss-claude-plugin/core/context.py b/dss-claude-plugin/core/context.py
index 9843748..8819b43 100644
--- a/dss-claude-plugin/core/context.py
+++ b/dss-claude-plugin/core/context.py
@@ -8,7 +8,7 @@ Handles configuration loading, mode detection, and strategy instantiation.
import asyncio
import logging
-from typing import Optional, Dict, Any
+from typing import Any, Dict, Optional
from .config import DSSConfig, DSSMode
@@ -26,12 +26,15 @@ class DSSContext:
Handles configuration loading, mode detection (Local/Remote),
and strategy instantiation.
"""
- _instance: Optional['DSSContext'] = None
+
+ _instance: Optional["DSSContext"] = None
_lock: asyncio.Lock = asyncio.Lock()
def __init__(self) -> None:
"""
- Private initializer. Use get_instance() instead.
+ Private initializer.
+
+ Use get_instance() instead.
"""
if DSSContext._instance is not None:
raise RuntimeError("DSSContext is a singleton. Use get_instance() to access it.")
@@ -43,9 +46,10 @@ class DSSContext:
self.session_id: Optional[str] = None
@classmethod
- async def get_instance(cls) -> 'DSSContext':
+ async def get_instance(cls) -> "DSSContext":
"""
Async factory method to get the singleton instance.
+
Ensures config is loaded and mode is detected before returning.
"""
if not cls._instance:
@@ -61,13 +65,16 @@ class DSSContext:
@classmethod
def reset(cls) -> None:
"""
- Resets the singleton instance. Useful for testing.
+ Resets the singleton instance.
+
+ Useful for testing.
"""
cls._instance = None
async def _initialize(self) -> None:
"""
Internal initialization logic:
+
1. Load Config
2. Detect Mode
3. Cache Capabilities
@@ -80,7 +87,9 @@ class DSSContext:
# 2. Detect Mode (Async check)
self.active_mode = await self.config.get_active_mode()
- logger.info(f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}")
+ logger.info(
+ f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}"
+ )
# 3. Cache Capabilities
self._cache_capabilities()
@@ -92,15 +101,13 @@ class DSSContext:
self._capabilities = {"limited": True}
def _cache_capabilities(self) -> None:
- """
- Determines what the plugin can do based on the active mode.
- """
+ """Determines what the plugin can do based on the active mode."""
# Base capabilities
caps = {
"can_read_files": False,
"can_execute_browser": False,
"can_screenshot": False,
- "can_connect_remote": True
+ "can_connect_remote": True,
}
if self.active_mode == DSSMode.LOCAL:
@@ -111,8 +118,10 @@ class DSSContext:
elif self.active_mode == DSSMode.REMOTE:
# Remote mode relies on API capabilities
# Depending on remote configuration, these might differ
- caps["can_execute_browser"] = False # Typically restricted in pure remote unless via API
- caps["can_read_files"] = False # Security restriction
+ caps[
+ "can_execute_browser"
+ ] = False # Typically restricted in pure remote unless via API
+ caps["can_read_files"] = False # Security restriction
self._capabilities = caps
@@ -151,18 +160,22 @@ class DSSContext:
# Will be implemented in Phase 2 & 3
if self.active_mode == DSSMode.LOCAL:
from ..strategies.local.browser import LocalBrowserStrategy
+
strategy_instance = LocalBrowserStrategy(self)
else:
from ..strategies.remote.browser import RemoteBrowserStrategy
+
strategy_instance = RemoteBrowserStrategy(self)
elif strategy_type == "filesystem":
# Will be implemented in Phase 2
if self.active_mode == DSSMode.LOCAL:
from ..strategies.local.filesystem import LocalFilesystemStrategy
+
strategy_instance = LocalFilesystemStrategy(self)
else:
from ..strategies.remote.filesystem import RemoteFilesystemStrategy
+
strategy_instance = RemoteFilesystemStrategy(self)
elif strategy_type == "screenshot":
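
The singleton is accessed through the async factory, roughly as sketched below; this assumes the package is importable as core and that mode detection can probe a local or remote DSS instance:

    import asyncio

    from core.context import DSSContext  # import path is an assumption

    async def main() -> None:
        ctx = await DSSContext.get_instance()  # loads config, detects mode
        print(ctx.active_mode, ctx.session_id)

    asyncio.run(main())
    DSSContext.reset()  # drop the singleton, e.g. between tests
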
diff --git a/dss-claude-plugin/core/mcp_extensions.py b/dss-claude-plugin/core/mcp_extensions.py
index 5b36eca..22225c0 100644
--- a/dss-claude-plugin/core/mcp_extensions.py
+++ b/dss-claude-plugin/core/mcp_extensions.py
@@ -1,13 +1,15 @@
"""
-MCP Extensions for Context Awareness
+MCP Extensions for Context Awareness.
+
Implements the Factory Pattern to wrap existing tools with context
and defines 5 new tools for the Context Compiler.
"""
-from typing import Any, Dict, List, Callable
import functools
import json
import os
+from typing import Callable
+
from .compiler import ContextCompiler
# Singleton compiler instance
@@ -15,19 +17,22 @@ COMPILER = ContextCompiler(skins_dir=os.path.join(os.path.dirname(__file__), "sk
# --- FACTORY PATTERN: Context Wrapper ---
+
def with_context(default_manifest_path: str = None):
"""
Decorator that injects the compiled context into the tool's arguments.
+
Use this to upgrade existing 'token extractor' tools to be 'context aware'.
The manifest path is extracted from kwargs['manifest_path'] if present,
otherwise falls back to the default_manifest_path provided at decoration time.
"""
+
def decorator(func: Callable):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# 1. Get manifest path (runtime kwarg or decorator default)
- manifest_path = kwargs.get('manifest_path', default_manifest_path)
+ manifest_path = kwargs.get("manifest_path", default_manifest_path)
if not manifest_path:
raise ValueError("No manifest_path provided to context-aware tool")
@@ -35,33 +40,39 @@ def with_context(default_manifest_path: str = None):
context = COMPILER.compile(manifest_path)
# 3. Inject into kwargs
- kwargs['dss_context'] = context
+ kwargs["dss_context"] = context
# 4. Execute Tool
return func(*args, **kwargs)
+
return wrapper
+
return decorator
# --- 5 NEW MCP TOOLS ---
+
def get_active_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
"""
[Tool 1] Returns the fully resolved JSON context for the project.
+
Set debug=True to see provenance (which layer defined which token).
Set force_refresh=True to bypass cache (for long-running servers).
"""
context = COMPILER.compile(manifest_path, debug=debug, force_refresh=force_refresh)
return json.dumps(context, indent=2)
+
def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
"""
- [Tool 2] Resolves a specific token value (e.g. 'colors.primary')
- through the cascade.
+ [Tool 2] Resolves a specific token value (e.g. 'colors.primary') through the cascade.
+
Set force_refresh=True to bypass cache (for long-running servers).
"""
context = COMPILER.compile(manifest_path, force_refresh=force_refresh)
- keys = token_path.split('.')
+ keys = token_path.split(".")
current = context.get("tokens", {})
for k in keys:
@@ -72,10 +83,9 @@ def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = Fal
return str(current)
+
def validate_manifest(manifest_path: str) -> str:
- """
- [Tool 3] Validates the ds.config.json against the schema.
- """
+ """[Tool 3] Validates the ds.config.json against the schema."""
# In a full implementation, we would use 'jsonschema' library here.
# For now, we perform a basic structural check via the Compiler's loader.
try:
@@ -84,10 +94,9 @@ def validate_manifest(manifest_path: str) -> str:
except Exception as e:
return f"Invalid: {str(e)}"
+
def list_skins() -> str:
- """
- [Tool 4] Lists all available skins in the registry.
- """
+ """[Tool 4] Lists all available skins in the registry."""
skins_path = COMPILER.skins_dir
if not skins_path.exists():
return "No skins directory found."
@@ -95,18 +104,18 @@ def list_skins() -> str:
skins = [f.stem for f in skins_path.glob("*.json")]
return json.dumps(skins)
+
def get_compiler_status() -> str:
- """
- [Tool 5] Returns the health and configuration of the Context Compiler.
- """
+ """[Tool 5] Returns the health and configuration of the Context Compiler."""
status = {
"status": "active",
"skins_directory": str(COMPILER.skins_dir),
"cached_skins": list(COMPILER.cache.keys()),
- "safe_boot_ready": True
+ "safe_boot_ready": True,
}
return json.dumps(status, indent=2)
+
# Instructions for Main Server File:
# 1. Import these tools
# 2. Register them with the MCP server instance
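
As an illustration of the factory pattern above, a hypothetical context-aware tool might look like this (the tool body and manifest path are placeholders, not code from this repo):

    from core.mcp_extensions import with_context  # import path is an assumption

    @with_context(default_manifest_path="./ds.config.json")
    def extract_primary_color(**kwargs):
        context = kwargs["dss_context"]  # injected by the wrapper
        return context["tokens"]["colors"]["primary"]

    # Callers can override the manifest per call:
    # extract_primary_color(manifest_path="./other/ds.config.json")
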
diff --git a/dss-claude-plugin/core/mcp_integration.py b/dss-claude-plugin/core/mcp_integration.py
index 4975d44..cda05fc 100644
--- a/dss-claude-plugin/core/mcp_integration.py
+++ b/dss-claude-plugin/core/mcp_integration.py
@@ -1,23 +1,21 @@
"""
-MCP Integration Layer for DSS Context Compiler
+MCP Integration Layer for DSS Context Compiler.
+
Provides MCP-compliant tool wrappers for the 5 new context tools.
"""
-from typing import Dict, Any
import json
-from . import (
- get_active_context,
- resolve_token,
- validate_manifest,
- list_skins,
- get_compiler_status
-)
+
+from . import get_active_context, get_compiler_status, list_skins, resolve_token, validate_manifest
# MCP Tool Definitions
-def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
+
+def mcp_get_resolved_context(
+ manifest_path: str, debug: bool = False, force_refresh: bool = False
+) -> str:
"""
- MCP Tool: Get Active Context
+ MCP Tool: Get Active Context.
Returns the fully resolved JSON context for a project.
Set debug=True to see provenance (which layer defined which token).
@@ -39,7 +37,7 @@ def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refr
def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
"""
- MCP Tool: Resolve Token
+ MCP Tool: Resolve Token.
Resolves a specific token value (e.g. 'colors.primary') through the cascade.
Set force_refresh=True to bypass cache (for long-running servers).
@@ -60,7 +58,7 @@ def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool =
def mcp_validate_manifest(manifest_path: str) -> str:
"""
- MCP Tool: Validate Manifest
+ MCP Tool: Validate Manifest.
Validates the ds.config.json against the schema.
@@ -78,7 +76,7 @@ def mcp_validate_manifest(manifest_path: str) -> str:
def mcp_list_skins() -> str:
"""
- MCP Tool: List Skins
+ MCP Tool: List Skins.
Lists all available skins in the registry.
@@ -93,7 +91,7 @@ def mcp_list_skins() -> str:
def mcp_get_compiler_status() -> str:
"""
- MCP Tool: Get Compiler Status
+ MCP Tool: Get Compiler Status.
Returns the health and configuration of the Context Compiler.
@@ -117,15 +115,15 @@ MCP_TOOLS = {
"manifest_path": {
"type": "string",
"description": "Path to ds.config.json",
- "required": True
+ "required": True,
},
"debug": {
"type": "boolean",
"description": "Enable debug provenance tracking",
"required": False,
- "default": False
- }
- }
+ "default": False,
+ },
+ },
},
"dss_resolve_token": {
"function": mcp_resolve_token,
@@ -134,14 +132,14 @@ MCP_TOOLS = {
"manifest_path": {
"type": "string",
"description": "Path to ds.config.json",
- "required": True
+ "required": True,
},
"token_path": {
"type": "string",
"description": "Dot-notation path to token (e.g. 'colors.primary')",
- "required": True
- }
- }
+ "required": True,
+ },
+ },
},
"dss_validate_manifest": {
"function": mcp_validate_manifest,
@@ -150,18 +148,18 @@ MCP_TOOLS = {
"manifest_path": {
"type": "string",
"description": "Path to ds.config.json",
- "required": True
+ "required": True,
}
- }
+ },
},
"dss_list_skins": {
"function": mcp_list_skins,
"description": "List all available design system skins",
- "parameters": {}
+ "parameters": {},
},
"dss_get_compiler_status": {
"function": mcp_get_compiler_status,
"description": "Get Context Compiler health and configuration",
- "parameters": {}
- }
+ "parameters": {},
+ },
}
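
One hedged way to consume the MCP_TOOLS registry above is a registration loop like the following; server.register_tool is a placeholder for whatever registration API the MCP server actually exposes:

    from core.mcp_integration import MCP_TOOLS

    def register_all(server) -> None:
        """Register every DSS context tool with an MCP server instance."""
        for name, spec in MCP_TOOLS.items():
            server.register_tool(  # placeholder API, not from this repo
                name=name,
                func=spec["function"],
                description=spec["description"],
                parameters=spec["parameters"],
            )
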
diff --git a/dss-claude-plugin/core/runtime.py b/dss-claude-plugin/core/runtime.py
index 0471254..31f5548 100644
--- a/dss-claude-plugin/core/runtime.py
+++ b/dss-claude-plugin/core/runtime.py
@@ -1,5 +1,5 @@
"""
-DSS Runtime - Dependency Injection & Boundary Enforcement
+DSS Runtime - Dependency Injection & Boundary Enforcement.
This module provides a bounded runtime environment for DSS MCP tools.
All external API access (Figma, Browser, HTTP) MUST go through this runtime.
@@ -16,20 +16,24 @@ Usage:
browser = runtime.get_browser() # Sandboxed
"""
-import logging
import json
-from pathlib import Path
-from typing import Optional, Dict, Any, List
+import logging
from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional
+
import yaml
# Setup logging
logger = logging.getLogger("dss.runtime")
+
class BoundaryViolationError(Exception):
- """Raised when an operation violates DSS boundaries"""
+ """Raised when an operation violates DSS boundaries."""
+
pass
+
class DSSRuntime:
"""
Bounded runtime environment for DSS operations.
@@ -52,7 +56,11 @@ class DSSRuntime:
self.config = self._load_config()
self.enforcement_mode = self.config.get("enforcement", {}).get("mode", "strict")
self.log_violations = self.config.get("enforcement", {}).get("log_violations", True)
- self.violation_log_path = Path(self.config.get("enforcement", {}).get("violation_log", ".dss/logs/boundary-violations.jsonl"))
+ self.violation_log_path = Path(
+ self.config.get("enforcement", {}).get(
+ "violation_log", ".dss/logs/boundary-violations.jsonl"
+ )
+ )
# Client caches (lazy initialization)
self._figma_client = None
@@ -62,7 +70,7 @@ class DSSRuntime:
logger.info(f"DSSRuntime initialized with enforcement mode: {self.enforcement_mode}")
def _load_config(self) -> Dict[str, Any]:
- """Load boundary configuration from YAML"""
+ """Load boundary configuration from YAML."""
if not self.config_path.exists():
logger.warning(f"Boundary config not found: {self.config_path}, using defaults")
return self._default_config()
@@ -75,7 +83,7 @@ class DSSRuntime:
return self._default_config()
def _default_config(self) -> Dict[str, Any]:
- """Default boundary configuration (strict)"""
+ """Default boundary configuration (strict)."""
return {
"version": "1.0",
"blocked_external_apis": ["api.figma.com"],
@@ -83,12 +91,12 @@ class DSSRuntime:
"enforcement": {
"mode": "strict",
"log_violations": True,
- "violation_log": ".dss/logs/boundary-violations.jsonl"
- }
+ "violation_log": ".dss/logs/boundary-violations.jsonl",
+ },
}
def _log_violation(self, operation: str, details: Dict[str, Any]):
- """Log boundary violation to audit trail"""
+ """Log boundary violation to audit trail."""
if not self.log_violations:
return
@@ -99,7 +107,7 @@ class DSSRuntime:
"type": "boundary_violation",
"operation": operation,
"enforcement_mode": self.enforcement_mode,
- "details": details
+ "details": details,
}
with open(self.violation_log_path, "a") as f:
@@ -108,7 +116,7 @@ class DSSRuntime:
logger.warning(f"Boundary violation: {operation} - {details}")
def _log_access(self, operation: str, allowed: bool, details: Dict[str, Any]):
- """Log successful access for audit trail"""
+ """Log successful access for audit trail."""
access_log_path = Path(".dss/logs/runtime-access.jsonl")
access_log_path.parent.mkdir(parents=True, exist_ok=True)
@@ -117,7 +125,7 @@ class DSSRuntime:
"type": "runtime_access",
"operation": operation,
"allowed": allowed,
- "details": details
+ "details": details,
}
with open(access_log_path, "a") as f:
@@ -139,11 +147,7 @@ class DSSRuntime:
# Check if operation requires going through DSS tools
for category, tools in required_tools.items():
if operation in category:
- details = {
- "operation": operation,
- "context": context,
- "required_tools": tools
- }
+ details = {"operation": operation, "context": context, "required_tools": tools}
self._log_violation(operation, details)
@@ -173,8 +177,8 @@ class DSSRuntime:
self._figma_client = SafeFigmaClient(
token=token,
- allow_write=False, # Read-only by default
- runtime=self
+ allow_write=False,  # Read-only by default
+ runtime=self,
)
logger.info("Figma client initialized (read-only mode)")
@@ -195,6 +199,7 @@ class DSSRuntime:
if strategy == "local":
try:
from strategies.local.browser import LocalBrowserStrategy
+
self._browser_strategy = LocalBrowserStrategy(runtime=self)
logger.info("Local browser strategy initialized")
except ImportError:
@@ -204,6 +209,7 @@ class DSSRuntime:
elif strategy == "remote":
try:
from strategies.remote.browser import RemoteBrowserStrategy
+
self._browser_strategy = RemoteBrowserStrategy(runtime=self)
logger.info("Remote browser strategy initialized")
except ImportError:
@@ -224,8 +230,7 @@ class DSSRuntime:
from core.safe_http_client import SafeHTTPClient
self._http_client = SafeHTTPClient(
- blocked_domains=self.config.get("blocked_external_apis", []),
- runtime=self
+ blocked_domains=self.config.get("blocked_external_apis", []), runtime=self
)
logger.info("HTTP client initialized with URL validation")
@@ -245,10 +250,7 @@ class DSSRuntime:
blocked = self.config.get("blocked_imports", [])
if module_name in blocked:
- details = {
- "module": module_name,
- "blocked_imports": blocked
- }
+ details = {"module": module_name, "blocked_imports": blocked}
self._log_violation(f"direct_import:{module_name}", details)
@@ -292,14 +294,16 @@ class DSSRuntime:
"browser": self._browser_strategy is not None,
"http": self._http_client is not None,
},
- "config_version": self.config.get("version", "unknown")
+ "config_version": self.config.get("version", "unknown"),
}
+
# Global runtime instance (singleton pattern)
_runtime_instance: Optional[DSSRuntime] = None
+
def get_runtime() -> DSSRuntime:
- """Get the global DSSRuntime instance (singleton)"""
+ """Get the global DSSRuntime instance (singleton)."""
global _runtime_instance
if _runtime_instance is None:
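
Consumers are expected to go through the singleton, per the module's usage docstring; a minimal sketch (import path assumed):

    from core.runtime import get_runtime

    runtime = get_runtime()          # lazily constructs the singleton
    browser = runtime.get_browser()  # sandboxed strategy, per the usage docstring
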
diff --git a/dss-claude-plugin/core/structured_logger.py b/dss-claude-plugin/core/structured_logger.py
index dc95837..4c4f100 100644
--- a/dss-claude-plugin/core/structured_logger.py
+++ b/dss-claude-plugin/core/structured_logger.py
@@ -1,5 +1,5 @@
"""
-DSS Structured Logger - JSON-based logging for AI-consumable audit trails
+DSS Structured Logger - JSON-based logging for AI-consumable audit trails.
Provides structured, machine-readable logging in JSONL format (one JSON object per line).
All DSS operations are logged with consistent fields for analysis, debugging, and compliance.
@@ -27,11 +27,11 @@ import json
import logging
import os
import sys
+import threading
+from contextlib import contextmanager
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, Optional
-from contextlib import contextmanager
-import threading
# Thread-local storage for context
_context = threading.local()
@@ -51,7 +51,7 @@ class DSSJSONFormatter(logging.Formatter):
"""
def format(self, record: logging.LogRecord) -> str:
- """Format log record as single-line JSON"""
+ """Format log record as single-line JSON."""
# Build base log entry
log_entry = {
@@ -100,8 +100,10 @@ class DSSLogger(logging.Logger):
as keyword arguments for structured logging.
"""
- def _log_with_extra(self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
- """Internal method to log with extra structured data"""
+ def _log_with_extra(
+ self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs
+ ):
+ """Internal method to log with extra structured data."""
if extra:
# Store extra data in a custom attribute
extra_record = {"extra_data": extra}
@@ -110,23 +112,23 @@ class DSSLogger(logging.Logger):
super()._log(level, msg, (), **kwargs)
def debug(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
- """Log DEBUG message with optional extra data"""
+ """Log DEBUG message with optional extra data."""
self._log_with_extra(logging.DEBUG, msg, extra, **kwargs)
def info(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
- """Log INFO message with optional extra data"""
+ """Log INFO message with optional extra data."""
self._log_with_extra(logging.INFO, msg, extra, **kwargs)
def warning(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
- """Log WARNING message with optional extra data"""
+ """Log WARNING message with optional extra data."""
self._log_with_extra(logging.WARNING, msg, extra, **kwargs)
def error(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
- """Log ERROR message with optional extra data"""
+ """Log ERROR message with optional extra data."""
self._log_with_extra(logging.ERROR, msg, extra, **kwargs)
def critical(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
- """Log CRITICAL message with optional extra data"""
+ """Log CRITICAL message with optional extra data."""
self._log_with_extra(logging.CRITICAL, msg, extra, **kwargs)
@@ -182,7 +184,9 @@ def get_logger(name: str, log_file: Optional[str] = None) -> DSSLogger:
@contextmanager
-def LogContext(session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None):
+def LogContext(
+ session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None
+):
"""
Context manager for adding structured context to log entries.
@@ -259,12 +263,15 @@ class PerformanceLogger:
self.end_time = None
def start(self):
- """Mark operation start time"""
+ """Mark operation start time."""
self.start_time = datetime.now(timezone.utc)
- self.logger.debug(f"Started: {self.operation}", extra={
- "operation": self.operation,
- "start_time": self.start_time.isoformat(),
- })
+ self.logger.debug(
+ f"Started: {self.operation}",
+ extra={
+ "operation": self.operation,
+ "start_time": self.start_time.isoformat(),
+ },
+ )
def end(self, extra: Optional[Dict[str, Any]] = None):
"""
@@ -276,7 +283,9 @@ class PerformanceLogger:
self.end_time = datetime.now(timezone.utc)
if self.start_time is None:
- self.logger.warning(f"Performance logger end() called without start() for: {self.operation}")
+ self.logger.warning(
+ f"Performance logger end() called without start() for: {self.operation}"
+ )
return
duration_ms = (self.end_time - self.start_time).total_seconds() * 1000
@@ -294,7 +303,9 @@ class PerformanceLogger:
self.logger.info(f"Completed: {self.operation}", extra=perf_data)
-def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5):
+def configure_log_rotation(
+ log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5
+):
"""
Configure log rotation for DSS log files.
@@ -325,19 +336,19 @@ def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 *
# Add rotating file handler
rotating_handler = RotatingFileHandler(
- str(log_file),
- maxBytes=max_bytes,
- backupCount=backup_count,
- encoding="utf-8"
+ str(log_file), maxBytes=max_bytes, backupCount=backup_count, encoding="utf-8"
)
rotating_handler.setFormatter(DSSJSONFormatter())
logger.addHandler(rotating_handler)
- logger.info("Log rotation configured", extra={
- "max_bytes": max_bytes,
- "backup_count": backup_count,
- "log_file": str(log_file),
- })
+ logger.info(
+ "Log rotation configured",
+ extra={
+ "max_bytes": max_bytes,
+ "backup_count": backup_count,
+ "log_file": str(log_file),
+ },
+ )
# Example usage (can be removed in production)
@@ -356,6 +367,7 @@ if __name__ == "__main__":
perf.start()
# Simulate work
import time
+
time.sleep(0.1)
perf.end(extra={"tokens_found": 100})
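
Fields set via LogContext attach to every JSONL record emitted inside the block, roughly as sketched here (import path assumed):

    from core.structured_logger import LogContext, get_logger

    logger = get_logger("dss.example")
    with LogContext(session_id="abc123", tool="token_extractor", operation="scan"):
        logger.info("Scanning tokens", extra={"files": 12})  # carries the context
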
diff --git a/dss-claude-plugin/hooks/.state/.git-backup.lock b/dss-claude-plugin/hooks/.state/.git-backup.lock
index 2195d73..179bfb9 100644
--- a/dss-claude-plugin/hooks/.state/.git-backup.lock
+++ b/dss-claude-plugin/hooks/.state/.git-backup.lock
@@ -1 +1 @@
-1765445463969
\ No newline at end of file
+1765446683593
diff --git a/dss-claude-plugin/hooks/dss-hooks-config.json b/dss-claude-plugin/hooks/dss-hooks-config.json
index 57f7814..4d85002 100644
--- a/dss-claude-plugin/hooks/dss-hooks-config.json
+++ b/dss-claude-plugin/hooks/dss-hooks-config.json
@@ -1,27 +1,27 @@
{
"description": "DSS Hooks Configuration - Customize hook behavior",
"version": "1.0.0",
-
+
"security_check": {
"enabled": true,
"block_on_critical": false,
"warn_only": true,
"ignored_patterns": []
},
-
+
"token_validator": {
"enabled": true,
"strict_mode": false,
"warn_only": true,
"categories": ["color", "spacing", "typography", "border", "effects", "layout"]
},
-
+
"component_checker": {
"enabled": true,
"categories": ["accessibility", "react", "typescript", "structure"],
"min_severity": "low"
},
-
+
"complexity_monitor": {
"enabled": true,
"max_function_lines": 50,
@@ -30,7 +30,7 @@
"max_nesting_depth": 4,
"warn_only": true
},
-
+
"storybook_reminder": {
"enabled": true,
"component_patterns": ["**/components/**/*.tsx", "**/ui/**/*.tsx"],
@@ -38,7 +38,7 @@
"remind_on_new": true,
"remind_on_props_change": true
},
-
+
"session_summary": {
"enabled": true,
"output_file": ".dss-session-summary.md",
@@ -46,7 +46,7 @@
"include_file_list": true,
"max_diff_lines": 100
},
-
+
"git_backup": {
"enabled": true,
"require_git_repo": true,
diff --git a/dss-claude-plugin/hooks/scripts/complexity-monitor.js b/dss-claude-plugin/hooks/scripts/complexity-monitor.js
index 81bb3cb..64622c1 100755
--- a/dss-claude-plugin/hooks/scripts/complexity-monitor.js
+++ b/dss-claude-plugin/hooks/scripts/complexity-monitor.js
@@ -55,7 +55,7 @@ function countProps(content) {
function countNestingDepth(content) {
let maxDepth = 0;
let currentDepth = 0;
-
+
for (const char of content) {
if (char === '{' || char === '(') {
currentDepth++;
@@ -64,7 +64,7 @@ function countNestingDepth(content) {
currentDepth = Math.max(0, currentDepth - 1);
}
}
-
+
return maxDepth;
}
@@ -74,7 +74,7 @@ function countFunctions(content) {
/const\s+\w+\s*=\s*(?:async\s*)?\([^)]*\)\s*=>/g,
/const\s+\w+\s*=\s*(?:async\s*)?function/g
];
-
+
let count = 0;
for (const pattern of patterns) {
const matches = content.match(pattern);
@@ -87,17 +87,17 @@ function analyzeComplexity(content, filePath, config) {
const issues = [];
const monitorConfig = config.complexity_monitor || {};
const ext = path.extname(filePath).toLowerCase();
-
+
// Only analyze JS/TS files
if (!['.js', '.jsx', '.ts', '.tsx'].includes(ext)) {
return issues;
}
-
+
const lines = countLines(content);
const props = countProps(content);
const nesting = countNestingDepth(content);
const functions = countFunctions(content);
-
+
// Check component size (for tsx/jsx files)
if (['.tsx', '.jsx'].includes(ext)) {
if (lines > monitorConfig.max_component_lines) {
@@ -108,7 +108,7 @@ function analyzeComplexity(content, filePath, config) {
suggestion: 'Consider breaking into smaller components'
});
}
-
+
if (props > monitorConfig.max_props) {
issues.push({
type: 'prop_count',
@@ -118,7 +118,7 @@ function analyzeComplexity(content, filePath, config) {
});
}
}
-
+
// Check nesting depth
if (nesting > monitorConfig.max_nesting_depth) {
issues.push({
@@ -128,7 +128,7 @@ function analyzeComplexity(content, filePath, config) {
suggestion: 'Extract nested logic into separate functions'
});
}
-
+
// Check function count (indicator of file doing too much)
if (functions > 10) {
issues.push({
@@ -138,38 +138,38 @@ function analyzeComplexity(content, filePath, config) {
suggestion: 'Consider splitting into multiple modules'
});
}
-
+
return issues;
}
function formatOutput(issues, filePath) {
if (issues.length === 0) return '';
-
+
const severityIcons = {
high: '[HIGH]',
medium: '[MED]',
low: '[LOW]'
};
-
+
const lines = [`\n=== DSS Complexity Monitor: ${filePath} ===\n`];
-
+
for (const issue of issues) {
const icon = severityIcons[issue.severity] || '[?]';
lines.push(`${icon} ${issue.message}`);
lines.push(` Suggestion: ${issue.suggestion}\n`);
}
-
+
lines.push('='.repeat(50));
return lines.join('\n');
}
async function main() {
const config = loadConfig();
-
+
if (!config.complexity_monitor?.enabled) {
process.exit(0);
}
-
+
// Read input from stdin
let inputData;
try {
@@ -181,34 +181,34 @@ async function main() {
} catch (e) {
process.exit(0);
}
-
+
const toolName = inputData.tool_name || '';
const toolInput = inputData.tool_input || {};
-
+
if (!['Edit', 'Write'].includes(toolName)) {
process.exit(0);
}
-
+
const filePath = toolInput.file_path || '';
let content = '';
-
+
if (toolName === 'Write') {
content = toolInput.content || '';
} else if (toolName === 'Edit') {
content = toolInput.new_string || '';
}
-
+
if (!content || !filePath) {
process.exit(0);
}
-
+
const issues = analyzeComplexity(content, filePath, config);
-
+
if (issues.length > 0) {
const output = formatOutput(issues, filePath);
console.error(output);
}
-
+
process.exit(0);
}
diff --git a/dss-claude-plugin/hooks/scripts/component-checker.py b/dss-claude-plugin/hooks/scripts/component-checker.py
index 873674f..23d875d 100755
--- a/dss-claude-plugin/hooks/scripts/component-checker.py
+++ b/dss-claude-plugin/hooks/scripts/component-checker.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
"""
-DSS Component Checker Hook
+DSS Component Checker Hook.
+
Validates React components for best practices and accessibility.
Written from scratch for DSS.
"""
@@ -19,7 +20,7 @@ COMPONENT_PATTERNS = [
"category": "accessibility",
"severity": "high",
"message": "Missing alt attribute on
. Add alt text for accessibility.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "a11y-button-type",
@@ -27,7 +28,7 @@ COMPONENT_PATTERNS = [
"category": "accessibility",
"severity": "medium",
"message": "Button missing type attribute. Add type='button' or type='submit'.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "a11y-anchor-href",
@@ -35,7 +36,7 @@ COMPONENT_PATTERNS = [
"category": "accessibility",
"severity": "high",
"message": "Anchor tag missing href. Use button for actions without navigation.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "a11y-click-handler",
@@ -43,7 +44,7 @@ COMPONENT_PATTERNS = [
"category": "accessibility",
"severity": "medium",
"message": "Click handler on non-interactive element. Use
or add role/tabIndex.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "a11y-form-label",
@@ -51,7 +52,7 @@ COMPONENT_PATTERNS = [
"category": "accessibility",
"severity": "medium",
"message": "Input may be missing label association. Add id with or aria-label.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
# React best practices
{
@@ -60,7 +61,7 @@ COMPONENT_PATTERNS = [
"category": "react",
"severity": "medium",
"message": "Using array index as key. Use unique, stable IDs when possible.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "react-bind-render",
@@ -68,7 +69,7 @@ COMPONENT_PATTERNS = [
"category": "react",
"severity": "low",
"message": "Binding in render creates new function each time. Use arrow function or bind in constructor.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "react-inline-style-object",
@@ -76,7 +77,7 @@ COMPONENT_PATTERNS = [
"category": "react",
"severity": "low",
"message": "Large inline style object. Consider extracting to a constant or CSS module.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "react-console-log",
@@ -84,7 +85,7 @@ COMPONENT_PATTERNS = [
"category": "react",
"severity": "low",
"message": "Console statement detected. Remove before production.",
- "file_types": [".js", ".jsx", ".ts", ".tsx"]
+ "file_types": [".js", ".jsx", ".ts", ".tsx"],
},
# TypeScript checks
{
@@ -93,7 +94,7 @@ COMPONENT_PATTERNS = [
"category": "typescript",
"severity": "medium",
"message": "Using 'any' type loses type safety. Consider using a specific type or 'unknown'.",
- "file_types": [".ts", ".tsx"]
+ "file_types": [".ts", ".tsx"],
},
{
"id": "ts-type-assertion",
@@ -101,7 +102,7 @@ COMPONENT_PATTERNS = [
"category": "typescript",
"severity": "medium",
"message": "Type assertion to 'any'. This bypasses type checking.",
- "file_types": [".ts", ".tsx"]
+ "file_types": [".ts", ".tsx"],
},
# Component structure
{
@@ -110,7 +111,7 @@ COMPONENT_PATTERNS = [
"category": "structure",
"severity": "low",
"message": "Component may not be exported. Ensure it's exported if meant to be reused.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "component-missing-displayname",
@@ -118,10 +119,11 @@ COMPONENT_PATTERNS = [
"category": "structure",
"severity": "low",
"message": "HOC component may need displayName for debugging.",
- "file_types": [".jsx", ".tsx"]
- }
+ "file_types": [".jsx", ".tsx"],
+ },
]
+
def get_config():
"""Load hook configuration."""
config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -129,10 +131,10 @@ def get_config():
"component_checker": {
"enabled": True,
"categories": ["accessibility", "react", "typescript"],
- "min_severity": "low"
+ "min_severity": "low",
}
}
-
+
if config_path.exists():
try:
with open(config_path) as f:
@@ -142,64 +144,65 @@ def get_config():
pass
return default_config
+
def severity_level(severity: str) -> int:
"""Convert severity to numeric level."""
levels = {"low": 1, "medium": 2, "high": 3}
return levels.get(severity, 0)
+
def check_content(content: str, file_path: str, config: dict) -> list:
"""Check content for component issues."""
issues = []
file_ext = Path(file_path).suffix.lower()
-
+
checker_config = config.get("component_checker", {})
enabled_categories = checker_config.get("categories", [])
min_severity = checker_config.get("min_severity", "low")
min_level = severity_level(min_severity)
-
+
for pattern_def in COMPONENT_PATTERNS:
# Skip if file type doesn't match
if file_ext not in pattern_def.get("file_types", []):
continue
-
+
# Skip if category not enabled
if enabled_categories and pattern_def["category"] not in enabled_categories:
continue
-
+
# Skip if below minimum severity
if severity_level(pattern_def["severity"]) < min_level:
continue
-
+
if re.search(pattern_def["regex"], content, re.MULTILINE):
- issues.append({
- "id": pattern_def["id"],
- "category": pattern_def["category"],
- "severity": pattern_def["severity"],
- "message": pattern_def["message"]
- })
-
+ issues.append(
+ {
+ "id": pattern_def["id"],
+ "category": pattern_def["category"],
+ "severity": pattern_def["severity"],
+ "message": pattern_def["message"],
+ }
+ )
+
return issues
+
def format_output(issues: list, file_path: str) -> str:
"""Format issues for display."""
if not issues:
return ""
-
- severity_icons = {
- "high": "[HIGH]",
- "medium": "[MED]",
- "low": "[LOW]"
- }
-
+
+ severity_icons = {"high": "[HIGH]", "medium": "[MED]", "low": "[LOW]"}
+
category_labels = {
"accessibility": "A11Y",
"react": "REACT",
"typescript": "TS",
- "structure": "STRUCT"
+ "structure": "STRUCT",
}
-
+
lines = [f"\n=== DSS Component Checker: {file_path} ===\n"]
-
+
# Group by category
by_category = {}
for issue in issues:
@@ -207,7 +210,7 @@ def format_output(issues: list, file_path: str) -> str:
if cat not in by_category:
by_category[cat] = []
by_category[cat].append(issue)
-
+
for category, cat_issues in by_category.items():
label = category_labels.get(category, category.upper())
lines.append(f"[{label}]")
@@ -215,36 +218,37 @@ def format_output(issues: list, file_path: str) -> str:
sev = severity_icons.get(issue["severity"], "[?]")
lines.append(f" {sev} {issue['message']}")
lines.append("")
-
+
lines.append("=" * 50)
return "\n".join(lines)
+
def main():
"""Main hook entry point."""
config = get_config()
-
+
if not config.get("component_checker", {}).get("enabled", True):
sys.exit(0)
-
+
# Read hook input from stdin
try:
input_data = json.loads(sys.stdin.read())
except json.JSONDecodeError:
sys.exit(0)
-
+
tool_name = input_data.get("tool_name", "")
tool_input = input_data.get("tool_input", {})
-
+
if tool_name not in ["Edit", "Write"]:
sys.exit(0)
-
+
file_path = tool_input.get("file_path", "")
file_ext = Path(file_path).suffix.lower() if file_path else ""
-
+
# Only check React/TypeScript files
if file_ext not in [".jsx", ".tsx", ".js", ".ts"]:
sys.exit(0)
-
+
# Get content to check
if tool_name == "Write":
content = tool_input.get("content", "")
@@ -252,17 +256,18 @@ def main():
content = tool_input.get("new_string", "")
else:
content = ""
-
+
if not content:
sys.exit(0)
-
+
issues = check_content(content, file_path, config)
-
+
if issues:
output = format_output(issues, file_path)
print(output, file=sys.stderr)
-
+
sys.exit(0)
+
if __name__ == "__main__":
main()
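
Because the hook reads a JSON payload on stdin, it can be exercised directly; the script path and sample payload below are illustrative:

    import json
    import subprocess

    payload = {
        "tool_name": "Write",
        "tool_input": {
            "file_path": "src/components/Avatar.tsx",
            "content": "<img src={src} />",  # missing alt -> a11y finding
        },
    }
    proc = subprocess.run(
        ["python3", "hooks/scripts/component-checker.py"],
        input=json.dumps(payload),
        capture_output=True,
        text=True,
    )
    print(proc.stderr)  # findings go to stderr; exit code stays 0
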
diff --git a/dss-claude-plugin/hooks/scripts/git-backup.js b/dss-claude-plugin/hooks/scripts/git-backup.js
index 2338b0f..e18b568 100755
--- a/dss-claude-plugin/hooks/scripts/git-backup.js
+++ b/dss-claude-plugin/hooks/scripts/git-backup.js
@@ -44,14 +44,14 @@ function checkLock() {
if (!fs.existsSync(STATE_DIR)) {
fs.mkdirSync(STATE_DIR, { recursive: true });
}
-
+
if (fs.existsSync(LOCK_FILE)) {
const lastRun = parseInt(fs.readFileSync(LOCK_FILE, 'utf8'));
if (!isNaN(lastRun) && (Date.now() - lastRun < LOCK_TIMEOUT_MS)) {
return false; // Already ran recently
}
}
-
+
fs.writeFileSync(LOCK_FILE, Date.now().toString(), 'utf8');
return true;
} catch (e) {
@@ -81,16 +81,16 @@ function getChangeSummary() {
try {
const status = execSync('git status --short', { encoding: 'utf8' });
const lines = status.trim().split('\n').filter(Boolean);
-
+
let added = 0, modified = 0, deleted = 0;
-
+
for (const line of lines) {
const status = line.trim().charAt(0);
if (status === 'A' || status === '?') added++;
else if (status === 'M') modified++;
else if (status === 'D') deleted++;
}
-
+
return { added, modified, deleted, total: lines.length };
} catch (e) {
return { added: 0, modified: 0, deleted: 0, total: 0 };
@@ -99,30 +99,30 @@ function getChangeSummary() {
function createBackup(config) {
const backupConfig = config.git_backup || {};
-
+
try {
// Stage all changes
execSync('git add -A', { stdio: 'pipe' });
-
+
// Build commit message
const parts = [backupConfig.commit_prefix || 'auto-backup'];
-
+
if (backupConfig.include_timestamp) {
const timestamp = new Date().toISOString().replace('T', ' ').replace(/\..+/, '');
parts.push(timestamp);
}
-
+
const summary = getChangeSummary();
const summaryText = `(${summary.total} files: +${summary.added} ~${summary.modified} -${summary.deleted})`;
-
+
const commitMessage = `${parts.join(': ')} ${summaryText}\n\nGenerated by DSS Git Backup Hook`;
-
+
// Create commit
execSync(`git commit -m "${commitMessage}"`, { stdio: 'pipe' });
-
+
// Get commit hash
const commitHash = execSync('git rev-parse --short HEAD', { encoding: 'utf8' }).trim();
-
+
return { success: true, hash: commitHash, files: summary.total };
} catch (e) {
return { success: false, error: e.message };
@@ -143,39 +143,39 @@ function main() {
if (!checkLock()) {
process.exit(0);
}
-
+
// Prevent hook recursion
if (process.env.STOP_HOOK_ACTIVE === 'true') {
process.exit(0);
}
-
+
const config = loadConfig();
-
+
if (!config.git_backup?.enabled) {
process.exit(0);
}
-
+
// Check for git repo
if (config.git_backup.require_git_repo && !isGitRepo()) {
log(config, 'DSS Git Backup: Not a git repository, skipping');
process.exit(0);
}
-
+
// Check for changes
if (config.git_backup.commit_only_if_changes && !hasChanges()) {
log(config, 'DSS Git Backup: No changes to commit');
process.exit(0);
}
-
+
// Create backup
const result = createBackup(config);
-
+
if (result.success) {
log(config, `DSS Git Backup: Committed ${result.files} files (${result.hash})`);
} else {
log(config, `DSS Git Backup: Failed - ${result.error}`);
}
-
+
process.exit(0);
}
diff --git a/dss-claude-plugin/hooks/scripts/security-check.py b/dss-claude-plugin/hooks/scripts/security-check.py
index b8c97a8..e46aecb 100755
--- a/dss-claude-plugin/hooks/scripts/security-check.py
+++ b/dss-claude-plugin/hooks/scripts/security-check.py
@@ -1,14 +1,13 @@
#!/usr/bin/env python3
"""
-DSS Security Check Hook
+DSS Security Check Hook.
+
Validates file edits for common security vulnerabilities.
Written from scratch for DSS - no external dependencies.
"""
import json
-import os
import sys
-from datetime import datetime
from pathlib import Path
# Security patterns to detect
@@ -18,73 +17,74 @@ SECURITY_PATTERNS = [
"patterns": [".innerHTML =", ".innerHTML=", "innerHTML:"],
"severity": "high",
"message": "Potential XSS: innerHTML assignment detected. Use textContent for plain text or sanitize HTML with DOMPurify.",
- "file_types": [".js", ".jsx", ".ts", ".tsx"]
+ "file_types": [".js", ".jsx", ".ts", ".tsx"],
},
{
"id": "xss-dangerously",
"patterns": ["dangerouslySetInnerHTML"],
"severity": "high",
"message": "Potential XSS: dangerouslySetInnerHTML detected. Ensure content is sanitized before rendering.",
- "file_types": [".js", ".jsx", ".ts", ".tsx"]
+ "file_types": [".js", ".jsx", ".ts", ".tsx"],
},
{
"id": "eval-usage",
"patterns": ["eval(", "new Function("],
"severity": "critical",
"message": "Code injection risk: eval() or new Function() detected. These can execute arbitrary code.",
- "file_types": [".js", ".jsx", ".ts", ".tsx"]
+ "file_types": [".js", ".jsx", ".ts", ".tsx"],
},
{
"id": "document-write",
"patterns": ["document.write("],
"severity": "medium",
"message": "Deprecated: document.write() detected. Use DOM manipulation methods instead.",
- "file_types": [".js", ".jsx", ".ts", ".tsx", ".html"]
+ "file_types": [".js", ".jsx", ".ts", ".tsx", ".html"],
},
{
"id": "sql-injection",
- "patterns": ["execute(f\"", "execute(f'", "cursor.execute(\"", ".query(`${"],
+ "patterns": ['execute(f"', "execute(f'", 'cursor.execute("', ".query(`${"],
"severity": "critical",
"message": "Potential SQL injection: String interpolation in SQL query. Use parameterized queries.",
- "file_types": [".py", ".js", ".ts"]
+ "file_types": [".py", ".js", ".ts"],
},
{
"id": "hardcoded-secret",
"patterns": ["password=", "api_key=", "secret=", "token=", "apiKey:"],
"severity": "high",
"message": "Potential hardcoded secret detected. Use environment variables instead.",
- "file_types": [".py", ".js", ".ts", ".jsx", ".tsx"]
+ "file_types": [".py", ".js", ".ts", ".jsx", ".tsx"],
},
{
"id": "python-pickle",
"patterns": ["pickle.load", "pickle.loads"],
"severity": "high",
"message": "Insecure deserialization: pickle can execute arbitrary code. Use JSON for untrusted data.",
- "file_types": [".py"]
+ "file_types": [".py"],
},
{
"id": "python-shell",
"patterns": ["os.system(", "subprocess.call(shell=True", "subprocess.run(shell=True"],
"severity": "high",
"message": "Shell injection risk: Use subprocess with shell=False and pass args as list.",
- "file_types": [".py"]
+ "file_types": [".py"],
},
{
"id": "react-ref-current",
"patterns": ["ref.current.innerHTML"],
"severity": "high",
"message": "XSS via React ref: Avoid setting innerHTML on refs. Use state/props instead.",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "unsafe-regex",
"patterns": ["new RegExp(", "RegExp("],
"severity": "medium",
"message": "Potential ReDoS: Dynamic regex from user input can cause denial of service.",
- "file_types": [".js", ".ts", ".jsx", ".tsx"]
- }
+ "file_types": [".js", ".ts", ".jsx", ".tsx"],
+ },
]
+
def get_config():
"""Load hook configuration."""
config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -93,10 +93,10 @@ def get_config():
"enabled": True,
"block_on_critical": False,
"warn_only": True,
- "ignored_patterns": []
+ "ignored_patterns": [],
}
}
-
+
if config_path.exists():
try:
with open(config_path) as f:
@@ -106,72 +106,77 @@ def get_config():
pass
return default_config
+
def check_content(content: str, file_path: str) -> list:
"""Check content for security patterns."""
issues = []
file_ext = Path(file_path).suffix.lower()
-
+
for pattern_def in SECURITY_PATTERNS:
# Skip if file type doesn't match
if file_ext not in pattern_def.get("file_types", []):
continue
-
+
for pattern in pattern_def["patterns"]:
if pattern.lower() in content.lower():
- issues.append({
- "id": pattern_def["id"],
- "severity": pattern_def["severity"],
- "message": pattern_def["message"],
- "pattern": pattern
- })
+ issues.append(
+ {
+ "id": pattern_def["id"],
+ "severity": pattern_def["severity"],
+ "message": pattern_def["message"],
+ "pattern": pattern,
+ }
+ )
break # One match per pattern definition is enough
-
+
return issues
+
def format_output(issues: list, file_path: str) -> str:
"""Format issues for display."""
if not issues:
return ""
-
+
severity_icons = {
"critical": "[CRITICAL]",
"high": "[HIGH]",
"medium": "[MEDIUM]",
- "low": "[LOW]"
+ "low": "[LOW]",
}
-
+
lines = [f"\n=== DSS Security Check: {file_path} ===\n"]
-
+
for issue in issues:
icon = severity_icons.get(issue["severity"], "[?]")
lines.append(f"{icon} {issue['message']}")
lines.append(f" Pattern: {issue['pattern']}\n")
-
+
lines.append("=" * 50)
return "\n".join(lines)
+
def main():
"""Main hook entry point."""
config = get_config()
-
+
if not config.get("security_check", {}).get("enabled", True):
sys.exit(0)
-
+
# Read hook input from stdin
try:
input_data = json.loads(sys.stdin.read())
except json.JSONDecodeError:
sys.exit(0) # Allow tool to proceed if we can't parse
-
+
tool_name = input_data.get("tool_name", "")
tool_input = input_data.get("tool_input", {})
-
+
# Only check Edit and Write tools
if tool_name not in ["Edit", "Write"]:
sys.exit(0)
-
+
file_path = tool_input.get("file_path", "")
-
+
# Get content to check
if tool_name == "Write":
content = tool_input.get("content", "")
@@ -179,23 +184,24 @@ def main():
content = tool_input.get("new_string", "")
else:
content = ""
-
+
if not content or not file_path:
sys.exit(0)
-
+
# Check for security issues
issues = check_content(content, file_path)
-
+
if issues:
output = format_output(issues, file_path)
print(output, file=sys.stderr)
-
+
# Check if we should block on critical issues
has_critical = any(i["severity"] == "critical" for i in issues)
if has_critical and config.get("security_check", {}).get("block_on_critical", False):
sys.exit(2) # Block the tool
-
+
sys.exit(0) # Allow tool to proceed
+
if __name__ == "__main__":
main()
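
security-check.py follows the same stdin contract but can also block: with block_on_critical enabled it exits 2 on a critical finding such as eval(). A quick probe, with an assumed relative script path:

    import json
    import subprocess

    payload = {
        "tool_name": "Edit",
        "tool_input": {"file_path": "app.js", "new_string": "eval(userInput)"},
    }
    proc = subprocess.run(
        ["python3", "hooks/scripts/security-check.py"],
        input=json.dumps(payload),
        capture_output=True,
        text=True,
    )
    print(proc.returncode)  # 2 when block_on_critical is set; 0 otherwise
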
diff --git a/dss-claude-plugin/hooks/scripts/session-summary.js b/dss-claude-plugin/hooks/scripts/session-summary.js
index 35c4fe3..8430a55 100755
--- a/dss-claude-plugin/hooks/scripts/session-summary.js
+++ b/dss-claude-plugin/hooks/scripts/session-summary.js
@@ -40,17 +40,17 @@ function getGitInfo() {
diff: '',
modifiedFiles: []
};
-
+
try {
// Check if in git repo
execSync('git rev-parse --is-inside-work-tree', { stdio: 'pipe' });
-
+
// Get branch
info.branch = execSync('git branch --show-current', { encoding: 'utf8' }).trim();
-
+
// Get status
info.status = execSync('git status --short', { encoding: 'utf8' }).trim();
-
+
// Get modified files
const statusLines = info.status.split('\n').filter(Boolean);
info.modifiedFiles = statusLines.map(line => {
@@ -60,7 +60,7 @@ function getGitInfo() {
file: parts.slice(1).join(' ')
};
});
-
+
// Get diff summary
try {
info.diff = execSync('git diff --stat', { encoding: 'utf8' }).trim();
@@ -70,7 +70,7 @@ function getGitInfo() {
} catch (e) {
// Not a git repo or git not available
}
-
+
return info;
}
@@ -82,12 +82,12 @@ function getSessionStats() {
linesAdded: 0,
linesRemoved: 0
};
-
+
try {
// Get diff stats from git
const diffStat = execSync('git diff --numstat', { encoding: 'utf8' });
const lines = diffStat.trim().split('\n').filter(Boolean);
-
+
for (const line of lines) {
const [added, removed] = line.split('\t');
stats.linesAdded += parseInt(added) || 0;
@@ -97,7 +97,7 @@ function getSessionStats() {
} catch (e) {
// Git not available
}
-
+
return stats;
}
@@ -105,29 +105,29 @@ function generateReport(config) {
const summaryConfig = config.session_summary || {};
const gitInfo = getGitInfo();
const stats = getSessionStats();
-
+
const timestamp = new Date().toLocaleString();
const lines = [];
-
+
lines.push('# DSS Session Summary');
lines.push(`\n**Generated:** ${timestamp}`);
-
+
if (gitInfo.branch) {
lines.push(`**Branch:** ${gitInfo.branch}`);
}
-
+
lines.push('\n## Changes Overview');
lines.push('');
lines.push(`- Files modified: ${stats.filesModified}`);
lines.push(`- Lines added: +${stats.linesAdded}`);
lines.push(`- Lines removed: -${stats.linesRemoved}`);
-
+
if (summaryConfig.include_file_list && gitInfo.modifiedFiles.length > 0) {
lines.push('\n## Modified Files');
lines.push('');
lines.push('| Status | File |');
lines.push('|--------|------|');
-
+
const statusLabels = {
'M': 'Modified',
'A': 'Added',
@@ -135,17 +135,17 @@ function generateReport(config) {
'R': 'Renamed',
'??': 'Untracked'
};
-
+
for (const file of gitInfo.modifiedFiles.slice(0, 20)) {
const label = statusLabels[file.status] || file.status;
lines.push(`| ${label} | ${file.file} |`);
}
-
+
if (gitInfo.modifiedFiles.length > 20) {
lines.push(`| ... | +${gitInfo.modifiedFiles.length - 20} more files |`);
}
}
-
+
if (summaryConfig.include_git_diff && gitInfo.diff) {
lines.push('\n## Diff Summary');
lines.push('');
@@ -158,27 +158,27 @@ function generateReport(config) {
}
lines.push('```');
}
-
+
lines.push('\n---');
lines.push('*Generated by DSS Session Summary Hook*');
-
+
return lines.join('\n');
}
function main() {
const config = loadConfig();
-
+
if (!config.session_summary?.enabled) {
process.exit(0);
}
-
+
try {
const report = generateReport(config);
const outputFile = config.session_summary.output_file || '.dss-session-summary.md';
const outputPath = path.join(process.cwd(), outputFile);
-
+
fs.writeFileSync(outputPath, report, 'utf8');
-
+
// Output confirmation
console.log(JSON.stringify({
systemMessage: `Session summary saved to ${outputFile}`,
@@ -187,7 +187,7 @@ function main() {
} catch (e) {
// Fail silently
}
-
+
process.exit(0);
}
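
The session-summary.js hunks above are pure whitespace hygiene (trailing spaces stripped from otherwise-blank lines); the hook's logic is untouched. For readers unfamiliar with the stats it reports: `git diff --numstat` emits one `<added>\t<removed>\t<path>` row per file, which the hook sums row by row. A minimal Python sketch of that same parsing, as an illustration only (not part of this patch; note binary files report `-` counts, hence the digit guard):

```python
import subprocess

def diff_stats() -> dict:
    """Sum `git diff --numstat` the way session-summary.js does."""
    out = subprocess.run(
        ["git", "diff", "--numstat"],
        capture_output=True, text=True, check=True,
    ).stdout
    stats = {"files_changed": 0, "lines_added": 0, "lines_removed": 0}
    for row in out.strip().splitlines():
        added, removed, _path = row.split("\t", 2)
        stats["files_changed"] += 1
        # Binary files show "-" instead of a count; treat those as 0.
        stats["lines_added"] += int(added) if added.isdigit() else 0
        stats["lines_removed"] += int(removed) if removed.isdigit() else 0
    return stats

print(diff_stats())
```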
diff --git a/dss-claude-plugin/hooks/scripts/storybook-reminder.py b/dss-claude-plugin/hooks/scripts/storybook-reminder.py
index 4d23513..03a9dac 100755
--- a/dss-claude-plugin/hooks/scripts/storybook-reminder.py
+++ b/dss-claude-plugin/hooks/scripts/storybook-reminder.py
@@ -1,16 +1,17 @@
#!/usr/bin/env python3
"""
-DSS Storybook Reminder Hook
+DSS Storybook Reminder Hook.
+
Reminds developers to update Storybook stories when components change.
Written from scratch for DSS.
"""
import json
-import os
import re
import sys
from pathlib import Path
+
def get_config():
"""Load hook configuration."""
config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -20,10 +21,10 @@ def get_config():
"component_patterns": ["**/components/**/*.tsx", "**/ui/**/*.tsx"],
"story_extensions": [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"],
"remind_on_new": True,
- "remind_on_props_change": True
+ "remind_on_props_change": True,
}
}
-
+
if config_path.exists():
try:
with open(config_path) as f:
@@ -33,38 +34,40 @@ def get_config():
pass
return default_config
+
def is_component_file(file_path: str) -> bool:
"""Check if file is a React component."""
path = Path(file_path)
-
+
# Must be a tsx/jsx file
if path.suffix.lower() not in [".tsx", ".jsx"]:
return False
-
+
# Skip story files, test files, index files
name = path.stem.lower()
if any(x in name for x in [".stories", ".story", ".test", ".spec", "index"]):
return False
-
+
# Check if in component-like directory
parts = str(path).lower()
component_dirs = ["components", "ui", "atoms", "molecules", "organisms", "templates"]
return any(d in parts for d in component_dirs)
+
def find_story_file(component_path: str) -> tuple:
"""Find corresponding story file for a component."""
path = Path(component_path)
base_name = path.stem
parent = path.parent
-
+
story_extensions = [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"]
-
+
# Check same directory
for ext in story_extensions:
story_path = parent / f"{base_name}{ext}"
if story_path.exists():
return (True, str(story_path))
-
+
# Check __stories__ subdirectory
stories_dir = parent / "__stories__"
if stories_dir.exists():
@@ -72,7 +75,7 @@ def find_story_file(component_path: str) -> tuple:
story_path = stories_dir / f"{base_name}{ext}"
if story_path.exists():
return (True, str(story_path))
-
+
# Check stories subdirectory
stories_dir = parent / "stories"
if stories_dir.exists():
@@ -80,9 +83,10 @@ def find_story_file(component_path: str) -> tuple:
story_path = stories_dir / f"{base_name}{ext}"
if story_path.exists():
return (True, str(story_path))
-
+
return (False, None)
+
def detect_props_change(content: str) -> bool:
"""Detect if content includes prop changes."""
prop_patterns = [
@@ -90,20 +94,21 @@ def detect_props_change(content: str) -> bool:
r"type\s+\w+Props\s*=",
r"Props\s*=\s*\{",
r"defaultProps\s*=",
- r"propTypes\s*="
+ r"propTypes\s*=",
]
-
+
for pattern in prop_patterns:
if re.search(pattern, content):
return True
return False
+
def format_reminder(file_path: str, has_story: bool, story_path: str, props_changed: bool) -> str:
"""Format the reminder message."""
- lines = [f"\n=== DSS Storybook Reminder ===\n"]
-
+ lines = ["\n=== DSS Storybook Reminder ===\n"]
+
component_name = Path(file_path).stem
-
+
if not has_story:
lines.append(f"[NEW] Component '{component_name}' has no Storybook story!")
lines.append(f" Consider creating: {component_name}.stories.tsx")
@@ -116,36 +121,37 @@ def format_reminder(file_path: str, has_story: bool, story_path: str, props_chan
lines.append(f"[UPDATE] Props changed in '{component_name}'")
lines.append(f" Story file: {story_path}")
lines.append(" Consider updating stories to reflect new props.")
-
+
lines.append("")
lines.append("=" * 40)
return "\n".join(lines)
+
def main():
"""Main hook entry point."""
config = get_config()
-
+
if not config.get("storybook_reminder", {}).get("enabled", True):
sys.exit(0)
-
+
# Read hook input from stdin
try:
input_data = json.loads(sys.stdin.read())
except json.JSONDecodeError:
sys.exit(0)
-
+
tool_name = input_data.get("tool_name", "")
tool_input = input_data.get("tool_input", {})
-
+
if tool_name not in ["Edit", "Write"]:
sys.exit(0)
-
+
file_path = tool_input.get("file_path", "")
-
+
# Only check component files
if not is_component_file(file_path):
sys.exit(0)
-
+
# Get content
if tool_name == "Write":
content = tool_input.get("content", "")
@@ -153,27 +159,28 @@ def main():
content = tool_input.get("new_string", "")
else:
content = ""
-
+
# Check for story file
has_story, story_path = find_story_file(file_path)
-
+
# Check for props changes
props_changed = detect_props_change(content) if content else False
-
+
reminder_config = config.get("storybook_reminder", {})
-
+
# Determine if we should show reminder
should_remind = False
if not has_story and reminder_config.get("remind_on_new", True):
should_remind = True
elif has_story and props_changed and reminder_config.get("remind_on_props_change", True):
should_remind = True
-
+
if should_remind:
output = format_reminder(file_path, has_story, story_path, props_changed)
print(output, file=sys.stderr)
-
+
sys.exit(0)
+
if __name__ == "__main__":
main()
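
Aside from the Black/isort churn (trailing commas, the f-string-without-placeholders fix in format_reminder, two blank lines between top-level functions, and the dropped unused `os` import), the storybook-reminder hook's contract is unchanged: it reads a JSON tool event on stdin, prints any reminder to stderr, and always exits 0, so it can never block an edit. One way to exercise it by hand, assuming you run from the repository root; the event shape mirrors the fields the script reads, and the content snippet is chosen to match the `type \w+Props =` pattern it scans for:

```python
import json
import subprocess

# Simulated Edit/Write tool event. The path must look like a component
# (.tsx suffix, under a components/ directory, not a .stories/.test/index
# file) for is_component_file() to accept it.
event = {
    "tool_name": "Write",
    "tool_input": {
        "file_path": "src/components/Button.tsx",
        "content": "type ButtonProps = { label: string };\n",
    },
}

proc = subprocess.run(
    ["python3", "dss-claude-plugin/hooks/scripts/storybook-reminder.py"],
    input=json.dumps(event),
    capture_output=True,
    text=True,
)
print(proc.stderr)      # the reminder, if one was triggered
print(proc.returncode)  # always 0 -- the hook warns, never blocks
```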
diff --git a/dss-claude-plugin/hooks/scripts/token-validator.py b/dss-claude-plugin/hooks/scripts/token-validator.py
index 0582f0a..b1bd076 100755
--- a/dss-claude-plugin/hooks/scripts/token-validator.py
+++ b/dss-claude-plugin/hooks/scripts/token-validator.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
"""
-DSS Token Validator Hook
+DSS Token Validator Hook.
+
Detects hardcoded values that should use design tokens.
Written from scratch for DSS.
"""
@@ -18,7 +19,7 @@ HARDCODED_PATTERNS = [
"category": "color",
"message": "Hardcoded hex color detected. Consider using a design token.",
"suggestion": "Use: var(--color-*) or theme.colors.*",
- "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
+ "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"],
},
{
"id": "color-rgb",
@@ -26,7 +27,7 @@ HARDCODED_PATTERNS = [
"category": "color",
"message": "Hardcoded RGB color detected. Consider using a design token.",
"suggestion": "Use: var(--color-*) or theme.colors.*",
- "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
+ "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"],
},
{
"id": "color-hsl",
@@ -34,7 +35,7 @@ HARDCODED_PATTERNS = [
"category": "color",
"message": "Hardcoded HSL color detected. Consider using a design token.",
"suggestion": "Use: var(--color-*) or theme.colors.*",
- "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
+ "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"],
},
{
"id": "spacing-px",
@@ -42,7 +43,7 @@ HARDCODED_PATTERNS = [
"category": "spacing",
"message": "Hardcoded pixel spacing detected. Consider using a spacing token.",
"suggestion": "Use: var(--spacing-*) or theme.spacing.*",
- "file_types": [".css", ".scss", ".less"]
+ "file_types": [".css", ".scss", ".less"],
},
{
"id": "font-size-px",
@@ -50,7 +51,7 @@ HARDCODED_PATTERNS = [
"category": "typography",
"message": "Hardcoded font-size detected. Consider using a typography token.",
"suggestion": "Use: var(--font-size-*) or theme.fontSize.*",
- "file_types": [".css", ".scss", ".less"]
+ "file_types": [".css", ".scss", ".less"],
},
{
"id": "font-family-direct",
@@ -58,7 +59,7 @@ HARDCODED_PATTERNS = [
"category": "typography",
"message": "Hardcoded font-family detected. Consider using a typography token.",
"suggestion": "Use: var(--font-family-*) or theme.fontFamily.*",
- "file_types": [".css", ".scss", ".less"]
+ "file_types": [".css", ".scss", ".less"],
},
{
"id": "border-radius-px",
@@ -66,7 +67,7 @@ HARDCODED_PATTERNS = [
"category": "border",
"message": "Hardcoded border-radius detected. Consider using a radius token.",
"suggestion": "Use: var(--radius-*) or theme.borderRadius.*",
- "file_types": [".css", ".scss", ".less"]
+ "file_types": [".css", ".scss", ".less"],
},
{
"id": "box-shadow-direct",
@@ -74,7 +75,7 @@ HARDCODED_PATTERNS = [
"category": "effects",
"message": "Hardcoded box-shadow detected. Consider using a shadow token.",
"suggestion": "Use: var(--shadow-*) or theme.boxShadow.*",
- "file_types": [".css", ".scss", ".less"]
+ "file_types": [".css", ".scss", ".less"],
},
{
"id": "z-index-magic",
@@ -82,7 +83,7 @@ HARDCODED_PATTERNS = [
"category": "layout",
"message": "Magic number z-index detected. Consider using a z-index token.",
"suggestion": "Use: var(--z-index-*) with semantic names (modal, dropdown, tooltip)",
- "file_types": [".css", ".scss", ".less"]
+ "file_types": [".css", ".scss", ".less"],
},
{
"id": "inline-style-color",
@@ -90,7 +91,7 @@ HARDCODED_PATTERNS = [
"category": "color",
"message": "Hardcoded color in inline style. Consider using theme tokens.",
"suggestion": "Use: style={{ color: theme.colors.* }}",
- "file_types": [".jsx", ".tsx"]
+ "file_types": [".jsx", ".tsx"],
},
{
"id": "tailwind-arbitrary",
@@ -98,8 +99,8 @@ HARDCODED_PATTERNS = [
"category": "color",
"message": "Arbitrary Tailwind color value. Consider using theme colors.",
"suggestion": "Use: bg-primary, text-secondary, etc.",
- "file_types": [".jsx", ".tsx", ".html"]
- }
+ "file_types": [".jsx", ".tsx", ".html"],
+ },
]
# Allowlist patterns (common exceptions)
@@ -114,6 +115,7 @@ ALLOWLIST = [
r"colors\.", # Already using colors object
]
+
def get_config():
"""Load hook configuration."""
config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -122,10 +124,10 @@ def get_config():
"enabled": True,
"strict_mode": False,
"warn_only": True,
- "categories": ["color", "spacing", "typography"]
+ "categories": ["color", "spacing", "typography"],
}
}
-
+
if config_path.exists():
try:
with open(config_path) as f:
@@ -135,6 +137,7 @@ def get_config():
pass
return default_config
+
def is_allowlisted(match: str) -> bool:
"""Check if match is in allowlist."""
for pattern in ALLOWLIST:
@@ -142,33 +145,36 @@ def is_allowlisted(match: str) -> bool:
return True
return False
+
def check_content(content: str, file_path: str, config: dict) -> list:
"""Check content for hardcoded values."""
issues = []
file_ext = Path(file_path).suffix.lower()
enabled_categories = config.get("token_validator", {}).get("categories", [])
-
+
for pattern_def in HARDCODED_PATTERNS:
# Skip if file type doesn't match
if file_ext not in pattern_def.get("file_types", []):
continue
-
+
# Skip if category not enabled (unless empty = all)
if enabled_categories and pattern_def["category"] not in enabled_categories:
continue
-
+
matches = re.findall(pattern_def["regex"], content, re.IGNORECASE)
-
+
for match in matches:
if not is_allowlisted(match):
- issues.append({
- "id": pattern_def["id"],
- "category": pattern_def["category"],
- "message": pattern_def["message"],
- "suggestion": pattern_def["suggestion"],
- "value": match[:50] # Truncate long matches
- })
-
+ issues.append(
+ {
+ "id": pattern_def["id"],
+ "category": pattern_def["category"],
+ "message": pattern_def["message"],
+ "suggestion": pattern_def["suggestion"],
+ "value": match[:50], # Truncate long matches
+ }
+ )
+
# Deduplicate by id
seen = set()
unique_issues = []
@@ -176,55 +182,57 @@ def check_content(content: str, file_path: str, config: dict) -> list:
if issue["id"] not in seen:
seen.add(issue["id"])
unique_issues.append(issue)
-
+
return unique_issues
+
def format_output(issues: list, file_path: str) -> str:
"""Format issues for display."""
if not issues:
return ""
-
+
category_icons = {
"color": "[COLOR]",
"spacing": "[SPACE]",
"typography": "[FONT]",
"border": "[BORDER]",
"effects": "[EFFECT]",
- "layout": "[LAYOUT]"
+ "layout": "[LAYOUT]",
}
-
+
lines = [f"\n=== DSS Token Validator: {file_path} ===\n"]
-
+
for issue in issues:
icon = category_icons.get(issue["category"], "[TOKEN]")
lines.append(f"{icon} {issue['message']}")
lines.append(f" Found: {issue['value']}")
lines.append(f" {issue['suggestion']}\n")
-
+
lines.append("=" * 50)
return "\n".join(lines)
+
def main():
"""Main hook entry point."""
config = get_config()
-
+
if not config.get("token_validator", {}).get("enabled", True):
sys.exit(0)
-
+
# Read hook input from stdin
try:
input_data = json.loads(sys.stdin.read())
except json.JSONDecodeError:
sys.exit(0)
-
+
tool_name = input_data.get("tool_name", "")
tool_input = input_data.get("tool_input", {})
-
+
if tool_name not in ["Edit", "Write"]:
sys.exit(0)
-
+
file_path = tool_input.get("file_path", "")
-
+
# Get content to check
if tool_name == "Write":
content = tool_input.get("content", "")
@@ -232,22 +240,23 @@ def main():
content = tool_input.get("new_string", "")
else:
content = ""
-
+
if not content or not file_path:
sys.exit(0)
-
+
# Check for token issues
issues = check_content(content, file_path, config)
-
+
if issues:
output = format_output(issues, file_path)
print(output, file=sys.stderr)
-
+
# In strict mode, block on issues
if config.get("token_validator", {}).get("strict_mode", False):
sys.exit(2)
-
+
sys.exit(0)
+
if __name__ == "__main__":
main()
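
token-validator.py gets the same mechanical treatment: trailing commas throughout the HARDCODED_PATTERNS table, a multi-line issues.append, and two blank lines between functions; no regex or allowlist entry changed. The behavioral detail worth keeping in mind is the exit-code contract: findings go to stderr and the hook exits 0 (warn-only) unless strict_mode is set, in which case it exits 2 to block the write. A stripped-down sketch of that flow, reduced to a single hex-color pattern for brevity (the regex here only approximates the real "color-hex" table entry):

```python
import re
import sys

HEX_COLOR = r"#[0-9a-fA-F]{3,8}\b"  # stand-in for the "color-hex" entry

def check(content: str, strict: bool = False) -> int:
    """Warn on stderr; return 2 in strict mode to block, else 0."""
    hits = sorted(set(re.findall(HEX_COLOR, content)))
    if hits:
        print(f"[COLOR] Hardcoded hex color(s): {', '.join(hits)}", file=sys.stderr)
        print("        Use: var(--color-*) or theme.colors.*", file=sys.stderr)
        return 2 if strict else 0
    return 0

if __name__ == "__main__":
    sys.exit(check(sys.stdin.read()))
```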
diff --git a/dss-claude-plugin/servers/dss-mcp-server.py b/dss-claude-plugin/servers/dss-mcp-server.py
index a6242e0..db72b2b 100644
--- a/dss-claude-plugin/servers/dss-mcp-server.py
+++ b/dss-claude-plugin/servers/dss-mcp-server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
-DSS MCP Server - Design System Server Integration for Claude Code
+DSS MCP Server - Design System Server Integration for Claude Code.
A Python MCP server that exposes DSS functionality as tools for Claude.
Uses stdio transport for Claude Code integration.
@@ -10,24 +10,30 @@ Version: 2.0.0 - Architectural Refinement: Boundary Enforcement & Runtime
"""
import asyncio
+import base64
import json
import logging
-import sys
import os
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-from datetime import datetime
+import re
+import sys
from collections import deque
from dataclasses import dataclass, field
-import base64
-import re
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional
# DSS Runtime - Boundary Enforcement (CRITICAL)
# All external API access MUST go through the runtime
try:
sys.path.insert(0, str(Path(__file__).parent.parent))
- from core.runtime import DSSRuntime, BoundaryViolationError, get_runtime
- from core.structured_logger import get_logger, LogContext, PerformanceLogger, configure_log_rotation
+ from core.runtime import BoundaryViolationError, DSSRuntime, get_runtime
+ from core.structured_logger import (
+ LogContext,
+ PerformanceLogger,
+ configure_log_rotation,
+ get_logger,
+ )
+
RUNTIME_AVAILABLE = True
except ImportError as e:
RUNTIME_AVAILABLE = False
@@ -37,7 +43,8 @@ except ImportError as e:
# Playwright import (optional - only needed for DevTools features)
try:
- from playwright.async_api import async_playwright, Browser, Page, BrowserContext, Playwright
+ from playwright.async_api import Browser, BrowserContext, Page, Playwright, async_playwright
+
PLAYWRIGHT_AVAILABLE = True
except ImportError:
PLAYWRIGHT_AVAILABLE = False
@@ -45,6 +52,7 @@ except ImportError:
# Import LocalBrowserStrategy for unified browser automation
try:
from strategies.local.browser import LocalBrowserStrategy
+
LOCAL_BROWSER_STRATEGY_AVAILABLE = True
except ImportError:
LOCAL_BROWSER_STRATEGY_AVAILABLE = False
@@ -57,7 +65,7 @@ sys.path.insert(0, str(DSS_PATH))
try:
from mcp.server import Server
from mcp.server.stdio import stdio_server
- from mcp.types import Tool, TextContent
+ from mcp.types import TextContent, Tool
except ImportError:
print("MCP SDK not found. Install with: pip install mcp", file=sys.stderr)
sys.exit(1)
@@ -65,21 +73,32 @@ except ImportError:
# DSS imports
try:
import dss
- from dss import (
- # Analyze - Context generation & project graph
- ProjectScanner, ReactAnalyzer, StyleAnalyzer, DependencyGraph, QuickWinFinder,
- # Ingest - Token sources
- CSSTokenSource, SCSSTokenSource, TailwindTokenSource, JSONTokenSource,
- TokenMerger, MergeStrategy, TokenCollection,
- # Models
- Theme, Project, ProjectMetadata,
- # Storybook
- StorybookScanner, StoryGenerator, ThemeGenerator,
- # Settings
- DSSSettings, DSSManager, settings, manager,
- # Figma
+ from dss import ( # Analyze - Context generation & project graph; Ingest - Token sources; Models; Storybook; Settings; Figma
+ CSSTokenSource,
+ DependencyGraph,
+ DSSManager,
+ DSSSettings,
FigmaToolSuite,
+ JSONTokenSource,
+ MergeStrategy,
+ Project,
+ ProjectMetadata,
+ ProjectScanner,
+ QuickWinFinder,
+ ReactAnalyzer,
+ SCSSTokenSource,
+ StorybookScanner,
+ StoryGenerator,
+ StyleAnalyzer,
+ TailwindTokenSource,
+ Theme,
+ ThemeGenerator,
+ TokenCollection,
+ TokenMerger,
+ manager,
+ settings,
)
+
DSS_AVAILABLE = True
except ImportError as e:
DSS_AVAILABLE = False
@@ -89,11 +108,12 @@ except ImportError as e:
try:
from core import (
get_active_context,
+ get_compiler_status,
+ list_skins,
resolve_token,
validate_manifest,
- list_skins,
- get_compiler_status
)
+
CONTEXT_COMPILER_AVAILABLE = True
except ImportError as e:
CONTEXT_COMPILER_AVAILABLE = False
@@ -103,13 +123,14 @@ except ImportError as e:
try:
from dss.project import (
DSSProject,
- ProjectConfig,
+ FigmaProjectSync,
FigmaSource,
- ProjectStatus,
+ ProjectConfig,
ProjectManager,
ProjectRegistry,
- FigmaProjectSync,
+ ProjectStatus,
)
+
PROJECT_MANAGEMENT_AVAILABLE = True
except ImportError as e:
PROJECT_MANAGEMENT_AVAILABLE = False
@@ -124,8 +145,8 @@ else:
# Fallback to basic logging if runtime not available
logging.basicConfig(
level=logging.INFO,
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
- handlers=[logging.StreamHandler(sys.stderr)]
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+ handlers=[logging.StreamHandler(sys.stderr)],
)
logger = logging.getLogger("dss-mcp-server")
logger.warning("Structured logging unavailable - using fallback")
@@ -141,7 +162,7 @@ TIMEOUT_CONFIG = {
"audit": 45,
"quick_wins": 30,
"devtools_connect": 20,
- "devtools_default": 10
+ "devtools_default": 10,
}
@@ -165,13 +186,16 @@ class DevToolsState:
- Network request capture (bounded buffer)
- Connection lifecycle state
"""
+
playwright: Optional[Any] = None
browser: Optional[Any] = None
contexts: Dict[str, Any] = field(default_factory=dict)
pages: Dict[str, Any] = field(default_factory=dict)
active_page_id: Optional[str] = None
console_logs: deque = field(default_factory=lambda: deque(maxlen=DEVTOOLS_CONSOLE_MAX_ENTRIES))
- network_requests: deque = field(default_factory=lambda: deque(maxlen=DEVTOOLS_NETWORK_MAX_ENTRIES))
+ network_requests: deque = field(
+ default_factory=lambda: deque(maxlen=DEVTOOLS_NETWORK_MAX_ENTRIES)
+ )
connected: bool = False
@@ -182,15 +206,18 @@ devtools = DevToolsState()
# BROWSER AUTOMATION STATE
# =============================================================================
+
@dataclass
class BrowserAutomationState:
- """State management for unified browser automation (LOCAL mode)"""
+ """State management for unified browser automation (LOCAL mode)."""
+
strategy: Optional[Any] = None # LocalBrowserStrategy instance
mode: str = "local" # "local" or "remote"
session_id: Optional[str] = None
remote_api_url: Optional[str] = None
initialized: bool = False
+
browser_state = BrowserAutomationState()
@@ -199,27 +226,27 @@ server = Server("dss-server")
def with_timeout(timeout_key: str):
- """Decorator to add timeout to async functions"""
+ """Decorator to add timeout to async functions."""
+
def decorator(func):
async def wrapper(*args, **kwargs):
timeout = TIMEOUT_CONFIG.get(timeout_key, 30)
try:
- return await asyncio.wait_for(
- func(*args, **kwargs),
- timeout=timeout
- )
+ return await asyncio.wait_for(func(*args, **kwargs), timeout=timeout)
except asyncio.TimeoutError:
return {
"success": False,
"error": f"Operation timed out after {timeout} seconds",
- "timeout_key": timeout_key
+ "timeout_key": timeout_key,
}
+
return wrapper
+
return decorator
def safe_serialize(obj: Any) -> Any:
- """Safely serialize objects to JSON-compatible format"""
+ """Safely serialize objects to JSON-compatible format."""
if obj is None:
return None
if isinstance(obj, (str, int, float, bool)):
@@ -232,10 +259,10 @@ def safe_serialize(obj: Any) -> Any:
return str(obj)
if isinstance(obj, deque):
return [safe_serialize(item) for item in obj]
- if hasattr(obj, '__dict__'):
- d = {k: v for k, v in obj.__dict__.items() if not k.startswith('_')}
+ if hasattr(obj, "__dict__"):
+ d = {k: v for k, v in obj.__dict__.items() if not k.startswith("_")}
return safe_serialize(d)
- if hasattr(obj, 'model_dump'):
+ if hasattr(obj, "model_dump"):
return obj.model_dump()
return str(obj)
@@ -244,9 +271,10 @@ def safe_serialize(obj: Any) -> Any:
# TOOL DEFINITIONS
# =============================================================================
+
@server.list_tools()
async def list_tools() -> List[Tool]:
- """List all available DSS and DevTools tools"""
+ """List all available DSS and DevTools tools."""
dss_tools = [
Tool(
name="dss_analyze_project",
@@ -256,11 +284,11 @@ async def list_tools() -> List[Tool]:
"properties": {
"path": {
"type": "string",
- "description": "Absolute path to the project directory to analyze"
+ "description": "Absolute path to the project directory to analyze",
}
},
- "required": ["path"]
- }
+ "required": ["path"],
+ },
),
Tool(
name="dss_extract_tokens",
@@ -270,16 +298,16 @@ async def list_tools() -> List[Tool]:
"properties": {
"path": {
"type": "string",
- "description": "Path to the file or directory containing design tokens"
+ "description": "Path to the file or directory containing design tokens",
},
"sources": {
"type": "array",
"items": {"type": "string", "enum": ["css", "scss", "tailwind", "json"]},
- "description": "Token source types to extract from (default: all)"
- }
+ "description": "Token source types to extract from (default: all)",
+ },
},
- "required": ["path"]
- }
+ "required": ["path"],
+ },
),
Tool(
name="dss_generate_theme",
@@ -289,25 +317,25 @@ async def list_tools() -> List[Tool]:
"properties": {
"tokens": {
"type": "object",
- "description": "Design tokens to transform (or use tokens from previous extraction)"
+ "description": "Design tokens to transform (or use tokens from previous extraction)",
},
"format": {
"type": "string",
"enum": ["css", "scss", "json", "js"],
- "description": "Output format for generated theme files"
+ "description": "Output format for generated theme files",
},
"theme_name": {
"type": "string",
- "description": "Name for the generated theme (default: 'default')"
- }
+ "description": "Name for the generated theme (default: 'default')",
+ },
},
- "required": ["format"]
- }
+ "required": ["format"],
+ },
),
Tool(
name="dss_list_themes",
description="List all available themes in the DSS system",
- inputSchema={"type": "object", "properties": {}}
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="dss_get_status",
@@ -318,10 +346,10 @@ async def list_tools() -> List[Tool]:
"format": {
"type": "string",
"enum": ["json", "dashboard"],
- "description": "Output format: 'json' for structured data, 'dashboard' for ASCII art display (default: json)"
+ "description": "Output format: 'json' for structured data, 'dashboard' for ASCII art display (default: json)",
}
- }
- }
+ },
+ },
),
Tool(
name="dss_audit_components",
@@ -329,13 +357,10 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "path": {
- "type": "string",
- "description": "Path to React component directory"
- }
+ "path": {"type": "string", "description": "Path to React component directory"}
},
- "required": ["path"]
- }
+ "required": ["path"],
+ },
),
Tool(
name="dss_setup_storybook",
@@ -343,18 +368,15 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "path": {
- "type": "string",
- "description": "Path to the project directory"
- },
+ "path": {"type": "string", "description": "Path to the project directory"},
"action": {
"type": "string",
"enum": ["scan", "generate", "configure"],
- "description": "Action to perform: scan existing, generate stories, or configure theme"
- }
+ "description": "Action to perform: scan existing, generate stories, or configure theme",
+ },
},
- "required": ["path"]
- }
+ "required": ["path"],
+ },
),
Tool(
name="dss_sync_figma",
@@ -362,13 +384,10 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "file_key": {
- "type": "string",
- "description": "Figma file key (from URL)"
- }
+ "file_key": {"type": "string", "description": "Figma file key (from URL)"}
},
- "required": ["file_key"]
- }
+ "required": ["file_key"],
+ },
),
Tool(
name="dss_find_quick_wins",
@@ -376,13 +395,10 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "path": {
- "type": "string",
- "description": "Path to the project directory"
- }
+ "path": {"type": "string", "description": "Path to the project directory"}
},
- "required": ["path"]
- }
+ "required": ["path"],
+ },
),
Tool(
name="dss_transform_tokens",
@@ -390,24 +406,21 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "tokens": {
- "type": "object",
- "description": "Tokens to transform"
- },
+ "tokens": {"type": "object", "description": "Tokens to transform"},
"input_format": {
"type": "string",
"enum": ["css", "scss", "json", "tailwind"],
- "description": "Input token format"
+ "description": "Input token format",
},
"output_format": {
"type": "string",
"enum": ["css", "scss", "json", "js"],
- "description": "Desired output format"
- }
+ "description": "Desired output format",
+ },
},
- "required": ["tokens", "output_format"]
- }
- )
+ "required": ["tokens", "output_format"],
+ },
+ ),
]
devtools_tools = [
@@ -417,10 +430,13 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "url": {"type": "string", "description": "Initial URL to navigate to (default: about:blank)"},
- "headless": {"type": "boolean", "description": "Run headless (default: true)"}
- }
- }
+ "url": {
+ "type": "string",
+ "description": "Initial URL to navigate to (default: about:blank)",
+ },
+ "headless": {"type": "boolean", "description": "Run headless (default: true)"},
+ },
+ },
),
Tool(
name="devtools_connect",
@@ -429,19 +445,19 @@ async def list_tools() -> List[Tool]:
"type": "object",
"properties": {
"port": {"type": "integer", "description": "CDP port number (default: 9222)"},
- "host": {"type": "string", "description": "CDP host (default: 'localhost')"}
- }
- }
+ "host": {"type": "string", "description": "CDP host (default: 'localhost')"},
+ },
+ },
),
Tool(
name="devtools_disconnect",
description="Disconnect from Chrome DevTools and clean up resources.",
- inputSchema={"type": "object", "properties": {}}
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="devtools_list_pages",
description="List all available pages (tabs) in the connected browser with their URLs and titles.",
- inputSchema={"type": "object", "properties": {}}
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="devtools_select_page",
@@ -449,10 +465,13 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "page_id": {"type": "string", "description": "The unique ID of the page to select (from devtools_list_pages)"}
+ "page_id": {
+ "type": "string",
+ "description": "The unique ID of the page to select (from devtools_list_pages)",
+ }
},
- "required": ["page_id"]
- }
+ "required": ["page_id"],
+ },
),
Tool(
name="devtools_console_logs",
@@ -463,12 +482,18 @@ async def list_tools() -> List[Tool]:
"level": {
"type": "string",
"enum": ["all", "log", "warn", "error", "info", "debug"],
- "description": "Filter by message level (default: all)"
+ "description": "Filter by message level (default: all)",
},
- "limit": {"type": "integer", "description": "Maximum number of messages to return (default: 100)"},
- "clear": {"type": "boolean", "description": "Clear captured logs after retrieving (default: false)"}
- }
- }
+ "limit": {
+ "type": "integer",
+ "description": "Maximum number of messages to return (default: 100)",
+ },
+ "clear": {
+ "type": "boolean",
+ "description": "Clear captured logs after retrieving (default: false)",
+ },
+ },
+ },
),
Tool(
name="devtools_network_requests",
@@ -476,10 +501,16 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "filter_url": {"type": "string", "description": "Regex pattern to filter requests by URL"},
- "limit": {"type": "integer", "description": "Maximum number of requests to return (default: 50)"}
- }
- }
+ "filter_url": {
+ "type": "string",
+ "description": "Regex pattern to filter requests by URL",
+ },
+ "limit": {
+ "type": "integer",
+ "description": "Maximum number of requests to return (default: 50)",
+ },
+ },
+ },
),
Tool(
name="devtools_evaluate",
@@ -487,10 +518,13 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "expression": {"type": "string", "description": "The JavaScript expression to evaluate"}
+ "expression": {
+ "type": "string",
+ "description": "The JavaScript expression to evaluate",
+ }
},
- "required": ["expression"]
- }
+ "required": ["expression"],
+ },
),
Tool(
name="devtools_query_dom",
@@ -498,10 +532,13 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "selector": {"type": "string", "description": "CSS selector to query for elements"}
+ "selector": {
+ "type": "string",
+ "description": "CSS selector to query for elements",
+ }
},
- "required": ["selector"]
- }
+ "required": ["selector"],
+ },
),
Tool(
name="devtools_goto",
@@ -510,10 +547,13 @@ async def list_tools() -> List[Tool]:
"type": "object",
"properties": {
"url": {"type": "string", "description": "URL to navigate to"},
- "wait_until": {"type": "string", "description": "Wait condition: 'load', 'domcontentloaded', 'networkidle' (default: domcontentloaded)"}
+ "wait_until": {
+ "type": "string",
+ "description": "Wait condition: 'load', 'domcontentloaded', 'networkidle' (default: domcontentloaded)",
+ },
},
- "required": ["url"]
- }
+ "required": ["url"],
+ },
),
Tool(
name="devtools_screenshot",
@@ -521,16 +561,22 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "selector": {"type": "string", "description": "CSS selector of an element to capture. If omitted, captures the viewport."},
- "full_page": {"type": "boolean", "description": "Capture the full scrollable page (default: false)"}
- }
- }
+ "selector": {
+ "type": "string",
+ "description": "CSS selector of an element to capture. If omitted, captures the viewport.",
+ },
+ "full_page": {
+ "type": "boolean",
+ "description": "Capture the full scrollable page (default: false)",
+ },
+ },
+ },
),
Tool(
name="devtools_performance",
description="Get performance metrics for the active page including page load time, DNS lookup, TCP connect, and response times.",
- inputSchema={"type": "object", "properties": {}}
- )
+ inputSchema={"type": "object", "properties": {}},
+ ),
]
# Browser Automation Tools (Unified LOCAL/REMOTE strategy)
@@ -544,22 +590,22 @@ async def list_tools() -> List[Tool]:
"mode": {
"type": "string",
"enum": ["local", "remote"],
- "description": "Automation mode: 'local' for Playwright, 'remote' for Shadow State API (default: local)"
+ "description": "Automation mode: 'local' for Playwright, 'remote' for Shadow State API (default: local)",
},
"url": {
"type": "string",
- "description": "For local mode: URL to navigate to. For remote mode: API endpoint URL."
+ "description": "For local mode: URL to navigate to. For remote mode: API endpoint URL.",
},
"session_id": {
"type": "string",
- "description": "For remote mode: Session ID to fetch logs from."
+ "description": "For remote mode: Session ID to fetch logs from.",
},
"headless": {
"type": "boolean",
- "description": "For local mode: Run browser headless (default: true)"
- }
- }
- }
+ "description": "For local mode: Run browser headless (default: true)",
+ },
+ },
+ },
),
Tool(
name="browser_get_logs",
@@ -570,14 +616,14 @@ async def list_tools() -> List[Tool]:
"level": {
"type": "string",
"enum": ["all", "log", "warn", "error", "info", "debug"],
- "description": "Filter by log level (default: all)"
+ "description": "Filter by log level (default: all)",
},
"limit": {
"type": "integer",
- "description": "Maximum number of logs to return (default: 100)"
- }
- }
- }
+ "description": "Maximum number of logs to return (default: 100)",
+ },
+ },
+ },
),
Tool(
name="browser_screenshot",
@@ -587,19 +633,19 @@ async def list_tools() -> List[Tool]:
"properties": {
"selector": {
"type": "string",
- "description": "CSS selector to capture specific element. If omitted, captures viewport."
+ "description": "CSS selector to capture specific element. If omitted, captures viewport.",
},
"full_page": {
"type": "boolean",
- "description": "Capture full scrollable page (default: false)"
- }
- }
- }
+ "description": "Capture full scrollable page (default: false)",
+ },
+ },
+ },
),
Tool(
name="browser_dom_snapshot",
description="Get current DOM state as HTML. Works in both LOCAL and REMOTE modes.",
- inputSchema={"type": "object", "properties": {}}
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="browser_get_errors",
@@ -609,10 +655,10 @@ async def list_tools() -> List[Tool]:
"properties": {
"limit": {
"type": "integer",
- "description": "Maximum number of errors to return (default: 50)"
+ "description": "Maximum number of errors to return (default: 50)",
}
- }
- }
+ },
+ },
),
Tool(
name="browser_accessibility_audit",
@@ -622,21 +668,21 @@ async def list_tools() -> List[Tool]:
"properties": {
"selector": {
"type": "string",
- "description": "CSS selector to audit specific element. If omitted, audits entire page."
+ "description": "CSS selector to audit specific element. If omitted, audits entire page.",
}
- }
- }
+ },
+ },
),
Tool(
name="browser_performance",
description="Get Core Web Vitals and performance metrics (TTFB, FCP, LCP, CLS).",
- inputSchema={"type": "object", "properties": {}}
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="browser_close",
description="Close the browser automation session and clean up resources.",
- inputSchema={"type": "object", "properties": {}}
- )
+ inputSchema={"type": "object", "properties": {}},
+ ),
]
# Context Compiler Tools
@@ -649,21 +695,21 @@ async def list_tools() -> List[Tool]:
"properties": {
"manifest_path": {
"type": "string",
- "description": "Absolute path to ds.config.json"
+ "description": "Absolute path to ds.config.json",
},
"debug": {
"type": "boolean",
"description": "Enable debug provenance tracking",
- "default": False
+ "default": False,
},
"force_refresh": {
"type": "boolean",
"description": "Bypass cache and recompile",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["manifest_path"]
- }
+ "required": ["manifest_path"],
+ },
),
Tool(
name="dss_resolve_token",
@@ -673,20 +719,20 @@ async def list_tools() -> List[Tool]:
"properties": {
"manifest_path": {
"type": "string",
- "description": "Absolute path to ds.config.json"
+ "description": "Absolute path to ds.config.json",
},
"token_path": {
"type": "string",
- "description": "Dot-notation path to token (e.g. 'colors.primary')"
+ "description": "Dot-notation path to token (e.g. 'colors.primary')",
},
"force_refresh": {
"type": "boolean",
"description": "Bypass cache and recompile",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["manifest_path", "token_path"]
- }
+ "required": ["manifest_path", "token_path"],
+ },
),
Tool(
name="dss_validate_manifest",
@@ -696,28 +742,22 @@ async def list_tools() -> List[Tool]:
"properties": {
"manifest_path": {
"type": "string",
- "description": "Absolute path to ds.config.json"
+ "description": "Absolute path to ds.config.json",
}
},
- "required": ["manifest_path"]
- }
+ "required": ["manifest_path"],
+ },
),
Tool(
name="dss_list_skins",
description="List all available design system skins in the registry.",
- inputSchema={
- "type": "object",
- "properties": {}
- }
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="dss_get_compiler_status",
description="Get Context Compiler health and configuration status.",
- inputSchema={
- "type": "object",
- "properties": {}
- }
- )
+ inputSchema={"type": "object", "properties": {}},
+ ),
]
# Project Management Tools
@@ -728,25 +768,19 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "path": {
- "type": "string",
- "description": "Directory path for the new project"
- },
- "name": {
- "type": "string",
- "description": "Project name"
- },
+ "path": {"type": "string", "description": "Directory path for the new project"},
+ "name": {"type": "string", "description": "Project name"},
"description": {
"type": "string",
- "description": "Optional project description"
+ "description": "Optional project description",
},
"skin": {
"type": "string",
- "description": "Base skin to extend (e.g., 'shadcn', 'material')"
- }
+ "description": "Base skin to extend (e.g., 'shadcn', 'material')",
+ },
},
- "required": ["path", "name"]
- }
+ "required": ["path", "name"],
+ },
),
Tool(
name="dss_project_add_figma_team",
@@ -756,19 +790,16 @@ async def list_tools() -> List[Tool]:
"properties": {
"project_path": {
"type": "string",
- "description": "Path to DSS project directory"
- },
- "team_id": {
- "type": "string",
- "description": "Figma team ID"
+ "description": "Path to DSS project directory",
},
+ "team_id": {"type": "string", "description": "Figma team ID"},
"figma_token": {
"type": "string",
- "description": "Figma personal access token (optional, uses FIGMA_TOKEN env var if not provided)"
- }
+ "description": "Figma personal access token (optional, uses FIGMA_TOKEN env var if not provided)",
+ },
},
- "required": ["project_path", "team_id"]
- }
+ "required": ["project_path", "team_id"],
+ },
),
Tool(
name="dss_project_add_figma_file",
@@ -778,23 +809,20 @@ async def list_tools() -> List[Tool]:
"properties": {
"project_path": {
"type": "string",
- "description": "Path to DSS project directory"
- },
- "file_key": {
- "type": "string",
- "description": "Figma file key (from URL)"
+ "description": "Path to DSS project directory",
},
+ "file_key": {"type": "string", "description": "Figma file key (from URL)"},
"file_name": {
"type": "string",
- "description": "Human-readable name for the file"
+ "description": "Human-readable name for the file",
},
"figma_token": {
"type": "string",
- "description": "Figma personal access token (optional)"
- }
+ "description": "Figma personal access token (optional)",
+ },
},
- "required": ["project_path", "file_key", "file_name"]
- }
+ "required": ["project_path", "file_key", "file_name"],
+ },
),
Tool(
name="dss_project_sync",
@@ -804,20 +832,20 @@ async def list_tools() -> List[Tool]:
"properties": {
"project_path": {
"type": "string",
- "description": "Path to DSS project directory"
+ "description": "Path to DSS project directory",
},
"file_keys": {
"type": "array",
"items": {"type": "string"},
- "description": "Optional: specific file keys to sync (syncs all if not provided)"
+ "description": "Optional: specific file keys to sync (syncs all if not provided)",
},
"figma_token": {
"type": "string",
- "description": "Figma personal access token (optional)"
- }
+ "description": "Figma personal access token (optional)",
+ },
},
- "required": ["project_path"]
- }
+ "required": ["project_path"],
+ },
),
Tool(
name="dss_project_build",
@@ -827,11 +855,11 @@ async def list_tools() -> List[Tool]:
"properties": {
"project_path": {
"type": "string",
- "description": "Path to DSS project directory"
+ "description": "Path to DSS project directory",
}
},
- "required": ["project_path"]
- }
+ "required": ["project_path"],
+ },
),
Tool(
name="dss_project_graph_analysis",
@@ -841,19 +869,16 @@ async def list_tools() -> List[Tool]:
"properties": {
"project_path": {
"type": "string",
- "description": "Path to the project directory to be analyzed."
+ "description": "Path to the project directory to be analyzed.",
}
},
- "required": ["project_path"]
- }
+ "required": ["project_path"],
+ },
),
Tool(
name="dss_project_list",
description="List all registered DSS projects.",
- inputSchema={
- "type": "object",
- "properties": {}
- }
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="dss_project_info",
@@ -863,11 +888,11 @@ async def list_tools() -> List[Tool]:
"properties": {
"project_path": {
"type": "string",
- "description": "Path to DSS project directory"
+ "description": "Path to DSS project directory",
}
},
- "required": ["project_path"]
- }
+ "required": ["project_path"],
+ },
),
Tool(
name="dss_project_export_context",
@@ -877,11 +902,11 @@ async def list_tools() -> List[Tool]:
"properties": {
"project_path": {
"type": "string",
- "description": "Path to the project directory."
+ "description": "Path to the project directory.",
}
},
- "required": ["project_path"]
- }
+ "required": ["project_path"],
+ },
),
Tool(
name="dss_figma_discover",
@@ -889,17 +914,14 @@ async def list_tools() -> List[Tool]:
inputSchema={
"type": "object",
"properties": {
- "team_id": {
- "type": "string",
- "description": "Figma team ID"
- },
+ "team_id": {"type": "string", "description": "Figma team ID"},
"figma_token": {
"type": "string",
- "description": "Figma personal access token (optional)"
- }
+ "description": "Figma personal access token (optional)",
+ },
},
- "required": ["team_id"]
- }
+ "required": ["team_id"],
+ },
),
Tool(
name="dss_core_sync",
@@ -909,38 +931,29 @@ async def list_tools() -> List[Tool]:
"properties": {
"force": {
"type": "boolean",
- "description": "Force sync even if recently synced"
+ "description": "Force sync even if recently synced",
},
"figma_token": {
"type": "string",
- "description": "Figma personal access token (optional)"
- }
- }
- }
+ "description": "Figma personal access token (optional)",
+ },
+ },
+ },
),
Tool(
name="dss_core_status",
description="Get DSS core sync status including Figma reference and synced files.",
- inputSchema={
- "type": "object",
- "properties": {}
- }
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="dss_core_tokens",
description="Get DSS core tokens (synced from shadcn/ui Figma).",
- inputSchema={
- "type": "object",
- "properties": {}
- }
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="dss_core_themes",
description="Get DSS core themes (light/dark based on shadcn/ui).",
- inputSchema={
- "type": "object",
- "properties": {}
- }
+ inputSchema={"type": "object", "properties": {}},
),
Tool(
name="dss_rate_limit_status",
@@ -950,10 +963,10 @@ async def list_tools() -> List[Tool]:
"properties": {
"figma_token": {
"type": "string",
- "description": "Figma personal access token (optional)"
+ "description": "Figma personal access token (optional)",
}
- }
- }
+ },
+ },
),
]
@@ -964,30 +977,38 @@ async def list_tools() -> List[Tool]:
# TOOL DISPATCHER
# =============================================================================
+
@server.call_tool()
async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
- """Handle tool calls"""
+ """Handle tool calls."""
# Tools that work without DSS module imports (use scripts directly)
DSS_INDEPENDENT_TOOLS = {"dss_sync_figma", "dss_get_status", "dss_list_themes"}
if not DSS_AVAILABLE and name.startswith("dss_") and name not in DSS_INDEPENDENT_TOOLS:
- return [TextContent(
- type="text",
- text=json.dumps({
- "success": False,
- "error": f"DSS modules not available: {DSS_IMPORT_ERROR}"
- }, indent=2)
- )]
+ return [
+ TextContent(
+ type="text",
+ text=json.dumps(
+ {"success": False, "error": f"DSS modules not available: {DSS_IMPORT_ERROR}"},
+ indent=2,
+ ),
+ )
+ ]
if not PLAYWRIGHT_AVAILABLE and name.startswith("devtools_"):
- return [TextContent(
- type="text",
- text=json.dumps({
- "success": False,
- "error": "Playwright not installed. Run: pip install playwright && playwright install chromium"
- }, indent=2)
- )]
+ return [
+ TextContent(
+ type="text",
+ text=json.dumps(
+ {
+ "success": False,
+ "error": "Playwright not installed. Run: pip install playwright && playwright install chromium",
+ },
+ indent=2,
+ ),
+ )
+ ]
try:
# DSS Tools
@@ -996,13 +1017,13 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
elif name == "dss_extract_tokens":
result = await extract_tokens(
arguments.get("path", "."),
- arguments.get("sources", ["css", "scss", "tailwind", "json"])
+ arguments.get("sources", ["css", "scss", "tailwind", "json"]),
)
elif name == "dss_generate_theme":
result = await generate_theme(
arguments.get("tokens", {}),
arguments.get("format", "css"),
- arguments.get("theme_name", "default")
+ arguments.get("theme_name", "default"),
)
elif name == "dss_list_themes":
result = await list_themes()
@@ -1012,8 +1033,7 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
result = await audit_components(arguments.get("path", "."))
elif name == "dss_setup_storybook":
result = await setup_storybook(
- arguments.get("path", "."),
- arguments.get("action", "scan")
+ arguments.get("path", "."), arguments.get("action", "scan")
)
elif name == "dss_sync_figma":
result = await sync_figma(arguments.get("file_key", ""))
@@ -1023,18 +1043,16 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
result = await transform_tokens(
arguments.get("tokens", {}),
arguments.get("input_format", "json"),
- arguments.get("output_format", "css")
+ arguments.get("output_format", "css"),
)
# DevTools Tools
elif name == "devtools_launch":
result = await devtools_launch_impl(
- url=arguments.get("url", "about:blank"),
- headless=arguments.get("headless", True)
+ url=arguments.get("url", "about:blank"), headless=arguments.get("headless", True)
)
elif name == "devtools_connect":
result = await devtools_connect_impl(
- port=arguments.get("port", 9222),
- host=arguments.get("host", "localhost")
+ port=arguments.get("port", 9222), host=arguments.get("host", "localhost")
)
elif name == "devtools_disconnect":
result = await devtools_disconnect_impl()
@@ -1046,12 +1064,11 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
result = await devtools_console_logs_impl(
level=arguments.get("level", "all"),
limit=arguments.get("limit", 100),
- clear=arguments.get("clear", False)
+ clear=arguments.get("clear", False),
)
elif name == "devtools_network_requests":
result = await devtools_network_requests_impl(
- filter_url=arguments.get("filter_url", ""),
- limit=arguments.get("limit", 50)
+ filter_url=arguments.get("filter_url", ""), limit=arguments.get("limit", 50)
)
elif name == "devtools_evaluate":
result = await devtools_evaluate_impl(expression=arguments.get("expression"))
@@ -1059,13 +1076,11 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
result = await devtools_query_dom_impl(selector=arguments.get("selector"))
elif name == "devtools_goto":
result = await devtools_goto_impl(
- url=arguments.get("url"),
- wait_until=arguments.get("wait_until", "domcontentloaded")
+ url=arguments.get("url"), wait_until=arguments.get("wait_until", "domcontentloaded")
)
elif name == "devtools_screenshot":
result = await devtools_screenshot_impl(
- selector=arguments.get("selector"),
- full_page=arguments.get("full_page", False)
+ selector=arguments.get("selector"), full_page=arguments.get("full_page", False)
)
elif name == "devtools_performance":
result = await devtools_performance_impl()
@@ -1075,28 +1090,22 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
mode=arguments.get("mode", "local"),
url=arguments.get("url"),
session_id=arguments.get("session_id"),
- headless=arguments.get("headless", True)
+ headless=arguments.get("headless", True),
)
elif name == "browser_get_logs":
result = await browser_get_logs_impl(
- level=arguments.get("level", "all"),
- limit=arguments.get("limit", 100)
+ level=arguments.get("level", "all"), limit=arguments.get("limit", 100)
)
elif name == "browser_screenshot":
result = await browser_screenshot_impl(
- selector=arguments.get("selector"),
- full_page=arguments.get("full_page", False)
+ selector=arguments.get("selector"), full_page=arguments.get("full_page", False)
)
elif name == "browser_dom_snapshot":
result = await browser_dom_snapshot_impl()
elif name == "browser_get_errors":
- result = await browser_get_errors_impl(
- limit=arguments.get("limit", 50)
- )
+ result = await browser_get_errors_impl(limit=arguments.get("limit", 50))
elif name == "browser_accessibility_audit":
- result = await browser_accessibility_audit_impl(
- selector=arguments.get("selector")
- )
+ result = await browser_accessibility_audit_impl(selector=arguments.get("selector"))
elif name == "browser_performance":
result = await browser_performance_impl()
elif name == "browser_close":
@@ -1106,14 +1115,14 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
if not CONTEXT_COMPILER_AVAILABLE:
result = {
"success": False,
- "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}"
+ "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}",
}
else:
try:
context_json = get_active_context(
arguments.get("manifest_path"),
arguments.get("debug", False),
- arguments.get("force_refresh", False)
+ arguments.get("force_refresh", False),
)
result = {"success": True, "context": json.loads(context_json)}
except Exception as e:
@@ -1123,16 +1132,20 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
if not CONTEXT_COMPILER_AVAILABLE:
result = {
"success": False,
- "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}"
+ "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}",
}
else:
try:
token_value = resolve_token(
arguments.get("manifest_path"),
arguments.get("token_path"),
- arguments.get("force_refresh", False)
+ arguments.get("force_refresh", False),
)
- result = {"success": True, "token_path": arguments.get("token_path"), "value": token_value}
+ result = {
+ "success": True,
+ "token_path": arguments.get("token_path"),
+ "value": token_value,
+ }
except Exception as e:
result = {"success": False, "error": str(e)}
@@ -1140,7 +1153,7 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
if not CONTEXT_COMPILER_AVAILABLE:
result = {
"success": False,
- "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}"
+ "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}",
}
else:
try:
@@ -1153,7 +1166,7 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
if not CONTEXT_COMPILER_AVAILABLE:
result = {
"success": False,
- "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}"
+ "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}",
}
else:
try:
@@ -1166,7 +1179,7 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
if not CONTEXT_COMPILER_AVAILABLE:
result = {
"success": False,
- "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}"
+ "error": f"Context Compiler not available: {CONTEXT_COMPILER_IMPORT_ERROR}",
}
else:
try:
@@ -1181,54 +1194,44 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
path=arguments.get("path"),
name=arguments.get("name"),
description=arguments.get("description"),
- skin=arguments.get("skin")
+ skin=arguments.get("skin"),
)
elif name == "dss_project_add_figma_team":
result = await project_add_figma_team_impl(
project_path=arguments.get("project_path"),
team_id=arguments.get("team_id"),
- figma_token=arguments.get("figma_token")
+ figma_token=arguments.get("figma_token"),
)
elif name == "dss_project_add_figma_file":
result = await project_add_figma_file_impl(
project_path=arguments.get("project_path"),
file_key=arguments.get("file_key"),
file_name=arguments.get("file_name"),
- figma_token=arguments.get("figma_token")
+ figma_token=arguments.get("figma_token"),
)
elif name == "dss_project_sync":
result = await project_sync_impl(
project_path=arguments.get("project_path"),
file_keys=arguments.get("file_keys"),
- figma_token=arguments.get("figma_token")
+ figma_token=arguments.get("figma_token"),
)
elif name == "dss_project_build":
- result = await project_build_impl(
- project_path=arguments.get("project_path")
- )
+ result = await project_build_impl(project_path=arguments.get("project_path"))
elif name == "dss_project_graph_analysis":
- result = await project_graph_analysis_impl(
- project_path=arguments.get("project_path")
- )
+ result = await project_graph_analysis_impl(project_path=arguments.get("project_path"))
elif name == "dss_project_list":
result = await project_list_impl()
elif name == "dss_project_info":
- result = await project_info_impl(
- project_path=arguments.get("project_path")
- )
+ result = await project_info_impl(project_path=arguments.get("project_path"))
elif name == "dss_project_export_context":
- result = await project_export_context_impl(
- project_path=arguments.get("project_path")
- )
+ result = await project_export_context_impl(project_path=arguments.get("project_path"))
elif name == "dss_figma_discover":
result = await figma_discover_impl(
- team_id=arguments.get("team_id"),
- figma_token=arguments.get("figma_token")
+ team_id=arguments.get("team_id"), figma_token=arguments.get("figma_token")
)
elif name == "dss_core_sync":
result = await dss_core_sync_impl(
- force=arguments.get("force", False),
- figma_token=arguments.get("figma_token")
+ force=arguments.get("force", False), figma_token=arguments.get("figma_token")
)
elif name == "dss_core_status":
result = await dss_core_status_impl()
@@ -1237,35 +1240,29 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
elif name == "dss_core_themes":
result = await dss_core_themes_impl()
elif name == "dss_rate_limit_status":
- result = await dss_rate_limit_status_impl(
- figma_token=arguments.get("figma_token")
- )
+ result = await dss_rate_limit_status_impl(figma_token=arguments.get("figma_token"))
else:
result = {"success": False, "error": f"Unknown tool: {name}"}
- return [TextContent(
- type="text",
- text=json.dumps(safe_serialize(result), indent=2)
- )]
+ return [TextContent(type="text", text=json.dumps(safe_serialize(result), indent=2))]
except Exception as e:
logger.exception(f"Error in tool {name}")
- return [TextContent(
- type="text",
- text=json.dumps({
- "success": False,
- "error": str(e),
- "tool": name
- }, indent=2)
- )]
+ return [
+ TextContent(
+ type="text",
+ text=json.dumps({"success": False, "error": str(e), "tool": name}, indent=2),
+ )
+ ]
# =============================================================================
# DSS TOOL IMPLEMENTATIONS
# =============================================================================
+
async def analyze_project(path: str) -> Dict[str, Any]:
- """Analyze a project for design system patterns"""
+ """Analyze a project for design system patterns."""
project_path = Path(path).resolve()
if not project_path.exists():
@@ -1287,20 +1284,20 @@ async def analyze_project(path: str) -> Dict[str, Any]:
"analysis": {
"scan": safe_serialize(scan_result),
"react_components": safe_serialize(react_result),
- "styles": safe_serialize(style_result)
+ "styles": safe_serialize(style_result),
},
"summary": {
- "files_scanned": getattr(scan_result, 'files_count', 0),
- "components_found": len(getattr(react_result, 'components', [])),
- "style_patterns": len(getattr(style_result, 'patterns', []))
- }
+ "files_scanned": getattr(scan_result, "files_count", 0),
+ "components_found": len(getattr(react_result, "components", [])),
+ "style_patterns": len(getattr(style_result, "patterns", [])),
+ },
}
except Exception as e:
return {"success": False, "error": str(e)}
async def extract_tokens(path: str, sources: List[str]) -> Dict[str, Any]:
- """Extract design tokens from various sources"""
+ """Extract design tokens from various sources."""
target_path = Path(path).resolve()
if not target_path.exists():
@@ -1310,8 +1307,10 @@ async def extract_tokens(path: str, sources: List[str]) -> Dict[str, Any]:
loop = asyncio.get_event_loop()
all_tokens = []
source_map = {
- "css": CSSTokenSource, "scss": SCSSTokenSource,
- "tailwind": TailwindTokenSource, "json": JSONTokenSource
+ "css": CSSTokenSource,
+ "scss": SCSSTokenSource,
+ "tailwind": TailwindTokenSource,
+ "json": JSONTokenSource,
}
for source_type in sources:
@@ -1325,49 +1324,61 @@ async def extract_tokens(path: str, sources: List[str]) -> Dict[str, Any]:
merger = TokenMerger(strategy=MergeStrategy.PREFER_LATEST)
merged = merger.merge(all_tokens)
return {
- "success": True, "path": str(target_path), "sources": sources,
+ "success": True,
+ "path": str(target_path),
+ "sources": sources,
"tokens": safe_serialize(merged),
- "token_count": len(merged) if hasattr(merged, '__len__') else 0
+ "token_count": len(merged) if hasattr(merged, "__len__") else 0,
}
else:
return {
- "success": True, "path": str(target_path), "sources": sources,
- "tokens": [], "token_count": 0, "message": "No tokens found"
+ "success": True,
+ "path": str(target_path),
+ "sources": sources,
+ "tokens": [],
+ "token_count": 0,
+ "message": "No tokens found",
}
except Exception as e:
return {"success": False, "error": str(e)}
async def generate_theme(tokens: Dict, format: str, theme_name: str) -> Dict[str, Any]:
- """Generate theme files from tokens"""
+ """Generate theme files from tokens."""
try:
loop = asyncio.get_event_loop()
theme = Theme(name=theme_name, tokens=tokens)
sd_wrapper = StyleDictionaryWrapper()
- result = await loop.run_in_executor(None, lambda: sd_wrapper.transform_theme(theme, output_format=format))
+ result = await loop.run_in_executor(
+ None, lambda: sd_wrapper.transform_theme(theme, output_format=format)
+ )
return {
- "success": result.get("success", False), "format": format,
- "theme_name": theme_name, "files": result.get("files", {}),
- "errors": result.get("errors")
+ "success": result.get("success", False),
+ "format": format,
+ "theme_name": theme_name,
+ "files": result.get("files", {}),
+ "errors": result.get("errors"),
}
except Exception as e:
return {"success": False, "error": str(e)}
async def list_themes() -> Dict[str, Any]:
- """List available themes"""
+ """List available themes."""
try:
from dss.themes import default_themes
- themes = list(getattr(default_themes, 'THEMES', {}).keys())
+
+ themes = list(getattr(default_themes, "THEMES", {}).keys())
return {"success": True, "themes": themes, "count": len(themes)}
except Exception as e:
return {"success": False, "error": str(e)}
async def get_status(format: str = "json") -> Dict[str, Any]:
- """Get DSS system status"""
+ """Get DSS system status."""
try:
from dss.status import StatusDashboard
+
dashboard = StatusDashboard()
if format == "dashboard":
return {"success": True, "format": "dashboard", "dashboard": dashboard.render_text()}
@@ -1378,16 +1389,19 @@ async def get_status(format: str = "json") -> Dict[str, Any]:
system_info = manager.get_system_info()
dependencies = manager.check_dependencies()
return {
- "success": True, "version": dss.__version__,
- "system_info": system_info, "dependencies": dependencies,
- "healthy": all(dependencies.values()), "timestamp": datetime.now().isoformat()
+ "success": True,
+ "version": dss.__version__,
+ "system_info": system_info,
+ "dependencies": dependencies,
+ "healthy": all(dependencies.values()),
+ "timestamp": datetime.now().isoformat(),
}
except Exception as e:
return {"success": False, "error": str(e)}
async def audit_components(path: str) -> Dict[str, Any]:
- """Audit React components for design system adoption"""
+ """Audit React components for design system adoption."""
project_path = Path(path).resolve()
if not project_path.exists():
return {"success": False, "error": f"Path does not exist: {path}"}
@@ -1399,22 +1413,23 @@ async def audit_components(path: str) -> Dict[str, Any]:
react_result = await loop.run_in_executor(None, react_analyzer.analyze)
style_result = await loop.run_in_executor(None, style_analyzer.analyze)
graph_result = await loop.run_in_executor(None, graph.build)
- hardcoded = getattr(style_result, 'hardcoded_values', [])
+ hardcoded = getattr(style_result, "hardcoded_values", [])
return {
- "success": True, "path": str(project_path),
+ "success": True,
+ "path": str(project_path),
"audit": {
"components": safe_serialize(react_result),
"styles": safe_serialize(style_result),
- "dependencies": safe_serialize(graph_result)
+ "dependencies": safe_serialize(graph_result),
},
- "issues": {"hardcoded_values": hardcoded}
+ "issues": {"hardcoded_values": hardcoded},
}
except Exception as e:
return {"success": False, "error": str(e)}
async def setup_storybook(path: str, action: str) -> Dict[str, Any]:
- """Setup or configure Storybook"""
+ """Setup or configure Storybook."""
project_path = Path(path).resolve()
if not project_path.exists():
return {"success": False, "error": f"Path does not exist: {path}"}
@@ -1427,7 +1442,11 @@ async def setup_storybook(path: str, action: str) -> Dict[str, Any]:
elif action == "generate":
generator = StoryGenerator(project_path)
result = await loop.run_in_executor(None, generator.generate)
- return {"success": True, "action": "generate", "stories_created": safe_serialize(result)}
+ return {
+ "success": True,
+ "action": "generate",
+ "stories_created": safe_serialize(result),
+ }
elif action == "configure":
theme_gen = ThemeGenerator(project_path)
result = await loop.run_in_executor(None, theme_gen.generate)
@@ -1439,7 +1458,7 @@ async def setup_storybook(path: str, action: str) -> Dict[str, Any]:
async def sync_figma(file_key: str) -> Dict[str, Any]:
- """Sync tokens from Figma using intelligent sync v2.0
+ """Sync tokens from Figma using intelligent sync v2.0.
Features:
- Rate limiting with exponential backoff
@@ -1464,14 +1483,16 @@ async def sync_figma(file_key: str) -> Dict[str, Any]:
pass
if not figma_token:
- return {"success": False, "error": "FIGMA_TOKEN not configured. Set env var or add to .dss/config/figma.json"}
+ return {
+ "success": False,
+ "error": "FIGMA_TOKEN not configured. Set env var or add to .dss/config/figma.json",
+ }
try:
# Import intelligent sync from scripts
scripts_dir = Path(__file__).parent.parent.parent / "scripts"
sys.path.insert(0, str(scripts_dir))
- from importlib import import_module
import importlib.util
spec = importlib.util.spec_from_file_location("figma_sync", scripts_dir / "figma-sync.py")
@@ -1480,16 +1501,17 @@ async def sync_figma(file_key: str) -> Dict[str, Any]:
# Run intelligent sync
success = await figma_sync_module.intelligent_sync(
- file_key=file_key,
- token=figma_token,
- force=True,
- verbose=False
+ file_key=file_key, token=figma_token, force=True, verbose=False
)
if success:
# Load results
- tokens_path = Path(__file__).parent.parent.parent / ".dss/data/_system/tokens/figma-tokens.json"
- components_path = Path(__file__).parent.parent.parent / ".dss/components/figma-registry.json"
+ tokens_path = (
+ Path(__file__).parent.parent.parent / ".dss/data/_system/tokens/figma-tokens.json"
+ )
+ components_path = (
+ Path(__file__).parent.parent.parent / ".dss/components/figma-registry.json"
+ )
tokens = {}
components = {}
@@ -1503,7 +1525,8 @@ async def sync_figma(file_key: str) -> Dict[str, Any]:
components = json.load(f)
token_count = sum(
- len(v) for k, v in tokens.items()
+ len(v)
+ for k, v in tokens.items()
if not k.startswith("$") and not k.startswith("_") and isinstance(v, dict)
)
@@ -1512,21 +1535,19 @@ async def sync_figma(file_key: str) -> Dict[str, Any]:
"file_key": file_key,
"tokens_extracted": token_count,
"components_extracted": components.get("component_count", 0),
- "output_files": {
- "tokens": str(tokens_path),
- "components": str(components_path)
- }
+ "output_files": {"tokens": str(tokens_path), "components": str(components_path)},
}
else:
return {"success": False, "error": "Sync failed - check logs"}
except Exception as e:
import traceback
+
return {"success": False, "error": str(e), "traceback": traceback.format_exc()}
async def find_quick_wins(path: str) -> Dict[str, Any]:
- """Find quick win opportunities"""
+ """Find quick win opportunities."""
project_path = Path(path).resolve()
if not project_path.exists():
return {"success": False, "error": f"Path does not exist: {path}"}
@@ -1535,25 +1556,30 @@ async def find_quick_wins(path: str) -> Dict[str, Any]:
finder = QuickWinFinder(project_path)
quick_wins = await loop.run_in_executor(None, finder.find)
return {
- "success": True, "path": str(project_path),
+ "success": True,
+ "path": str(project_path),
"quick_wins": safe_serialize(quick_wins),
- "count": len(quick_wins) if quick_wins else 0
+ "count": len(quick_wins) if quick_wins else 0,
}
except Exception as e:
return {"success": False, "error": str(e)}
async def transform_tokens(tokens: Dict, input_format: str, output_format: str) -> Dict[str, Any]:
- """Transform tokens between formats"""
+ """Transform tokens between formats."""
try:
loop = asyncio.get_event_loop()
theme = Theme(name="transform_temp", tokens=tokens)
sd_wrapper = StyleDictionaryWrapper()
- result = await loop.run_in_executor(None, lambda: sd_wrapper.transform_theme(theme, output_format=output_format))
+ result = await loop.run_in_executor(
+ None, lambda: sd_wrapper.transform_theme(theme, output_format=output_format)
+ )
return {
- "success": result.get("success", False), "input_format": input_format,
- "output_format": output_format, "transformed": result.get("files", {}),
- "errors": result.get("errors")
+ "success": result.get("success", False),
+ "input_format": input_format,
+ "output_format": output_format,
+ "transformed": result.get("files", {}),
+ "errors": result.get("errors"),
}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -1563,6 +1589,7 @@ async def transform_tokens(tokens: Dict, input_format: str, output_format: str)
# CHROME DEVTOOLS IMPLEMENTATIONS
# =============================================================================
+
def _get_active_page():
"""Retrieve the active Playwright Page from DevTools state.
@@ -1602,13 +1629,15 @@ async def _on_console(msg):
msg: Playwright ConsoleMessage object containing type, text, args, and location.
"""
try:
- devtools.console_logs.append({
- "timestamp": datetime.now().isoformat(),
- "type": msg.type,
- "text": msg.text,
- "args": [str(arg) for arg in msg.args] if msg.args else [],
- "location": getattr(msg, 'location', {})
- })
+ devtools.console_logs.append(
+ {
+ "timestamp": datetime.now().isoformat(),
+ "type": msg.type,
+ "text": msg.text,
+ "args": [str(arg) for arg in msg.args] if msg.args else [],
+ "location": getattr(msg, "location", {}),
+ }
+ )
except Exception as e:
logger.debug(f"Error capturing console message: {e}")
@@ -1623,13 +1652,15 @@ async def _on_request(request):
request: Playwright Request object containing url, method, headers, resource_type.
"""
try:
- devtools.network_requests.append({
- "timestamp": datetime.now().isoformat(),
- "url": request.url,
- "method": request.method,
- "headers": dict(request.headers) if request.headers else {},
- "resource_type": request.resource_type
- })
+ devtools.network_requests.append(
+ {
+ "timestamp": datetime.now().isoformat(),
+ "url": request.url,
+ "method": request.method,
+ "headers": dict(request.headers) if request.headers else {},
+ "resource_type": request.resource_type,
+ }
+ )
except Exception as e:
logger.debug(f"Error capturing network request: {e}")
@@ -1654,7 +1685,7 @@ async def devtools_launch_impl(url: str = "about:blank", headless: bool = True)
devtools.playwright = await async_playwright().start()
devtools.browser = await devtools.playwright.chromium.launch(
headless=headless,
- args=['--no-sandbox', '--disable-dev-shm-usage'] # Required for Docker/remote
+ args=["--no-sandbox", "--disable-dev-shm-usage"], # Required for Docker/remote
)
devtools.connected = True
@@ -1679,11 +1710,11 @@ async def devtools_launch_impl(url: str = "about:blank", headless: bool = True)
return {
"success": True,
- "message": f"Launched headless Chromium",
+ "message": "Launched headless Chromium",
"headless": headless,
"url": url,
"pages_found": len(devtools.pages),
- "active_page_id": devtools.active_page_id
+ "active_page_id": devtools.active_page_id,
}
except Exception as e:
await devtools_disconnect_impl()
@@ -1702,8 +1733,7 @@ async def devtools_connect_impl(port: int = 9222, host: str = "localhost") -> Di
devtools.playwright = await async_playwright().start()
# Use configurable timeout for CDP connection
devtools.browser = await devtools.playwright.chromium.connect_over_cdp(
- f"http://{host}:{port}",
- timeout=DEVTOOLS_CONNECTION_TIMEOUT_MS
+ f"http://{host}:{port}", timeout=DEVTOOLS_CONNECTION_TIMEOUT_MS
)
devtools.connected = True
@@ -1719,11 +1749,14 @@ async def devtools_connect_impl(port: int = 9222, host: str = "localhost") -> Di
"success": True,
"message": f"Connected to Chrome DevTools at {host}:{port}",
"pages_found": len(devtools.pages),
- "active_page_id": devtools.active_page_id
+ "active_page_id": devtools.active_page_id,
}
except Exception as e:
await devtools_disconnect_impl()
- return {"success": False, "error": f"Connection failed: {str(e)}. Is Chrome running with --remote-debugging-port={port}?"}
+ return {
+ "success": False,
+ "error": f"Connection failed: {str(e)}. Is Chrome running with --remote-debugging-port={port}?",
+ }
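Usage sketch, assuming Chrome was started separately with --remote-debugging-port=9222:

import asyncio

async def demo_connect():
    result = await devtools_connect_impl(port=9222)
    print(result.get("message") or result.get("error"))

asyncio.run(demo_connect())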
async def devtools_disconnect_impl() -> Dict[str, Any]:
@@ -1790,7 +1823,7 @@ async def devtools_list_pages_impl() -> Dict[str, Any]:
"success": True,
"pages": page_list,
"count": len(page_list),
- "active_page_id": devtools.active_page_id
+ "active_page_id": devtools.active_page_id,
}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -1858,33 +1891,35 @@ async def devtools_goto_impl(url: str, wait_until: str = "domcontentloaded") ->
response = await page.goto(url, wait_until=wait_until)
status = response.status if response else None
- return {
- "success": True,
- "url": url,
- "status": status,
- "title": await page.title()
- }
+ return {"success": True, "url": url, "status": status, "title": await page.title()}
except (ConnectionError, ValueError) as e:
return {"success": False, "error": str(e)}
except Exception as e:
return {"success": False, "error": f"Navigation failed: {str(e)}"}
-async def devtools_console_logs_impl(level: str = "all", limit: int = 100, clear: bool = False) -> Dict[str, Any]:
+async def devtools_console_logs_impl(
+ level: str = "all", limit: int = 100, clear: bool = False
+) -> Dict[str, Any]:
"""Get console messages."""
try:
_get_active_page()
logs = list(devtools.console_logs)
if level != "all":
- logs = [log for log in logs if log.get('type') == level]
+ logs = [log for log in logs if log.get("type") == level]
result = logs[-limit:]
if clear:
devtools.console_logs.clear()
- return {"success": True, "logs": result, "count": len(result), "total_captured": len(devtools.console_logs)}
+ return {
+ "success": True,
+ "logs": result,
+ "count": len(result),
+            "total_captured": len(devtools.console_logs),  # 0 when clear=True
+ }
except (ConnectionError, ValueError) as e:
return {"success": False, "error": str(e)}
@@ -1896,10 +1931,15 @@ async def devtools_network_requests_impl(filter_url: str = "", limit: int = 50)
requests = list(devtools.network_requests)
if filter_url:
- requests = [req for req in requests if re.search(filter_url, req.get('url', ''))]
+ requests = [req for req in requests if re.search(filter_url, req.get("url", ""))]
result = requests[-limit:]
- return {"success": True, "requests": result, "count": len(result), "total_captured": len(devtools.network_requests)}
+ return {
+ "success": True,
+ "requests": result,
+ "count": len(result),
+ "total_captured": len(devtools.network_requests),
+ }
except (ConnectionError, ValueError) as e:
return {"success": False, "error": str(e)}
@@ -1939,12 +1979,16 @@ async def devtools_query_dom_impl(selector: str) -> Dict[str, Any]:
results = []
for el in elements[:50]: # Limit to 50 elements
try:
- results.append({
- "tag": await el.evaluate('el => el.tagName.toLowerCase()'),
- "id": await el.evaluate('el => el.id || null'),
- "classes": await el.evaluate('el => Array.from(el.classList).join(" ") || null'),
- "text": (await el.text_content() or "")[:200],
- })
+ results.append(
+ {
+ "tag": await el.evaluate("el => el.tagName.toLowerCase()"),
+ "id": await el.evaluate("el => el.id || null"),
+ "classes": await el.evaluate(
+ 'el => Array.from(el.classList).join(" ") || null'
+ ),
+ "text": (await el.text_content() or "")[:200],
+ }
+ )
except Exception:
continue
return {"success": True, "elements": results, "count": len(results)}
@@ -1967,7 +2011,7 @@ async def devtools_screenshot_impl(selector: str = None, full_page: bool = False
else:
screenshot_bytes = await page.screenshot(full_page=full_page)
- b64_image = base64.b64encode(screenshot_bytes).decode('utf-8')
+ b64_image = base64.b64encode(screenshot_bytes).decode("utf-8")
return {"success": True, "image_base64_png": b64_image, "size_bytes": len(screenshot_bytes)}
except (ConnectionError, ValueError) as e:
return {"success": False, "error": str(e)}
@@ -1979,7 +2023,8 @@ async def devtools_performance_impl() -> Dict[str, Any]:
"""Get Core Web Vitals and performance metrics."""
try:
page = _get_active_page()
- metrics = await page.evaluate("""() => {
+ metrics = await page.evaluate(
+ """() => {
const timing = window.performance.getEntriesByType('navigation')[0];
if (!timing) return null;
@@ -2001,7 +2046,8 @@ async def devtools_performance_impl() -> Dict[str, Any]:
// Memory (if available)
jsHeapSize: window.performance.memory ? Math.round(window.performance.memory.usedJSHeapSize / 1024 / 1024) : null
};
- }""")
+ }"""
+ )
if not metrics:
return {"success": False, "error": "Performance metrics not available for this page."}
@@ -2017,8 +2063,10 @@ async def devtools_performance_impl() -> Dict[str, Any]:
# BROWSER AUTOMATION IMPLEMENTATIONS (Unified LOCAL/REMOTE)
# =============================================================================
+
class DummyContext:
- """Dummy context for LocalBrowserStrategy initialization"""
+ """Dummy context for LocalBrowserStrategy initialization."""
+
def __init__(self, session_id: str = "local"):
self.session_id = session_id
@@ -2027,7 +2075,7 @@ async def browser_init_impl(
mode: str = "local",
url: Optional[str] = None,
session_id: Optional[str] = None,
- headless: bool = True
+ headless: bool = True,
) -> Dict[str, Any]:
"""Initialize browser automation in LOCAL or REMOTE mode."""
global browser_state
@@ -2039,12 +2087,12 @@ async def browser_init_impl(
if not LOCAL_BROWSER_STRATEGY_AVAILABLE:
return {
"success": False,
- "error": "LocalBrowserStrategy not available. Ensure strategies/local/browser.py exists."
+ "error": "LocalBrowserStrategy not available. Ensure strategies/local/browser.py exists.",
}
if not PLAYWRIGHT_AVAILABLE:
return {
"success": False,
- "error": "Playwright not installed. Run: pip install playwright && playwright install chromium"
+ "error": "Playwright not installed. Run: pip install playwright && playwright install chromium",
}
try:
@@ -2065,14 +2113,17 @@ async def browser_init_impl(
"session_id": browser_state.session_id,
"url": url,
"headless": headless,
- "message": "Local browser automation initialized successfully."
+ "message": "Local browser automation initialized successfully.",
}
except Exception as e:
return {"success": False, "error": f"Failed to initialize LOCAL mode: {str(e)}"}
elif mode == "remote":
if not url:
- return {"success": False, "error": "Remote mode requires 'url' parameter (API endpoint)."}
+ return {
+ "success": False,
+ "error": "Remote mode requires 'url' parameter (API endpoint).",
+ }
if not session_id:
return {"success": False, "error": "Remote mode requires 'session_id' parameter."}
@@ -2088,7 +2139,7 @@ async def browser_init_impl(
"mode": "remote",
"session_id": session_id,
"api_url": url,
- "message": "Remote browser automation configured. Will fetch from Shadow State API."
+ "message": "Remote browser automation configured. Will fetch from Shadow State API.",
}
else:
return {"success": False, "error": f"Unknown mode: {mode}. Use 'local' or 'remote'."}
@@ -2103,11 +2154,14 @@ async def browser_get_logs_impl(level: str = "all", limit: int = 100) -> Dict[st
try:
if browser_state.mode == "local":
- logs = await browser_state.strategy.get_console_logs(limit=limit, level=level if level != "all" else None)
+ logs = await browser_state.strategy.get_console_logs(
+ limit=limit, level=level if level != "all" else None
+ )
return {"success": True, "mode": "local", "logs": logs, "count": len(logs)}
elif browser_state.mode == "remote":
import aiohttp
+
async with aiohttp.ClientSession() as session:
url = f"{browser_state.remote_api_url}/{browser_state.session_id}"
async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
@@ -2116,14 +2170,21 @@ async def browser_get_logs_impl(level: str = "all", limit: int = 100) -> Dict[st
logs = data.get("logs", [])
if level != "all":
logs = [log for log in logs if log.get("level") == level]
- return {"success": True, "mode": "remote", "logs": logs[-limit:], "count": len(logs)}
+ return {
+ "success": True,
+ "mode": "remote",
+ "logs": logs[-limit:],
+ "count": len(logs),
+ }
else:
return {"success": False, "error": f"API returned status {response.status}"}
except Exception as e:
return {"success": False, "error": str(e)}
-async def browser_screenshot_impl(selector: Optional[str] = None, full_page: bool = False) -> Dict[str, Any]:
+async def browser_screenshot_impl(
+ selector: Optional[str] = None, full_page: bool = False
+) -> Dict[str, Any]:
"""Capture screenshot (LOCAL mode only)."""
global browser_state
@@ -2134,16 +2195,18 @@ async def browser_screenshot_impl(selector: Optional[str] = None, full_page: boo
return {"success": False, "error": "Screenshots require LOCAL mode."}
try:
- path = await browser_state.strategy.capture_screenshot(selector=selector, full_page=full_page)
+ path = await browser_state.strategy.capture_screenshot(
+ selector=selector, full_page=full_page
+ )
# Read file and encode as base64
- with open(path, 'rb') as f:
+ with open(path, "rb") as f:
screenshot_bytes = f.read()
- b64_image = base64.b64encode(screenshot_bytes).decode('utf-8')
+ b64_image = base64.b64encode(screenshot_bytes).decode("utf-8")
return {
"success": True,
"image_base64_png": b64_image,
"path": path,
- "size_bytes": len(screenshot_bytes)
+ "size_bytes": len(screenshot_bytes),
}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2163,18 +2226,31 @@ async def browser_dom_snapshot_impl() -> Dict[str, Any]:
elif browser_state.mode == "remote":
import aiohttp
+
async with aiohttp.ClientSession() as session:
url = f"{browser_state.remote_api_url}/{browser_state.session_id}"
async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
if response.status == 200:
data = await response.json()
# Look for snapshot in logs
- snapshots = [log for log in data.get("logs", []) if log.get("category") == "snapshot"]
+ snapshots = [
+ log for log in data.get("logs", []) if log.get("category") == "snapshot"
+ ]
if snapshots:
latest = snapshots[-1]
html = latest.get("data", {}).get("snapshot", {}).get("html", "")
- return {"success": True, "mode": "remote", "html": html, "length": len(html)}
- return {"success": True, "mode": "remote", "html": "", "message": "No snapshot found in logs."}
+ return {
+ "success": True,
+ "mode": "remote",
+ "html": html,
+ "length": len(html),
+ }
+ return {
+ "success": True,
+ "mode": "remote",
+ "html": "",
+ "message": "No snapshot found in logs.",
+ }
else:
return {"success": False, "error": f"API returned status {response.status}"}
except Exception as e:
@@ -2195,6 +2271,7 @@ async def browser_get_errors_impl(limit: int = 50) -> Dict[str, Any]:
elif browser_state.mode == "remote":
import aiohttp
+
async with aiohttp.ClientSession() as session:
url = f"{browser_state.remote_api_url}/{browser_state.session_id}"
async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
@@ -2202,7 +2279,12 @@ async def browser_get_errors_impl(limit: int = 50) -> Dict[str, Any]:
data = await response.json()
logs = data.get("logs", [])
errors = [log for log in logs if log.get("level") == "error"]
- return {"success": True, "mode": "remote", "errors": errors[-limit:], "count": len(errors)}
+ return {
+ "success": True,
+ "mode": "remote",
+ "errors": errors[-limit:],
+ "count": len(errors),
+ }
else:
return {"success": False, "error": f"API returned status {response.status}"}
except Exception as e:
@@ -2222,6 +2304,7 @@ async def browser_accessibility_audit_impl(selector: Optional[str] = None) -> Di
else:
# REMOTE mode: fetch from Shadow State
import aiohttp
+
async with aiohttp.ClientSession() as session:
url = f"{browser_state.remote_api_url}/{browser_state.session_id}"
async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
@@ -2229,18 +2312,26 @@ async def browser_accessibility_audit_impl(selector: Optional[str] = None) -> Di
return {"success": False, "error": f"API returned status {response.status}"}
data = await response.json()
logs = data.get("logs", [])
- audits = [l for l in logs if l.get("category") in ["accessibility", "accessibilitySnapshot"]]
+ audits = [
+ l
+ for l in logs
+ if l.get("category") in ["accessibility", "accessibilitySnapshot"]
+ ]
if not audits:
return {
"success": True,
"mode": "remote",
"message": "No accessibility audit in Shadow State. Run __DSS_BROWSER_LOGS.audit() in browser.",
"summary": {"violations": 0, "passes": 0, "incomplete": 0},
- "violations": [], "passes": [], "incomplete": []
+ "violations": [],
+ "passes": [],
+ "incomplete": [],
}
latest = max(audits, key=lambda x: x.get("timestamp", 0))
audit_data = latest.get("data", {})
- result = audit_data.get("results") or audit_data.get("accessibility") or audit_data
+ result = (
+ audit_data.get("results") or audit_data.get("accessibility") or audit_data
+ )
violations_count = len(result.get("violations", []))
passes_count = len(result.get("passes", []))
@@ -2252,11 +2343,11 @@ async def browser_accessibility_audit_impl(selector: Optional[str] = None) -> Di
"summary": {
"violations": violations_count,
"passes": passes_count,
- "incomplete": incomplete_count
+ "incomplete": incomplete_count,
},
"violations": result.get("violations", []),
"passes": result.get("passes", []),
- "incomplete": result.get("incomplete", [])
+ "incomplete": result.get("incomplete", []),
}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2276,6 +2367,7 @@ async def browser_performance_impl() -> Dict[str, Any]:
else:
# REMOTE mode: fetch from Shadow State
import aiohttp
+
async with aiohttp.ClientSession() as session:
url = f"{browser_state.remote_api_url}/{browser_state.session_id}"
async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
@@ -2283,18 +2375,26 @@ async def browser_performance_impl() -> Dict[str, Any]:
return {"success": False, "error": f"API returned status {response.status}"}
data = await response.json()
logs = data.get("logs", [])
- perf_logs = [l for l in logs if l.get("category") in ["performance", "accessibilitySnapshot"]]
+ perf_logs = [
+ l
+ for l in logs
+ if l.get("category") in ["performance", "accessibilitySnapshot"]
+ ]
if not perf_logs:
return {
"success": True,
"mode": "remote",
"message": "No performance data in Shadow State. Metrics are captured during page load.",
- "metrics": {}
+ "metrics": {},
}
latest = max(perf_logs, key=lambda x: x.get("timestamp", 0))
perf_data = latest.get("data", {})
metrics = perf_data.get("performance") or {"raw_data": perf_data}
- return {"success": True, "mode": "remote", "metrics": {"core_web_vitals": metrics}}
+ return {
+ "success": True,
+ "mode": "remote",
+ "metrics": {"core_web_vitals": metrics},
+ }
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2324,181 +2424,110 @@ async def browser_close_impl() -> Dict[str, Any]:
# PROJECT MANAGEMENT IMPLEMENTATIONS
# =============================================================================
-async def project_init_impl(path: str, name: str, description: str = None, skin: str = None) -> Dict[str, Any]:
- """Implementation for dss_project_init"""
+async def project_init_impl(
+    path: str, name: str, description: Optional[str] = None, skin: Optional[str] = None
+) -> Dict[str, Any]:
+ """Implementation for dss_project_init."""
if not path or not name:
-
return {"success": False, "error": "path and name are required."}
-
-
try:
-
manager = ProjectManager()
- project = manager.init(
-
- path=Path(path),
-
- name=name,
-
- description=description,
-
- skin=skin
-
- )
-
-
+ project = manager.init(path=Path(path), name=name, description=description, skin=skin)
# Trigger graph analysis in the background
asyncio.create_task(project_graph_analysis_impl(project_path=str(project.path)))
-
-
return {
-
"success": True,
-
"project_name": project.config.name,
-
"path": str(project.path),
-
"status": project.status.value,
-
- "message": "Project initialized. Graph analysis started in background."
-
+ "message": "Project initialized. Graph analysis started in background.",
}
except Exception as e:
-
logger.exception("dss_project_init failed")
return {"success": False, "error": str(e)}
-
async def project_graph_analysis_impl(project_path: str) -> Dict[str, Any]:
-
- """Implementation for dss_project_graph_analysis"""
+ """Implementation for dss_project_graph_analysis."""
if not project_path:
-
return {"success": False, "error": "project_path is required."}
-
-
try:
-
from dss.analyze.project_analyzer import run_project_analysis
-
-
loop = asyncio.get_event_loop()
analysis_result = await loop.run_in_executor(None, run_project_analysis, project_path)
-
-
- return {
-
- "success": True,
-
- "project_path": project_path,
-
- "analysis": analysis_result
-
- }
+ return {"success": True, "project_path": project_path, "analysis": analysis_result}
except Exception as e:
-
logger.exception(f"dss_project_graph_analysis failed for {project_path}")
return {"success": False, "error": str(e)}
-
-async def project_add_figma_team_impl(project_path: str, team_id: str, figma_token: Optional[str] = None) -> Dict[str, Any]:
-
- """Implementation for dss_project_add_figma_team"""
+async def project_add_figma_team_impl(
+ project_path: str, team_id: str, figma_token: Optional[str] = None
+) -> Dict[str, Any]:
+ """Implementation for dss_project_add_figma_team."""
if not project_path or not team_id:
-
return {"success": False, "error": "project_path and team_id are required."}
-
-
try:
-
manager = ProjectManager()
project = manager.load(Path(project_path))
-
-
updated_project = manager.add_figma_team(
-
- project=project,
-
- team_id=team_id,
-
- figma_token=figma_token
-
+ project=project, team_id=team_id, figma_token=figma_token
)
-
-
return {
-
"success": True,
-
"project_name": updated_project.config.name,
-
"figma_team_id": updated_project.config.figma.team_id,
-
- "files_added": len(updated_project.config.figma.files)
-
+ "files_added": len(updated_project.config.figma.files),
}
except Exception as e:
-
logger.exception("dss_project_add_figma_team failed")
return {"success": False, "error": str(e)}
async def project_add_figma_file_impl(
- project_path: str,
- file_key: str,
- file_name: str,
- figma_token: Optional[str] = None
+ project_path: str, file_key: str, file_name: str, figma_token: Optional[str] = None
) -> Dict[str, Any]:
"""Add a single Figma file to DSS project."""
if not PROJECT_MANAGEMENT_AVAILABLE:
return {
"success": False,
- "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
+ "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}",
}
try:
loop = asyncio.get_event_loop()
manager = ProjectManager()
- project = await loop.run_in_executor(
- None,
- lambda: manager.load(Path(project_path))
- )
+ project = await loop.run_in_executor(None, lambda: manager.load(Path(project_path)))
updated_project = await loop.run_in_executor(
None,
lambda: manager.add_figma_file(
- project=project,
- file_key=file_key,
- file_name=file_name,
- figma_token=figma_token
- )
+ project=project, file_key=file_key, file_name=file_name, figma_token=figma_token
+ ),
)
return {
@@ -2506,48 +2535,41 @@ async def project_add_figma_file_impl(
"message": f"Added Figma file '{file_name}' to project",
"file_key": file_key,
"file_name": file_name,
- "total_files": len(updated_project.config.figma.files)
+ "total_files": len(updated_project.config.figma.files),
}
except Exception as e:
return {"success": False, "error": str(e)}
async def project_sync_impl(
- project_path: str,
- file_keys: Optional[List[str]] = None,
- figma_token: Optional[str] = None
+ project_path: str, file_keys: Optional[List[str]] = None, figma_token: Optional[str] = None
) -> Dict[str, Any]:
"""Sync design tokens from Figma sources."""
if not PROJECT_MANAGEMENT_AVAILABLE:
return {
"success": False,
- "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
+ "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}",
}
try:
loop = asyncio.get_event_loop()
manager = ProjectManager()
- project = await loop.run_in_executor(
- None,
- lambda: manager.load(Path(project_path))
- )
+ project = await loop.run_in_executor(None, lambda: manager.load(Path(project_path)))
# Sync (use sync version to avoid nested async issues)
updated_project = await loop.run_in_executor(
None,
- lambda: manager.sync(
- project=project,
- figma_token=figma_token,
- file_keys=file_keys
- )
+ lambda: manager.sync(project=project, figma_token=figma_token, file_keys=file_keys),
)
# Count tokens extracted
total_tokens = 0
sources_info = {}
if updated_project.extracted_tokens:
- for source_key, source_data in updated_project.extracted_tokens.get("sources", {}).items():
+ for source_key, source_data in updated_project.extracted_tokens.get(
+ "sources", {}
+ ).items():
token_count = len(source_data.get("tokens", {}))
total_tokens += token_count
sources_info[source_key] = token_count
@@ -2558,7 +2580,7 @@ async def project_sync_impl(
"project_status": updated_project.status.value,
"tokens_extracted": total_tokens,
"sources": sources_info,
- "errors": updated_project.errors
+ "errors": updated_project.errors,
}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2569,32 +2591,26 @@ async def project_build_impl(project_path: str) -> Dict[str, Any]:
if not PROJECT_MANAGEMENT_AVAILABLE:
return {
"success": False,
- "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
+ "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}",
}
try:
loop = asyncio.get_event_loop()
manager = ProjectManager()
- project = await loop.run_in_executor(
- None,
- lambda: manager.load(Path(project_path))
- )
+ project = await loop.run_in_executor(None, lambda: manager.load(Path(project_path)))
- updated_project = await loop.run_in_executor(
- None,
- lambda: manager.build(project)
- )
+ updated_project = await loop.run_in_executor(None, lambda: manager.build(project))
output_dir = str(project.path / project.config.output.tokens_dir)
return {
"success": True,
- "message": f"Built output files",
+ "message": "Built output files",
"project_status": updated_project.status.value,
"output_directory": output_dir,
"formats_generated": updated_project.config.output.formats,
- "errors": updated_project.errors
+ "errors": updated_project.errors,
}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2605,87 +2621,75 @@ async def project_list_impl() -> Dict[str, Any]:
if not PROJECT_MANAGEMENT_AVAILABLE:
return {
"success": False,
- "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
+ "error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}",
}
try:
manager = ProjectManager()
projects = manager.list()
- return {
- "success": True,
- "count": len(projects),
- "projects": projects
- }
+ return {"success": True, "count": len(projects), "projects": projects}
except Exception as e:
return {"success": False, "error": str(e)}
async def project_info_impl(project_path: str) -> Dict[str, Any]:
- """Implementation for dss_project_info"""
+ """Implementation for dss_project_info."""
if not project_path:
return {"success": False, "error": "project_path is required."}
try:
manager = ProjectManager()
project = manager.load(Path(project_path))
- return {
- "success": True,
- "project_info": safe_serialize(project.config)
- }
+ return {"success": True, "project_info": safe_serialize(project.config)}
except Exception as e:
logger.exception("dss_project_info failed")
return {"success": False, "error": str(e)}
+
async def project_export_context_impl(project_path: str) -> Dict[str, Any]:
- """Implementation for dss_project_export_context"""
+ """Implementation for dss_project_export_context."""
if not project_path:
return {"success": False, "error": "project_path is required."}
try:
from dss.analyze.project_analyzer import export_project_context
-
+
loop = asyncio.get_event_loop()
project_context = await loop.run_in_executor(None, export_project_context, project_path)
-
- return {
- "success": True,
- "project_context": project_context
- }
+
+ return {"success": True, "project_context": project_context}
except Exception as e:
logger.exception(f"dss_project_export_context failed for {project_path}")
return {"success": False, "error": str(e)}
+
async def project_graph_analysis_impl(project_path: str) -> Dict[str, Any]:
- """Implementation for dss_project_graph_analysis"""
+ """Implementation for dss_project_graph_analysis."""
if not project_path:
return {"success": False, "error": "project_path is required."}
try:
from dss.analyze.project_analyzer import run_project_analysis
-
+
loop = asyncio.get_event_loop()
analysis_result = await loop.run_in_executor(None, run_project_analysis, project_path)
-
- return {
- "success": True,
- "project_path": project_path,
- "analysis": analysis_result
- }
+
+ return {"success": True, "project_path": project_path, "analysis": analysis_result}
except Exception as e:
logger.exception(f"dss_project_graph_analysis failed for {project_path}")
return {"success": False, "error": str(e)}
-async def figma_discover_impl(team_id: str, figma_token: Optional[str] = None) -> Dict[str, Any]:
- """Implementation for dss_figma_discover"""
+async def figma_discover_impl(team_id: str, figma_token: Optional[str] = None) -> Dict[str, Any]:
+ """Implementation for dss_figma_discover."""
# =============================================================================
# DSS CORE SYNC IMPLEMENTATIONS
# =============================================================================
+
async def dss_core_sync_impl(
- force: bool = False,
- figma_token: Optional[str] = None
+ force: bool = False, figma_token: Optional[str] = None
) -> Dict[str, Any]:
"""
Sync DSS core from the canonical shadcn/ui Figma source.
@@ -2695,17 +2699,14 @@ async def dss_core_sync_impl(
"""
try:
# Import DSS core sync
- from dss.project.sync import DSSCoreSync
from dss.project.figma import FigmaRateLimitError
+ from dss.project.sync import DSSCoreSync
loop = asyncio.get_event_loop()
sync = DSSCoreSync(figma_token=figma_token)
# Run sync in executor (it uses sync requests)
- result = await loop.run_in_executor(
- None,
- lambda: sync.sync(force=force)
- )
+ result = await loop.run_in_executor(None, lambda: sync.sync(force=force))
if result.get("success"):
return {
@@ -2718,7 +2719,7 @@ async def dss_core_sync_impl(
"team_name": sync.reference.team_name,
"uikit_file_key": sync.reference.uikit_file_key,
"uikit_file_name": sync.reference.uikit_file_name,
- }
+ },
}
else:
return result
@@ -2728,13 +2729,10 @@ async def dss_core_sync_impl(
"success": False,
"error": f"Figma rate limit exceeded: {e}",
"retry_after": e.retry_after,
- "hint": "Wait for the rate limit to reset and try again"
+ "hint": "Wait for the rate limit to reset and try again",
}
except ImportError as e:
- return {
- "success": False,
- "error": f"DSS core sync not available: {e}"
- }
+ return {"success": False, "error": f"DSS core sync not available: {e}"}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2747,15 +2745,9 @@ async def dss_core_status_impl() -> Dict[str, Any]:
sync = DSSCoreSync()
status = sync.get_sync_status()
- return {
- "success": True,
- **status
- }
+ return {"success": True, **status}
except ImportError as e:
- return {
- "success": False,
- "error": f"DSS core sync not available: {e}"
- }
+ return {"success": False, "error": f"DSS core sync not available: {e}"}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2773,21 +2765,16 @@ async def dss_core_tokens_impl() -> Dict[str, Any]:
"success": True,
"tokens": tokens,
"categories": list(tokens.get("categories", {}).keys()),
- "total_tokens": sum(
- len(cat) for cat in tokens.get("categories", {}).values()
- )
+ "total_tokens": sum(len(cat) for cat in tokens.get("categories", {}).values()),
}
else:
return {
"success": False,
"error": "DSS core not synced yet. Run dss_core_sync first.",
- "hint": "Use dss_core_sync to sync from Figma"
+ "hint": "Use dss_core_sync to sync from Figma",
}
except ImportError as e:
- return {
- "success": False,
- "error": f"DSS core sync not available: {e}"
- }
+ return {"success": False, "error": f"DSS core sync not available: {e}"}
except Exception as e:
return {"success": False, "error": str(e)}
@@ -2806,26 +2793,21 @@ async def dss_core_themes_impl() -> Dict[str, Any]:
"success": True,
"themes": themes,
"theme_names": theme_names,
- "total_themes": len(theme_names)
+ "total_themes": len(theme_names),
}
else:
return {
"success": False,
"error": "DSS core not synced yet. Run dss_core_sync first.",
- "hint": "Use dss_core_sync to sync from Figma"
+ "hint": "Use dss_core_sync to sync from Figma",
}
except ImportError as e:
- return {
- "success": False,
- "error": f"DSS core sync not available: {e}"
- }
+ return {"success": False, "error": f"DSS core sync not available: {e}"}
except Exception as e:
return {"success": False, "error": str(e)}
-async def dss_rate_limit_status_impl(
- figma_token: Optional[str] = None
-) -> Dict[str, Any]:
+async def dss_rate_limit_status_impl(figma_token: Optional[str] = None) -> Dict[str, Any]:
"""Get current Figma rate limit status."""
try:
from dss.project.figma import FigmaProjectSync
@@ -2836,7 +2818,7 @@ async def dss_rate_limit_status_impl(
return {
"success": True,
**status,
- "hint": "Rate limits reset after 60 seconds of no requests"
+ "hint": "Rate limits reset after 60 seconds of no requests",
}
except ValueError as e:
return {"success": False, "error": str(e)}
@@ -2848,63 +2830,72 @@ async def dss_rate_limit_status_impl(
# MAIN
# =============================================================================
+
async def main():
- """Run the MCP server"""
+ """Run the MCP server."""
# Configure log rotation (10MB per file, keep 5 backups)
if RUNTIME_AVAILABLE:
try:
- configure_log_rotation(max_bytes=10*1024*1024, backup_count=5)
+ configure_log_rotation(max_bytes=10 * 1024 * 1024, backup_count=5)
except Exception as e:
logger.warning("Failed to configure log rotation", extra={"error": str(e)})
# Server startup logging with structured data
- logger.info("Starting DSS MCP Server", extra={
- "version": "2.0.0",
- "dss_path": str(DSS_PATH),
- "capabilities": {
- "dss": DSS_AVAILABLE,
- "playwright": PLAYWRIGHT_AVAILABLE,
- "local_browser": LOCAL_BROWSER_STRATEGY_AVAILABLE,
- "runtime": RUNTIME_AVAILABLE,
- }
- })
+ logger.info(
+ "Starting DSS MCP Server",
+ extra={
+ "version": "2.0.0",
+ "dss_path": str(DSS_PATH),
+ "capabilities": {
+ "dss": DSS_AVAILABLE,
+ "playwright": PLAYWRIGHT_AVAILABLE,
+ "local_browser": LOCAL_BROWSER_STRATEGY_AVAILABLE,
+ "runtime": RUNTIME_AVAILABLE,
+ },
+ },
+ )
# Initialize DSS Runtime with boundary enforcement
if RUNTIME_AVAILABLE:
try:
runtime = get_runtime()
stats = runtime.get_stats()
- logger.info("DSS Runtime initialized", extra={
- "enforcement_mode": stats['enforcement_mode'],
- "boundary_enforcement": "ACTIVE",
- "stats": stats
- })
+ logger.info(
+ "DSS Runtime initialized",
+ extra={
+ "enforcement_mode": stats["enforcement_mode"],
+ "boundary_enforcement": "ACTIVE",
+ "stats": stats,
+ },
+ )
except Exception as e:
- logger.error("Failed to initialize runtime", extra={
- "error": str(e),
- "boundary_enforcement": "DISABLED"
- })
+ logger.error(
+ "Failed to initialize runtime",
+ extra={"error": str(e), "boundary_enforcement": "DISABLED"},
+ )
else:
- logger.warning("DSSRuntime not available", extra={
- "boundary_enforcement": "DISABLED",
- "import_error": RUNTIME_IMPORT_ERROR if not RUNTIME_AVAILABLE else None
- })
+ logger.warning(
+ "DSSRuntime not available",
+ extra={
+ "boundary_enforcement": "DISABLED",
+ "import_error": RUNTIME_IMPORT_ERROR if not RUNTIME_AVAILABLE else None,
+ },
+ )
if DSS_AVAILABLE:
logger.info("DSS module loaded", extra={"version": dss.__version__})
try:
async with stdio_server() as (read_stream, write_stream):
- await server.run(
- read_stream,
- write_stream,
- server.create_initialization_options()
- )
+ await server.run(read_stream, write_stream, server.create_initialization_options())
finally:
- logger.info("Server shutting down", extra={
- "devtools_connected": devtools.connected,
- "browser_initialized": browser_state.initialized
- })
+ logger.info(
+ "Server shutting down",
+ extra={
+ "devtools_connected": devtools.connected,
+ "browser_initialized": browser_state.initialized,
+ },
+ )
# Cleanup DevTools
if devtools.connected:
await devtools_disconnect_impl()
diff --git a/dss-claude-plugin/strategies/base.py b/dss-claude-plugin/strategies/base.py
index 77b9da4..ce9bef2 100644
--- a/dss-claude-plugin/strategies/base.py
+++ b/dss-claude-plugin/strategies/base.py
@@ -8,7 +8,7 @@ transparently.
"""
from abc import ABC, abstractmethod
-from typing import List, Optional, Dict, Any
+from typing import Any, Dict, List, Optional
class BrowserStrategy(ABC):
@@ -22,10 +22,7 @@ class BrowserStrategy(ABC):
@abstractmethod
async def get_console_logs(
- self,
- session_id: Optional[str] = None,
- limit: int = 100,
- level: Optional[str] = None
+ self, session_id: Optional[str] = None, limit: int = 100, level: Optional[str] = None
) -> List[Dict[str, Any]]:
"""
Retrieve console logs from the browser session.
@@ -42,9 +39,7 @@ class BrowserStrategy(ABC):
@abstractmethod
async def capture_screenshot(
- self,
- selector: Optional[str] = None,
- full_page: bool = False
+ self, selector: Optional[str] = None, full_page: bool = False
) -> str:
"""
Capture a screenshot of the current page or specific element.
@@ -72,9 +67,7 @@ class BrowserStrategy(ABC):
@abstractmethod
async def get_errors(
- self,
- severity: Optional[str] = None,
- limit: int = 50
+ self, severity: Optional[str] = None, limit: int = 50
) -> List[Dict[str, Any]]:
"""
Retrieve accumulated browser errors (console errors, crashes, network failures).
@@ -89,10 +82,7 @@ class BrowserStrategy(ABC):
pass
@abstractmethod
- async def run_accessibility_audit(
- self,
- selector: Optional[str] = None
- ) -> Dict[str, Any]:
+ async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
"""
Run accessibility audit using axe-core.
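To illustrate the contract, a toy strategy stubbing the four methods visible in this diff; any abstract methods defined outside this excerpt would need stubs as well:

from typing import Any, Dict, List, Optional

class NullBrowserStrategy(BrowserStrategy):
    """Returns empty data for every BrowserStrategy method shown above."""

    async def get_console_logs(
        self, session_id: Optional[str] = None, limit: int = 100, level: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        return []

    async def capture_screenshot(
        self, selector: Optional[str] = None, full_page: bool = False
    ) -> str:
        return "/tmp/empty.png"  # the contract returns a file path

    async def get_errors(
        self, severity: Optional[str] = None, limit: int = 50
    ) -> List[Dict[str, Any]]:
        return []

    async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
        return {"violations": [], "passes": [], "incomplete": []}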
diff --git a/dss-claude-plugin/strategies/local/browser.py b/dss-claude-plugin/strategies/local/browser.py
index bf43ba9..137e32e 100644
--- a/dss-claude-plugin/strategies/local/browser.py
+++ b/dss-claude-plugin/strategies/local/browser.py
@@ -23,15 +23,11 @@ AXE_CORE_SCRIPT_URL = "https://cdnjs.cloudflare.com/ajax/libs/axe-core/4.8.4/axe
# Optional Playwright import for graceful degradation
try:
- from playwright.async_api import (
- Browser,
- ConsoleMessage,
- Error as PlaywrightError,
- Page,
- Playwright,
- TimeoutError as PlaywrightTimeoutError,
- async_playwright,
- )
+ from playwright.async_api import Browser, ConsoleMessage
+ from playwright.async_api import Error as PlaywrightError
+ from playwright.async_api import Page, Playwright
+ from playwright.async_api import TimeoutError as PlaywrightTimeoutError
+ from playwright.async_api import async_playwright
PLAYWRIGHT_AVAILABLE = True
except ImportError:
@@ -199,8 +195,8 @@ class LocalBrowserStrategy(BrowserStrategy):
"timestamp": None, # Playwright doesn't provide timestamp directly
"category": "console",
"data": {
- "location": msg.location if hasattr(msg, 'location') else None,
- }
+ "location": msg.location if hasattr(msg, "location") else None,
+ },
}
logs.append(log_entry)
except Exception as e:
@@ -234,10 +230,8 @@ class LocalBrowserStrategy(BrowserStrategy):
raise RuntimeError("No active page to capture screenshot from.")
# Generate unique filename
- session_id = getattr(self.context, 'session_id', 'local')
- path = os.path.join(
- tempfile.gettempdir(), f"dss_screenshot_{session_id}.png"
- )
+ session_id = getattr(self.context, "session_id", "local")
+ path = os.path.join(tempfile.gettempdir(), f"dss_screenshot_{session_id}.png")
try:
if selector:
@@ -284,9 +278,9 @@ class LocalBrowserStrategy(BrowserStrategy):
"category": "uncaughtError",
"message": str(err),
"data": {
- "name": getattr(err, 'name', 'Error'),
- "stack": getattr(err, 'stack', None),
- }
+ "name": getattr(err, "name", "Error"),
+ "stack": getattr(err, "stack", None),
+ },
}
errors.append(error_entry)
except Exception as e:
@@ -294,9 +288,7 @@ class LocalBrowserStrategy(BrowserStrategy):
return errors[-limit:]
- async def run_accessibility_audit(
- self, selector: Optional[str] = None
- ) -> Dict[str, Any]:
+ async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
"""
Run an accessibility audit on the current page using axe-core.
@@ -330,13 +322,11 @@ class LocalBrowserStrategy(BrowserStrategy):
# Run axe with selector context if provided
if selector:
- result = await self.page.evaluate(
- "(selector) => axe.run(selector)", selector
- )
+ result = await self.page.evaluate("(selector) => axe.run(selector)", selector)
else:
result = await self.page.evaluate("() => axe.run()")
- violations_count = len(result.get('violations', []))
+ violations_count = len(result.get("violations", []))
logger.info(f"Accessibility audit complete. Found {violations_count} violations.")
return result
@@ -357,9 +347,7 @@ class LocalBrowserStrategy(BrowserStrategy):
raise RuntimeError("No active page to get performance metrics from.")
# 1. Get Navigation Timing API metrics
- timing_raw = await self.page.evaluate(
- "() => JSON.stringify(window.performance.timing)"
- )
+ timing_raw = await self.page.evaluate("() => JSON.stringify(window.performance.timing)")
nav_timing = json.loads(timing_raw)
# 2. Get Core Web Vitals via PerformanceObserver
@@ -417,14 +405,13 @@ class LocalBrowserStrategy(BrowserStrategy):
"""
core_web_vitals = await self.page.evaluate(metrics_script)
- return {
- "navigation_timing": nav_timing,
- "core_web_vitals": core_web_vitals
- }
+ return {"navigation_timing": nav_timing, "core_web_vitals": core_web_vitals}
async def close(self) -> None:
"""
- Close the current page. Browser instance is kept in pool for reuse.
+ Close the current page.
+
+        The browser instance is kept in the pool for reuse.
+        To fully close the browser, use the close_browser() class method.
"""
diff --git a/dss-claude-plugin/strategies/remote/browser.py b/dss-claude-plugin/strategies/remote/browser.py
index 9bdc018..b94c5b9 100644
--- a/dss-claude-plugin/strategies/remote/browser.py
+++ b/dss-claude-plugin/strategies/remote/browser.py
@@ -1,16 +1,16 @@
"""
Remote Browser Strategy implementation.
+
Connects to the DSS API to retrieve browser state and logs via Shadow State pattern.
"""
-import aiohttp
-import asyncio
import logging
-import base64
-from typing import List, Dict, Any, Optional
+from typing import Any, Dict, List, Optional
+
+import aiohttp
-from ..base import BrowserStrategy
from ...core.context import DSSContext
+from ..base import BrowserStrategy
# Configure module logger
logger = logging.getLogger(__name__)
@@ -19,6 +19,7 @@ logger = logging.getLogger(__name__)
class RemoteBrowserStrategy(BrowserStrategy):
"""
Implements browser interaction via remote API calls.
+
Relies on the browser-side Logger to sync state to the server.
"""
@@ -42,7 +43,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
base_url = self.context.get_api_url()
# Ensure base_url doesn't have trailing slash for clean concatenation
- base_url = base_url.rstrip('/')
+ base_url = base_url.rstrip("/")
url = f"{base_url}/api/browser-logs/{session_id}"
try:
@@ -71,10 +72,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
return []
async def get_console_logs(
- self,
- session_id: Optional[str] = None,
- limit: int = 100,
- level: Optional[str] = None
+ self, session_id: Optional[str] = None, limit: int = 100, level: Optional[str] = None
) -> List[Dict[str, Any]]:
"""
Get browser console logs from the remote API.
@@ -88,7 +86,8 @@ class RemoteBrowserStrategy(BrowserStrategy):
# Filter by console category mostly, but also capture uncaught errors
console_logs = [
- l for l in logs
+ l
+ for l in logs
if l.get("category") in ["console", "uncaughtError", "unhandledRejection"]
]
@@ -102,9 +101,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
return console_logs[:limit]
async def capture_screenshot(
- self,
- selector: Optional[str] = None,
- full_page: bool = False
+ self, selector: Optional[str] = None, full_page: bool = False
) -> str:
"""
Capture a screenshot.
@@ -133,8 +130,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
# Filter for snapshots
snapshots = [
- l for l in logs
- if l.get("category") == "snapshot" and "snapshot" in l.get("data", {})
+ l for l in logs if l.get("category") == "snapshot" and "snapshot" in l.get("data", {})
]
if not snapshots:
@@ -154,9 +150,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
return ""
async def get_errors(
- self,
- severity: Optional[str] = None,
- limit: int = 50
+ self, severity: Optional[str] = None, limit: int = 50
) -> List[Dict[str, Any]]:
"""
Get error logs from the remote API.
@@ -178,10 +172,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
return errors[:limit]
- async def run_accessibility_audit(
- self,
- selector: Optional[str] = None
- ) -> Dict[str, Any]:
+ async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
"""
Get accessibility audit results from Shadow State.
@@ -198,7 +189,8 @@ class RemoteBrowserStrategy(BrowserStrategy):
# Look for accessibility audits in the logs
audits = [
- l for l in logs
+ l
+ for l in logs
if l.get("category") == "accessibility" or l.get("category") == "accessibilitySnapshot"
]
@@ -207,7 +199,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
"violations": [],
"passes": [],
"incomplete": [],
- "message": "No accessibility audit found in Shadow State. Trigger audit from browser console using __DSS_BROWSER_LOGS.audit()"
+ "message": "No accessibility audit found in Shadow State. Trigger audit from browser console using __DSS_BROWSER_LOGS.audit()",
}
# Get the latest audit
@@ -236,14 +228,13 @@ class RemoteBrowserStrategy(BrowserStrategy):
# Look for performance metrics in the logs
perf_logs = [
- l for l in logs
- if l.get("category") in ["performance", "accessibilitySnapshot"]
+ l for l in logs if l.get("category") in ["performance", "accessibilitySnapshot"]
]
if not perf_logs:
return {
"error": "No performance data found in Shadow State.",
- "message": "Performance metrics are captured automatically during page load."
+ "message": "Performance metrics are captured automatically during page load.",
}
# Get the latest performance entry
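The Shadow State polling pattern used throughout RemoteBrowserStrategy, reduced to a standalone sketch (the endpoint shape comes from the diff; the base URL is a placeholder):

import aiohttp

async def fetch_shadow_logs(base_url: str, session_id: str):
    url = f"{base_url.rstrip('/')}/api/browser-logs/{session_id}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as resp:
            if resp.status != 200:
                return []
            data = await resp.json()
            return data.get("logs", [])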
diff --git a/dss-claude-plugin/strategies/remote/filesystem.py b/dss-claude-plugin/strategies/remote/filesystem.py
index ec75c70..e780dbb 100644
--- a/dss-claude-plugin/strategies/remote/filesystem.py
+++ b/dss-claude-plugin/strategies/remote/filesystem.py
@@ -1,14 +1,14 @@
"""
Remote Filesystem Strategy implementation.
+
Filesystem operations are restricted in REMOTE mode for security.
"""
import logging
-from typing import List, Dict, Any
-from pathlib import Path
+from typing import Any, Dict, List
-from ..base import FilesystemStrategy
from ...core.context import DSSContext
+from ..base import FilesystemStrategy
# Configure module logger
logger = logging.getLogger(__name__)
diff --git a/dss-claude-plugin/tests/test_context_compiler.py b/dss-claude-plugin/tests/test_context_compiler.py
index 7c29ed5..4a93c4f 100644
--- a/dss-claude-plugin/tests/test_context_compiler.py
+++ b/dss-claude-plugin/tests/test_context_compiler.py
@@ -1,29 +1,21 @@
"""
-Test Suite for DSS Context Compiler
+Test Suite for DSS Context Compiler.
+
Validates all core functionality: cascade merging, token resolution, security, and error handling.
"""
import json
-import os
import sys
from pathlib import Path
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
+
+# Import after the sys.path tweak so `core` resolves regardless of cwd.
+from core import ContextCompiler, get_compiler_status, list_skins, resolve_token  # noqa: E402
-from core import (
- ContextCompiler,
- get_active_context,
- resolve_token,
- validate_manifest,
- list_skins,
- get_compiler_status,
- EMERGENCY_SKIN
-)
-
class TestContextCompiler:
- """Test suite for Context Compiler"""
+ """Test suite for Context Compiler."""
def __init__(self):
self.base_dir = Path(__file__).parent.parent
@@ -34,7 +26,7 @@ class TestContextCompiler:
self.failed = 0
def assert_equal(self, actual, expected, message):
- """Simple assertion helper"""
+ """Simple assertion helper."""
if actual == expected:
print(f"✓ {message}")
self.passed += 1
@@ -47,7 +39,7 @@ class TestContextCompiler:
return False
def assert_true(self, condition, message):
- """Assert condition is true"""
+ """Assert condition is true."""
if condition:
print(f"✓ {message}")
self.passed += 1
@@ -58,7 +50,7 @@ class TestContextCompiler:
return False
def assert_in(self, needle, haystack, message):
- """Assert needle is in haystack"""
+ """Assert needle is in haystack."""
if needle in haystack:
print(f"✓ {message}")
self.passed += 1
@@ -70,7 +62,7 @@ class TestContextCompiler:
return False
def test_basic_compilation(self):
- """Test 1: Basic 3-layer cascade compilation"""
+ """Test 1: Basic 3-layer cascade compilation."""
print("\n=== Test 1: Basic Compilation (3-Layer Cascade) ===")
try:
@@ -80,29 +72,27 @@ class TestContextCompiler:
self.assert_equal(
context.get("tokens", {}).get("colors", {}).get("primary"),
"#6366f1",
- "Project override applied correctly (colors.primary)"
+ "Project override applied correctly (colors.primary)",
)
# Test skin value (Layer 2 - workbench)
self.assert_equal(
context.get("tokens", {}).get("colors", {}).get("background"),
"#0F172A",
- "Workbench skin value inherited (colors.background)"
+ "Workbench skin value inherited (colors.background)",
)
# Test base value (Layer 1)
self.assert_equal(
context.get("tokens", {}).get("spacing", {}).get("0"),
"0px",
- "Base skin value inherited (spacing.0)"
+ "Base skin value inherited (spacing.0)",
)
# Test metadata injection
self.assert_in("_meta", context, "Metadata injected into context")
self.assert_equal(
- context.get("_meta", {}).get("project_id"),
- "dss-admin",
- "Project ID in metadata"
+ context.get("_meta", {}).get("project_id"), "dss-admin", "Project ID in metadata"
)
except Exception as e:
@@ -110,7 +100,7 @@ class TestContextCompiler:
self.failed += 1
def test_debug_provenance(self):
- """Test 2: Debug provenance tracking"""
+ """Test 2: Debug provenance tracking."""
print("\n=== Test 2: Debug Provenance Tracking ===")
try:
@@ -118,12 +108,10 @@ class TestContextCompiler:
self.assert_in("_provenance", context, "Provenance data included in debug mode")
self.assert_true(
- isinstance(context.get("_provenance", []), list),
- "Provenance is a list"
+ isinstance(context.get("_provenance", []), list), "Provenance is a list"
)
self.assert_true(
- len(context.get("_provenance", [])) > 0,
- "Provenance contains tracking entries"
+ len(context.get("_provenance", [])) > 0, "Provenance contains tracking entries"
)
except Exception as e:
@@ -131,7 +119,7 @@ class TestContextCompiler:
self.failed += 1
def test_token_resolution(self):
- """Test 3: Token resolution via MCP tool"""
+ """Test 3: Token resolution via MCP tool."""
print("\n=== Test 3: Token Resolution ===")
try:
@@ -149,10 +137,7 @@ class TestContextCompiler:
# Test nested token
result = resolve_token(str(self.admin_manifest), "typography.fontFamily.sans")
- self.assert_true(
- "Inter" in result or "system-ui" in result,
- "Resolved nested token"
- )
+ self.assert_true("Inter" in result or "system-ui" in result, "Resolved nested token")
# Test non-existent token
result = resolve_token(str(self.admin_manifest), "nonexistent.token")
@@ -163,7 +148,7 @@ class TestContextCompiler:
self.failed += 1
def test_skin_listing(self):
- """Test 4: Skin listing functionality"""
+ """Test 4: Skin listing functionality."""
print("\n=== Test 4: Skin Listing ===")
try:
@@ -180,7 +165,7 @@ class TestContextCompiler:
self.failed += 1
def test_safe_boot_protocol(self):
- """Test 5: Safe Boot Protocol (emergency fallback)"""
+ """Test 5: Safe Boot Protocol (emergency fallback)."""
print("\n=== Test 5: Safe Boot Protocol ===")
try:
@@ -188,9 +173,7 @@ class TestContextCompiler:
context = self.compiler.compile("/nonexistent/path.json")
self.assert_equal(
- context.get("status"),
- "emergency_mode",
- "Emergency mode activated for invalid path"
+ context.get("status"), "emergency_mode", "Emergency mode activated for invalid path"
)
self.assert_in("_error", context, "Error details included in safe boot")
@@ -198,14 +181,18 @@ class TestContextCompiler:
# Validate emergency skin has required structure
self.assert_in("tokens", context, "Emergency skin has tokens")
self.assert_in("colors", context.get("tokens", {}), "Emergency skin has colors")
- self.assert_in("primary", context.get("tokens", {}).get("colors", {}), "Emergency skin has primary color")
+ self.assert_in(
+ "primary",
+ context.get("tokens", {}).get("colors", {}),
+ "Emergency skin has primary color",
+ )
except Exception as e:
print(f"✗ Safe Boot Protocol test failed with error: {e}")
self.failed += 1
def test_path_traversal_prevention(self):
- """Test 6: Security - Path traversal prevention"""
+ """Test 6: Security - Path traversal prevention."""
print("\n=== Test 6: Path Traversal Prevention (Security) ===")
try:
@@ -215,11 +202,7 @@ class TestContextCompiler:
print("✗ Path traversal not prevented!")
self.failed += 1
except ValueError as e:
- self.assert_in(
- "path traversal",
- str(e).lower(),
- "Path traversal attack blocked"
- )
+ self.assert_in("path traversal", str(e).lower(), "Path traversal attack blocked")
# Attempt another variant
try:
@@ -227,18 +210,14 @@ class TestContextCompiler:
print("✗ Path traversal variant not prevented!")
self.failed += 1
except ValueError as e:
- self.assert_in(
- "path traversal",
- str(e).lower(),
- "Path traversal variant blocked"
- )
+ self.assert_in("path traversal", str(e).lower(), "Path traversal variant blocked")
except Exception as e:
print(f"✗ Path traversal prevention test failed with unexpected error: {e}")
self.failed += 1
def test_compiler_status(self):
- """Bonus Test: Compiler status tool"""
+ """Bonus Test: Compiler status tool."""
print("\n=== Bonus Test: Compiler Status ===")
try:
@@ -254,7 +233,7 @@ class TestContextCompiler:
self.failed += 1
def run_all_tests(self):
- """Execute all tests and report results"""
+ """Execute all tests and report results."""
print("=" * 60)
print("DSS Context Compiler Test Suite")
print("=" * 60)
diff --git a/dss-claude-plugin/verify_tools.py b/dss-claude-plugin/verify_tools.py
index f244e4c..d539ad5 100644
--- a/dss-claude-plugin/verify_tools.py
+++ b/dss-claude-plugin/verify_tools.py
@@ -1,7 +1,5 @@
#!/usr/bin/env python3
-"""
-Verify that dss-mcp-server.py properly exports Context Compiler tools
-"""
+"""Verify that dss-mcp-server.py properly exports Context Compiler tools."""
import sys
from pathlib import Path
@@ -19,11 +17,12 @@ print("\n1. Testing Context Compiler imports...")
try:
from core import (
get_active_context,
+ get_compiler_status,
+ list_skins,
resolve_token,
validate_manifest,
- list_skins,
- get_compiler_status
)
+
print(" ✓ All Context Compiler functions imported successfully")
CONTEXT_COMPILER_AVAILABLE = True
except ImportError as e:
@@ -36,10 +35,8 @@ print("\n2. Checking MCP server tool list...")
try:
# We need to simulate the MCP server initialization
# to see what tools it would export
- import asyncio
+
from mcp.server import Server
- from mcp.server.stdio import stdio_server
- from mcp.types import Tool, TextContent
# Create a test server instance
server = Server("dss-test")
@@ -109,7 +106,7 @@ try:
'elif name == "dss_resolve_token"',
'elif name == "dss_validate_manifest"',
'elif name == "dss_list_skins"',
- 'elif name == "dss_get_compiler_status"'
+ 'elif name == "dss_get_compiler_status"',
]
for handler in handlers:
@@ -140,7 +137,7 @@ try:
status = json.loads(status_json)
print(f" ✓ get_compiler_status() returned status: {status['status']}")
- if status['status'] == 'active':
+ if status["status"] == "active":
print(" ✓ Context Compiler is active and ready")
else:
print(f" ✗ Context Compiler status is: {status['status']}")
diff --git a/dss-cli.py b/dss-cli.py
index f55c654..eabe0ab 100755
--- a/dss-cli.py
+++ b/dss-cli.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
-DSS-CLI - A command-line interface for the DSS Engine
+DSS-CLI - A command-line interface for the DSS Engine.
This script provides a direct, scriptable interface to the core functionalities
of the DSS analysis and context engine. It is designed for use in CI/CD
@@ -10,7 +10,6 @@ pipelines and other automated workflows.
import argparse
import asyncio
import json
-import os
import sys
from pathlib import Path
@@ -20,11 +19,14 @@ from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
try:
- from dss.analyze.project_analyzer import run_project_analysis, export_project_context
- from dss.project.manager import ProjectManager
from dss import StorybookScanner, StoryGenerator, ThemeGenerator
+ from dss.analyze.project_analyzer import export_project_context, run_project_analysis
+ from dss.project.manager import ProjectManager
except ImportError as e:
- print(f"Error: Could not import DSS modules. Make sure dss-mvp1 is in the PYTHONPATH.", file=sys.stderr)
+ print(
+ "Error: Could not import DSS modules. Make sure dss-mvp1 is in the PYTHONPATH.",
+ file=sys.stderr,
+ )
print(f"Import error: {e}", file=sys.stderr)
sys.exit(1)
@@ -40,104 +42,92 @@ def main():
# 'analyze' command
# =========================================================================
analyze_parser = subparsers.add_parser(
- "analyze",
- help="Run a deep analysis of a project and save the results to .dss/analysis_graph.json"
+ "analyze",
+ help="Run a deep analysis of a project and save the results to .dss/analysis_graph.json",
)
analyze_parser.add_argument(
- "--project-path",
- required=True,
- help="The root path to the project directory to be analyzed."
+ "--project-path",
+ required=True,
+ help="The root path to the project directory to be analyzed.",
)
analyze_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
-
# =========================================================================
# 'export-context' command
# =========================================================================
export_parser = subparsers.add_parser(
- "export-context",
- help="Export the comprehensive project context as a JSON object to stdout."
+ "export-context",
+ help="Export the comprehensive project context as a JSON object to stdout.",
)
export_parser.add_argument(
- "--project-path",
- required=True,
- help="The path to the project directory."
+ "--project-path", required=True, help="The path to the project directory."
)
# =========================================================================
# 'add-figma-file' command
# =========================================================================
add_figma_parser = subparsers.add_parser(
- "add-figma-file",
- help="Link a Figma file to a DSS project."
+ "add-figma-file", help="Link a Figma file to a DSS project."
)
add_figma_parser.add_argument(
- "--project-path",
- required=True,
- help="The path to the DSS project directory."
+ "--project-path", required=True, help="The path to the DSS project directory."
)
add_figma_parser.add_argument(
- "--file-key",
- required=True,
- help="The file key of the Figma file (from the URL)."
+ "--file-key", required=True, help="The file key of the Figma file (from the URL)."
)
add_figma_parser.add_argument(
- "--file-name",
- required=True,
- help="A human-readable name for the Figma file."
+ "--file-name", required=True, help="A human-readable name for the Figma file."
)
# =========================================================================
# 'setup-storybook' command
# =========================================================================
storybook_parser = subparsers.add_parser(
- "setup-storybook",
- help="Scan, generate, or configure Storybook for a project."
+ "setup-storybook", help="Scan, generate, or configure Storybook for a project."
)
storybook_parser.add_argument(
- "--project-path",
- required=True,
- help="The path to the DSS project directory."
+ "--project-path", required=True, help="The path to the DSS project directory."
)
storybook_parser.add_argument(
"--action",
required=True,
choices=["scan", "generate", "configure"],
- help="The Storybook action to perform."
+ help="The Storybook action to perform.",
)
# =========================================================================
# 'sync-tokens' command
# =========================================================================
sync_parser = subparsers.add_parser(
- "sync-tokens",
- help="Synchronize design tokens from the linked Figma file(s)."
+ "sync-tokens", help="Synchronize design tokens from the linked Figma file(s)."
)
sync_parser.add_argument(
- "--project-path",
- required=True,
- help="The path to the DSS project directory."
+ "--project-path", required=True, help="The path to the DSS project directory."
)
sync_parser.add_argument(
"--figma-token",
- help="Your Figma personal access token. If not provided, it will try to use the FIGMA_TOKEN environment variable."
+ help="Your Figma personal access token. If not provided, it will try to use the FIGMA_TOKEN environment variable.",
)
sync_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
sync_parser.add_argument("--force", action="store_true", help="Force sync, ignoring cache")
-
args = parser.parse_args()
# --- Command Dispatch ---
project_path = Path(args.project_path).resolve()
if not project_path.is_dir():
- print(f"Error: Provided project path is not a valid directory: {project_path}", file=sys.stderr)
+ print(
+ f"Error: Provided project path is not a valid directory: {project_path}",
+ file=sys.stderr,
+ )
sys.exit(1)
try:
if args.command == "analyze":
result = run_project_analysis(str(project_path))
- print(f"Analysis complete. Graph saved to {project_path / '.dss' / 'analysis_graph.json'}")
+ print(
+ f"Analysis complete. Graph saved to {project_path / '.dss' / 'analysis_graph.json'}"
+ )
# Optionally print a summary to stdout
summary = {
"status": "success",
@@ -156,20 +146,23 @@ def main():
try:
project = manager.load(project_path)
except FileNotFoundError:
- print(f"Error: No 'ds.config.json' found at {project_path}. Is this a valid DSS project?", file=sys.stderr)
+ print(
+ f"Error: No 'ds.config.json' found at {project_path}. Is this a valid DSS project?",
+ file=sys.stderr,
+ )
sys.exit(1)
manager.add_figma_file(
- project=project,
- file_key=args.file_key,
- file_name=args.file_name
+ project=project, file_key=args.file_key, file_name=args.file_name
+ )
+ print(
+ f"Successfully added Figma file '{args.file_name}' to project '{project.config.name}'."
)
- print(f"Successfully added Figma file '{args.file_name}' to project '{project.config.name}'.")
elif args.command == "setup-storybook":
action = args.action
print(f"Running Storybook setup with action: {action}...")
-
+
if action == "scan":
scanner = StorybookScanner(project_path)
result = scanner.scan()
@@ -182,7 +175,7 @@ def main():
theme_gen = ThemeGenerator(project_path)
result = theme_gen.generate()
print(f"Storybook theme configured at {result.get('theme_file')}")
-
+
print("Storybook setup complete.")
elif args.command == "sync-tokens":
@@ -190,25 +183,29 @@ def main():
try:
project = manager.load(project_path)
except FileNotFoundError:
- print(f"Error: No 'ds.config.json' found at {project_path}. Is this a valid DSS project?", file=sys.stderr)
+ print(
+ f"Error: No 'ds.config.json' found at {project_path}. Is this a valid DSS project?",
+ file=sys.stderr,
+ )
sys.exit(1)
-
+
print("Synchronizing tokens from Figma...")
# The manager.sync method is now async
- asyncio.run(manager.sync(
- project,
- figma_token=args.figma_token,
- force=args.force,
- verbose=args.verbose
- ))
+ asyncio.run(
+ manager.sync(
+ project, figma_token=args.figma_token, force=args.force, verbose=args.verbose
+ )
+ )
print("Token synchronization complete.")
-
+
except Exception as e:
print(json.dumps({"success": False, "error": str(e)}), file=sys.stderr)
import traceback
+
traceback.print_exc()
sys.exit(1)
+
if __name__ == "__main__":
# The main function now handles both sync and async command dispatches
main()
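Since dss-cli.py targets CI/CD pipelines, a typical pipeline step just shells out to it and gates on the exit code; a sketch under those assumptions (interpreter and project path hypothetical, command and flag names as defined above):

import subprocess

result = subprocess.run(
    ["python", "dss-cli.py", "analyze", "--project-path", "./admin-ui"],
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    # On failure the except branch above emits a JSON error object on stderr.
    raise SystemExit(result.stderr)
print(result.stdout)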
diff --git a/dss-temp-handover.md b/dss-temp-handover.md
index deed114..bfe99fe 100644
--- a/dss-temp-handover.md
+++ b/dss-temp-handover.md
@@ -56,4 +56,4 @@
3. **Validate Component Structure**: Ensure the `ds-button.js` (or any target component) has a structure that the updated parser can understand and extract metadata from.
4. **Re-run Storybook Generation**: Once `_parse_component` can correctly extract metadata, re-run `setup-storybook --action generate` to confirm stories are created.
-I have included the contents of `dss/storybook/generator.py` for direct reference.
\ No newline at end of file
+I have included the contents of `dss/storybook/generator.py` for direct reference.
diff --git a/dss/__init__.py b/dss/__init__.py
index f72c35d..5937224 100644
--- a/dss/__init__.py
+++ b/dss/__init__.py
@@ -1,5 +1,5 @@
"""
-DSS - Design System Server
+DSS - Design System Server.
A Model Context Protocol (MCP) server that provides Claude Code with 40+ design system tools.
Supports local development and remote team deployment.
@@ -12,60 +12,70 @@ Usage:
__version__ = "1.0.0"
-# Settings & Configuration
-from dss.settings import settings, DSSSettings, DSSManager, manager
-
-# Storage Layer
-from dss.storage.json_store import (
- Projects,
- Components,
- Tokens,
- Styles,
- SyncHistory,
- ActivityLog,
- Teams,
- Cache,
- FigmaFiles,
- CodeMetrics,
- TestResults,
- TokenDrift,
- Integrations,
- IntegrationHealth,
- get_stats,
-)
-
# Analyze
from dss.analyze.base import (
+ ComponentInfo,
+ Framework,
ProjectAnalysis,
QuickWin,
- ComponentInfo,
StylePattern,
- Framework,
StylingApproach,
)
-from dss.analyze.scanner import ProjectScanner
-from dss.analyze.react import ReactAnalyzer
-from dss.analyze.styles import StyleAnalyzer
from dss.analyze.graph import DependencyGraph
from dss.analyze.quick_wins import QuickWinFinder
-
-# Ingest
-from dss.ingest.base import (
- DesignToken,
- TokenCollection,
- TokenSource,
- TokenType,
- TokenCategory,
-)
-from dss.ingest.css import CSSTokenSource
-from dss.ingest.scss import SCSSTokenSource
-from dss.ingest.tailwind import TailwindTokenSource
-from dss.ingest.json_tokens import JSONTokenSource
-from dss.ingest.merge import TokenMerger, MergeStrategy
+from dss.analyze.react import ReactAnalyzer
+from dss.analyze.scanner import ProjectScanner
+from dss.analyze.styles import StyleAnalyzer
+from dss.export_import.merger import SmartMerger
# Export/Import
from dss.export_import.service import DSSArchiveExporter, DSSArchiveImporter
-from dss.export_import.merger import SmartMerger
+
+# Figma
+from dss.figma.figma_tools import FigmaToolSuite
+
+# Ingest
+from dss.ingest.base import DesignToken, TokenCategory, TokenCollection, TokenSource, TokenType
+from dss.ingest.css import CSSTokenSource
+from dss.ingest.json_tokens import JSONTokenSource
+from dss.ingest.merge import MergeStrategy, TokenMerger
+from dss.ingest.scss import SCSSTokenSource
+from dss.ingest.tailwind import TailwindTokenSource
+from dss.models.component import Component
+from dss.models.project import Project, ProjectMetadata
+
+# Models
+from dss.models.theme import Theme
+
+# Project
+from dss.project.manager import DSSProject
+from dss.services.config_service import ConfigService, DSSConfig
+
+# Services
+from dss.services.project_manager import ProjectManager
+from dss.services.sandboxed_fs import SandboxedFS
+
+# Settings & Configuration
+from dss.settings import DSSManager, DSSSettings, manager, settings
+
+# Storage Layer
+from dss.storage.json_store import (
+ ActivityLog,
+ Cache,
+ CodeMetrics,
+ Components,
+ FigmaFiles,
+ IntegrationHealth,
+ Integrations,
+ Projects,
+ Styles,
+ SyncHistory,
+ Teams,
+ TestResults,
+ TokenDrift,
+ Tokens,
+ get_stats,
+)
# Storybook
from dss.storybook.generator import StoryGenerator
@@ -73,23 +83,7 @@ from dss.storybook.scanner import StorybookScanner
from dss.storybook.theme import ThemeGenerator
# Translations
-from dss.translations import TranslationDictionary, TokenResolver
-
-# Services
-from dss.services.project_manager import ProjectManager
-from dss.services.config_service import ConfigService, DSSConfig
-from dss.services.sandboxed_fs import SandboxedFS
-
-# Figma
-from dss.figma.figma_tools import FigmaToolSuite
-
-# Project
-from dss.project.manager import DSSProject
-
-# Models
-from dss.models.theme import Theme
-from dss.models.component import Component
-from dss.models.project import Project, ProjectMetadata
+from dss.translations import TokenResolver, TranslationDictionary
# Validators
from dss.validators.schema import ProjectValidator, ValidationResult
diff --git a/dss/analyze/__init__.py b/dss/analyze/__init__.py
index 4ddb7f0..7b9d0fd 100644
--- a/dss/analyze/__init__.py
+++ b/dss/analyze/__init__.py
@@ -1,25 +1,25 @@
"""
-DSS Code Analysis Module
+DSS Code Analysis Module.
Provides tools for analyzing React projects, detecting style patterns,
building dependency graphs, and identifying quick-win improvements.
"""
from .base import (
- ProjectAnalysis,
- StylePattern,
- QuickWin,
- QuickWinType,
- QuickWinPriority,
- Location,
ComponentInfo,
+ Location,
+ ProjectAnalysis,
+ QuickWin,
+ QuickWinPriority,
+ QuickWinType,
StyleFile,
+ StylePattern,
)
-from .scanner import ProjectScanner
-from .react import ReactAnalyzer
-from .styles import StyleAnalyzer
from .graph import DependencyGraph
from .quick_wins import QuickWinFinder
+from .react import ReactAnalyzer
+from .scanner import ProjectScanner
+from .styles import StyleAnalyzer
__all__ = [
# Data classes
diff --git a/dss/analyze/base.py b/dss/analyze/base.py
index 9ba13f7..e551edf 100644
--- a/dss/analyze/base.py
+++ b/dss/analyze/base.py
@@ -1,36 +1,36 @@
-"""
-Base classes and data structures for code analysis.
-"""
+"""Base classes and data structures for code analysis."""
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
-from typing import List, Dict, Any, Optional, Set
-from pathlib import Path
+from typing import Any, Dict, List, Optional
class QuickWinType(str, Enum):
"""Types of quick-win improvements."""
- INLINE_STYLE = "inline_style" # Inline styles that can be extracted
- DUPLICATE_VALUE = "duplicate_value" # Duplicate color/spacing values
- UNUSED_STYLE = "unused_style" # Unused CSS/SCSS
- HARDCODED_VALUE = "hardcoded_value" # Hardcoded values that should be tokens
- NAMING_INCONSISTENCY = "naming" # Inconsistent naming patterns
- DEPRECATED_PATTERN = "deprecated" # Deprecated styling patterns
- ACCESSIBILITY = "accessibility" # A11y improvements
- PERFORMANCE = "performance" # Performance improvements
+
+ INLINE_STYLE = "inline_style" # Inline styles that can be extracted
+ DUPLICATE_VALUE = "duplicate_value" # Duplicate color/spacing values
+ UNUSED_STYLE = "unused_style" # Unused CSS/SCSS
+ HARDCODED_VALUE = "hardcoded_value" # Hardcoded values that should be tokens
+ NAMING_INCONSISTENCY = "naming" # Inconsistent naming patterns
+ DEPRECATED_PATTERN = "deprecated" # Deprecated styling patterns
+ ACCESSIBILITY = "accessibility" # A11y improvements
+ PERFORMANCE = "performance" # Performance improvements
class QuickWinPriority(str, Enum):
"""Priority levels for quick-wins."""
- CRITICAL = "critical" # Must fix - breaking issues
- HIGH = "high" # Should fix - significant improvement
- MEDIUM = "medium" # Nice to fix - moderate improvement
- LOW = "low" # Optional - minor improvement
+
+ CRITICAL = "critical" # Must fix - breaking issues
+ HIGH = "high" # Should fix - significant improvement
+ MEDIUM = "medium" # Nice to fix - moderate improvement
+ LOW = "low" # Optional - minor improvement
class StylingApproach(str, Enum):
"""Detected styling approaches in a project."""
+
CSS_MODULES = "css-modules"
STYLED_COMPONENTS = "styled-components"
EMOTION = "emotion"
@@ -45,6 +45,7 @@ class StylingApproach(str, Enum):
class Framework(str, Enum):
"""Detected UI frameworks."""
+
REACT = "react"
NEXT = "next"
VUE = "vue"
@@ -58,6 +59,7 @@ class Framework(str, Enum):
@dataclass
class Location:
"""Represents a location in source code."""
+
file_path: str
line: int
column: int = 0
@@ -80,8 +82,9 @@ class Location:
@dataclass
class StyleFile:
"""Represents a style file in the project."""
+
path: str
- type: str # css, scss, less, styled, etc.
+ type: str # css, scss, less, styled, etc.
size_bytes: int = 0
line_count: int = 0
variable_count: int = 0
@@ -105,9 +108,10 @@ class StyleFile:
@dataclass
class ComponentInfo:
"""Information about a React component."""
+
name: str
path: str
- type: str = "functional" # functional, class, forwardRef, memo
+ type: str = "functional" # functional, class, forwardRef, memo
props: List[str] = field(default_factory=list)
has_styles: bool = False
style_files: List[str] = field(default_factory=list)
@@ -136,6 +140,7 @@ class ComponentInfo:
@dataclass
class StylePattern:
"""A detected style pattern in code."""
+
type: StylingApproach
locations: List[Location] = field(default_factory=list)
count: int = 0
@@ -153,12 +158,13 @@ class StylePattern:
@dataclass
class TokenCandidate:
"""A value that could be extracted as a design token."""
- value: str # The actual value (e.g., "#3B82F6")
- suggested_name: str # Suggested token name
- category: str # colors, spacing, typography, etc.
- occurrences: int = 1 # How many times it appears
+
+ value: str # The actual value (e.g., "#3B82F6")
+ suggested_name: str # Suggested token name
+ category: str # colors, spacing, typography, etc.
+ occurrences: int = 1 # How many times it appears
locations: List[Location] = field(default_factory=list)
- confidence: float = 0.0 # 0-1 confidence score
+ confidence: float = 0.0 # 0-1 confidence score
def to_dict(self) -> Dict[str, Any]:
return {
@@ -174,15 +180,16 @@ class TokenCandidate:
@dataclass
class QuickWin:
"""A quick improvement opportunity."""
+
type: QuickWinType
priority: QuickWinPriority
title: str
description: str
location: Optional[Location] = None
affected_files: List[str] = field(default_factory=list)
- estimated_impact: str = "" # e.g., "Remove 50 lines of duplicate code"
- fix_suggestion: str = "" # Suggested fix
- auto_fixable: bool = False # Can be auto-fixed
+ estimated_impact: str = "" # e.g., "Remove 50 lines of duplicate code"
+ fix_suggestion: str = "" # Suggested fix
+ auto_fixable: bool = False # Can be auto-fixed
def to_dict(self) -> Dict[str, Any]:
return {
@@ -201,6 +208,7 @@ class QuickWin:
@dataclass
class ProjectAnalysis:
"""Complete analysis result for a project."""
+
# Basic info
project_path: str
analyzed_at: datetime = field(default_factory=datetime.now)
@@ -275,14 +283,16 @@ class ProjectAnalysis:
for sp in self.styling_approaches:
lines.append(f" • {sp.type.value}: {sp.count} occurrences")
- lines.extend([
- "",
- f"Inline styles found: {len(self.inline_style_locations)}",
- f"Token candidates: {len(self.token_candidates)}",
- f"Quick wins: {len(self.quick_wins)}",
- "",
- "Quick Wins by Priority:",
- ])
+ lines.extend(
+ [
+ "",
+ f"Inline styles found: {len(self.inline_style_locations)}",
+ f"Token candidates: {len(self.token_candidates)}",
+ f"Quick wins: {len(self.quick_wins)}",
+ "",
+ "Quick Wins by Priority:",
+ ]
+ )
by_priority = {}
for qw in self.quick_wins:
@@ -290,8 +300,12 @@ class ProjectAnalysis:
by_priority[qw.priority] = []
by_priority[qw.priority].append(qw)
- for priority in [QuickWinPriority.CRITICAL, QuickWinPriority.HIGH,
- QuickWinPriority.MEDIUM, QuickWinPriority.LOW]:
+ for priority in [
+ QuickWinPriority.CRITICAL,
+ QuickWinPriority.HIGH,
+ QuickWinPriority.MEDIUM,
+ QuickWinPriority.LOW,
+ ]:
if priority in by_priority:
lines.append(f" [{priority.value.upper()}] {len(by_priority[priority])} items")
diff --git a/dss/analyze/graph.py b/dss/analyze/graph.py
index 1a5ed5d..9a44c1a 100644
--- a/dss/analyze/graph.py
+++ b/dss/analyze/graph.py
@@ -1,21 +1,21 @@
"""
-Dependency Graph Builder
+Dependency Graph Builder.
Builds component and style dependency graphs for visualization
and analysis of project structure.
"""
-import re
import json
-from pathlib import Path
-from typing import List, Dict, Any, Optional, Set, Tuple
+import re
from dataclasses import dataclass, field
-from collections import defaultdict
+from pathlib import Path
+from typing import Any, Dict, List, Optional
@dataclass
class GraphNode:
"""A node in the dependency graph."""
+
id: str
name: str
type: str # 'component', 'style', 'util', 'hook'
@@ -27,20 +27,21 @@ class GraphNode:
def to_dict(self) -> Dict[str, Any]:
return {
- 'id': self.id,
- 'name': self.name,
- 'type': self.type,
- 'path': self.path,
- 'size': self.size,
- 'children': self.children,
- 'parents': self.parents,
- 'metadata': self.metadata,
+ "id": self.id,
+ "name": self.name,
+ "type": self.type,
+ "path": self.path,
+ "size": self.size,
+ "children": self.children,
+ "parents": self.parents,
+ "metadata": self.metadata,
}
@dataclass
class GraphEdge:
"""An edge in the dependency graph."""
+
source: str
target: str
type: str # 'import', 'uses', 'styles'
@@ -48,10 +49,10 @@ class GraphEdge:
def to_dict(self) -> Dict[str, Any]:
return {
- 'source': self.source,
- 'target': self.target,
- 'type': self.type,
- 'weight': self.weight,
+ "source": self.source,
+ "target": self.target,
+ "type": self.type,
+ "weight": self.weight,
}
@@ -97,10 +98,10 @@ class DependencyGraph:
async def _scan_files(self) -> None:
"""Scan project files and create nodes."""
- skip_dirs = {'node_modules', '.git', 'dist', 'build', '.next'}
+ skip_dirs = {"node_modules", ".git", "dist", "build", ".next"}
# Component files
- for ext in ['*.jsx', '*.tsx']:
+ for ext in ["*.jsx", "*.tsx"]:
for file_path in self.root.rglob(ext):
if any(skip in file_path.parts for skip in skip_dirs):
continue
@@ -111,13 +112,13 @@ class DependencyGraph:
self.nodes[node_id] = GraphNode(
id=node_id,
name=file_path.stem,
- type='component',
+ type="component",
path=rel_path,
size=file_path.stat().st_size,
)
# Style files
- for ext in ['*.css', '*.scss', '*.sass', '*.less']:
+ for ext in ["*.css", "*.scss", "*.sass", "*.less"]:
for file_path in self.root.rglob(ext):
if any(skip in file_path.parts for skip in skip_dirs):
continue
@@ -128,13 +129,13 @@ class DependencyGraph:
self.nodes[node_id] = GraphNode(
id=node_id,
name=file_path.stem,
- type='style',
+ type="style",
path=rel_path,
size=file_path.stat().st_size,
)
# Utility/Hook files
- for ext in ['*.js', '*.ts']:
+ for ext in ["*.js", "*.ts"]:
for file_path in self.root.rglob(ext):
if any(skip in file_path.parts for skip in skip_dirs):
continue
@@ -144,10 +145,10 @@ class DependencyGraph:
node_id = self._path_to_id(rel_path)
# Classify file type
- if 'hook' in name or name.startswith('use'):
- node_type = 'hook'
- elif any(x in name for x in ['util', 'helper', 'lib']):
- node_type = 'util'
+ if "hook" in name or name.startswith("use"):
+ node_type = "hook"
+ elif any(x in name for x in ["util", "helper", "lib"]):
+ node_type = "util"
else:
continue # Skip other JS/TS files
@@ -163,11 +164,11 @@ class DependencyGraph:
"""Build edges from import statements."""
import_pattern = re.compile(
r'import\s+(?:\{[^}]+\}|\*\s+as\s+\w+|\w+)?\s*(?:,\s*\{[^}]+\})?\s*from\s+["\']([^"\']+)["\']',
- re.MULTILINE
+ re.MULTILINE,
)
for node_id, node in self.nodes.items():
- if node.type not in ['component', 'hook', 'util']:
+ if node.type not in ["component", "hook", "util"]:
continue
file_path = self.root / node.path
@@ -175,7 +176,7 @@ class DependencyGraph:
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
for match in import_pattern.finditer(content):
import_path = match.group(1)
@@ -185,11 +186,13 @@ class DependencyGraph:
if target_id and target_id in self.nodes:
# Add edge
- self.edges.append(GraphEdge(
- source=node_id,
- target=target_id,
- type='import',
- ))
+ self.edges.append(
+ GraphEdge(
+ source=node_id,
+ target=target_id,
+ type="import",
+ )
+ )
# Update parent/child relationships
node.children.append(target_id)
@@ -201,16 +204,16 @@ class DependencyGraph:
async def _build_usage_edges(self) -> None:
"""Build edges from component usage in JSX."""
# Pattern to find JSX component usage
- jsx_pattern = re.compile(r'<([A-Z][A-Za-z0-9]*)')
+ jsx_pattern = re.compile(r"<([A-Z][A-Za-z0-9]*)")
# Build name -> id mapping for components
name_to_id = {}
for node_id, node in self.nodes.items():
- if node.type == 'component':
+ if node.type == "component":
name_to_id[node.name] = node_id
for node_id, node in self.nodes.items():
- if node.type != 'component':
+ if node.type != "component":
continue
file_path = self.root / node.path
@@ -218,7 +221,7 @@ class DependencyGraph:
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
used_components = set()
for match in jsx_pattern.finditer(content):
@@ -227,11 +230,13 @@ class DependencyGraph:
used_components.add(name_to_id[comp_name])
for target_id in used_components:
- self.edges.append(GraphEdge(
- source=node_id,
- target=target_id,
- type='uses',
- ))
+ self.edges.append(
+ GraphEdge(
+ source=node_id,
+ target=target_id,
+ type="uses",
+ )
+ )
except Exception:
continue
@@ -239,26 +244,37 @@ class DependencyGraph:
def _path_to_id(self, path: str) -> str:
"""Convert file path to node ID."""
# Remove extension and normalize
- path = re.sub(r'\.(jsx?|tsx?|css|scss|sass|less)$', '', path)
- return path.replace('/', '_').replace('\\', '_').replace('.', '_')
+ path = re.sub(r"\.(jsx?|tsx?|css|scss|sass|less)$", "", path)
+ return path.replace("/", "_").replace("\\", "_").replace(".", "_")
def _resolve_import(self, source_path: str, import_path: str) -> Optional[str]:
"""Resolve import path to node ID."""
- if not import_path.startswith('.'):
+ if not import_path.startswith("."):
return None # Skip node_modules imports
source_dir = Path(source_path).parent
# Handle various import patterns
- if import_path.startswith('./'):
+ if import_path.startswith("./"):
resolved = source_dir / import_path[2:]
- elif import_path.startswith('../'):
+ elif import_path.startswith("../"):
resolved = source_dir / import_path
else:
resolved = source_dir / import_path
# Try to resolve with extensions
- extensions = ['.tsx', '.ts', '.jsx', '.js', '.css', '.scss', '/index.tsx', '/index.ts', '/index.jsx', '/index.js']
+ extensions = [
+ ".tsx",
+ ".ts",
+ ".jsx",
+ ".js",
+ ".css",
+ ".scss",
+ "/index.tsx",
+ "/index.ts",
+ "/index.jsx",
+ "/index.js",
+ ]
resolved_str = str(resolved)
for ext in extensions:
@@ -276,16 +292,16 @@ class DependencyGraph:
def to_dict(self) -> Dict[str, Any]:
"""Convert graph to dictionary for serialization."""
return {
- 'nodes': [node.to_dict() for node in self.nodes.values()],
- 'edges': [edge.to_dict() for edge in self.edges],
- 'stats': {
- 'total_nodes': len(self.nodes),
- 'total_edges': len(self.edges),
- 'components': len([n for n in self.nodes.values() if n.type == 'component']),
- 'styles': len([n for n in self.nodes.values() if n.type == 'style']),
- 'hooks': len([n for n in self.nodes.values() if n.type == 'hook']),
- 'utils': len([n for n in self.nodes.values() if n.type == 'util']),
- }
+ "nodes": [node.to_dict() for node in self.nodes.values()],
+ "edges": [edge.to_dict() for edge in self.edges],
+ "stats": {
+ "total_nodes": len(self.nodes),
+ "total_edges": len(self.edges),
+ "components": len([n for n in self.nodes.values() if n.type == "component"]),
+ "styles": len([n for n in self.nodes.values() if n.type == "style"]),
+ "hooks": len([n for n in self.nodes.values() if n.type == "hook"]),
+ "utils": len([n for n in self.nodes.values() if n.type == "util"]),
+ },
}
def to_json(self, pretty: bool = True) -> str:
@@ -296,11 +312,11 @@ class DependencyGraph:
"""Get simplified component dependency tree."""
tree = {}
for node_id, node in self.nodes.items():
- if node.type == 'component':
+ if node.type == "component":
tree[node.name] = [
self.nodes[child_id].name
for child_id in node.children
- if child_id in self.nodes and self.nodes[child_id].type == 'component'
+ if child_id in self.nodes and self.nodes[child_id].type == "component"
]
return tree
@@ -308,9 +324,9 @@ class DependencyGraph:
"""Find components with no parents (not imported anywhere)."""
orphans = []
for node_id, node in self.nodes.items():
- if node.type == 'component' and not node.parents:
+ if node.type == "component" and not node.parents:
# Exclude entry points (index, App, etc.)
- if node.name.lower() not in ['app', 'index', 'main', 'root']:
+ if node.name.lower() not in ["app", "index", "main", "root"]:
orphans.append(node.path)
return orphans
@@ -320,16 +336,18 @@ class DependencyGraph:
for node_id, node in self.nodes.items():
connections = len(node.children) + len(node.parents)
if connections >= min_connections:
- hubs.append({
- 'name': node.name,
- 'path': node.path,
- 'type': node.type,
- 'imports': len(node.children),
- 'imported_by': len(node.parents),
- 'total_connections': connections,
- })
+ hubs.append(
+ {
+ "name": node.name,
+ "path": node.path,
+ "type": node.type,
+ "imports": len(node.children),
+ "imported_by": len(node.parents),
+ "total_connections": connections,
+ }
+ )
- hubs.sort(key=lambda x: x['total_connections'], reverse=True)
+ hubs.sort(key=lambda x: x["total_connections"], reverse=True)
return hubs
def find_circular_dependencies(self) -> List[List[str]]:
@@ -343,7 +361,7 @@ class DependencyGraph:
rec_stack.add(node_id)
path.append(node_id)
- for child_id in self.nodes.get(node_id, GraphNode('', '', '', '')).children:
+ for child_id in self.nodes.get(node_id, GraphNode("", "", "", "")).children:
if child_id not in visited:
dfs(child_id, path.copy())
elif child_id in rec_stack:
@@ -363,7 +381,7 @@ class DependencyGraph:
def get_subgraph(self, node_id: str, depth: int = 2) -> Dict[str, Any]:
"""Get subgraph centered on a specific node."""
if node_id not in self.nodes:
- return {'nodes': [], 'edges': []}
+ return {"nodes": [], "edges": []}
# BFS to find nodes within depth
included_nodes = {node_id}
@@ -380,11 +398,7 @@ class DependencyGraph:
frontier = new_frontier
# Filter nodes and edges
- subgraph_nodes = [
- self.nodes[nid].to_dict()
- for nid in included_nodes
- if nid in self.nodes
- ]
+ subgraph_nodes = [self.nodes[nid].to_dict() for nid in included_nodes if nid in self.nodes]
subgraph_edges = [
edge.to_dict()
@@ -393,10 +407,10 @@ class DependencyGraph:
]
return {
- 'nodes': subgraph_nodes,
- 'edges': subgraph_edges,
- 'center': node_id,
- 'depth': depth,
+ "nodes": subgraph_nodes,
+ "edges": subgraph_edges,
+ "center": node_id,
+ "depth": depth,
}
def get_style_dependencies(self) -> Dict[str, List[str]]:
@@ -404,13 +418,13 @@ class DependencyGraph:
style_deps = {}
for node_id, node in self.nodes.items():
- if node.type != 'component':
+ if node.type != "component":
continue
style_children = [
self.nodes[child_id].path
for child_id in node.children
- if child_id in self.nodes and self.nodes[child_id].type == 'style'
+ if child_id in self.nodes and self.nodes[child_id].type == "style"
]
if style_children:
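Not shown in these hunks is the public entry point that drives _scan_files and the edge builders; assuming an async build() plus a root-path constructor, usage would look roughly like this (both assumptions flagged inline):

import asyncio

from dss.analyze.graph import DependencyGraph

async def report(project_root: str) -> None:
    graph = DependencyGraph(project_root)      # constructor signature assumed, not shown in this diff
    await graph.build()                        # async build() entry point assumed
    print(graph.find_circular_dependencies())  # cycles as lists of node IDs
    print(graph.to_json(pretty=True))          # nodes, edges, and stats

asyncio.run(report("./admin-ui"))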
diff --git a/dss/analyze/project_analyzer.py b/dss/analyze/project_analyzer.py
index e39495e..2a2a451 100644
--- a/dss/analyze/project_analyzer.py
+++ b/dss/analyze/project_analyzer.py
@@ -1,172 +1,113 @@
-import os
-import json
-import networkx as nx
-import subprocess
-import cssutils
-import logging
-from pathlib import Path
+"""This module provides tools for analyzing a project."""
+
+import json
+import logging
+import subprocess
+from pathlib import Path
+from typing import Dict
+
+from dss.analyze.base import ProjectAnalysis
-# Configure logging
-logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
-# Configure cssutils to ignore noisy error messages
-cssutils.log.setLevel(logging.CRITICAL)
+# Path to the node.js parser script.
+# This assumes the script is located in the same directory as this file.
+parser_script_path = Path(__file__).parent / "parser.js"
-def analyze_react_project(project_path: str) -> dict:
+
+def analyze_project(
+ path: str,
+ output_graph: bool = False,
+ prune: bool = False,
+ visualize: bool = False,
+) -> ProjectAnalysis:
"""
- Analyzes a React project, building a graph of its components and styles.
+ Analyzes a project, including all its components and their dependencies.
Args:
- project_path: The root path of the React project.
+ path: The path to the project to analyze.
+ output_graph: Whether to output the dependency graph.
+ prune: Whether to prune the dependency graph.
+ visualize: Whether to visualize the dependency graph.
Returns:
- A dictionary containing the component graph and analysis report.
+ A ProjectAnalysis object containing the analysis results.
"""
- log.info(f"Starting analysis of project at: {project_path}")
- graph = nx.DiGraph()
-
- # Supported extensions for react/js/ts files
- supported_exts = ('.js', '.jsx', '.ts', '.tsx')
-
- # Path to the parser script
- parser_script_path = Path(__file__).parent / 'parser.js'
- if not parser_script_path.exists():
- raise FileNotFoundError(f"Parser script not found at {parser_script_path}")
+ project_path = Path(path).resolve()
+ log.info(f"Analyzing project at {project_path}...")
- for root, _, files in os.walk(project_path):
- # Ignore node_modules and build directories
- if 'node_modules' in root or 'build' in root or 'dist' in root:
- continue
-
- for file in files:
- file_path = os.path.join(root, file)
- relative_path = os.path.relpath(file_path, project_path)
+ # Get all component files in the project.
+ component_files = list(project_path.glob("**/*.js")) + list(project_path.glob("**/*.jsx"))
- # Add a node for every file
- graph.add_node(relative_path, type='file')
+ # For each component file, get its AST.
+ for file_path in component_files:
+ if file_path.is_file():
+ # Call the external node.js parser
+ result = subprocess.run(
+ ["node", str(parser_script_path), file_path],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ # The AST is now in result.stdout as a JSON string.
+ ast = json.loads(result.stdout)
+ # TODO: Do something with the AST.
- if file.endswith(supported_exts):
- graph.nodes[relative_path]['language'] = 'typescript'
- try:
- # Call the external node.js parser
- result = subprocess.run(
- ['node', str(parser_script_path), file_path],
- capture_output=True,
- text=True,
- check=True
- )
- # The AST is now in result.stdout as a JSON string.
- # ast = json.loads(result.stdout)
-
- except subprocess.CalledProcessError as e:
- log.error(f"Failed to parse {file_path} with babel. Error: {e.stderr}")
- except Exception as e:
- log.error(f"Could not process file {file_path}: {e}")
+ # TODO: Populate the ProjectAnalysis object with the analysis results.
+ analysis = ProjectAnalysis(
+ project_name=project_path.name,
+ project_path=str(project_path),
+ total_files=len(component_files),
+ components={},
+ )
+ log.info(f"Analysis complete for {project_path.name}.")
+ return analysis
- elif file.endswith('.css'):
- graph.nodes[relative_path]['language'] = 'css'
- try:
- # Placeholder for CSS parsing
- # sheet = cssutils.parseFile(file_path)
- pass
- except Exception as e:
- log.error(f"Could not parse css file {file_path}: {e}")
- log.info(f"Analysis complete. Found {graph.number_of_nodes()} files.")
-
- # Convert graph to a serializable format
- serializable_graph = nx.node_link_data(graph)
-
- return serializable_graph
-
-def save_analysis_to_project(project_path: str, analysis_data: dict):
+def export_project_context(analysis: ProjectAnalysis, output_path: str):
"""
- Saves the analysis data to a file in the project's .dss directory.
+ Exports the project context to a JSON file.
"""
- # In the context of dss-mvp1, the .dss directory for metadata might be at the root.
- dss_dir = os.path.join(project_path, '.dss')
- os.makedirs(dss_dir, exist_ok=True)
-
- output_path = os.path.join(dss_dir, 'analysis_graph.json')
-
- with open(output_path, 'w', encoding='utf-8') as f:
- json.dump(analysis_data, f, indent=2)
-
- log.info(f"Analysis data saved to {output_path}")
+ log.info(f"Exporting project context to {output_path}...")
+ with open(output_path, "w") as f:
+ json.dump(analysis.to_dict(), f, indent=2)
+ log.info("Export complete.")
-def run_project_analysis(project_path: str):
+
+def get_ast(file_path: str) -> Dict:
"""
- High-level function to run analysis and save the result.
+ Gets the AST of a file using a node.js parser.
"""
- analysis_result = analyze_react_project(project_path)
- save_analysis_to_project(project_path, analysis_result)
- return analysis_result
+ log.info(f"Getting AST for {file_path}...")
+ result = subprocess.run(
+ ["node", str(parser_script_path), file_path],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ log.info("AST retrieved.")
+ return json.loads(result.stdout)
-def _read_ds_config(project_path: str) -> dict:
+
+def main():
"""
- Reads the ds.config.json file from the project root.
+ Main function for the project analyzer.
"""
- config_path = os.path.join(project_path, 'ds.config.json')
- if not os.path.exists(config_path):
- return {}
- try:
- with open(config_path, 'r', encoding='utf-8') as f:
- return json.load(f)
- except Exception as e:
- log.error(f"Could not read or parse ds.config.json: {e}")
- return {}
+ import argparse
-def export_project_context(project_path: str) -> dict:
- """
- Exports a comprehensive project context for agents.
+ parser = argparse.ArgumentParser(description="Analyze a project.")
+ parser.add_argument("path", help="The path to the project to analyze.")
+ parser.add_argument("--output-graph", action="store_true", help="Output the dependency graph.")
+ parser.add_argument("--prune", action="store_true", help="Prune the dependency graph.")
+ parser.add_argument("--visualize", action="store_true", help="Visualize the dependency graph.")
+ parser.add_argument("--export-context", help="Export the project context to a JSON file.")
+ args = parser.parse_args()
- This context includes the analysis graph, project configuration,
- and a summary of the project's structure.
- """
- analysis_graph_path = os.path.join(project_path, '.dss', 'analysis_graph.json')
+ analysis = analyze_project(args.path, args.output_graph, args.prune, args.visualize)
- if not os.path.exists(analysis_graph_path):
- # If the analysis hasn't been run, run it first.
- log.info(f"Analysis graph not found for {project_path}. Running analysis now.")
- run_project_analysis(project_path)
+ if args.export_context:
+ export_project_context(analysis, args.export_context)
- try:
- with open(analysis_graph_path, 'r', encoding='utf-8') as f:
- analysis_graph = json.load(f)
- except Exception as e:
- log.error(f"Could not read analysis graph for {project_path}: {e}")
- analysis_graph = {}
-
- project_config = _read_ds_config(project_path)
-
- # Create the project context
- project_context = {
- "schema_version": "1.0",
- "project_name": project_config.get("name", "Unknown"),
- "analysis_summary": {
- "file_nodes": len(analysis_graph.get("nodes", [])),
- "dependencies": len(analysis_graph.get("links", [])),
- "analyzed_at": log.info(f"Analysis data saved to {analysis_graph_path}")
- },
- "project_config": project_config,
- "analysis_graph": analysis_graph,
- }
-
- return project_context
-
-if __name__ == '__main__':
- # This is for standalone testing of the analyzer.
- # Provide a path to a project to test.
- # e.g., python -m dss.analyze.project_analyzer ../../admin-ui
- import sys
- if len(sys.argv) > 1:
- target_project_path = sys.argv[1]
- if not os.path.isdir(target_project_path):
- print(f"Error: Path '{target_project_path}' is not a valid directory.")
- sys.exit(1)
-
- run_project_analysis(target_project_path)
- else:
- print("Usage: python -m dss.analyze.project_analyzer ")
+if __name__ == "__main__":
+ main()
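The rewritten analyzer delegates all parsing to the Node script beside it; exercising that contract directly looks like this (component path hypothetical; parser.js is assumed to print a Babel-style AST as JSON on stdout, per get_ast above):

from dss.analyze.project_analyzer import get_ast

ast = get_ast("src/components/Button.jsx")  # hypothetical input file
print(ast.get("type"))  # Babel ASTs typically root at a "File" node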
diff --git a/dss/analyze/quick_wins.py b/dss/analyze/quick_wins.py
index 296aaa2..a48f149 100644
--- a/dss/analyze/quick_wins.py
+++ b/dss/analyze/quick_wins.py
@@ -1,5 +1,5 @@
"""
-Quick-Win Finder
+Quick-Win Finder.
Identifies easy improvement opportunities in a codebase:
- Inline styles that can be extracted
@@ -11,18 +11,11 @@ Identifies easy improvement opportunities in a codebase:
import re
from pathlib import Path
-from typing import List, Dict, Any, Optional
-from dataclasses import dataclass
+from typing import Any, Dict, List
-from .base import (
- QuickWin,
- QuickWinType,
- QuickWinPriority,
- Location,
- ProjectAnalysis,
-)
-from .styles import StyleAnalyzer
+from .base import Location, QuickWin, QuickWinPriority, QuickWinType
from .react import ReactAnalyzer
+from .styles import StyleAnalyzer
class QuickWinFinder:
@@ -100,7 +93,7 @@ class QuickWinFinder:
# Group by file
by_file = {}
for style in inline_styles:
- file_path = style['file']
+ file_path = style["file"]
if file_path not in by_file:
by_file[file_path] = []
by_file[file_path].append(style)
@@ -108,31 +101,36 @@ class QuickWinFinder:
# Create quick-wins for files with multiple inline styles
for file_path, styles in by_file.items():
if len(styles) >= 3: # Only flag if 3+ inline styles
- wins.append(QuickWin(
- type=QuickWinType.INLINE_STYLE,
- priority=QuickWinPriority.HIGH,
- title=f"Extract {len(styles)} inline styles",
- description=f"File {file_path} has {len(styles)} inline style declarations that could be extracted to CSS classes or design tokens.",
- location=Location(file_path, styles[0]['line']),
- affected_files=[file_path],
- estimated_impact=f"Reduce inline styles, improve maintainability",
- fix_suggestion="Extract repeated style properties to CSS classes or design tokens. Use className instead of style prop.",
- auto_fixable=True,
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.INLINE_STYLE,
+ priority=QuickWinPriority.HIGH,
+ title=f"Extract {len(styles)} inline styles",
+ description=f"File {file_path} has {len(styles)} inline style declarations that could be extracted to CSS classes or design tokens.",
+ location=Location(file_path, styles[0]["line"]),
+ affected_files=[file_path],
+ estimated_impact="Reduce inline styles, improve maintainability",
+ fix_suggestion="Extract repeated style properties to CSS classes or design tokens. Use className instead of style prop.",
+ auto_fixable=True,
+ )
+ )
# Create summary if many files have inline styles
total_inline = len(inline_styles)
if total_inline >= 10:
- wins.insert(0, QuickWin(
- type=QuickWinType.INLINE_STYLE,
- priority=QuickWinPriority.HIGH,
- title=f"Project has {total_inline} inline styles",
- description=f"Found {total_inline} inline style declarations across {len(by_file)} files. Consider migrating to CSS classes or design tokens.",
- affected_files=list(by_file.keys())[:10],
- estimated_impact=f"Improve code maintainability and bundle size",
- fix_suggestion="Run 'dss migrate inline-styles' to preview migration options.",
- auto_fixable=True,
- ))
+ wins.insert(
+ 0,
+ QuickWin(
+ type=QuickWinType.INLINE_STYLE,
+ priority=QuickWinPriority.HIGH,
+ title=f"Project has {total_inline} inline styles",
+ description=f"Found {total_inline} inline style declarations across {len(by_file)} files. Consider migrating to CSS classes or design tokens.",
+ affected_files=list(by_file.keys())[:10],
+ estimated_impact="Improve code maintainability and bundle size",
+ fix_suggestion="Run 'dss migrate inline-styles' to preview migration options.",
+ auto_fixable=True,
+ ),
+ )
return wins
@@ -141,23 +139,25 @@ class QuickWinFinder:
wins = []
analysis = await self.style_analyzer.analyze()
- duplicates = analysis.get('duplicates', [])
+ duplicates = analysis.get("duplicates", [])
# Find high-occurrence duplicates
for dup in duplicates[:10]: # Top 10 duplicates
- if dup['count'] >= 5: # Only if used 5+ times
- priority = QuickWinPriority.HIGH if dup['count'] >= 10 else QuickWinPriority.MEDIUM
+ if dup["count"] >= 5: # Only if used 5+ times
+ priority = QuickWinPriority.HIGH if dup["count"] >= 10 else QuickWinPriority.MEDIUM
- wins.append(QuickWin(
- type=QuickWinType.DUPLICATE_VALUE,
- priority=priority,
- title=f"Duplicate value '{dup['value']}' used {dup['count']} times",
- description=f"The value '{dup['value']}' appears {dup['count']} times across {len(dup['files'])} files. This should be a design token.",
- affected_files=dup['files'],
- estimated_impact=f"Create single source of truth, easier theme updates",
- fix_suggestion=f"Create token for this value and replace all occurrences.",
- auto_fixable=True,
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.DUPLICATE_VALUE,
+ priority=priority,
+ title=f"Duplicate value '{dup['value']}' used {dup['count']} times",
+ description=f"The value '{dup['value']}' appears {dup['count']} times across {len(dup['files'])} files. This should be a design token.",
+ affected_files=dup["files"],
+ estimated_impact="Create single source of truth, easier theme updates",
+ fix_suggestion="Create token for this value and replace all occurrences.",
+ auto_fixable=True,
+ )
+ )
return wins
@@ -168,16 +168,18 @@ class QuickWinFinder:
unused = await self.style_analyzer.find_unused_styles()
if len(unused) >= 5:
- wins.append(QuickWin(
- type=QuickWinType.UNUSED_STYLE,
- priority=QuickWinPriority.MEDIUM,
- title=f"Found {len(unused)} potentially unused CSS classes",
- description=f"These CSS classes are defined but don't appear to be used in the codebase. Review and remove if confirmed unused.",
- affected_files=list(set(u['file'] for u in unused))[:10],
- estimated_impact=f"Reduce CSS bundle size by removing dead code",
- fix_suggestion="Review each class and remove if unused. Some may be dynamically generated.",
- auto_fixable=False, # Needs human review
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.UNUSED_STYLE,
+ priority=QuickWinPriority.MEDIUM,
+ title=f"Found {len(unused)} potentially unused CSS classes",
+ description="These CSS classes are defined but don't appear to be used in the codebase. Review and remove if confirmed unused.",
+ affected_files=list(set(u["file"] for u in unused))[:10],
+ estimated_impact="Reduce CSS bundle size by removing dead code",
+ fix_suggestion="Review each class and remove if unused. Some may be dynamically generated.",
+ auto_fixable=False, # Needs human review
+ )
+ )
return wins
@@ -186,35 +188,39 @@ class QuickWinFinder:
wins = []
analysis = await self.style_analyzer.analyze()
- candidates = analysis.get('token_candidates', [])
+ candidates = analysis.get("token_candidates", [])
# Find high-confidence candidates
high_confidence = [c for c in candidates if c.confidence >= 0.7]
if high_confidence:
- wins.append(QuickWin(
- type=QuickWinType.HARDCODED_VALUE,
- priority=QuickWinPriority.MEDIUM,
- title=f"Found {len(high_confidence)} values that should be tokens",
- description="These hardcoded values appear multiple times and should be extracted as design tokens for consistency.",
- estimated_impact="Improve theme consistency and make updates easier",
- fix_suggestion="Use 'dss extract-tokens' to create tokens from these values.",
- auto_fixable=True,
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.HARDCODED_VALUE,
+ priority=QuickWinPriority.MEDIUM,
+ title=f"Found {len(high_confidence)} values that should be tokens",
+ description="These hardcoded values appear multiple times and should be extracted as design tokens for consistency.",
+ estimated_impact="Improve theme consistency and make updates easier",
+ fix_suggestion="Use 'dss extract-tokens' to create tokens from these values.",
+ auto_fixable=True,
+ )
+ )
# Add specific wins for top candidates
for candidate in high_confidence[:5]:
- wins.append(QuickWin(
- type=QuickWinType.HARDCODED_VALUE,
- priority=QuickWinPriority.LOW,
- title=f"Extract '{candidate.value}' as token",
- description=f"Value '{candidate.value}' appears {candidate.occurrences} times. Suggested token: {candidate.suggested_name}",
- location=candidate.locations[0] if candidate.locations else None,
- affected_files=[loc.file_path for loc in candidate.locations[:5]],
- estimated_impact=f"Single source of truth for this value",
- fix_suggestion=f"Create token '{candidate.suggested_name}' with value '{candidate.value}'",
- auto_fixable=True,
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.HARDCODED_VALUE,
+ priority=QuickWinPriority.LOW,
+ title=f"Extract '{candidate.value}' as token",
+ description=f"Value '{candidate.value}' appears {candidate.occurrences} times. Suggested token: {candidate.suggested_name}",
+ location=candidate.locations[0] if candidate.locations else None,
+ affected_files=[loc.file_path for loc in candidate.locations[:5]],
+ estimated_impact="Single source of truth for this value",
+ fix_suggestion=f"Create token '{candidate.suggested_name}' with value '{candidate.value}'",
+ auto_fixable=True,
+ )
+ )
return wins
@@ -224,102 +230,114 @@ class QuickWinFinder:
naming = await self.style_analyzer.analyze_naming_consistency()
- if naming.get('inconsistencies'):
- primary = naming.get('primary_pattern', 'unknown')
- inconsistent_count = len(naming['inconsistencies'])
+ if naming.get("inconsistencies"):
+ primary = naming.get("primary_pattern", "unknown")
+ inconsistent_count = len(naming["inconsistencies"])
- wins.append(QuickWin(
- type=QuickWinType.NAMING_INCONSISTENCY,
- priority=QuickWinPriority.LOW,
- title=f"Found {inconsistent_count} naming inconsistencies",
- description=f"The project primarily uses {primary} naming, but {inconsistent_count} classes use different conventions.",
- affected_files=list(set(i['file'] for i in naming['inconsistencies']))[:10],
- estimated_impact="Improve code consistency and readability",
- fix_suggestion=f"Standardize all class names to use {primary} convention.",
- auto_fixable=True,
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.NAMING_INCONSISTENCY,
+ priority=QuickWinPriority.LOW,
+ title=f"Found {inconsistent_count} naming inconsistencies",
+ description=f"The project primarily uses {primary} naming, but {inconsistent_count} classes use different conventions.",
+ affected_files=list(set(i["file"] for i in naming["inconsistencies"]))[:10],
+ estimated_impact="Improve code consistency and readability",
+ fix_suggestion=f"Standardize all class names to use {primary} convention.",
+ auto_fixable=True,
+ )
+ )
return wins
async def _find_accessibility_wins(self) -> List[QuickWin]:
"""Find accessibility issues."""
wins = []
- skip_dirs = {'node_modules', '.git', 'dist', 'build'}
+ skip_dirs = {"node_modules", ".git", "dist", "build"}
a11y_issues = []
- for ext in ['*.jsx', '*.tsx']:
+ for ext in ["*.jsx", "*.tsx"]:
for file_path in self.root.rglob(ext):
if any(skip in file_path.parts for skip in skip_dirs):
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
rel_path = str(file_path.relative_to(self.root))
# Check for images without alt
img_no_alt = re.findall(r'<img[^>]+(?<!alt=")[^>]*>', content)
if img_no_alt:
for match in img_no_alt[:3]:
- if 'alt=' not in match:
- line = content[:content.find(match)].count('\n') + 1
- a11y_issues.append({
- 'type': 'img-no-alt',
- 'file': rel_path,
- 'line': line,
- })
+ if "alt=" not in match:
+ line = content[: content.find(match)].count("\n") + 1
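+ # 1-based line number; content.find(match) locates the first occurrence, so repeated tags may report an earlier line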
+ a11y_issues.append(
+ {
+ "type": "img-no-alt",
+ "file": rel_path,
+ "line": line,
+ }
+ )
# Check for buttons without accessible text
icon_only_buttons = re.findall(
- r'<button[^>]*>\s*<(?:svg|Icon|img)[^>]*/?>\s*</button>',
+ r"<button[^>]*>\s*<(?:svg|Icon|img)[^>]*/?>\s*</button>",
content,
- re.IGNORECASE
+ re.IGNORECASE,
)
if icon_only_buttons:
- a11y_issues.append({
- 'type': 'icon-button-no-label',
- 'file': rel_path,
- })
+ a11y_issues.append(
+ {
+ "type": "icon-button-no-label",
+ "file": rel_path,
+ }
+ )
# Check for click handlers on non-interactive elements
- div_onclick = re.findall(r'<div[^>]+onClick', content)
+ div_onclick = re.findall(r"<div[^>]+onClick", content)
if div_onclick:
- a11y_issues.append({
- 'type': 'div-click-handler',
- 'file': rel_path,
- 'count': len(div_onclick),
- })
+ a11y_issues.append(
+ {
+ "type": "div-click-handler",
+ "file": rel_path,
+ "count": len(div_onclick),
+ }
+ )
except Exception:
continue
# Group issues by type
if a11y_issues:
- img_issues = [i for i in a11y_issues if i['type'] == 'img-no-alt']
+ img_issues = [i for i in a11y_issues if i["type"] == "img-no-alt"]
if img_issues:
- wins.append(QuickWin(
- type=QuickWinType.ACCESSIBILITY,
- priority=QuickWinPriority.HIGH,
- title=f"Found {len(img_issues)} images without alt text",
- description="Images should have alt attributes for screen readers. Empty alt='' is acceptable for decorative images.",
- affected_files=list(set(i['file'] for i in img_issues))[:10],
- estimated_impact="Improve accessibility for screen reader users",
- fix_suggestion="Add descriptive alt text to images or alt='' for decorative images.",
- auto_fixable=False,
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.ACCESSIBILITY,
+ priority=QuickWinPriority.HIGH,
+ title=f"Found {len(img_issues)} images without alt text",
+ description="Images should have alt attributes for screen readers. Empty alt='' is acceptable for decorative images.",
+ affected_files=list(set(i["file"] for i in img_issues))[:10],
+ estimated_impact="Improve accessibility for screen reader users",
+ fix_suggestion="Add descriptive alt text to images or alt='' for decorative images.",
+ auto_fixable=False,
+ )
+ )
- div_issues = [i for i in a11y_issues if i['type'] == 'div-click-handler']
+ div_issues = [i for i in a11y_issues if i["type"] == "div-click-handler"]
if div_issues:
- wins.append(QuickWin(
- type=QuickWinType.ACCESSIBILITY,
- priority=QuickWinPriority.MEDIUM,
- title=f"Found click handlers on div elements",
- description="Using onClick on div elements makes them inaccessible to keyboard users. Use button or add proper ARIA attributes.",
- affected_files=list(set(i['file'] for i in div_issues))[:10],
- estimated_impact="Improve keyboard navigation accessibility",
- fix_suggestion="Replace
with
or add role='button' and tabIndex={0}.",
- auto_fixable=True,
- ))
+ wins.append(
+ QuickWin(
+ type=QuickWinType.ACCESSIBILITY,
+ priority=QuickWinPriority.MEDIUM,
+ title="Found click handlers on div elements",
+ description="Using onClick on div elements makes them inaccessible to keyboard users. Use button or add proper ARIA attributes.",
+ affected_files=list(set(i["file"] for i in div_issues))[:10],
+ estimated_impact="Improve keyboard navigation accessibility",
+ fix_suggestion="Replace with
or add role='button' and tabIndex={0}.",
+ auto_fixable=True,
+ )
+ )
return wins
@@ -343,11 +361,11 @@ class QuickWinFinder:
by_priority[priority_key] += 1
return {
- 'total': len(wins),
- 'by_type': by_type,
- 'by_priority': by_priority,
- 'auto_fixable': len([w for w in wins if w.auto_fixable]),
- 'top_wins': [w.to_dict() for w in wins[:10]],
+ "total": len(wins),
+ "by_type": by_type,
+ "by_priority": by_priority,
+ "auto_fixable": len([w for w in wins if w.auto_fixable]),
+ "top_wins": [w.to_dict() for w in wins[:10]],
}
async def get_actionable_report(self) -> str:
@@ -387,17 +405,21 @@ class QuickWinFinder:
if not priority_wins:
continue
- lines.extend([
- f"\n[{label}] ({len(priority_wins)} items)",
- "-" * 40,
- ])
+ lines.extend(
+ [
+ f"\n[{label}] ({len(priority_wins)} items)",
+ "-" * 40,
+ ]
+ )
for i, win in enumerate(priority_wins[:5], 1):
- lines.extend([
- f"\n{i}. {win.title}",
- f" {win.description[:100]}...",
- f" Impact: {win.estimated_impact}",
- ])
+ lines.extend(
+ [
+ f"\n{i}. {win.title}",
+ f" {win.description[:100]}...",
+ f" Impact: {win.estimated_impact}",
+ ]
+ )
if win.auto_fixable:
lines.append(" [Auto-fixable]")
@@ -405,14 +427,16 @@ class QuickWinFinder:
lines.append(f"\n ... and {len(priority_wins) - 5} more")
# Summary
- lines.extend([
- "",
- "=" * 50,
- "SUMMARY",
- f"Total quick-wins: {len(wins)}",
- f"Auto-fixable: {len([w for w in wins if w.auto_fixable])}",
- "",
- "Run 'dss fix --preview' to see suggested changes.",
- ])
+ lines.extend(
+ [
+ "",
+ "=" * 50,
+ "SUMMARY",
+ f"Total quick-wins: {len(wins)}",
+ f"Auto-fixable: {len([w for w in wins if w.auto_fixable])}",
+ "",
+ "Run 'dss fix --preview' to see suggested changes.",
+ ]
+ )
return "\n".join(lines)
diff --git a/dss/analyze/react.py b/dss/analyze/react.py
index e42b5e2..a7e27be 100644
--- a/dss/analyze/react.py
+++ b/dss/analyze/react.py
@@ -1,5 +1,5 @@
"""
-React Project Analyzer
+React Project Analyzer.
Analyzes React codebases to extract component information,
detect patterns, and identify style usage.
@@ -7,90 +7,58 @@ detect patterns, and identify style usage.
import re
from pathlib import Path
-from typing import List, Dict, Any, Optional, Set, Tuple
-from dataclasses import dataclass, field
-
-from .base import (
- ComponentInfo,
- Location,
- StylePattern,
- StylingApproach,
-)
+from typing import Any, Dict, List, Optional, Set
+from .base import ComponentInfo, Location
# Patterns for React component detection
FUNCTIONAL_COMPONENT = re.compile(
- r'(?:export\s+)?(?:const|let|var|function)\s+([A-Z][A-Za-z0-9]*)\s*(?::\s*(?:React\.)?FC)?'
- r'\s*(?:=\s*(?:\([^)]*\)|[a-zA-Z_]\w*)\s*=>|\()',
- re.MULTILINE
+ r"(?:export\s+)?(?:const|let|var|function)\s+([A-Z][A-Za-z0-9]*)\s*(?::\s*(?:React\.)?FC)?"
+ r"\s*(?:=\s*(?:\([^)]*\)|[a-zA-Z_]\w*)\s*=>|\()",
+ re.MULTILINE,
)
CLASS_COMPONENT = re.compile(
- r'class\s+([A-Z][A-Za-z0-9]*)\s+extends\s+(?:React\.)?(?:Component|PureComponent)',
- re.MULTILINE
+ r"class\s+([A-Z][A-Za-z0-9]*)\s+extends\s+(?:React\.)?(?:Component|PureComponent)", re.MULTILINE
)
FORWARD_REF = re.compile(
- r'(?:export\s+)?(?:const|let)\s+([A-Z][A-Za-z0-9]*)\s*=\s*(?:React\.)?forwardRef',
- re.MULTILINE
+ r"(?:export\s+)?(?:const|let)\s+([A-Z][A-Za-z0-9]*)\s*=\s*(?:React\.)?forwardRef", re.MULTILINE
)
MEMO_COMPONENT = re.compile(
- r'(?:export\s+)?(?:const|let)\s+([A-Z][A-Za-z0-9]*)\s*=\s*(?:React\.)?memo\(',
- re.MULTILINE
+ r"(?:export\s+)?(?:const|let)\s+([A-Z][A-Za-z0-9]*)\s*=\s*(?:React\.)?memo\(", re.MULTILINE
)
# Import patterns
IMPORT_PATTERN = re.compile(
- r'import\s+(?:\{[^}]+\}|\*\s+as\s+\w+|\w+)\s+from\s+["\']([^"\']+)["\']',
- re.MULTILINE
+ r'import\s+(?:\{[^}]+\}|\*\s+as\s+\w+|\w+)\s+from\s+["\']([^"\']+)["\']', re.MULTILINE
)
STYLE_IMPORT = re.compile(
- r'import\s+(?:(\w+)\s+from\s+)?["\']([^"\']+\.(?:css|scss|sass|less|styl))["\']',
- re.MULTILINE
+ r'import\s+(?:(\w+)\s+from\s+)?["\']([^"\']+\.(?:css|scss|sass|less|styl))["\']', re.MULTILINE
)
# Inline style patterns
-INLINE_STYLE_OBJECT = re.compile(
- r'style\s*=\s*\{\s*\{([^}]+)\}\s*\}',
- re.MULTILINE | re.DOTALL
-)
+INLINE_STYLE_OBJECT = re.compile(r"style\s*=\s*\{\s*\{([^}]+)\}\s*\}", re.MULTILINE | re.DOTALL)
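+# Matches JSX double-brace inline styles, e.g. style={{ color: "red" }}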
-INLINE_STYLE_VAR = re.compile(
- r'style\s*=\s*\{(\w+)\}',
- re.MULTILINE
-)
+INLINE_STYLE_VAR = re.compile(r"style\s*=\s*\{(\w+)\}", re.MULTILINE)
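+# Matches style bound to a variable, e.g. style={buttonStyle}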
# Props extraction
-PROPS_DESTRUCTURE = re.compile(
- r'\(\s*\{\s*([^}]+)\s*\}\s*(?::\s*[^)]+)?\)',
- re.MULTILINE
-)
+PROPS_DESTRUCTURE = re.compile(r"\(\s*\{\s*([^}]+)\s*\}\s*(?::\s*[^)]+)?\)", re.MULTILINE)
-PROPS_INTERFACE = re.compile(
- r'interface\s+\w*Props\s*\{([^}]+)\}',
- re.MULTILINE | re.DOTALL
-)
+PROPS_INTERFACE = re.compile(r"interface\s+\w*Props\s*\{([^}]+)\}", re.MULTILINE | re.DOTALL)
-PROPS_TYPE = re.compile(
- r'type\s+\w*Props\s*=\s*\{([^}]+)\}',
- re.MULTILINE | re.DOTALL
-)
+PROPS_TYPE = re.compile(r"type\s+\w*Props\s*=\s*\{([^}]+)\}", re.MULTILINE | re.DOTALL)
class ReactAnalyzer:
- """
- Analyzes React projects for component structure and style usage.
- """
+ """Analyzes React projects for component structure and style usage."""
def __init__(self, root_path: str):
self.root = Path(root_path).resolve()
- async def analyze(
- self,
- component_files: Optional[List[Path]] = None
- ) -> List[ComponentInfo]:
+ async def analyze(self, component_files: Optional[List[Path]] = None) -> List[ComponentInfo]:
"""
Analyze React components in the project.
@@ -110,7 +78,7 @@ class ReactAnalyzer:
try:
file_components = await self._analyze_file(file_path)
components.extend(file_components)
- except Exception as e:
+ except Exception:
# Log error but continue
continue
@@ -118,21 +86,23 @@ class ReactAnalyzer:
def _find_component_files(self) -> List[Path]:
"""Find all potential React component files."""
- skip_dirs = {'node_modules', '.git', 'dist', 'build', '.next'}
+ skip_dirs = {"node_modules", ".git", "dist", "build", ".next"}
component_files = []
- for ext in ['*.jsx', '*.tsx']:
+ for ext in ["*.jsx", "*.tsx"]:
for path in self.root.rglob(ext):
if not any(skip in path.parts for skip in skip_dirs):
component_files.append(path)
# Also check .js/.ts files that look like components
- for ext in ['*.js', '*.ts']:
+ for ext in ["*.js", "*.ts"]:
for path in self.root.rglob(ext):
if any(skip in path.parts for skip in skip_dirs):
continue
# Skip config and utility files
- if any(x in path.name.lower() for x in ['config', 'util', 'helper', 'hook', 'context']):
+ if any(
+ x in path.name.lower() for x in ["config", "util", "helper", "hook", "context"]
+ ):
continue
# Check if PascalCase (likely component)
if path.stem[0].isupper():
@@ -142,7 +112,7 @@ class ReactAnalyzer:
async def _analyze_file(self, file_path: Path) -> List[ComponentInfo]:
"""Analyze a single file for React components."""
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
components = []
# Find all components in the file
@@ -152,22 +122,22 @@ class ReactAnalyzer:
for match in FUNCTIONAL_COMPONENT.finditer(content):
name = match.group(1)
if self._is_valid_component_name(name):
- component_matches.append((name, 'functional', match.start()))
+ component_matches.append((name, "functional", match.start()))
# Class components
for match in CLASS_COMPONENT.finditer(content):
name = match.group(1)
- component_matches.append((name, 'class', match.start()))
+ component_matches.append((name, "class", match.start()))
# forwardRef components
for match in FORWARD_REF.finditer(content):
name = match.group(1)
- component_matches.append((name, 'forwardRef', match.start()))
+ component_matches.append((name, "forwardRef", match.start()))
# memo components
for match in MEMO_COMPONENT.finditer(content):
name = match.group(1)
- component_matches.append((name, 'memo', match.start()))
+ component_matches.append((name, "memo", match.start()))
# Dedupe by name (keep first occurrence)
seen_names = set()
@@ -193,19 +163,21 @@ class ReactAnalyzer:
# Check if component has styles
has_styles = bool(style_files) or bool(inline_styles)
- components.append(ComponentInfo(
- name=name,
- path=str(file_path.relative_to(self.root)),
- type=comp_type,
- props=props,
- has_styles=has_styles,
- style_files=style_files,
- inline_style_count=len(inline_styles),
- imports=imports,
- exports=self._find_exports(content, name),
- children=children,
- line_count=content.count('\n') + 1,
- ))
+ components.append(
+ ComponentInfo(
+ name=name,
+ path=str(file_path.relative_to(self.root)),
+ type=comp_type,
+ props=props,
+ has_styles=has_styles,
+ style_files=style_files,
+ inline_style_count=len(inline_styles),
+ imports=imports,
+ exports=self._find_exports(content, name),
+ children=children,
+ line_count=content.count("\n") + 1,
+ )
+ )
return components
@@ -217,10 +189,22 @@ class ReactAnalyzer:
# Filter out common non-component patterns
invalid_names = {
- 'React', 'Component', 'PureComponent', 'Fragment',
- 'Suspense', 'Provider', 'Consumer', 'Context',
- 'Error', 'ErrorBoundary', 'Wrapper', 'Container',
- 'Props', 'State', 'Type', 'Interface',
+ "React",
+ "Component",
+ "PureComponent",
+ "Fragment",
+ "Suspense",
+ "Provider",
+ "Consumer",
+ "Context",
+ "Error",
+ "ErrorBoundary",
+ "Wrapper",
+ "Container",
+ "Props",
+ "State",
+ "Type",
+ "Interface",
}
return name not in invalid_names
@@ -231,7 +215,7 @@ class ReactAnalyzer:
for match in IMPORT_PATTERN.finditer(content):
import_path = match.group(1)
# Skip node_modules style imports for brevity
- if not import_path.startswith('.') and '/' not in import_path:
+ if not import_path.startswith(".") and "/" not in import_path:
continue
imports.append(import_path)
return imports
@@ -250,11 +234,13 @@ class ReactAnalyzer:
# style={{ ... }}
for match in INLINE_STYLE_OBJECT.finditer(content):
- line = content[:match.start()].count('\n') + 1
- locations.append(Location(
- file_path="", # Will be set by caller
- line=line,
- ))
+ line = content[: match.start()].count("\n") + 1
+ locations.append(
+ Location(
+ file_path="", # Will be set by caller
+ line=line,
+ )
+ )
return locations
@@ -266,7 +252,7 @@ class ReactAnalyzer:
for match in PROPS_DESTRUCTURE.finditer(content):
props_str = match.group(1)
# Extract prop names from destructuring
- for prop in re.findall(r'(\w+)(?:\s*[=:])?', props_str):
+ for prop in re.findall(r"(\w+)(?:\s*[=:])?", props_str):
if prop and not prop[0].isupper(): # Skip types
props.add(prop)
@@ -275,28 +261,24 @@ class ReactAnalyzer:
for match in pattern.finditer(content):
props_str = match.group(1)
# Extract prop names
- for line in props_str.split('\n'):
- prop_match = re.match(r'\s*(\w+)\s*[?:]', line)
+ for line in props_str.split("\n"):
+ prop_match = re.match(r"\s*(\w+)\s*[?:]", line)
if prop_match:
props.add(prop_match.group(1))
return list(props)
- def _find_child_components(
- self,
- content: str,
- current_components: Set[str]
- ) -> List[str]:
+ def _find_child_components(self, content: str, current_components: Set[str]) -> List[str]:
"""Find child components used in JSX."""
children = set()
# Find JSX elements that look like components (PascalCase)
- jsx_pattern = re.compile(r'<([A-Z][A-Za-z0-9]*)')
+ jsx_pattern = re.compile(r"<([A-Z][A-Za-z0-9]*)")
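+ # PascalCase opening tags, e.g. <Button or <UserCard, signal child component usage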
for match in jsx_pattern.finditer(content):
component_name = match.group(1)
# Skip current file's components and React built-ins
if component_name not in current_components:
- if component_name not in {'Fragment', 'Suspense', 'Provider'}:
+ if component_name not in {"Fragment", "Suspense", "Provider"}:
children.add(component_name)
return list(children)
@@ -306,16 +288,16 @@ class ReactAnalyzer:
exports = []
# Default export
- if re.search(rf'export\s+default\s+{component_name}\b', content):
- exports.append('default')
- if re.search(rf'export\s+default\s+(?:function|const)\s+{component_name}\b', content):
- exports.append('default')
+ if re.search(rf"export\s+default\s+{component_name}\b", content):
+ exports.append("default")
+ if re.search(rf"export\s+default\s+(?:function|const)\s+{component_name}\b", content):
+ exports.append("default")
# Named export
- if re.search(rf'export\s+(?:const|function|class)\s+{component_name}\b', content):
- exports.append('named')
- if re.search(r'export\s*\{[^}]*\b' + re.escape(component_name) + r'\b[^}]*\}', content):
- exports.append('named')
+ if re.search(rf"export\s+(?:const|function|class)\s+{component_name}\b", content):
+ exports.append("named")
+ if re.search(r"export\s*\{[^}]*\b" + re.escape(component_name) + r"\b[^}]*\}", content):
+ exports.append("named")
return exports
@@ -332,39 +314,44 @@ class ReactAnalyzer:
search_path = Path(path) if path else self.root
results = []
- for ext in ['*.jsx', '*.tsx', '*.js', '*.ts']:
+ for ext in ["*.jsx", "*.tsx", "*.js", "*.ts"]:
for file_path in search_path.rglob(ext):
- if any(skip in file_path.parts for skip in
- {'node_modules', '.git', 'dist', 'build'}):
+ if any(
+ skip in file_path.parts for skip in {"node_modules", ".git", "dist", "build"}
+ ):
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
# Find style={{ ... }}
for match in INLINE_STYLE_OBJECT.finditer(content):
- line = content[:match.start()].count('\n') + 1
+ line = content[: match.start()].count("\n") + 1
style_content = match.group(1).strip()
- results.append({
- 'file': str(file_path.relative_to(self.root)),
- 'line': line,
- 'content': style_content[:200],
- 'type': 'object',
- })
+ results.append(
+ {
+ "file": str(file_path.relative_to(self.root)),
+ "line": line,
+ "content": style_content[:200],
+ "type": "object",
+ }
+ )
# Find style={variable}
for match in INLINE_STYLE_VAR.finditer(content):
- line = content[:match.start()].count('\n') + 1
+ line = content[: match.start()].count("\n") + 1
var_name = match.group(1)
- results.append({
- 'file': str(file_path.relative_to(self.root)),
- 'line': line,
- 'content': f'style={{{var_name}}}',
- 'type': 'variable',
- 'variable': var_name,
- })
+ results.append(
+ {
+ "file": str(file_path.relative_to(self.root)),
+ "line": line,
+ "content": f"style={{{var_name}}}",
+ "type": "variable",
+ "variable": var_name,
+ }
+ )
except Exception:
continue
@@ -392,48 +379,50 @@ class ReactAnalyzer:
Returns dict with pattern types and their occurrences.
"""
patterns = {
- 'inline_styles': [],
- 'css_modules': [],
- 'styled_components': [],
- 'emotion': [],
- 'tailwind': [],
- 'css_classes': [],
+ "inline_styles": [],
+ "css_modules": [],
+ "styled_components": [],
+ "emotion": [],
+ "tailwind": [],
+ "css_classes": [],
}
component_files = self._find_component_files()
for file_path in component_files:
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
rel_path = str(file_path.relative_to(self.root))
# CSS Modules
if re.search(r'import\s+\w+\s+from\s+["\'].*\.module\.', content):
- patterns['css_modules'].append({'file': rel_path})
+ patterns["css_modules"].append({"file": rel_path})
# styled-components
if re.search(r'styled\.|from\s+["\']styled-components', content):
- patterns['styled_components'].append({'file': rel_path})
+ patterns["styled_components"].append({"file": rel_path})
# Emotion
- if re.search(r'@emotion|css`', content):
- patterns['emotion'].append({'file': rel_path})
+ if re.search(r"@emotion|css`", content):
+ patterns["emotion"].append({"file": rel_path})
# Tailwind (className with utility classes)
if re.search(r'className\s*=\s*["\'][^"\']*(?:flex|grid|p-\d|m-\d|bg-)', content):
- patterns['tailwind'].append({'file': rel_path})
+ patterns["tailwind"].append({"file": rel_path})
# Regular CSS classes
if re.search(r'className\s*=\s*["\'][a-zA-Z]', content):
- patterns['css_classes'].append({'file': rel_path})
+ patterns["css_classes"].append({"file": rel_path})
# Inline styles
for match in INLINE_STYLE_OBJECT.finditer(content):
- line = content[:match.start()].count('\n') + 1
- patterns['inline_styles'].append({
- 'file': rel_path,
- 'line': line,
- })
+ line = content[: match.start()].count("\n") + 1
+ patterns["inline_styles"].append(
+ {
+ "file": rel_path,
+ "line": line,
+ }
+ )
except Exception:
continue
diff --git a/dss/analyze/scanner.py b/dss/analyze/scanner.py
index cfd724f..3cb5f72 100644
--- a/dss/analyze/scanner.py
+++ b/dss/analyze/scanner.py
@@ -1,55 +1,59 @@
"""
-Project Scanner
+Project Scanner.
Scans file system to discover project structure, frameworks, and style files.
"""
import json
import re
-from pathlib import Path
-from typing import List, Dict, Any, Optional, Set, Tuple
from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Tuple
-from .base import (
- Framework,
- StylingApproach,
- StyleFile,
- ProjectAnalysis,
-)
-
+from .base import Framework, ProjectAnalysis, StyleFile, StylingApproach
# Directories to skip during scanning
SKIP_DIRS = {
- 'node_modules',
- '.git',
- '.next',
- '.nuxt',
- 'dist',
- 'build',
- 'out',
- '.cache',
- 'coverage',
- '__pycache__',
- '.venv',
- 'venv',
- '.turbo',
- '.vercel',
+ "node_modules",
+ ".git",
+ ".next",
+ ".nuxt",
+ "dist",
+ "build",
+ "out",
+ ".cache",
+ "coverage",
+ "__pycache__",
+ ".venv",
+ "venv",
+ ".turbo",
+ ".vercel",
}
# File extensions to scan
SCAN_EXTENSIONS = {
# JavaScript/TypeScript
- '.js', '.jsx', '.ts', '.tsx', '.mjs', '.cjs',
+ ".js",
+ ".jsx",
+ ".ts",
+ ".tsx",
+ ".mjs",
+ ".cjs",
# Styles
- '.css', '.scss', '.sass', '.less', '.styl',
+ ".css",
+ ".scss",
+ ".sass",
+ ".less",
+ ".styl",
# Config
- '.json',
+ ".json",
}
@dataclass
class ScanResult:
"""Result of file system scan."""
+
files: List[Path] = field(default_factory=list)
style_files: List[Path] = field(default_factory=list)
component_files: List[Path] = field(default_factory=list)
@@ -60,6 +64,7 @@ class ScanResult:
class ProjectScanner:
"""
Scans a project directory to identify:
+
- Framework (React, Next, Vue, etc.)
- Styling approach (CSS modules, styled-components, Tailwind, etc.)
- Component files
@@ -88,6 +93,7 @@ class ProjectScanner:
# Check cache if enabled
if self.use_cache:
import time
+
cache_key = str(self.root)
if cache_key in self._cache:
timestamp, cached_analysis = self._cache[cache_key]
@@ -118,20 +124,19 @@ class ProjectScanner:
"total_lines": scan_result.total_lines,
"component_files": len(scan_result.component_files),
"style_files": len(scan_result.style_files),
- }
+ },
)
# Determine primary styling approach
if styling:
analysis.styling_approaches = styling
# Primary is the one with most occurrences
- analysis.primary_styling = max(
- styling, key=lambda x: x.count
- ).type if styling else None
+ analysis.primary_styling = max(styling, key=lambda x: x.count).type if styling else None
# Cache result if enabled
if self.use_cache:
import time
+
cache_key = str(self.root)
self._cache[cache_key] = (time.time(), analysis)
@@ -156,39 +161,39 @@ class ProjectScanner:
result.files.append(path)
# Categorize files
- if suffix in {'.css', '.scss', '.sass', '.less', '.styl'}:
+ if suffix in {".css", ".scss", ".sass", ".less", ".styl"}:
result.style_files.append(path)
- elif suffix in {'.jsx', '.tsx'}:
+ elif suffix in {".jsx", ".tsx"}:
result.component_files.append(path)
- elif suffix in {'.js', '.ts'}:
+ elif suffix in {".js", ".ts"}:
# Check if it's a component or config
name = path.name.lower()
- if any(cfg in name for cfg in ['config', 'rc', '.config']):
+ if any(cfg in name for cfg in ["config", "rc", ".config"]):
result.config_files[name] = path
elif self._looks_like_component(path):
result.component_files.append(path)
# Count lines (approximate for large files)
try:
- content = path.read_text(encoding='utf-8', errors='ignore')
- result.total_lines += content.count('\n') + 1
+ content = path.read_text(encoding="utf-8", errors="ignore")
+ result.total_lines += content.count("\n") + 1
except Exception:
pass
# Look for specific config files
config_names = [
- 'package.json',
- 'tsconfig.json',
- 'tailwind.config.js',
- 'tailwind.config.ts',
- 'next.config.js',
- 'next.config.mjs',
- 'vite.config.js',
- 'vite.config.ts',
- 'nuxt.config.js',
- 'nuxt.config.ts',
- '.eslintrc.json',
- '.eslintrc.js',
+ "package.json",
+ "tsconfig.json",
+ "tailwind.config.js",
+ "tailwind.config.ts",
+ "next.config.js",
+ "next.config.mjs",
+ "vite.config.js",
+ "vite.config.ts",
+ "nuxt.config.js",
+ "nuxt.config.ts",
+ ".eslintrc.json",
+ ".eslintrc.js",
]
for name in config_names:
@@ -205,50 +210,47 @@ class ProjectScanner:
if name[0].isupper() and not name.isupper():
return True
# Common component patterns
- if any(x in name.lower() for x in ['component', 'page', 'view', 'screen']):
+ if any(x in name.lower() for x in ["component", "page", "view", "screen"]):
return True
return False
- def _detect_framework(
- self,
- config_files: Dict[str, Path]
- ) -> Tuple[Framework, str]:
+ def _detect_framework(self, config_files: Dict[str, Path]) -> Tuple[Framework, str]:
"""Detect the UI framework and version."""
# Check package.json for dependencies
- pkg_json = config_files.get('package.json')
+ pkg_json = config_files.get("package.json")
if not pkg_json:
return Framework.UNKNOWN, ""
try:
pkg = json.loads(pkg_json.read_text())
deps = {
- **pkg.get('dependencies', {}),
- **pkg.get('devDependencies', {}),
+ **pkg.get("dependencies", {}),
+ **pkg.get("devDependencies", {}),
}
# Check for Next.js first (it includes React)
- if 'next' in deps:
- return Framework.NEXT, deps.get('next', '').lstrip('^~')
+ if "next" in deps:
+ return Framework.NEXT, deps.get("next", "").lstrip("^~")
# Check for Nuxt (Vue-based)
- if 'nuxt' in deps:
- return Framework.NUXT, deps.get('nuxt', '').lstrip('^~')
+ if "nuxt" in deps:
+ return Framework.NUXT, deps.get("nuxt", "").lstrip("^~")
# Check for other frameworks
- if 'react' in deps:
- return Framework.REACT, deps.get('react', '').lstrip('^~')
+ if "react" in deps:
+ return Framework.REACT, deps.get("react", "").lstrip("^~")
- if 'vue' in deps:
- return Framework.VUE, deps.get('vue', '').lstrip('^~')
+ if "vue" in deps:
+ return Framework.VUE, deps.get("vue", "").lstrip("^~")
- if '@angular/core' in deps:
- return Framework.ANGULAR, deps.get('@angular/core', '').lstrip('^~')
+ if "@angular/core" in deps:
+ return Framework.ANGULAR, deps.get("@angular/core", "").lstrip("^~")
- if 'svelte' in deps:
- return Framework.SVELTE, deps.get('svelte', '').lstrip('^~')
+ if "svelte" in deps:
+ return Framework.SVELTE, deps.get("svelte", "").lstrip("^~")
- if 'solid-js' in deps:
- return Framework.SOLID, deps.get('solid-js', '').lstrip('^~')
+ if "solid-js" in deps:
+ return Framework.SOLID, deps.get("solid-js", "").lstrip("^~")
except (json.JSONDecodeError, KeyError):
pass
@@ -257,72 +259,66 @@ class ProjectScanner:
def _detect_styling(self, scan_result: ScanResult) -> List:
"""Detect styling approaches used in the project."""
- from .base import StylePattern, Location
+ from .base import Location, StylePattern
patterns: Dict[StylingApproach, StylePattern] = {}
# Check config files for styling indicators
- pkg_json = scan_result.config_files.get('package.json')
+ pkg_json = scan_result.config_files.get("package.json")
if pkg_json:
try:
pkg = json.loads(pkg_json.read_text())
deps = {
- **pkg.get('dependencies', {}),
- **pkg.get('devDependencies', {}),
+ **pkg.get("dependencies", {}),
+ **pkg.get("devDependencies", {}),
}
# Tailwind
- if 'tailwindcss' in deps:
+ if "tailwindcss" in deps:
patterns[StylingApproach.TAILWIND] = StylePattern(
type=StylingApproach.TAILWIND,
count=1,
- examples=["tailwindcss in dependencies"]
+ examples=["tailwindcss in dependencies"],
)
# styled-components
- if 'styled-components' in deps:
+ if "styled-components" in deps:
patterns[StylingApproach.STYLED_COMPONENTS] = StylePattern(
type=StylingApproach.STYLED_COMPONENTS,
count=1,
- examples=["styled-components in dependencies"]
+ examples=["styled-components in dependencies"],
)
# Emotion
- if '@emotion/react' in deps or '@emotion/styled' in deps:
+ if "@emotion/react" in deps or "@emotion/styled" in deps:
patterns[StylingApproach.EMOTION] = StylePattern(
- type=StylingApproach.EMOTION,
- count=1,
- examples=["@emotion in dependencies"]
+ type=StylingApproach.EMOTION, count=1, examples=["@emotion in dependencies"]
)
# SASS/SCSS
- if 'sass' in deps or 'node-sass' in deps:
+ if "sass" in deps or "node-sass" in deps:
patterns[StylingApproach.SASS_SCSS] = StylePattern(
- type=StylingApproach.SASS_SCSS,
- count=1,
- examples=["sass in dependencies"]
+ type=StylingApproach.SASS_SCSS, count=1, examples=["sass in dependencies"]
)
except (json.JSONDecodeError, KeyError):
pass
# Check tailwind config
- if 'tailwind.config.js' in scan_result.config_files or \
- 'tailwind.config.ts' in scan_result.config_files:
+ if (
+ "tailwind.config.js" in scan_result.config_files
+ or "tailwind.config.ts" in scan_result.config_files
+ ):
if StylingApproach.TAILWIND not in patterns:
patterns[StylingApproach.TAILWIND] = StylePattern(
- type=StylingApproach.TAILWIND,
- count=1,
- examples=["tailwind.config found"]
+ type=StylingApproach.TAILWIND, count=1, examples=["tailwind.config found"]
)
# Scan component files for styling patterns
for comp_file in scan_result.component_files[:100]: # Limit for performance
try:
- content = comp_file.read_text(encoding='utf-8', errors='ignore')
- self._detect_patterns_in_file(
- content, str(comp_file), patterns
- )
+ content = comp_file.read_text(encoding="utf-8", errors="ignore")
+ self._detect_patterns_in_file(content, str(comp_file), patterns)
except Exception:
pass
@@ -330,9 +326,9 @@ class ProjectScanner:
for style_file in scan_result.style_files:
suffix = style_file.suffix.lower()
- if suffix == '.css':
+ if suffix == ".css":
# Check for CSS modules
- if '.module.css' in style_file.name.lower():
+ if ".module.css" in style_file.name.lower():
approach = StylingApproach.CSS_MODULES
else:
approach = StylingApproach.VANILLA_CSS
@@ -340,11 +336,9 @@ class ProjectScanner:
if approach not in patterns:
patterns[approach] = StylePattern(type=approach)
patterns[approach].count += 1
- patterns[approach].locations.append(
- Location(str(style_file), 1)
- )
+ patterns[approach].locations.append(Location(str(style_file), 1))
- elif suffix in {'.scss', '.sass'}:
+ elif suffix in {".scss", ".sass"}:
if StylingApproach.SASS_SCSS not in patterns:
patterns[StylingApproach.SASS_SCSS] = StylePattern(
type=StylingApproach.SASS_SCSS
@@ -354,13 +348,10 @@ class ProjectScanner:
return list(patterns.values())
def _detect_patterns_in_file(
- self,
- content: str,
- file_path: str,
- patterns: Dict[StylingApproach, Any]
+ self, content: str, file_path: str, patterns: Dict[StylingApproach, Any]
) -> None:
"""Detect styling patterns in a single file."""
- from .base import StylePattern, Location
+ from .base import Location, StylePattern
# CSS Modules import
css_module_pattern = re.compile(
@@ -372,15 +363,11 @@ class ProjectScanner:
type=StylingApproach.CSS_MODULES
)
patterns[StylingApproach.CSS_MODULES].count += 1
- line_num = content[:match.start()].count('\n') + 1
- patterns[StylingApproach.CSS_MODULES].locations.append(
- Location(file_path, line_num)
- )
+ line_num = content[: match.start()].count("\n") + 1
+ patterns[StylingApproach.CSS_MODULES].locations.append(Location(file_path, line_num))
# styled-components
- styled_pattern = re.compile(
- r"(styled\.|styled\()|(from\s+['\"]styled-components['\"])"
- )
+ styled_pattern = re.compile(r"(styled\.|styled\()|(from\s+['\"]styled-components['\"])")
for match in styled_pattern.finditer(content):
if StylingApproach.STYLED_COMPONENTS not in patterns:
patterns[StylingApproach.STYLED_COMPONENTS] = StylePattern(
@@ -389,33 +376,23 @@ class ProjectScanner:
patterns[StylingApproach.STYLED_COMPONENTS].count += 1
# Emotion
- emotion_pattern = re.compile(
- r"(css`|@emotion|from\s+['\"]@emotion)"
- )
+ emotion_pattern = re.compile(r"(css`|@emotion|from\s+['\"]@emotion)")
for match in emotion_pattern.finditer(content):
if StylingApproach.EMOTION not in patterns:
- patterns[StylingApproach.EMOTION] = StylePattern(
- type=StylingApproach.EMOTION
- )
+ patterns[StylingApproach.EMOTION] = StylePattern(type=StylingApproach.EMOTION)
patterns[StylingApproach.EMOTION].count += 1
# Inline styles
- inline_pattern = re.compile(
- r'style\s*=\s*\{\s*\{[^}]+\}\s*\}'
- )
+ inline_pattern = re.compile(r"style\s*=\s*\{\s*\{[^}]+\}\s*\}")
for match in inline_pattern.finditer(content):
if StylingApproach.INLINE_STYLES not in patterns:
patterns[StylingApproach.INLINE_STYLES] = StylePattern(
type=StylingApproach.INLINE_STYLES
)
patterns[StylingApproach.INLINE_STYLES].count += 1
- line_num = content[:match.start()].count('\n') + 1
- patterns[StylingApproach.INLINE_STYLES].locations.append(
- Location(file_path, line_num)
- )
- patterns[StylingApproach.INLINE_STYLES].examples.append(
- match.group(0)[:100]
- )
+ line_num = content[: match.start()].count("\n") + 1
+ patterns[StylingApproach.INLINE_STYLES].locations.append(Location(file_path, line_num))
+ patterns[StylingApproach.INLINE_STYLES].examples.append(match.group(0)[:100])
# Tailwind classes
tailwind_pattern = re.compile(
@@ -423,9 +400,7 @@ class ProjectScanner:
)
for match in tailwind_pattern.finditer(content):
if StylingApproach.TAILWIND not in patterns:
- patterns[StylingApproach.TAILWIND] = StylePattern(
- type=StylingApproach.TAILWIND
- )
+ patterns[StylingApproach.TAILWIND] = StylePattern(type=StylingApproach.TAILWIND)
patterns[StylingApproach.TAILWIND].count += 1
def _analyze_style_files(self, style_paths: List[Path]) -> List[StyleFile]:
@@ -434,43 +409,45 @@ class ProjectScanner:
for path in style_paths:
try:
- content = path.read_text(encoding='utf-8', errors='ignore')
+ content = path.read_text(encoding="utf-8", errors="ignore")
# Determine type
suffix = path.suffix.lower()
- if '.module.' in path.name.lower():
- file_type = 'css-module'
- elif suffix == '.scss':
- file_type = 'scss'
- elif suffix == '.sass':
- file_type = 'sass'
- elif suffix == '.less':
- file_type = 'less'
+ if ".module." in path.name.lower():
+ file_type = "css-module"
+ elif suffix == ".scss":
+ file_type = "scss"
+ elif suffix == ".sass":
+ file_type = "sass"
+ elif suffix == ".less":
+ file_type = "less"
else:
- file_type = 'css'
+ file_type = "css"
# Count variables
var_count = 0
- if file_type == 'css' or file_type == 'css-module':
- var_count = len(re.findall(r'--[\w-]+\s*:', content))
- elif file_type in {'scss', 'sass'}:
- var_count = len(re.findall(r'\$[\w-]+\s*:', content))
+ if file_type == "css" or file_type == "css-module":
+ var_count = len(re.findall(r"--[\w-]+\s*:", content))
+ elif file_type in {"scss", "sass"}:
+ var_count = len(re.findall(r"\$[\w-]+\s*:", content))
# Count selectors (approximate)
- selector_count = len(re.findall(r'[.#][\w-]+\s*\{', content))
+ selector_count = len(re.findall(r"[.#][\w-]+\s*\{", content))
# Find imports
imports = re.findall(r'@import\s+["\']([^"\']+)["\']', content)
- style_files.append(StyleFile(
- path=str(path.relative_to(self.root)),
- type=file_type,
- size_bytes=path.stat().st_size,
- line_count=content.count('\n') + 1,
- variable_count=var_count,
- selector_count=selector_count,
- imports=imports,
- ))
+ style_files.append(
+ StyleFile(
+ path=str(path.relative_to(self.root)),
+ type=file_type,
+ size_bytes=path.stat().st_size,
+ line_count=content.count("\n") + 1,
+ variable_count=var_count,
+ selector_count=selector_count,
+ imports=imports,
+ )
+ )
except Exception:
continue
@@ -479,6 +456,7 @@ class ProjectScanner:
def get_file_tree(self, max_depth: int = 3) -> Dict[str, Any]:
"""Get project file tree structure."""
+
def build_tree(path: Path, depth: int) -> Dict[str, Any]:
if depth > max_depth:
return {"...": "truncated"}
diff --git a/dss/analyze/styles.py b/dss/analyze/styles.py
index 6cfb12d..35deb9b 100644
--- a/dss/analyze/styles.py
+++ b/dss/analyze/styles.py
@@ -1,5 +1,5 @@
"""
-Style Pattern Analyzer
+Style Pattern Analyzer.
Detects and analyzes style patterns in code to identify:
- Hardcoded values that should be tokens
@@ -9,65 +9,61 @@ Detects and analyzes style patterns in code to identify:
"""
import re
-from pathlib import Path
-from typing import List, Dict, Any, Optional, Set, Tuple
from collections import defaultdict
-from dataclasses import dataclass, field
-
-from .base import (
- Location,
- TokenCandidate,
- StylePattern,
- StylingApproach,
-)
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Dict, List
+from .base import Location, TokenCandidate
# Color patterns
-HEX_COLOR = re.compile(r'#(?:[0-9a-fA-F]{3}){1,2}\b')
-RGB_COLOR = re.compile(r'rgba?\s*\(\s*\d+\s*,\s*\d+\s*,\s*\d+(?:\s*,\s*[\d.]+)?\s*\)')
-HSL_COLOR = re.compile(r'hsla?\s*\(\s*\d+\s*,\s*[\d.]+%\s*,\s*[\d.]+%(?:\s*,\s*[\d.]+)?\s*\)')
-OKLCH_COLOR = re.compile(r'oklch\s*\([^)]+\)')
+HEX_COLOR = re.compile(r"#(?:[0-9a-fA-F]{3}){1,2}\b")
+RGB_COLOR = re.compile(r"rgba?\s*\(\s*\d+\s*,\s*\d+\s*,\s*\d+(?:\s*,\s*[\d.]+)?\s*\)")
+HSL_COLOR = re.compile(r"hsla?\s*\(\s*\d+\s*,\s*[\d.]+%\s*,\s*[\d.]+%(?:\s*,\s*[\d.]+)?\s*\)")
+OKLCH_COLOR = re.compile(r"oklch\s*\([^)]+\)")
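+# e.g. "#fff", "rgba(0, 0, 0, 0.5)", "hsl(210, 40%, 50%)", "oklch(0.7 0.1 200)"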
# Dimension patterns
-PX_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*px\b')
-REM_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*rem\b')
-EM_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*em\b')
-PERCENT_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*%\b')
+PX_VALUE = re.compile(r"\b(\d+(?:\.\d+)?)\s*px\b")
+REM_VALUE = re.compile(r"\b(\d+(?:\.\d+)?)\s*rem\b")
+EM_VALUE = re.compile(r"\b(\d+(?:\.\d+)?)\s*em\b")
+PERCENT_VALUE = re.compile(r"\b(\d+(?:\.\d+)?)\s*%\b")
# Font patterns
-FONT_SIZE = re.compile(r'font-size\s*:\s*([^;]+)')
-FONT_FAMILY = re.compile(r'font-family\s*:\s*([^;]+)')
-FONT_WEIGHT = re.compile(r'font-weight\s*:\s*(\d+|normal|bold|lighter|bolder)')
-LINE_HEIGHT = re.compile(r'line-height\s*:\s*([^;]+)')
+FONT_SIZE = re.compile(r"font-size\s*:\s*([^;]+)")
+FONT_FAMILY = re.compile(r"font-family\s*:\s*([^;]+)")
+FONT_WEIGHT = re.compile(r"font-weight\s*:\s*(\d+|normal|bold|lighter|bolder)")
+LINE_HEIGHT = re.compile(r"line-height\s*:\s*([^;]+)")
# Spacing patterns
-MARGIN_PADDING = re.compile(r'(?:margin|padding)(?:-(?:top|right|bottom|left))?\s*:\s*([^;]+)')
-GAP = re.compile(r'gap\s*:\s*([^;]+)')
+MARGIN_PADDING = re.compile(r"(?:margin|padding)(?:-(?:top|right|bottom|left))?\s*:\s*([^;]+)")
+GAP = re.compile(r"gap\s*:\s*([^;]+)")
# Border patterns
-BORDER_RADIUS = re.compile(r'border-radius\s*:\s*([^;]+)')
-BORDER_WIDTH = re.compile(r'border(?:-(?:top|right|bottom|left))?-width\s*:\s*([^;]+)')
+BORDER_RADIUS = re.compile(r"border-radius\s*:\s*([^;]+)")
+BORDER_WIDTH = re.compile(r"border(?:-(?:top|right|bottom|left))?-width\s*:\s*([^;]+)")
# Shadow patterns
-BOX_SHADOW = re.compile(r'box-shadow\s*:\s*([^;]+)')
+BOX_SHADOW = re.compile(r"box-shadow\s*:\s*([^;]+)")
# Z-index
-Z_INDEX = re.compile(r'z-index\s*:\s*(\d+)')
+Z_INDEX = re.compile(r"z-index\s*:\s*(\d+)")
@dataclass
class ValueOccurrence:
"""Tracks where a value appears."""
+
value: str
file: str
line: int
property: str # CSS property name
- context: str # Surrounding code
+ context: str # Surrounding code
class StyleAnalyzer:
"""
Analyzes style files and inline styles to find:
+
- Hardcoded values that should be tokens
- Duplicate values
- Inconsistent patterns
@@ -81,9 +77,7 @@ class StyleAnalyzer:
self.font_values: Dict[str, List[ValueOccurrence]] = defaultdict(list)
async def analyze(
- self,
- include_inline: bool = True,
- include_css: bool = True
+ self, include_inline: bool = True, include_css: bool = True
) -> Dict[str, Any]:
"""
Analyze all styles in the project.
@@ -110,24 +104,24 @@ class StyleAnalyzer:
candidates = self._generate_token_candidates()
return {
- 'total_values_found': sum(len(v) for v in self.values.values()),
- 'unique_colors': len(self.color_values),
- 'unique_spacing': len(self.spacing_values),
- 'duplicates': duplicates,
- 'token_candidates': candidates,
+ "total_values_found": sum(len(v) for v in self.values.values()),
+ "unique_colors": len(self.color_values),
+ "unique_spacing": len(self.spacing_values),
+ "duplicates": duplicates,
+ "token_candidates": candidates,
}
async def _scan_style_files(self) -> None:
"""Scan CSS and SCSS files for values."""
- skip_dirs = {'node_modules', '.git', 'dist', 'build'}
+ skip_dirs = {"node_modules", ".git", "dist", "build"}
- for pattern in ['**/*.css', '**/*.scss', '**/*.sass', '**/*.less']:
+ for pattern in ["**/*.css", "**/*.scss", "**/*.sass", "**/*.less"]:
for file_path in self.root.rglob(pattern):
if any(skip in file_path.parts for skip in skip_dirs):
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
rel_path = str(file_path.relative_to(self.root))
self._extract_values_from_css(content, rel_path)
except Exception:
@@ -135,15 +129,15 @@ class StyleAnalyzer:
async def _scan_inline_styles(self) -> None:
"""Scan JS/TS files for inline style values."""
- skip_dirs = {'node_modules', '.git', 'dist', 'build'}
+ skip_dirs = {"node_modules", ".git", "dist", "build"}
- for pattern in ['**/*.jsx', '**/*.tsx', '**/*.js', '**/*.ts']:
+ for pattern in ["**/*.jsx", "**/*.tsx", "**/*.js", "**/*.ts"]:
for file_path in self.root.rglob(pattern):
if any(skip in file_path.parts for skip in skip_dirs):
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
rel_path = str(file_path.relative_to(self.root))
self._extract_values_from_jsx(content, rel_path)
except Exception:
@@ -151,11 +145,11 @@ class StyleAnalyzer:
def _extract_values_from_css(self, content: str, file_path: str) -> None:
"""Extract style values from CSS content."""
- lines = content.split('\n')
+ lines = content.split("\n")
for line_num, line in enumerate(lines, 1):
# Skip comments and empty lines
- if not line.strip() or line.strip().startswith('//') or line.strip().startswith('/*'):
+ if not line.strip() or line.strip().startswith("//") or line.strip().startswith("/*"):
continue
# Extract colors
@@ -176,25 +170,25 @@ class StyleAnalyzer:
# Extract font properties
for match in FONT_SIZE.finditer(line):
value = match.group(1).strip()
- self._record_font(value, file_path, line_num, 'font-size', line.strip())
+ self._record_font(value, file_path, line_num, "font-size", line.strip())
for match in FONT_WEIGHT.finditer(line):
value = match.group(1).strip()
- self._record_font(value, file_path, line_num, 'font-weight', line.strip())
+ self._record_font(value, file_path, line_num, "font-weight", line.strip())
# Extract z-index
for match in Z_INDEX.finditer(line):
value = match.group(1)
- self._record_value(f"z-{value}", file_path, line_num, 'z-index', line.strip())
+ self._record_value(f"z-{value}", file_path, line_num, "z-index", line.strip())
def _extract_values_from_jsx(self, content: str, file_path: str) -> None:
"""Extract style values from JSX inline styles."""
# Find style={{ ... }} blocks
- style_pattern = re.compile(r'style\s*=\s*\{\s*\{([^}]+)\}\s*\}', re.DOTALL)
+ style_pattern = re.compile(r"style\s*=\s*\{\s*\{([^}]+)\}\s*\}", re.DOTALL)
for match in style_pattern.finditer(content):
style_content = match.group(1)
- line_num = content[:match.start()].count('\n') + 1
+ line_num = content[: match.start()].count("\n") + 1
# Parse the style object
# Look for property: value patterns
@@ -205,84 +199,102 @@ class StyleAnalyzer:
prop_value = prop_match.group(2).strip()
# Check for colors
- if any(c in prop_name.lower() for c in ['color', 'background']):
+ if any(c in prop_name.lower() for c in ["color", "background"]):
if HEX_COLOR.search(prop_value) or RGB_COLOR.search(prop_value):
- self._record_color(prop_value.lower(), file_path, line_num, style_content[:100])
+ self._record_color(
+ prop_value.lower(), file_path, line_num, style_content[:100]
+ )
# Check for dimensions
if PX_VALUE.search(prop_value):
self._record_spacing(prop_value, file_path, line_num, style_content[:100])
- if 'fontSize' in prop_name or 'fontWeight' in prop_name:
- self._record_font(prop_value, file_path, line_num, prop_name, style_content[:100])
+ if "fontSize" in prop_name or "fontWeight" in prop_name:
+ self._record_font(
+ prop_value, file_path, line_num, prop_name, style_content[:100]
+ )
def _record_color(self, value: str, file: str, line: int, context: str) -> None:
"""Record a color value occurrence."""
normalized = self._normalize_color(value)
- self.color_values[normalized].append(ValueOccurrence(
- value=value,
- file=file,
- line=line,
- property='color',
- context=context,
- ))
- self.values[normalized].append(ValueOccurrence(
- value=value,
- file=file,
- line=line,
- property='color',
- context=context,
- ))
+ self.color_values[normalized].append(
+ ValueOccurrence(
+ value=value,
+ file=file,
+ line=line,
+ property="color",
+ context=context,
+ )
+ )
+ self.values[normalized].append(
+ ValueOccurrence(
+ value=value,
+ file=file,
+ line=line,
+ property="color",
+ context=context,
+ )
+ )
def _record_spacing(self, value: str, file: str, line: int, context: str) -> None:
"""Record a spacing/dimension value occurrence."""
- self.spacing_values[value].append(ValueOccurrence(
- value=value,
- file=file,
- line=line,
- property='spacing',
- context=context,
- ))
- self.values[value].append(ValueOccurrence(
- value=value,
- file=file,
- line=line,
- property='spacing',
- context=context,
- ))
+ self.spacing_values[value].append(
+ ValueOccurrence(
+ value=value,
+ file=file,
+ line=line,
+ property="spacing",
+ context=context,
+ )
+ )
+ self.values[value].append(
+ ValueOccurrence(
+ value=value,
+ file=file,
+ line=line,
+ property="spacing",
+ context=context,
+ )
+ )
def _record_font(self, value: str, file: str, line: int, prop: str, context: str) -> None:
"""Record a font-related value occurrence."""
- self.font_values[value].append(ValueOccurrence(
- value=value,
- file=file,
- line=line,
- property=prop,
- context=context,
- ))
- self.values[value].append(ValueOccurrence(
- value=value,
- file=file,
- line=line,
- property=prop,
- context=context,
- ))
+ self.font_values[value].append(
+ ValueOccurrence(
+ value=value,
+ file=file,
+ line=line,
+ property=prop,
+ context=context,
+ )
+ )
+ self.values[value].append(
+ ValueOccurrence(
+ value=value,
+ file=file,
+ line=line,
+ property=prop,
+ context=context,
+ )
+ )
def _record_value(self, value: str, file: str, line: int, prop: str, context: str) -> None:
"""Record a generic value occurrence."""
- self.values[value].append(ValueOccurrence(
- value=value,
- file=file,
- line=line,
- property=prop,
- context=context,
- ))
+ self.values[value].append(
+ ValueOccurrence(
+ value=value,
+ file=file,
+ line=line,
+ property=prop,
+ context=context,
+ )
+ )
def _normalize_color(self, color: str) -> str:
"""Normalize color value for comparison."""
color = color.lower().strip()
# Expand 3-digit hex to 6-digit
- if re.match(r'^#[0-9a-f]{3}$', color):
+ if re.match(r"^#[0-9a-f]{3}$", color):
color = f"#{color[1]*2}{color[2]*2}{color[3]*2}"
return color
@@ -295,19 +307,18 @@ class StyleAnalyzer:
# Get unique files
files = list(set(o.file for o in occurrences))
- duplicates.append({
- 'value': value,
- 'count': len(occurrences),
- 'files': files[:5], # Limit to 5 files
- 'category': occurrences[0].property,
- 'locations': [
- {'file': o.file, 'line': o.line}
- for o in occurrences[:5]
- ],
- })
+ duplicates.append(
+ {
+ "value": value,
+ "count": len(occurrences),
+ "files": files[:5], # Limit to 5 files
+ "category": occurrences[0].property,
+ "locations": [{"file": o.file, "line": o.line} for o in occurrences[:5]],
+ }
+ )
# Sort by count (most duplicated first)
- duplicates.sort(key=lambda x: x['count'], reverse=True)
+ duplicates.sort(key=lambda x: x["count"], reverse=True)
return duplicates[:50] # Return top 50
@@ -319,31 +330,31 @@ class StyleAnalyzer:
for value, occurrences in self.color_values.items():
if len(occurrences) >= 2:
suggested_name = self._suggest_color_name(value)
- candidates.append(TokenCandidate(
- value=value,
- suggested_name=suggested_name,
- category='colors',
- occurrences=len(occurrences),
- locations=[
- Location(o.file, o.line) for o in occurrences[:5]
- ],
- confidence=min(0.9, 0.3 + (len(occurrences) * 0.1)),
- ))
+ candidates.append(
+ TokenCandidate(
+ value=value,
+ suggested_name=suggested_name,
+ category="colors",
+ occurrences=len(occurrences),
+ locations=[Location(o.file, o.line) for o in occurrences[:5]],
+ confidence=min(0.9, 0.3 + (len(occurrences) * 0.1)),
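+ # heuristic: 0.3 base plus 0.1 per occurrence, capped at 0.9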
+ )
+ )
# Spacing candidates
for value, occurrences in self.spacing_values.items():
if len(occurrences) >= 3: # Higher threshold for spacing
suggested_name = self._suggest_spacing_name(value)
- candidates.append(TokenCandidate(
- value=value,
- suggested_name=suggested_name,
- category='spacing',
- occurrences=len(occurrences),
- locations=[
- Location(o.file, o.line) for o in occurrences[:5]
- ],
- confidence=min(0.8, 0.2 + (len(occurrences) * 0.05)),
- ))
+ candidates.append(
+ TokenCandidate(
+ value=value,
+ suggested_name=suggested_name,
+ category="spacing",
+ occurrences=len(occurrences),
+ locations=[Location(o.file, o.line) for o in occurrences[:5]],
+ confidence=min(0.8, 0.2 + (len(occurrences) * 0.05)),
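+ # stricter heuristic for spacing: 0.2 base plus 0.05 per occurrence, capped at 0.8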
+ )
+ )
# Sort by confidence
candidates.sort(key=lambda x: x.confidence, reverse=True)
@@ -354,48 +365,48 @@ class StyleAnalyzer:
"""Suggest a token name for a color value."""
# Common color mappings
common_colors = {
- '#ffffff': 'color.white',
- '#000000': 'color.black',
- '#f3f4f6': 'color.neutral.100',
- '#e5e7eb': 'color.neutral.200',
- '#d1d5db': 'color.neutral.300',
- '#9ca3af': 'color.neutral.400',
- '#6b7280': 'color.neutral.500',
- '#4b5563': 'color.neutral.600',
- '#374151': 'color.neutral.700',
- '#1f2937': 'color.neutral.800',
- '#111827': 'color.neutral.900',
+ "#ffffff": "color.white",
+ "#000000": "color.black",
+ "#f3f4f6": "color.neutral.100",
+ "#e5e7eb": "color.neutral.200",
+ "#d1d5db": "color.neutral.300",
+ "#9ca3af": "color.neutral.400",
+ "#6b7280": "color.neutral.500",
+ "#4b5563": "color.neutral.600",
+ "#374151": "color.neutral.700",
+ "#1f2937": "color.neutral.800",
+ "#111827": "color.neutral.900",
}
if color in common_colors:
return common_colors[color]
# Detect color family by hue (simplified)
- if color.startswith('#'):
+ if color.startswith("#"):
return f"color.custom.{color[1:7]}"
- return f"color.custom.value"
+ return "color.custom.value"
def _suggest_spacing_name(self, value: str) -> str:
"""Suggest a token name for a spacing value."""
# Common spacing values
spacing_map = {
- '0px': 'spacing.0',
- '4px': 'spacing.xs',
- '8px': 'spacing.sm',
- '12px': 'spacing.md',
- '16px': 'spacing.lg',
- '20px': 'spacing.lg',
- '24px': 'spacing.xl',
- '32px': 'spacing.2xl',
- '48px': 'spacing.3xl',
- '64px': 'spacing.4xl',
- '0.25rem': 'spacing.xs',
- '0.5rem': 'spacing.sm',
- '0.75rem': 'spacing.md',
- '1rem': 'spacing.lg',
- '1.5rem': 'spacing.xl',
- '2rem': 'spacing.2xl',
+ "0px": "spacing.0",
+ "4px": "spacing.xs",
+ "8px": "spacing.sm",
+ "12px": "spacing.md",
+ "16px": "spacing.lg",
+ "20px": "spacing.lg",
+ "24px": "spacing.xl",
+ "32px": "spacing.2xl",
+ "48px": "spacing.3xl",
+ "64px": "spacing.4xl",
+ "0.25rem": "spacing.xs",
+ "0.5rem": "spacing.sm",
+ "0.75rem": "spacing.md",
+ "1rem": "spacing.lg",
+ "1.5rem": "spacing.xl",
+ "2rem": "spacing.2xl",
}
if value in spacing_map:
@@ -413,19 +424,19 @@ class StyleAnalyzer:
css_classes = set()
class_locations = {}
- skip_dirs = {'node_modules', '.git', 'dist', 'build'}
+ skip_dirs = {"node_modules", ".git", "dist", "build"}
- for pattern in ['**/*.css', '**/*.scss']:
+ for pattern in ["**/*.css", "**/*.scss"]:
for file_path in self.root.rglob(pattern):
if any(skip in file_path.parts for skip in skip_dirs):
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
rel_path = str(file_path.relative_to(self.root))
# Find class definitions
- for match in re.finditer(r'\.([a-zA-Z_][\w-]*)\s*[{,]', content):
+ for match in re.finditer(r"\.([a-zA-Z_][\w-]*)\s*[{,]", content):
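+ # class selectors at rule boundaries, e.g. ".btn {" or ".btn, .link {"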
class_name = match.group(1)
css_classes.add(class_name)
class_locations[class_name] = rel_path
@@ -436,13 +447,13 @@ class StyleAnalyzer:
# Collect all class usage in JS/JSX/TS/TSX
used_classes = set()
- for pattern in ['**/*.jsx', '**/*.tsx', '**/*.js', '**/*.ts']:
+ for pattern in ["**/*.jsx", "**/*.tsx", "**/*.js", "**/*.ts"]:
for file_path in self.root.rglob(pattern):
if any(skip in file_path.parts for skip in skip_dirs):
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
# Find className usage
for match in re.finditer(r'className\s*=\s*["\']([^"\']+)["\']', content):
@@ -450,7 +461,7 @@ class StyleAnalyzer:
used_classes.update(classes)
# Find styles.xxx usage (CSS modules)
- for match in re.finditer(r'styles\.(\w+)', content):
+ for match in re.finditer(r"styles\.(\w+)", content):
used_classes.add(match.group(1))
except Exception:
@@ -461,11 +472,13 @@ class StyleAnalyzer:
return [
{
- 'class': cls,
- 'file': class_locations.get(cls, 'unknown'),
+ "class": cls,
+ "file": class_locations.get(cls, "unknown"),
}
for cls in sorted(unused)
- ][:50] # Limit results
+ ][
+ :50
+ ] # Limit results
async def analyze_naming_consistency(self) -> Dict[str, Any]:
"""
@@ -474,44 +487,52 @@ class StyleAnalyzer:
Returns analysis of naming patterns and inconsistencies.
"""
patterns = {
- 'kebab-case': [], # my-class-name
- 'camelCase': [], # myClassName
- 'snake_case': [], # my_class_name
- 'BEM': [], # block__element--modifier
+ "kebab-case": [], # my-class-name
+ "camelCase": [], # myClassName
+ "snake_case": [], # my_class_name
+ "BEM": [], # block__element--modifier
}
- skip_dirs = {'node_modules', '.git', 'dist', 'build'}
+ skip_dirs = {"node_modules", ".git", "dist", "build"}
- for pattern in ['**/*.css', '**/*.scss']:
+ for pattern in ["**/*.css", "**/*.scss"]:
for file_path in self.root.rglob(pattern):
if any(skip in file_path.parts for skip in skip_dirs):
continue
try:
- content = file_path.read_text(encoding='utf-8', errors='ignore')
+ content = file_path.read_text(encoding="utf-8", errors="ignore")
rel_path = str(file_path.relative_to(self.root))
# Find class names
- for match in re.finditer(r'\.([a-zA-Z_][\w-]*)', content):
+ for match in re.finditer(r"\.([a-zA-Z_][\w-]*)", content):
name = match.group(1)
- line = content[:match.start()].count('\n') + 1
+ line = content[: match.start()].count("\n") + 1
# Classify naming pattern
- if '__' in name or '--' in name:
- patterns['BEM'].append({'name': name, 'file': rel_path, 'line': line})
- elif '_' in name:
- patterns['snake_case'].append({'name': name, 'file': rel_path, 'line': line})
- elif '-' in name:
- patterns['kebab-case'].append({'name': name, 'file': rel_path, 'line': line})
+ if "__" in name or "--" in name:
+ patterns["BEM"].append({"name": name, "file": rel_path, "line": line})
+ elif "_" in name:
+ patterns["snake_case"].append(
+ {"name": name, "file": rel_path, "line": line}
+ )
+ elif "-" in name:
+ patterns["kebab-case"].append(
+ {"name": name, "file": rel_path, "line": line}
+ )
elif name != name.lower():
- patterns['camelCase'].append({'name': name, 'file': rel_path, 'line': line})
+ patterns["camelCase"].append(
+ {"name": name, "file": rel_path, "line": line}
+ )
except Exception:
continue
# Calculate primary pattern
pattern_counts = {k: len(v) for k, v in patterns.items()}
- primary = max(pattern_counts, key=pattern_counts.get) if any(pattern_counts.values()) else None
+ primary = (
+ max(pattern_counts, key=pattern_counts.get) if any(pattern_counts.values()) else None
+ )
# Find inconsistencies (patterns different from primary)
inconsistencies = []
@@ -521,7 +542,7 @@ class StyleAnalyzer:
inconsistencies.extend(items[:10])
return {
- 'pattern_counts': pattern_counts,
- 'primary_pattern': primary,
- 'inconsistencies': inconsistencies[:20],
+ "pattern_counts": pattern_counts,
+ "primary_pattern": primary,
+ "inconsistencies": inconsistencies[:20],
}
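
For reference, the naming classifier above reduces to a precedence ladder of substring checks: BEM markers win, then snake_case, then kebab-case, and finally camelCase. A minimal standalone sketch (`classify_name` is an illustrative helper, not part of this codebase):

```python
def classify_name(name: str) -> str:
    """Classify a CSS class name; precedence mirrors analyze_naming_consistency."""
    if "__" in name or "--" in name:
        return "BEM"  # block__element--modifier
    if "_" in name:
        return "snake_case"  # my_class_name
    if "-" in name:
        return "kebab-case"  # my-class-name
    if name != name.lower():
        return "camelCase"  # myClassName
    return "unclassified"  # a single lowercase word matches no bucket

assert classify_name("card__title--large") == "BEM"
assert classify_name("btn-primary") == "kebab-case"
assert classify_name("listItem") == "camelCase"
```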
diff --git a/dss/auth/__init__.py b/dss/auth/__init__.py
index 79daf90..822db76 100644
--- a/dss/auth/__init__.py
+++ b/dss/auth/__init__.py
@@ -1,5 +1,5 @@
"""
-Authentication Module
+Authentication Module.
Atlassian-based authentication for DSS.
Users authenticate with their Jira/Confluence credentials.
diff --git a/dss/auth/atlassian_auth.py b/dss/auth/atlassian_auth.py
index 166c996..d2e5d08 100644
--- a/dss/auth/atlassian_auth.py
+++ b/dss/auth/atlassian_auth.py
@@ -1,18 +1,17 @@
"""
-Atlassian-based Authentication
+Atlassian-based Authentication.
Validates users by verifying their Atlassian (Jira/Confluence) credentials.
On successful login, creates a JWT token for subsequent requests.
"""
-import os
-import jwt
import hashlib
+import os
from datetime import datetime, timedelta
-from typing import Optional, Dict, Any
-from atlassian import Jira, Confluence
+from typing import Any, Dict, Optional
-from dss.storage.json_store import read_json, write_json, SYSTEM_DIR
+import jwt
+from atlassian import Confluence, Jira
class AtlassianAuth:
@@ -36,11 +35,7 @@ class AtlassianAuth:
self.jwt_expiry_hours = int(os.getenv("JWT_EXPIRY_HOURS", "24"))
async def verify_atlassian_credentials(
- self,
- url: str,
- email: str,
- api_token: str,
- service: str = "jira"
+ self, url: str, email: str, api_token: str, service: str = "jira"
) -> Dict[str, Any]:
"""
Verify Atlassian credentials by making a test API call.
@@ -59,11 +54,11 @@ class AtlassianAuth:
return {
"email": email,
"display_name": email.split("@")[0].title().replace(".", " ") + " (Mock)",
- "account_id": "mock_" + hashlib.md5(email.encode()).hexdigest()[:8],
+ "account_id": "mock_" + hashlib.sha256(email.encode()).hexdigest()[:16],
"atlassian_url": url or "https://mock.atlassian.net",
"service": service,
"verified": True,
- "mock_mode": True
+ "mock_mode": True,
}
try:
@@ -83,21 +78,17 @@ class AtlassianAuth:
"atlassian_url": url,
"service": service,
"verified": True,
- "mock_mode": False
+ "mock_mode": False,
}
except Exception as e:
raise ValueError(f"Invalid Atlassian credentials: {str(e)}")
def hash_api_token(self, api_token: str) -> str:
- """Hash API token for storage (we don't store plain tokens)"""
+ """Hash API token for storage (we don't store plain tokens)."""
return hashlib.sha256(api_token.encode()).hexdigest()
async def login(
- self,
- url: str,
- email: str,
- api_token: str,
- service: str = "jira"
+ self, url: str, email: str, api_token: str, service: str = "jira"
) -> Dict[str, Any]:
"""
Authenticate user with Atlassian credentials.
@@ -110,9 +101,7 @@ class AtlassianAuth:
}
"""
# Verify credentials against Atlassian
- user_info = await self.verify_atlassian_credentials(
- url, email, api_token, service
- )
+ user_info = await self.verify_atlassian_credentials(url, email, api_token, service)
# Hash the API token
token_hash = self.hash_api_token(api_token)
@@ -121,8 +110,7 @@ class AtlassianAuth:
with get_connection() as conn:
# Check if user exists
existing = conn.execute(
- "SELECT id, email FROM users WHERE email = ?",
- (email,)
+ "SELECT id, email FROM users WHERE email = ?", (email,)
).fetchone()
if existing:
@@ -144,8 +132,8 @@ class AtlassianAuth:
service,
token_hash,
datetime.utcnow().isoformat(),
- user_id
- )
+ user_id,
+ ),
)
else:
# Create new user
@@ -164,8 +152,8 @@ class AtlassianAuth:
service,
token_hash,
datetime.utcnow().isoformat(),
- datetime.utcnow().isoformat()
- )
+ datetime.utcnow().isoformat(),
+ ),
)
user_id = cursor.lastrowid
@@ -176,14 +164,10 @@ class AtlassianAuth:
"email": email,
"display_name": user_info["display_name"],
"exp": expires_at,
- "iat": datetime.utcnow()
+ "iat": datetime.utcnow(),
}
- jwt_token = jwt.encode(
- token_payload,
- self.jwt_secret,
- algorithm=self.jwt_algorithm
- )
+ jwt_token = jwt.encode(token_payload, self.jwt_secret, algorithm=self.jwt_algorithm)
return {
"token": jwt_token,
@@ -192,9 +176,9 @@ class AtlassianAuth:
"email": email,
"display_name": user_info["display_name"],
"atlassian_url": url,
- "service": service
+ "service": service,
},
- "expires_at": expires_at.isoformat()
+ "expires_at": expires_at.isoformat(),
}
def verify_token(self, token: str) -> Optional[Dict[str, Any]]:
@@ -205,11 +189,7 @@ class AtlassianAuth:
User dict if valid, None if invalid/expired
"""
try:
- payload = jwt.decode(
- token,
- self.jwt_secret,
- algorithms=[self.jwt_algorithm]
- )
+ payload = jwt.decode(token, self.jwt_secret, algorithms=[self.jwt_algorithm])
return payload
except jwt.ExpiredSignatureError:
return None
@@ -217,7 +197,7 @@ class AtlassianAuth:
return None
async def get_user_by_id(self, user_id: int) -> Optional[Dict[str, Any]]:
- """Get user information by ID"""
+ """Get user information by ID."""
with get_connection() as conn:
user = conn.execute(
"""
@@ -226,7 +206,7 @@ class AtlassianAuth:
FROM users
WHERE id = ?
""",
- (user_id,)
+ (user_id,),
).fetchone()
if user:
@@ -239,7 +219,7 @@ _auth_instance: Optional[AtlassianAuth] = None
def get_auth() -> AtlassianAuth:
- """Get singleton auth instance"""
+ """Get singleton auth instance."""
global _auth_instance
if _auth_instance is None:
_auth_instance = AtlassianAuth()
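
The login/verify_token pair above amounts to a standard PyJWT round trip. A hedged sketch under assumed values (`SECRET` and the claim set are illustrative; the real secret comes from the JWT_SECRET environment variable):

```python
from datetime import datetime, timedelta

import jwt  # PyJWT, as used in atlassian_auth.py

SECRET = "change-me"  # stands in for JWT_SECRET

payload = {
    "user_id": 1,
    "email": "dev@example.com",
    "exp": datetime.utcnow() + timedelta(hours=24),  # mirrors JWT_EXPIRY_HOURS
    "iat": datetime.utcnow(),
}
token = jwt.encode(payload, SECRET, algorithm="HS256")

def verify(token: str):
    # Mirrors verify_token: claims dict if valid, None on expiry or tampering
    try:
        return jwt.decode(token, SECRET, algorithms=["HS256"])
    except jwt.InvalidTokenError:  # ExpiredSignatureError is a subclass
        return None

print(verify(token)["email"])  # dev@example.com
```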
diff --git a/dss/core_tokens/components.json b/dss/core_tokens/components.json
index 64ea692..5b7c531 100644
--- a/dss/core_tokens/components.json
+++ b/dss/core_tokens/components.json
@@ -286,4 +286,4 @@
"source": "dss-core"
}
}
-}
\ No newline at end of file
+}
diff --git a/dss/core_tokens/manifest.json b/dss/core_tokens/manifest.json
index a3e375b..e0f20b1 100644
--- a/dss/core_tokens/manifest.json
+++ b/dss/core_tokens/manifest.json
@@ -15,4 +15,4 @@
"effects": 10,
"variables": 530
}
-}
\ No newline at end of file
+}
diff --git a/dss/core_tokens/themes.json b/dss/core_tokens/themes.json
index f16d5b4..6aec2f2 100644
--- a/dss/core_tokens/themes.json
+++ b/dss/core_tokens/themes.json
@@ -54,4 +54,4 @@
"source": "dss-defaults"
}
}
-}
\ No newline at end of file
+}
diff --git a/dss/export_import/__init__.py b/dss/export_import/__init__.py
index 3b22473..24e9f7d 100644
--- a/dss/export_import/__init__.py
+++ b/dss/export_import/__init__.py
@@ -1,4 +1,4 @@
-"""DSS Export/Import System - Complete project archival and restoration
+"""DSS Export/Import System - Complete project archival and restoration.
This module provides comprehensive export/import capabilities for DSS projects:
@@ -63,42 +63,19 @@ Example Usage:
)
"""
-from .exporter import (
- DSSArchiveExporter,
- DSSArchiveManifest,
- ArchiveWriter,
-)
-from .importer import (
- DSSArchiveImporter,
- ArchiveValidator,
- ImportAnalysis,
- ImportValidationError,
-)
-from .merger import (
- SmartMerger,
- ConflictResolutionMode,
- ConflictItem,
- MergeAnalysis,
- UUIDHashMap,
-)
-from .migrations import (
- MigrationManager,
- SchemaMigration,
-)
-from .service import (
- DSSProjectService,
- ExportSummary,
- ImportSummary,
- MergeSummary,
-)
+from .exporter import ArchiveWriter, DSSArchiveExporter, DSSArchiveManifest
+from .importer import ArchiveValidator, DSSArchiveImporter, ImportAnalysis, ImportValidationError
+from .merger import ConflictItem, ConflictResolutionMode, MergeAnalysis, SmartMerger, UUIDHashMap
+from .migrations import MigrationManager, SchemaMigration
from .security import (
- ZipSlipValidator,
+ ArchiveIntegrity,
+ DatabaseLockingStrategy,
MemoryLimitManager,
StreamingJsonLoader,
TimestampConflictResolver,
- DatabaseLockingStrategy,
- ArchiveIntegrity,
+ ZipSlipValidator,
)
+from .service import DSSProjectService, ExportSummary, ImportSummary, MergeSummary
__version__ = "1.0.1"
__all__ = [
diff --git a/dss/export_import/examples.py b/dss/export_import/examples.py
index 0a0c167..a8f7076 100644
--- a/dss/export_import/examples.py
+++ b/dss/export_import/examples.py
@@ -1,27 +1,20 @@
"""
-Example usage of DSS Export/Import System
+Example usage of DSS Export/Import System.
Run with: python -m dss.export_import.examples
"""
-from pathlib import Path
from datetime import datetime
-from uuid import uuid4
+from pathlib import Path
-from ..models.project import Project, ProjectMetadata
-from ..models.theme import Theme, DesignToken, TokenCategory
from ..models.component import Component
-
-from . import (
- DSSArchiveExporter,
- DSSArchiveImporter,
- SmartMerger,
- ConflictResolutionMode,
-)
+from ..models.project import Project, ProjectMetadata
+from ..models.theme import DesignToken, Theme, TokenCategory
+from . import ConflictResolutionMode, DSSArchiveExporter, DSSArchiveImporter, SmartMerger
def create_sample_project(name="Sample Design System") -> Project:
- """Create a sample project for testing"""
+ """Create a sample project for testing."""
# Create tokens
tokens = {
@@ -85,7 +78,7 @@ def create_sample_project(name="Sample Design System") -> Project:
def example_1_basic_export():
- """Example 1: Basic export to .dss file"""
+ """Example 1: Basic export to .dss file."""
print("\n" + "=" * 70)
print("EXAMPLE 1: Basic Export")
print("=" * 70)
@@ -109,7 +102,7 @@ def example_1_basic_export():
def example_2_archive_analysis():
- """Example 2: Analyze archive before importing"""
+ """Example 2: Analyze archive before importing."""
print("\n" + "=" * 70)
print("EXAMPLE 2: Archive Analysis")
print("=" * 70)
@@ -124,7 +117,7 @@ def example_2_archive_analysis():
importer = DSSArchiveImporter(output_path)
analysis = importer.analyze()
- print(f"✓ Archive analysis complete")
+ print("✓ Archive analysis complete")
print(f" - Valid: {analysis.is_valid}")
print(f" - Project: {analysis.project_name}")
print(f" - Schema: {analysis.schema_version}")
@@ -133,15 +126,15 @@ def example_2_archive_analysis():
print(f" - Migration needed: {analysis.migration_needed}")
if analysis.errors:
- print(f"\n Errors:")
+ print("\n Errors:")
for error in analysis.errors:
print(f" - [{error.stage}] {error.message}")
else:
- print(f"\n ✓ No validation errors")
+ print("\n ✓ No validation errors")
def example_3_replace_import():
- """Example 3: Import with REPLACE strategy"""
+ """Example 3: Import with REPLACE strategy."""
print("\n" + "=" * 70)
print("EXAMPLE 3: REPLACE Import (Full Restoration)")
print("=" * 70)
@@ -152,7 +145,7 @@ def example_3_replace_import():
exporter = DSSArchiveExporter(original)
exporter.export_to_file(output_path)
- print(f"✓ Original project exported")
+ print("✓ Original project exported")
print(f" - Tokens: {len(original.theme.tokens)}")
print(f" - Components: {len(original.components)}")
@@ -160,7 +153,7 @@ def example_3_replace_import():
importer = DSSArchiveImporter(output_path)
imported = importer.import_replace()
- print(f"\n✓ Project imported (REPLACE strategy)")
+ print("\n✓ Project imported (REPLACE strategy)")
print(f" - Name: {imported.name}")
print(f" - UUID: {imported.uuid}")
print(f" - Tokens: {len(imported.theme.tokens)}")
@@ -170,11 +163,11 @@ def example_3_replace_import():
assert imported.name == original.name
assert len(imported.theme.tokens) == len(original.theme.tokens)
assert len(imported.components) == len(original.components)
- print(f"\n✓ Round-trip verification successful")
+ print("\n✓ Round-trip verification successful")
def example_4_merge_analysis():
- """Example 4: Analyze merge without modifying"""
+ """Example 4: Analyze merge without modifying."""
print("\n" + "=" * 70)
print("EXAMPLE 4: Merge Analysis")
print("=" * 70)
@@ -208,7 +201,7 @@ def example_4_merge_analysis():
merger = SmartMerger(local, imported_proj)
analysis = merger.analyze_merge()
- print(f"\n✓ Merge analysis complete")
+ print("\n✓ Merge analysis complete")
print(f" - New tokens: {len(analysis.new_items['tokens'])}")
print(f" - Updated tokens: {len(analysis.updated_items['tokens'])}")
print(f" - Updated components: {len(analysis.updated_items['components'])}")
@@ -217,7 +210,7 @@ def example_4_merge_analysis():
def example_5_merge_with_strategy():
- """Example 5: Perform merge with conflict strategy"""
+ """Example 5: Perform merge with conflict strategy."""
print("\n" + "=" * 70)
print("EXAMPLE 5: Merge with Strategy")
print("=" * 70)
@@ -243,20 +236,20 @@ def example_5_merge_with_strategy():
merger = SmartMerger(local, imported)
merged = merger.merge_with_strategy(ConflictResolutionMode.OVERWRITE)
- print(f"✓ Merge complete (OVERWRITE strategy)")
+ print("✓ Merge complete (OVERWRITE strategy)")
print(f" - Tokens: {len(merged.theme.tokens)}")
print(f" - primary token value: {merged.theme.tokens['primary'].value}")
- print(f" - (Should be remote: #00FF00)")
+ print(" - (Should be remote: #00FF00)")
# Merge with KEEP_LOCAL
merged2 = merger.merge_with_strategy(ConflictResolutionMode.KEEP_LOCAL)
- print(f"\n✓ Merge complete (KEEP_LOCAL strategy)")
+ print("\n✓ Merge complete (KEEP_LOCAL strategy)")
print(f" - primary token value: {merged2.theme.tokens['primary'].value}")
- print(f" - (Should be local: #FF0000)")
+ print(" - (Should be local: #FF0000)")
def example_6_schema_migration():
- """Example 6: Automatic schema migration"""
+ """Example 6: Automatic schema migration."""
print("\n" + "=" * 70)
print("EXAMPLE 6: Schema Migration")
print("=" * 70)
@@ -302,7 +295,7 @@ def example_6_schema_migration():
def main():
- """Run all examples"""
+ """Run all examples."""
print("\n" + "█" * 70)
print("█ DSS Export/Import System - Usage Examples")
print("█" * 70)
diff --git a/dss/export_import/exporter.py b/dss/export_import/exporter.py
index 91d5ade..438eb29 100644
--- a/dss/export_import/exporter.py
+++ b/dss/export_import/exporter.py
@@ -1,18 +1,18 @@
-"""DSS Archive Exporter - Creates .dss files for project export/import"""
+"""DSS Archive Exporter - Creates .dss files for project export/import."""
import json
import zipfile
from datetime import datetime
from pathlib import Path
-from typing import Dict, Any, Optional, List
-from .migrations import MigrationManager
-from ..models.project import Project
-from ..models.theme import Theme, DesignToken
+from typing import Any, Dict, Optional
+
from ..models.component import Component
+from ..models.project import Project
+from ..models.theme import DesignToken
class DSSArchiveManifest:
- """Manifest for .dss archive"""
+ """Manifest for .dss archive."""
SCHEMA_VERSION = "1.0.1"
@@ -41,7 +41,7 @@ class DSSArchiveManifest:
}
def to_dict(self) -> Dict[str, Any]:
- """Serialize manifest to dict"""
+ """Serialize manifest to dict."""
return {
"dssVersion": self.dss_version,
"schemaVersion": self.schema_version,
@@ -56,12 +56,12 @@ class DSSArchiveManifest:
}
def to_json(self) -> str:
- """Serialize manifest to JSON"""
+ """Serialize manifest to JSON."""
return json.dumps(self.to_dict(), indent=2)
class DSSArchiveExporter:
- """Exports DSS projects to .dss archive format"""
+ """Exports DSS projects to .dss archive format."""
def __init__(self, project: Project):
self.project = project
@@ -75,7 +75,7 @@ class DSSArchiveExporter:
def export_to_file(self, output_path: Path) -> Path:
"""
- Export project to .dss file
+ Export project to .dss file.
Args:
output_path: Path where to save the .dss archive
@@ -125,7 +125,7 @@ class DSSArchiveExporter:
return output_path
def _export_tokens(self) -> Dict[str, Any]:
- """Export tokens from theme"""
+ """Export tokens from theme."""
if not self.project.theme or not self.project.theme.tokens:
return {}
@@ -136,7 +136,7 @@ class DSSArchiveExporter:
return {"tokens": tokens_dict}
def _export_themes(self) -> Dict[str, Any]:
- """Export theme definition"""
+ """Export theme definition."""
if not self.project.theme:
return {}
@@ -154,7 +154,7 @@ class DSSArchiveExporter:
}
def _export_components(self) -> Dict[str, Any]:
- """Export all components"""
+ """Export all components."""
if not self.project.components:
return {}
@@ -165,7 +165,7 @@ class DSSArchiveExporter:
return {"components": components_list}
def _export_config(self) -> Dict[str, Any]:
- """Export project configuration"""
+ """Export project configuration."""
return {
"project": {
"id": self.project.id,
@@ -182,7 +182,7 @@ class DSSArchiveExporter:
}
def _serialize_token(self, token: DesignToken) -> Dict[str, Any]:
- """Serialize token to export format"""
+ """Serialize token to export format."""
return {
"uuid": token.uuid,
"$value": token.value,
@@ -196,7 +196,7 @@ class DSSArchiveExporter:
}
def _serialize_component(self, component: Component) -> Dict[str, Any]:
- """Serialize component to export format"""
+ """Serialize component to export format."""
return {
"uuid": component.uuid,
"name": component.name,
@@ -209,12 +209,12 @@ class DSSArchiveExporter:
class ArchiveWriter:
- """Low-level archive writing utilities"""
+ """Low-level archive writing utilities."""
@staticmethod
def create_archive(output_path: Path, files: Dict[str, str]) -> Path:
"""
- Create a zip archive with given files
+ Create a zip archive with given files.
Args:
output_path: Path for output .dss file
diff --git a/dss/export_import/importer.py b/dss/export_import/importer.py
index ef01d6d..32bee7a 100644
--- a/dss/export_import/importer.py
+++ b/dss/export_import/importer.py
@@ -1,26 +1,22 @@
-"""DSS Archive Importer - Loads .dss files and restores project state"""
+"""DSS Archive Importer - Loads .dss files and restores project state."""
import json
import zipfile
+from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
-from typing import Dict, Any, List, Optional, Tuple
-from dataclasses import dataclass
-from .migrations import MigrationManager
-from .security import (
- ZipSlipValidator,
- MemoryLimitManager,
- StreamingJsonLoader,
- ArchiveIntegrity,
-)
-from ..models.project import Project, ProjectMetadata
-from ..models.theme import Theme, DesignToken, TokenCategory
+from typing import Any, Dict, List, Optional
+
from ..models.component import Component
+from ..models.project import Project, ProjectMetadata
+from ..models.theme import DesignToken, Theme, TokenCategory
+from .migrations import MigrationManager
+from .security import ArchiveIntegrity, MemoryLimitManager, StreamingJsonLoader, ZipSlipValidator
@dataclass
class ImportValidationError:
- """Validation error details"""
+ """Validation error details."""
stage: str # archive, manifest, schema, structure, referential
message: str
@@ -29,7 +25,7 @@ class ImportValidationError:
@dataclass
class ImportAnalysis:
- """Analysis of archive before import"""
+ """Analysis of archive before import."""
is_valid: bool
errors: List[ImportValidationError]
@@ -42,11 +38,11 @@ class ImportAnalysis:
class ArchiveValidator:
- """Validates .dss archive integrity"""
+ """Validates .dss archive integrity."""
@staticmethod
def validate_archive_structure(archive: zipfile.ZipFile) -> Optional[ImportValidationError]:
- """Validate basic archive structure and security (Zip Slip protection)"""
+ """Validate basic archive structure and security (Zip Slip protection)."""
required_files = ["manifest.json"]
archive_files = archive.namelist()
@@ -69,7 +65,7 @@ class ArchiveValidator:
@staticmethod
def validate_manifest(manifest: Dict[str, Any]) -> Optional[ImportValidationError]:
- """Validate manifest.json structure and integrity"""
+ """Validate manifest.json structure and integrity."""
required_fields = ["schemaVersion", "projectName", "projectUuid"]
for field in required_fields:
if field not in manifest:
@@ -100,7 +96,7 @@ class ArchiveValidator:
def validate_schema_version(
archive_version: str, current_version: str
) -> Optional[ImportValidationError]:
- """Check if schema version can be migrated"""
+ """Check if schema version can be migrated."""
if archive_version > current_version:
return ImportValidationError(
stage="schema",
@@ -113,7 +109,7 @@ class ArchiveValidator:
def validate_referential_integrity(
data: Dict[str, Any], local_uuids: Optional[Dict[str, set]] = None
) -> List[ImportValidationError]:
- """Validate all UUID references are resolvable"""
+ """Validate all UUID references are resolvable."""
errors = []
local_uuids = local_uuids or {"tokens": set(), "components": set()}
@@ -140,7 +136,7 @@ class ArchiveValidator:
class DSSArchiveImporter:
- """Imports .dss archives into DSS"""
+ """Imports .dss archives into DSS."""
def __init__(self, archive_path: Path):
self.archive_path = Path(archive_path)
@@ -149,7 +145,7 @@ class DSSArchiveImporter:
self.data: Optional[Dict[str, Any]] = None
def analyze(self) -> ImportAnalysis:
- """Analyze archive without importing"""
+ """Analyze archive without importing."""
errors = []
warnings = []
@@ -183,7 +179,9 @@ class DSSArchiveImporter:
# Check schema version
schema_version = self.manifest.get("schemaVersion", "1.0.0")
current_version = MigrationManager.get_latest_version()
- version_err = ArchiveValidator.validate_schema_version(schema_version, current_version)
+ version_err = ArchiveValidator.validate_schema_version(
+ schema_version, current_version
+ )
if version_err:
errors.append(version_err)
@@ -259,7 +257,7 @@ class DSSArchiveImporter:
def import_replace(self) -> Project:
"""
- Import with REPLACE strategy - replaces all project data
+ Import with REPLACE strategy - replaces all project data.
Returns:
Restored Project object
@@ -300,11 +298,15 @@ class DSSArchiveImporter:
return project
def _build_theme(self) -> Theme:
- """Build Theme from archive data"""
+ """Build Theme from archive data."""
tokens_data = self.data.get("tokens", {})
themes_raw = self.data.get("themes", {})
# Handle both dict and list cases
- themes_data = themes_raw.get("themes", []) if isinstance(themes_raw, dict) else (themes_raw if isinstance(themes_raw, list) else [])
+ themes_data = (
+ themes_raw.get("themes", [])
+ if isinstance(themes_raw, dict)
+ else (themes_raw if isinstance(themes_raw, list) else [])
+ )
# Build tokens dict
tokens_dict = {}
@@ -322,10 +324,14 @@ class DSSArchiveImporter:
)
def _build_components(self) -> List[Component]:
- """Build components from archive data"""
+ """Build components from archive data."""
components_raw = self.data.get("components", {})
# Handle both dict and list cases
- components_data = components_raw.get("components", []) if isinstance(components_raw, dict) else (components_raw if isinstance(components_raw, list) else [])
+ components_data = (
+ components_raw.get("components", [])
+ if isinstance(components_raw, dict)
+ else (components_raw if isinstance(components_raw, list) else [])
+ )
components = []
for comp_data in components_data:
@@ -335,7 +341,7 @@ class DSSArchiveImporter:
@staticmethod
def _deserialize_token(token_data: Dict[str, Any]) -> DesignToken:
- """Deserialize token from archive format"""
+ """Deserialize token from archive format."""
return DesignToken(
uuid=token_data.get("uuid"),
name=token_data.get("name", ""),
@@ -351,7 +357,7 @@ class DSSArchiveImporter:
@staticmethod
def _deserialize_component(comp_data: Dict[str, Any]) -> Component:
- """Deserialize component from archive format"""
+ """Deserialize component from archive format."""
return Component(
uuid=comp_data.get("uuid"),
name=comp_data.get("name", ""),
@@ -364,7 +370,7 @@ class DSSArchiveImporter:
def _is_valid_version(version: str) -> bool:
- """Check if version string matches semantic versioning"""
+ """Check if version string matches semantic versioning."""
parts = version.split(".")
if len(parts) != 3:
return False
@@ -372,7 +378,7 @@ def _is_valid_version(version: str) -> bool:
def _parse_datetime(dt_str: Optional[str]) -> datetime:
- """Parse ISO datetime string"""
+ """Parse ISO datetime string."""
if not dt_str:
return datetime.utcnow()
try:
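
The module-level helpers at the end of importer.py are small enough to restate whole. A sketch of the same checks (the try body of `_parse_datetime` is truncated in this hunk, so `fromisoformat` is an assumption):

```python
from datetime import datetime

def is_valid_version(version: str) -> bool:
    # Three dot-separated numeric parts, e.g. "1.0.1"
    parts = version.split(".")
    return len(parts) == 3 and all(p.isdigit() for p in parts)

def parse_datetime(dt_str):
    # Fall back to "now" when the archive omits or mangles a timestamp
    if not dt_str:
        return datetime.utcnow()
    try:
        return datetime.fromisoformat(dt_str)
    except ValueError:
        return datetime.utcnow()

assert is_valid_version("1.0.1") and not is_valid_version("1.0")
```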
diff --git a/dss/export_import/merger.py b/dss/export_import/merger.py
index 74d33a1..4425180 100644
--- a/dss/export_import/merger.py
+++ b/dss/export_import/merger.py
@@ -1,23 +1,21 @@
-"""Smart merge strategy for .dss imports with conflict detection"""
+"""Smart merge strategy for .dss imports with conflict detection."""
import hashlib
-from datetime import datetime
-from typing import Dict, Any, List, Optional, Tuple, Literal
from dataclasses import dataclass
+from datetime import datetime
from enum import Enum
+from typing import Dict, List, Literal, Optional, Tuple
-from .security import TimestampConflictResolver
+from ..models.component import Component
from ..models.project import Project
from ..models.theme import DesignToken
-from ..models.component import Component
-from dss.storage.json_store import Projects, Components, Tokens
-
+from .security import TimestampConflictResolver
MergeStrategy = Literal["overwrite", "keep_local", "fork", "skip"]
class ConflictResolutionMode(str, Enum):
- """How to handle conflicts during merge"""
+ """How to handle conflicts during merge."""
OVERWRITE = "overwrite" # Import wins
KEEP_LOCAL = "keep_local" # Local wins
@@ -27,7 +25,7 @@ class ConflictResolutionMode(str, Enum):
@dataclass
class ConflictItem:
- """Detected conflict"""
+ """Detected conflict."""
uuid: str
entity_type: str # token, component, theme
@@ -53,7 +51,9 @@ class ConflictItem:
"""Are both versions identical?"""
return self.local_hash == self.imported_hash
- def get_safe_recommendation(self, allow_drift_detection: bool = True) -> Tuple[str, Optional[str]]:
+ def get_safe_recommendation(
+ self, allow_drift_detection: bool = True
+ ) -> Tuple[str, Optional[str]]:
"""Get safe conflict resolution recommendation with clock skew detection.
Uses TimestampConflictResolver to safely determine winner, accounting
@@ -75,7 +75,7 @@ class ConflictItem:
@dataclass
class MergeAnalysis:
- """Analysis of merge operation"""
+ """Analysis of merge operation."""
new_items: Dict[str, List[str]] # type -> [names]
updated_items: Dict[str, List[str]] # type -> [names]
@@ -89,20 +89,20 @@ class MergeAnalysis:
class UUIDHashMap:
- """Maps UUIDs to content hashes for detecting changes"""
+ """Maps UUIDs to content hashes for detecting changes."""
def __init__(self):
self.hashes: Dict[str, str] = {}
@staticmethod
def hash_token(token: DesignToken) -> str:
- """Generate stable hash of token content (excludes UUID, timestamps)"""
+ """Generate stable hash of token content (excludes UUID, timestamps)."""
content = f"{token.name}:{token.value}:{token.type}:{token.category}:{token.description}:{token.source}:{token.deprecated}"
return hashlib.sha256(content.encode()).hexdigest()
@staticmethod
def hash_component(component: Component) -> str:
- """Generate stable hash of component content"""
+ """Generate stable hash of component content."""
import json
content = json.dumps(
@@ -119,20 +119,20 @@ class UUIDHashMap:
return hashlib.sha256(content.encode()).hexdigest()
def add_token(self, token: DesignToken):
- """Add token to hash map"""
+ """Add token to hash map."""
self.hashes[token.uuid] = self.hash_token(token)
def add_component(self, component: Component):
- """Add component to hash map"""
+ """Add component to hash map."""
self.hashes[component.uuid] = self.hash_component(component)
def get(self, uuid: str) -> Optional[str]:
- """Get hash for UUID"""
+ """Get hash for UUID."""
return self.hashes.get(uuid)
class SmartMerger:
- """Intelligent merge strategy for archives"""
+ """Intelligent merge strategy for archives."""
def __init__(self, local_project: Project, imported_project: Project):
self.local_project = local_project
@@ -140,7 +140,7 @@ class SmartMerger:
def analyze_merge(self) -> MergeAnalysis:
"""
- Analyze what would happen in a merge without modifying anything
+ Analyze what would happen in a merge without modifying anything.
Returns:
MergeAnalysis with new, updated, and conflicted items
@@ -216,7 +216,7 @@ class SmartMerger:
conflict_handler: ConflictResolutionMode = ConflictResolutionMode.OVERWRITE,
) -> Project:
"""
- Perform merge with specified conflict strategy
+ Perform merge with specified conflict strategy.
Args:
conflict_handler: How to handle conflicts
@@ -269,7 +269,7 @@ class SmartMerger:
local_token: DesignToken,
imported_token: DesignToken,
) -> Optional[ConflictItem]:
- """Check if token versions conflict"""
+ """Check if token versions conflict."""
local_hash = UUIDHashMap.hash_token(local_token)
imported_hash = UUIDHashMap.hash_token(imported_token)
@@ -295,7 +295,7 @@ class SmartMerger:
local_comp: Component,
imported_comp: Component,
) -> Optional[ConflictItem]:
- """Check if component versions conflict"""
+ """Check if component versions conflict."""
local_hash = UUIDHashMap.hash_component(local_comp)
imported_hash = UUIDHashMap.hash_component(imported_comp)
@@ -308,8 +308,12 @@ class SmartMerger:
uuid=comp_uuid,
entity_type="component",
entity_name=local_comp.name,
- local_updated_at=local_comp.updated_at if hasattr(local_comp, 'updated_at') else datetime.utcnow(),
- imported_updated_at=imported_comp.updated_at if hasattr(imported_comp, 'updated_at') else datetime.utcnow(),
+ local_updated_at=local_comp.updated_at
+ if hasattr(local_comp, "updated_at")
+ else datetime.utcnow(),
+ imported_updated_at=imported_comp.updated_at
+ if hasattr(imported_comp, "updated_at")
+ else datetime.utcnow(),
local_hash=local_hash,
imported_hash=imported_hash,
is_modified_both=True,
@@ -321,7 +325,7 @@ class SmartMerger:
conflict: ConflictItem,
strategy: ConflictResolutionMode,
):
- """Apply conflict resolution strategy"""
+ """Apply conflict resolution strategy."""
if strategy == ConflictResolutionMode.OVERWRITE:
# Import wins - already applied
pass
@@ -330,7 +334,11 @@ class SmartMerger:
if conflict.entity_type == "token":
# Find and restore local token
local_token = next(
- (t for t in self.local_project.theme.tokens.values() if t.uuid == conflict.uuid),
+ (
+ t
+ for t in self.local_project.theme.tokens.values()
+ if t.uuid == conflict.uuid
+ ),
None,
)
if local_token:
@@ -353,7 +361,11 @@ class SmartMerger:
if conflict.entity_type == "token":
imported_token = next(
- (t for t in self.imported_project.theme.tokens.values() if t.uuid == conflict.uuid),
+ (
+ t
+ for t in self.imported_project.theme.tokens.values()
+ if t.uuid == conflict.uuid
+ ),
None,
)
if imported_token:
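
The conflict detection above hinges on hashing only content fields, never UUIDs or timestamps, so a re-exported but unmodified item never looks conflicted. A standalone sketch with the same field list as `hash_token`:

```python
import hashlib

def hash_token_fields(name, value, type_, category, description, source, deprecated):
    # Stable content hash; identical substance yields identical digests
    content = f"{name}:{value}:{type_}:{category}:{description}:{source}:{deprecated}"
    return hashlib.sha256(content.encode()).hexdigest()

local = hash_token_fields("primary", "#FF0000", "color", "colors", "", "local", False)
remote = hash_token_fields("primary", "#00FF00", "color", "colors", "", "import", False)
print(local != remote)  # True: both sides changed, so a ConflictItem is recorded
```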
diff --git a/dss/export_import/migrations.py b/dss/export_import/migrations.py
index 18a64f7..4abf454 100644
--- a/dss/export_import/migrations.py
+++ b/dss/export_import/migrations.py
@@ -1,86 +1,85 @@
-"""Schema migration system for .dss archive compatibility"""
+"""Schema migration system for .dss archive compatibility."""
-from typing import Dict, Any, List, Callable
-import json
+from typing import Any, Dict
class SchemaMigration:
- """Base class for schema migrations"""
+ """Base class for schema migrations."""
source_version: str = "1.0.0"
target_version: str = "1.0.1"
def up(self, data: Dict[str, Any]) -> Dict[str, Any]:
- """Migrate data from source to target version"""
+ """Migrate data from source to target version."""
raise NotImplementedError
def down(self, data: Dict[str, Any]) -> Dict[str, Any]:
- """Rollback migration"""
+ """Rollback migration."""
raise NotImplementedError
class MigrationV1_0_0_to_V1_0_1(SchemaMigration):
- """Initial migration: add UUID support to all entities"""
+ """Initial migration: add UUID support to all entities."""
source_version = "1.0.0"
target_version = "1.0.1"
def up(self, data: Dict[str, Any]) -> Dict[str, Any]:
- """Add UUID fields if missing"""
+ """Add UUID fields if missing."""
from uuid import uuid4
# Ensure all entities have UUIDs (backward compat)
- if 'project' in data:
- if 'uuid' not in data['project']:
- data['project']['uuid'] = str(uuid4())
+ if "project" in data:
+ if "uuid" not in data["project"]:
+ data["project"]["uuid"] = str(uuid4())
- if 'tokens' in data:
- for token_name, token in data['tokens'].items():
- if isinstance(token, dict) and 'uuid' not in token:
- token['uuid'] = str(uuid4())
+ if "tokens" in data:
+ for token_name, token in data["tokens"].items():
+ if isinstance(token, dict) and "uuid" not in token:
+ token["uuid"] = str(uuid4())
- if 'components' in data:
- for comp in data['components']:
- if 'uuid' not in comp:
- comp['uuid'] = str(uuid4())
- if 'variants' in comp:
- for variant in comp['variants']:
- if 'uuid' not in variant:
- variant['uuid'] = str(uuid4())
+ if "components" in data:
+ for comp in data["components"]:
+ if "uuid" not in comp:
+ comp["uuid"] = str(uuid4())
+ if "variants" in comp:
+ for variant in comp["variants"]:
+ if "uuid" not in variant:
+ variant["uuid"] = str(uuid4())
- if 'themes' in data:
- for theme in data['themes']:
- if 'uuid' not in theme:
- theme['uuid'] = str(uuid4())
+ if "themes" in data:
+ for theme in data["themes"]:
+ if "uuid" not in theme:
+ theme["uuid"] = str(uuid4())
return data
def down(self, data: Dict[str, Any]) -> Dict[str, Any]:
- """Remove UUID fields (rollback)"""
- if 'project' in data:
- data['project'].pop('uuid', None)
+ """Remove UUID fields (rollback)."""
+ if "project" in data:
+ data["project"].pop("uuid", None)
- if 'tokens' in data:
- for token in data['tokens'].values():
+ if "tokens" in data:
+ for token in data["tokens"].values():
if isinstance(token, dict):
- token.pop('uuid', None)
+ token.pop("uuid", None)
- if 'components' in data:
- for comp in data['components']:
- comp.pop('uuid', None)
- if 'variants' in comp:
- for variant in comp['variants']:
- variant.pop('uuid', None)
+ if "components" in data:
+ for comp in data["components"]:
+ comp.pop("uuid", None)
+ if "variants" in comp:
+ for variant in comp["variants"]:
+ variant.pop("uuid", None)
- if 'themes' in data:
- for theme in data['themes']:
- theme.pop('uuid', None)
+ if "themes" in data:
+ for theme in data["themes"]:
+ theme.pop("uuid", None)
return data
class MigrationManager:
- """Manages schema migrations for .dss archives"""
+ """Manages schema migrations for .dss archives."""
# Map of version pairs to migration classes
MIGRATIONS: Dict[tuple, type] = {
@@ -140,9 +139,9 @@ class MigrationManager:
@classmethod
def get_latest_version(cls) -> str:
- """Get latest schema version"""
+ """Get latest schema version."""
return cls.VERSIONS[-1]
# Export
-__all__ = ['MigrationManager', 'SchemaMigration']
+__all__ = ["MigrationManager", "SchemaMigration"]
diff --git a/dss/export_import/security.py b/dss/export_import/security.py
index 74ad4d3..85f97a4 100644
--- a/dss/export_import/security.py
+++ b/dss/export_import/security.py
@@ -8,12 +8,11 @@ Addresses:
4. Timestamp-based conflict resolution safeguards
"""
-import json
-import os
-from pathlib import Path
-from typing import Dict, Any, Iterator, Optional
import hashlib
+import json
from datetime import datetime, timedelta
+from pathlib import Path
+from typing import Any, Dict, Optional
class ZipSlipValidator:
@@ -233,7 +232,9 @@ class TimestampConflictResolver:
# Check for clock drift
warning = None
if allow_drift_detection and time_diff > self.DEFAULT_DRIFT_WARNING_THRESHOLD:
- warning = f"Large timestamp gap ({time_diff.total_seconds()}s) detected. Clock skew possible?"
+ warning = (
+ f"Large timestamp gap ({time_diff.total_seconds()}s) detected. Clock skew possible?"
+ )
# Within tolerance threshold - cannot determine winner
if time_diff <= self.clock_skew_tolerance:
@@ -246,9 +247,7 @@ class TimestampConflictResolver:
return "local", warning
@staticmethod
- def compute_logical_version(
- previous_version: int, is_modified: bool
- ) -> int:
+ def compute_logical_version(previous_version: int, is_modified: bool) -> int:
"""Compute next logical version (Lamport timestamp style).
Recommended: Use this instead of wall-clock timestamps for
@@ -321,9 +320,7 @@ class ArchiveIntegrity:
"""
@staticmethod
-    def compute_manifest_hash(
-        manifest: Dict[str, Any], exclude_fields: list[str] = None
-    ) -> str:
+    def compute_manifest_hash(
+        manifest: Dict[str, Any], exclude_fields: Optional[list[str]] = None
+    ) -> str:
"""Compute hash of manifest for integrity verification.
Args:
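
ZipSlipValidator's body is not shown in this hunk; the check its name implies is that every archive member must resolve inside the extraction root. An illustrative version (an assumption, not the class's actual API):

```python
from pathlib import Path

def is_safe_member(extract_root: Path, member_name: str) -> bool:
    # A hostile entry like "../../etc/passwd" resolves outside the root
    target = (extract_root / member_name).resolve()
    root = extract_root.resolve()
    return target == root or root in target.parents

print(is_safe_member(Path("/tmp/out"), "tokens/colors.json"))  # True
print(is_safe_member(Path("/tmp/out"), "../../etc/passwd"))    # False
```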
diff --git a/dss/export_import/service.py b/dss/export_import/service.py
index edccfde..ee02191 100644
--- a/dss/export_import/service.py
+++ b/dss/export_import/service.py
@@ -9,23 +9,22 @@ This service provides:
5. SQLite configuration management
"""
-from pathlib import Path
-from typing import Optional, Dict, Any, BinaryIO
+from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime
-from contextlib import contextmanager
+from pathlib import Path
+from typing import Dict, Optional
+from ..models.project import Project
from .exporter import DSSArchiveExporter
from .importer import DSSArchiveImporter, ImportAnalysis
-from .merger import SmartMerger, ConflictResolutionMode, MergeAnalysis
+from .merger import ConflictResolutionMode, MergeAnalysis, SmartMerger
from .security import DatabaseLockingStrategy, MemoryLimitManager
-from ..models.project import Project
-from dss.storage.json_store import Projects, ActivityLog
@dataclass
class ExportSummary:
- """Result of an export operation"""
+ """Result of an export operation."""
success: bool
archive_path: Optional[Path] = None
@@ -37,7 +36,7 @@ class ExportSummary:
@dataclass
class ImportSummary:
- """Result of an import operation"""
+ """Result of an import operation."""
success: bool
project_name: Optional[str] = None
@@ -51,7 +50,7 @@ class ImportSummary:
@dataclass
class MergeSummary:
- """Result of a merge operation"""
+ """Result of a merge operation."""
success: bool
new_items_count: Optional[int] = None
diff --git a/dss/figma/figma_tools.py b/dss/figma/figma_tools.py
index 9de233b..2f363d6 100644
--- a/dss/figma/figma_tools.py
+++ b/dss/figma/figma_tools.py
@@ -1,5 +1,5 @@
"""
-DSS Figma Integration
+DSS Figma Integration.
Extracts design system data from Figma:
- Tokens (colors, spacing, typography)
@@ -16,17 +16,18 @@ Tools:
7. figma_generate_code - Generate component code
"""
-import json
-import hashlib
import asyncio
-from datetime import datetime
-from typing import Optional, Dict, List, Any
-from dataclasses import dataclass, asdict
+import hashlib
+import json
+from dataclasses import asdict, dataclass
from pathlib import Path
+from typing import Any, Dict, List, Optional
+
import httpx
from dss.settings import settings
-from dss.storage.json_store import Cache, ActivityLog
+from dss.storage.json_store import ActivityLog, Cache
+
@dataclass
class DesignToken:
@@ -36,6 +37,7 @@ class DesignToken:
description: str = ""
category: str = ""
+
@dataclass
class ComponentDefinition:
name: str
@@ -44,6 +46,7 @@ class ComponentDefinition:
properties: Dict[str, Any]
variants: List[Dict[str, Any]]
+
@dataclass
class StyleDefinition:
name: str
@@ -69,7 +72,7 @@ class FigmaClient:
self._use_real_api = bool(self.token)
def _cache_key(self, endpoint: str) -> str:
- return f"figma:{hashlib.md5(endpoint.encode()).hexdigest()}"
+ return f"figma:{hashlib.sha256(endpoint.encode()).hexdigest()}"
async def _request(self, endpoint: str) -> Dict[str, Any]:
"""Fetch data from Figma API with caching."""
@@ -84,8 +87,7 @@ class FigmaClient:
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.get(
- f"{self.base_url}{endpoint}",
- headers={"X-Figma-Token": self.token}
+ f"{self.base_url}{endpoint}", headers={"X-Figma-Token": self.token}
)
response.raise_for_status()
data = response.json()
@@ -95,7 +97,7 @@ class FigmaClient:
ActivityLog.log(
action="figma_api_request",
entity_type="figma",
- details={"endpoint": endpoint, "cached": False}
+ details={"endpoint": endpoint, "cached": False},
)
return data
@@ -110,46 +112,92 @@ class FigmaClient:
"VC1": {
"id": "VC1",
"name": "Colors",
- "modes": [{"modeId": "M1", "name": "Light"}, {"modeId": "M2", "name": "Dark"}]
+ "modes": [
+ {"modeId": "M1", "name": "Light"},
+ {"modeId": "M2", "name": "Dark"},
+ ],
},
"VC2": {
"id": "VC2",
"name": "Spacing",
- "modes": [{"modeId": "M1", "name": "Default"}]
- }
+ "modes": [{"modeId": "M1", "name": "Default"}],
+ },
},
"variables": {
- "V1": {"id": "V1", "name": "primary", "resolvedType": "COLOR",
- "valuesByMode": {"M1": {"r": 0.2, "g": 0.4, "b": 0.9, "a": 1}}},
- "V2": {"id": "V2", "name": "secondary", "resolvedType": "COLOR",
- "valuesByMode": {"M1": {"r": 0.5, "g": 0.5, "b": 0.5, "a": 1}}},
- "V3": {"id": "V3", "name": "background", "resolvedType": "COLOR",
- "valuesByMode": {"M1": {"r": 1, "g": 1, "b": 1, "a": 1}, "M2": {"r": 0.1, "g": 0.1, "b": 0.1, "a": 1}}},
- "V4": {"id": "V4", "name": "space-1", "resolvedType": "FLOAT",
- "valuesByMode": {"M1": 4}},
- "V5": {"id": "V5", "name": "space-2", "resolvedType": "FLOAT",
- "valuesByMode": {"M1": 8}},
- "V6": {"id": "V6", "name": "space-4", "resolvedType": "FLOAT",
- "valuesByMode": {"M1": 16}},
- }
- }
+ "V1": {
+ "id": "V1",
+ "name": "primary",
+ "resolvedType": "COLOR",
+ "valuesByMode": {"M1": {"r": 0.2, "g": 0.4, "b": 0.9, "a": 1}},
+ },
+ "V2": {
+ "id": "V2",
+ "name": "secondary",
+ "resolvedType": "COLOR",
+ "valuesByMode": {"M1": {"r": 0.5, "g": 0.5, "b": 0.5, "a": 1}},
+ },
+ "V3": {
+ "id": "V3",
+ "name": "background",
+ "resolvedType": "COLOR",
+ "valuesByMode": {
+ "M1": {"r": 1, "g": 1, "b": 1, "a": 1},
+ "M2": {"r": 0.1, "g": 0.1, "b": 0.1, "a": 1},
+ },
+ },
+ "V4": {
+ "id": "V4",
+ "name": "space-1",
+ "resolvedType": "FLOAT",
+ "valuesByMode": {"M1": 4},
+ },
+ "V5": {
+ "id": "V5",
+ "name": "space-2",
+ "resolvedType": "FLOAT",
+ "valuesByMode": {"M1": 8},
+ },
+ "V6": {
+ "id": "V6",
+ "name": "space-4",
+ "resolvedType": "FLOAT",
+ "valuesByMode": {"M1": 16},
+ },
+ },
+ },
}
elif "/components" in endpoint:
return {
"status": 200,
"meta": {
"components": {
- "C1": {"key": "C1", "name": "Button", "description": "Primary action button",
- "containing_frame": {"name": "Components"}},
- "C2": {"key": "C2", "name": "Card", "description": "Content container",
- "containing_frame": {"name": "Components"}},
- "C3": {"key": "C3", "name": "Input", "description": "Text input field",
- "containing_frame": {"name": "Components"}},
+ "C1": {
+ "key": "C1",
+ "name": "Button",
+ "description": "Primary action button",
+ "containing_frame": {"name": "Components"},
+ },
+ "C2": {
+ "key": "C2",
+ "name": "Card",
+ "description": "Content container",
+ "containing_frame": {"name": "Components"},
+ },
+ "C3": {
+ "key": "C3",
+ "name": "Input",
+ "description": "Text input field",
+ "containing_frame": {"name": "Components"},
+ },
},
"component_sets": {
- "CS1": {"key": "CS1", "name": "Button", "description": "Button with variants"}
- }
- }
+ "CS1": {
+ "key": "CS1",
+ "name": "Button",
+ "description": "Button with variants",
+ }
+ },
+ },
}
elif "/styles" in endpoint:
return {
@@ -162,7 +210,7 @@ class FigmaClient:
"S4": {"key": "S4", "name": "Primary", "style_type": "FILL"},
"S5": {"key": "S5", "name": "Shadow/Medium", "style_type": "EFFECT"},
}
- }
+ },
}
else:
return {"status": 200, "document": {"name": "Mock Design System"}}
@@ -236,12 +284,14 @@ class FigmaToolSuite:
token_type = self._map_figma_type(var_type)
formatted_value = self._format_value(first_value, token_type)
- tokens.append(DesignToken(
- name=self._to_css_name(name),
- value=formatted_value,
- type=token_type,
- category=self._get_category(name)
- ))
+ tokens.append(
+ DesignToken(
+ name=self._to_css_name(name),
+ value=formatted_value,
+ type=token_type,
+ category=self._get_category(name),
+ )
+ )
# Generate output in requested format
output = self._format_tokens(tokens, format)
@@ -257,14 +307,15 @@ class FigmaToolSuite:
"collections": list(collections.keys()),
"output_path": str(output_path),
"tokens": [asdict(t) for t in tokens],
- "formatted_output": output
+ "formatted_output": output,
}
# === Tool 2: Extract Components ===
async def extract_components(self, file_key: str) -> Dict[str, Any]:
"""
-        Extract all component definitions from a Figma file by recursively
-        traversing the document tree.
+        Extract component definitions from a Figma file.
+
+        Recursively traverses the document tree.
Args:
@@ -274,7 +325,7 @@ class FigmaToolSuite:
Dict with: success, components_count, output_path, components
"""
definitions: List[ComponentDefinition] = []
-
+
try:
file_data = await self.client.get_file(file_key)
doc = file_data.get("document", {})
@@ -285,12 +336,7 @@ class FigmaToolSuite:
except Exception as e:
# Log the exception for debugging
print(f"Error extracting components from Figma file {file_key}: {e}")
- return {
- "success": False,
- "components_count": 0,
- "error": str(e),
- "components": []
- }
+ return {"success": False, "components_count": 0, "error": str(e), "components": []}
output_path = self.output_dir / "components.json"
output_path.write_text(json.dumps([asdict(d) for d in definitions], indent=2))
@@ -299,10 +345,12 @@ class FigmaToolSuite:
"success": True,
"components_count": len(definitions),
"output_path": str(output_path),
- "components": [asdict(d) for d in definitions]
+ "components": [asdict(d) for d in definitions],
}
- def _recursive_find_components(self, node: Dict[str, Any], definitions: List[ComponentDefinition]):
+ def _recursive_find_components(
+ self, node: Dict[str, Any], definitions: List[ComponentDefinition]
+ ):
"""
Recursively traverse the Figma node tree and extract all components.
@@ -312,13 +360,15 @@ class FigmaToolSuite:
"""
# If the node is a component, extract its definition
if node.get("type") == "COMPONENT":
- definitions.append(ComponentDefinition(
- name=node.get("name", ""),
- key=node.get("id", ""),
- description=node.get("description", ""),
- properties={}, # Properties can be enriched later
- variants=[] # Variant info can be enriched later
- ))
+ definitions.append(
+ ComponentDefinition(
+ name=node.get("name", ""),
+ key=node.get("id", ""),
+ description=node.get("description", ""),
+ properties={}, # Properties can be enriched later
+ variants=[], # Variant info can be enriched later
+ )
+ )
# If the node has children, recurse into them
if "children" in node and isinstance(node["children"], list):
@@ -359,7 +409,7 @@ class FigmaToolSuite:
name=style.get("name", ""),
key=style.get("key", style_id),
type=style_type,
- properties={}
+ properties={},
)
definitions.append(defn)
if style_type in by_type:
@@ -377,10 +427,7 @@ class FigmaToolSuite:
# Document styles use styleType instead of style_type
style_type = style.get("styleType", "")
defn = StyleDefinition(
- name=style.get("name", ""),
- key=style_id,
- type=style_type,
- properties={}
+ name=style.get("name", ""), key=style_id, type=style_type, properties={}
)
definitions.append(defn)
if style_type in by_type:
@@ -389,22 +436,23 @@ class FigmaToolSuite:
pass
output_path = self.output_dir / "styles.json"
- output_path.write_text(json.dumps({
- "all": [asdict(d) for d in definitions],
- "by_type": by_type
- }, indent=2))
+ output_path.write_text(
+ json.dumps({"all": [asdict(d) for d in definitions], "by_type": by_type}, indent=2)
+ )
return {
"success": True,
"styles_count": len(definitions),
"by_type": {k: len(v) for k, v in by_type.items()},
"output_path": str(output_path),
- "styles": by_type
+ "styles": by_type,
}
# === Tool 4: Sync Tokens ===
- async def sync_tokens(self, file_key: str, target_path: str, format: str = "css") -> Dict[str, Any]:
+ async def sync_tokens(
+ self, file_key: str, target_path: str, format: str = "css"
+ ) -> Dict[str, Any]:
"""
Sync design tokens from Figma to codebase.
@@ -441,7 +489,7 @@ class FigmaToolSuite:
"has_changes": has_changes,
"tokens_synced": result["tokens_count"],
"target_path": str(target),
- "backup_created": has_changes and bool(existing_content)
+ "backup_created": has_changes and bool(existing_content),
}
# === Tool 5: Visual Diff ===
@@ -472,16 +520,14 @@ class FigmaToolSuite:
{"name": "Button", "change_percent": 5.2, "type": "color"},
{"name": "Card", "change_percent": 0.0, "type": "none"},
],
- "summary": {
- "total_components": 3,
- "changed": 1,
- "unchanged": 2
- }
+ "summary": {"total_components": 3, "changed": 1, "unchanged": 2},
}
# === Tool 6: Validate Components ===
- async def validate_components(self, file_key: str, schema_path: Optional[str] = None) -> Dict[str, Any]:
+ async def validate_components(
+ self, file_key: str, schema_path: Optional[str] = None
+ ) -> Dict[str, Any]:
"""
Validate component definitions against rules.
@@ -500,21 +546,25 @@ class FigmaToolSuite:
for comp in components["components"]:
# Rule 1: Naming convention (capitalize first letter)
if not comp["name"][0].isupper():
- issues.append({
- "component": comp["name"],
- "rule": "naming-convention",
- "severity": "warning",
- "message": f"'{comp['name']}' should start with capital letter"
- })
+ issues.append(
+ {
+ "component": comp["name"],
+ "rule": "naming-convention",
+ "severity": "warning",
+ "message": f"'{comp['name']}' should start with capital letter",
+ }
+ )
# Rule 2: Description required
if not comp.get("description"):
- issues.append({
- "component": comp["name"],
- "rule": "description-required",
- "severity": "info",
- "message": f"'{comp['name']}' should have a description"
- })
+ issues.append(
+ {
+ "component": comp["name"],
+ "rule": "description-required",
+ "severity": "info",
+ "message": f"'{comp['name']}' should have a description",
+ }
+ )
return {
"success": True,
@@ -524,14 +574,15 @@ class FigmaToolSuite:
"summary": {
"errors": len([i for i in issues if i["severity"] == "error"]),
"warnings": len([i for i in issues if i["severity"] == "warning"]),
- "info": len([i for i in issues if i["severity"] == "info"])
- }
+ "info": len([i for i in issues if i["severity"] == "info"]),
+ },
}
# === Tool 7: Generate Code ===
- async def generate_code(self, file_key: str, component_name: str,
- framework: str = "webcomponent") -> Dict[str, Any]:
+ async def generate_code(
+ self, file_key: str, component_name: str, framework: str = "webcomponent"
+ ) -> Dict[str, Any]:
"""
Generate component code from Figma definition.
@@ -546,13 +597,13 @@ class FigmaToolSuite:
components = await self.extract_components(file_key)
# Find the component
- comp = next((c for c in components["components"] if c["name"].lower() == component_name.lower()), None)
+ comp = next(
+ (c for c in components["components"] if c["name"].lower() == component_name.lower()),
+ None,
+ )
if not comp:
- return {
- "success": False,
- "error": f"Component '{component_name}' not found"
- }
+ return {"success": False, "error": f"Component '{component_name}' not found"}
# Generate code based on framework
if framework == "webcomponent":
@@ -572,18 +623,13 @@ class FigmaToolSuite:
"component": comp["name"],
"framework": framework,
"output_path": str(output_path),
- "code": code
+ "code": code,
}
# === Helper Methods ===
def _map_figma_type(self, figma_type: str) -> str:
- mapping = {
- "COLOR": "color",
- "FLOAT": "dimension",
- "STRING": "string",
- "BOOLEAN": "boolean"
- }
+ mapping = {"COLOR": "color", "FLOAT": "dimension", "STRING": "string", "BOOLEAN": "boolean"}
return mapping.get(figma_type, "unknown")
def _format_value(self, value: Any, token_type: str) -> str:
@@ -621,7 +667,9 @@ class FigmaToolSuite:
return "\n".join(lines)
elif format == "json":
- return json.dumps({t.name: {"value": t.value, "type": t.type} for t in tokens}, indent=2)
+ return json.dumps(
+ {t.name: {"value": t.value, "type": t.type} for t in tokens}, indent=2
+ )
elif format == "scss":
return "\n".join([f"${t.name}: {t.value};" for t in tokens])
@@ -639,7 +687,7 @@ class FigmaToolSuite:
def _generate_webcomponent(self, comp: Dict[str, Any]) -> str:
name = comp["name"]
tag = f"ds-{name.lower()}"
- return f'''/**
+ return f"""/**
* {name} - Web Component
* {comp.get("description", "")}
*
@@ -687,11 +735,11 @@ class Ds{name} extends HTMLElement {{
customElements.define('{tag}', Ds{name});
export default Ds{name};
-'''
+"""
def _generate_react(self, comp: Dict[str, Any]) -> str:
name = comp["name"]
- return f'''import React from 'react';
+ return f"""import React from 'react';
import styles from './{name}.module.css';
/**
@@ -717,11 +765,11 @@ export function {name}({{
}}
export default {name};
-'''
+"""
def _generate_vue(self, comp: Dict[str, Any]) -> str:
name = comp["name"]
- return f'''
+ return f"""
@@ -753,7 +801,7 @@ const classes = computed(() => [
/* Component styles */
}}
-'''
+"""
def _get_extension(self, framework: str) -> str:
return {"webcomponent": "js", "react": "jsx", "vue": "vue"}[framework]
@@ -761,6 +809,7 @@ const classes = computed(() => [
# === MCP Tool Registration ===
+
def create_mcp_tools(mcp_instance):
"""
Register all Figma tools with MCP server.
@@ -808,7 +857,9 @@ def create_mcp_tools(mcp_instance):
return json.dumps(result, indent=2)
@mcp_instance.tool()
- async def figma_generate_code(file_key: str, component_name: str, framework: str = "webcomponent") -> str:
+ async def figma_generate_code(
+ file_key: str, component_name: str, framework: str = "webcomponent"
+ ) -> str:
"""Generate component code from Figma definition."""
result = await suite.generate_code(file_key, component_name, framework)
return json.dumps(result, indent=2)
@@ -819,9 +870,7 @@ if __name__ == "__main__":
import asyncio
async def test():
- """
- Run a series of tests for the FigmaToolSuite in mock mode.
- """
+ """Run a series of tests for the FigmaToolSuite in mock mode."""
suite = FigmaToolSuite(output_dir="./test_output")
print("Testing Figma Tool Suite (Mock Mode)\n")
diff --git a/dss/ingest/__init__.py b/dss/ingest/__init__.py
index 62e798c..1f0b602 100644
--- a/dss/ingest/__init__.py
+++ b/dss/ingest/__init__.py
@@ -1,25 +1,25 @@
"""
-DSS Token Ingestion Module
+DSS Token Ingestion Module.
Multi-source design token extraction and normalization.
Supports: Figma, CSS, SCSS, Tailwind, JSON/YAML, styled-components
"""
-from .base import DesignToken, TokenSource, TokenCollection
+from .base import DesignToken, TokenCollection, TokenSource
from .css import CSSTokenSource
+from .json_tokens import JSONTokenSource
+from .merge import MergeStrategy, TokenMerger
from .scss import SCSSTokenSource
from .tailwind import TailwindTokenSource
-from .json_tokens import JSONTokenSource
-from .merge import TokenMerger, MergeStrategy
__all__ = [
- 'DesignToken',
- 'TokenSource',
- 'TokenCollection',
- 'CSSTokenSource',
- 'SCSSTokenSource',
- 'TailwindTokenSource',
- 'JSONTokenSource',
- 'TokenMerger',
- 'MergeStrategy',
+ "DesignToken",
+ "TokenSource",
+ "TokenCollection",
+ "CSSTokenSource",
+ "SCSSTokenSource",
+ "TailwindTokenSource",
+ "JSONTokenSource",
+ "TokenMerger",
+ "MergeStrategy",
]
diff --git a/dss/ingest/base.py b/dss/ingest/base.py
index e07c74f..b22a89a 100644
--- a/dss/ingest/base.py
+++ b/dss/ingest/base.py
@@ -1,5 +1,5 @@
"""
-Token Ingestion & Processing Module
+Token Ingestion & Processing Module.
Provides a comprehensive system for extracting, processing, and managing design
tokens from various sources (CSS, JSON, Figma, Tailwind, etc.).
@@ -17,17 +17,18 @@ Token Processing Pipeline:
5. Distribution: Export tokens in various formats (CSS, JSON, TypeScript, etc.)
"""
+import json
+import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Set
-import json
-import re
class TokenType(str, Enum):
"""W3C Design Token types."""
+
COLOR = "color"
DIMENSION = "dimension"
FONT_FAMILY = "fontFamily"
@@ -49,6 +50,7 @@ class TokenType(str, Enum):
class TokenCategory(str, Enum):
"""Token categories for organization."""
+
COLORS = "colors"
SPACING = "spacing"
TYPOGRAPHY = "typography"
@@ -79,23 +81,24 @@ class DesignToken:
- State: Deprecation status
- Metadata: Version, timestamps, extensions
"""
+
# Core properties (W3C spec)
- name: str # e.g., "color.primary.500"
- value: Any # e.g., "#3B82F6"
+ name: str # e.g., "color.primary.500"
+ value: Any # e.g., "#3B82F6"
type: TokenType = TokenType.UNKNOWN # Token type classification
- description: str = "" # Token description
+ description: str = "" # Token description
# Source attribution
- source: str = "" # e.g., "figma:abc123", "css:tokens.css:12"
- source_file: str = "" # Source file path
- source_line: int = 0 # Line number in source file
- original_name: str = "" # Name before normalization
- original_value: str = "" # Value before processing
+ source: str = "" # e.g., "figma:abc123", "css:tokens.css:12"
+ source_file: str = "" # Source file path
+ source_line: int = 0 # Line number in source file
+ original_name: str = "" # Name before normalization
+ original_value: str = "" # Value before processing
# Organization
category: TokenCategory = TokenCategory.OTHER
tags: List[str] = field(default_factory=list)
- group: str = "" # Logical grouping (e.g., "brand", "semantic")
+ group: str = "" # Logical grouping (e.g., "brand", "semantic")
# State
deprecated: bool = False
@@ -133,40 +136,40 @@ class DesignToken:
value_str = str(self.value).lower().strip()
# Color patterns
- if re.match(r'^#[0-9a-f]{3,8}$', value_str):
+ if re.match(r"^#[0-9a-f]{3,8}$", value_str):
return TokenType.COLOR
- if re.match(r'^rgb[a]?\s*\(', value_str):
+ if re.match(r"^rgb[a]?\s*\(", value_str):
return TokenType.COLOR
- if re.match(r'^hsl[a]?\s*\(', value_str):
+ if re.match(r"^hsl[a]?\s*\(", value_str):
return TokenType.COLOR
- if value_str in ('transparent', 'currentcolor', 'inherit'):
+ if value_str in ("transparent", "currentcolor", "inherit"):
return TokenType.COLOR
# Dimension patterns
- if re.match(r'^-?\d+(\.\d+)?(px|rem|em|%|vh|vw|ch|ex|vmin|vmax)$', value_str):
+ if re.match(r"^-?\d+(\.\d+)?(px|rem|em|%|vh|vw|ch|ex|vmin|vmax)$", value_str):
return TokenType.DIMENSION
# Duration patterns
- if re.match(r'^\d+(\.\d+)?(ms|s)$', value_str):
+ if re.match(r"^\d+(\.\d+)?(ms|s)$", value_str):
return TokenType.DURATION
# Number patterns
- if re.match(r'^-?\d+(\.\d+)?$', value_str):
+ if re.match(r"^-?\d+(\.\d+)?$", value_str):
return TokenType.NUMBER
# Font family (contains quotes or commas)
- if ',' in value_str or '"' in value_str or "'" in value_str:
- if 'sans' in value_str or 'serif' in value_str or 'mono' in value_str:
+ if "," in value_str or '"' in value_str or "'" in value_str:
+ if "sans" in value_str or "serif" in value_str or "mono" in value_str:
return TokenType.FONT_FAMILY
# Font weight
- if value_str in ('normal', 'bold', 'lighter', 'bolder') or \
- re.match(r'^[1-9]00$', value_str):
+ if value_str in ("normal", "bold", "lighter", "bolder") or re.match(
+ r"^[1-9]00$", value_str
+ ):
return TokenType.FONT_WEIGHT
# Shadow
- if 'shadow' in self.name.lower() or \
- re.match(r'^-?\d+.*\s+-?\d+.*\s+-?\d+', value_str):
+ if "shadow" in self.name.lower() or re.match(r"^-?\d+.*\s+-?\d+.*\s+-?\d+", value_str):
return TokenType.SHADOW
return TokenType.STRING
@@ -177,16 +180,37 @@ class DesignToken:
# Check name patterns
patterns = {
- TokenCategory.COLORS: ['color', 'bg', 'background', 'text', 'border-color', 'fill', 'stroke'],
- TokenCategory.SPACING: ['space', 'spacing', 'gap', 'margin', 'padding', 'inset'],
- TokenCategory.TYPOGRAPHY: ['font', 'text', 'line-height', 'letter-spacing', 'typography'],
- TokenCategory.SIZING: ['size', 'width', 'height', 'min-', 'max-'],
- TokenCategory.BORDERS: ['border', 'radius', 'outline'],
- TokenCategory.SHADOWS: ['shadow', 'elevation'],
- TokenCategory.EFFECTS: ['blur', 'opacity', 'filter', 'backdrop'],
- TokenCategory.MOTION: ['transition', 'animation', 'duration', 'delay', 'timing', 'ease'],
- TokenCategory.BREAKPOINTS: ['breakpoint', 'screen', 'media'],
- TokenCategory.Z_INDEX: ['z-index', 'z-', 'layer'],
+ TokenCategory.COLORS: [
+ "color",
+ "bg",
+ "background",
+ "text",
+ "border-color",
+ "fill",
+ "stroke",
+ ],
+ TokenCategory.SPACING: ["space", "spacing", "gap", "margin", "padding", "inset"],
+ TokenCategory.TYPOGRAPHY: [
+ "font",
+ "text",
+ "line-height",
+ "letter-spacing",
+ "typography",
+ ],
+ TokenCategory.SIZING: ["size", "width", "height", "min-", "max-"],
+ TokenCategory.BORDERS: ["border", "radius", "outline"],
+ TokenCategory.SHADOWS: ["shadow", "elevation"],
+ TokenCategory.EFFECTS: ["blur", "opacity", "filter", "backdrop"],
+ TokenCategory.MOTION: [
+ "transition",
+ "animation",
+ "duration",
+ "delay",
+ "timing",
+ "ease",
+ ],
+ TokenCategory.BREAKPOINTS: ["breakpoint", "screen", "media"],
+ TokenCategory.Z_INDEX: ["z-index", "z-", "layer"],
}
for category, keywords in patterns.items():
@@ -196,7 +220,12 @@ class DesignToken:
# Check by type
if self.type == TokenType.COLOR:
return TokenCategory.COLORS
- if self.type in (TokenType.FONT_FAMILY, TokenType.FONT_WEIGHT, TokenType.FONT_SIZE, TokenType.LINE_HEIGHT):
+ if self.type in (
+ TokenType.FONT_FAMILY,
+ TokenType.FONT_WEIGHT,
+ TokenType.FONT_SIZE,
+ TokenType.LINE_HEIGHT,
+ ):
return TokenCategory.TYPOGRAPHY
if self.type == TokenType.DURATION:
return TokenCategory.MOTION
@@ -217,12 +246,12 @@ class DesignToken:
name = self.name
# Handle camelCase
- name = re.sub(r'([a-z])([A-Z])', r'\1.\2', name)
+ name = re.sub(r"([a-z])([A-Z])", r"\1.\2", name)
# Replace separators
- name = name.replace('-', separator)
- name = name.replace('_', separator)
- name = name.replace('/', separator)
+ name = name.replace("-", separator)
+ name = name.replace("_", separator)
+ name = name.replace("/", separator)
# Clean up multiple separators
while separator * 2 in name:
@@ -296,6 +325,7 @@ class TokenCollection:
Tracks composition, source attribution, and timestamps for full token traceability.
"""
+
tokens: List[DesignToken] = field(default_factory=list)
name: str = ""
description: str = ""
@@ -329,7 +359,7 @@ class TokenCollection:
return token
return None
- def filter_by_category(self, category: TokenCategory) -> 'TokenCollection':
+ def filter_by_category(self, category: TokenCategory) -> "TokenCollection":
"""Return new collection filtered by category."""
filtered = [t for t in self.tokens if t.category == category]
return TokenCollection(
@@ -338,7 +368,7 @@ class TokenCollection:
sources=self.sources,
)
- def filter_by_type(self, token_type: TokenType) -> 'TokenCollection':
+ def filter_by_type(self, token_type: TokenType) -> "TokenCollection":
"""Return new collection filtered by type."""
filtered = [t for t in self.tokens if t.type == token_type]
return TokenCollection(
@@ -347,7 +377,7 @@ class TokenCollection:
sources=self.sources,
)
- def filter_by_source(self, source: str) -> 'TokenCollection':
+ def filter_by_source(self, source: str) -> "TokenCollection":
"""Return new collection filtered by source."""
filtered = [t for t in self.tokens if source in t.source]
return TokenCollection(
@@ -450,10 +480,10 @@ class TokenCollection:
"""Get collection summary."""
return {
"total_tokens": len(self.tokens),
- "categories": {cat.value: len(self.filter_by_category(cat))
- for cat in self.get_categories()},
- "types": {t.value: len(self.filter_by_type(t))
- for t in self.get_types()},
+ "categories": {
+ cat.value: len(self.filter_by_category(cat)) for cat in self.get_categories()
+ },
+ "types": {t.value: len(self.filter_by_type(t)) for t in self.get_types()},
"sources": self.sources,
"duplicates": len(self.get_duplicates()),
}
@@ -475,9 +505,7 @@ class TokenSource(ABC):
@property
@abstractmethod
def source_type(self) -> str:
- """
- Return source type identifier (e.g., 'css', 'scss', 'figma', 'json').
- """
+ """Return source type identifier (e.g., 'css', 'scss', 'figma', 'json')."""
pass
@abstractmethod
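
A short sketch of the `DesignToken`/`TokenCollection` surface in this file; the expected `normalize_name()` output follows from the camelCase and separator rules above (type and category are set explicitly here, since `UNKNOWN`/`OTHER` are the defaults):

    from dss.ingest.base import DesignToken, TokenCategory, TokenCollection, TokenType

    token = DesignToken(
        name="color-primary-500",
        value="#3b82f6",
        type=TokenType.COLOR,
        category=TokenCategory.COLORS,
        source="css:tokens.css:12",
    )
    print(token.normalize_name())  # expected: "color.primary.500"

    collection = TokenCollection(tokens=[token], name="demo")
    print(len(collection.filter_by_category(TokenCategory.COLORS).tokens))  # 1
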
diff --git a/dss/ingest/css.py b/dss/ingest/css.py
index 047f3bb..a54ac37 100644
--- a/dss/ingest/css.py
+++ b/dss/ingest/css.py
@@ -1,5 +1,5 @@
"""
-CSS Token Source
+CSS Token Source.
Extracts design tokens from CSS custom properties (CSS variables).
Parses :root declarations and other CSS variable definitions.
@@ -7,8 +7,9 @@ Parses :root declarations and other CSS variable definitions.
import re
from pathlib import Path
-from typing import List, Optional, Tuple
-from .base import DesignToken, TokenCollection, TokenSource, TokenType, TokenCategory
+from typing import List, Tuple
+
+from .base import DesignToken, TokenCollection, TokenSource
class CSSTokenSource(TokenSource):
@@ -58,10 +59,10 @@ class CSSTokenSource(TokenSource):
def _is_file_path(self, source: str) -> bool:
"""Check if source looks like a file path."""
# If it contains CSS syntax, it's content
- if '{' in source or ':' in source and ';' in source:
+ if "{" in source or ":" in source and ";" in source:
return False
# If it ends with .css, it's a file
- if source.endswith('.css'):
+ if source.endswith(".css"):
return True
# If path exists, it's a file
return Path(source).exists()
@@ -71,16 +72,16 @@ class CSSTokenSource(TokenSource):
tokens = []
# Track line numbers
- lines = content.split('\n')
+ lines = content.split("\n")
line_map = self._build_line_map(content)
# Find all CSS variable declarations
# Pattern matches: --var-name: value;
var_pattern = re.compile(
- r'(\/\*[^*]*\*\/\s*)?' # Optional preceding comment
- r'(--[\w-]+)\s*:\s*' # Variable name
- r'([^;]+);', # Value
- re.MULTILINE
+ r"(\/\*[^*]*\*\/\s*)?" # Optional preceding comment
+ r"(--[\w-]+)\s*:\s*" # Variable name
+ r"([^;]+);", # Value
+ re.MULTILINE,
)
# Find variables in all rule blocks
@@ -125,7 +126,7 @@ class CSSTokenSource(TokenSource):
"""Build map of character positions to line numbers."""
line_map = []
pos = 0
- for i, line in enumerate(content.split('\n'), 1):
+ for i, line in enumerate(content.split("\n"), 1):
line_map.append(pos)
pos += len(line) + 1 # +1 for newline
return line_map
@@ -143,9 +144,9 @@ class CSSTokenSource(TokenSource):
def _normalize_var_name(self, var_name: str) -> str:
"""Convert CSS variable name to token name."""
# Remove -- prefix
- name = var_name.lstrip('-')
+ name = var_name.lstrip("-")
# Convert kebab-case to dot notation
- name = name.replace('-', '.')
+ name = name.replace("-", ".")
return name
def _clean_comment(self, comment: str) -> str:
@@ -153,30 +154,30 @@ class CSSTokenSource(TokenSource):
if not comment:
return ""
# Remove /* and */
- text = re.sub(r'/\*|\*/', '', comment)
+ text = re.sub(r"/\*|\*/", "", comment)
# Clean whitespace
- text = ' '.join(text.split())
+ text = " ".join(text.split())
return text.strip()
def _get_selector_context(self, content: str, pos: int) -> str:
"""Get the CSS selector context for a variable."""
# Find the opening brace before this position
before = content[:pos]
- last_open = before.rfind('{')
+ last_open = before.rfind("{")
if last_open == -1:
return ""
# Find the selector before the brace
selector_part = before[:last_open]
# Get last selector (after } or start)
- last_close = selector_part.rfind('}')
+ last_close = selector_part.rfind("}")
if last_close != -1:
- selector_part = selector_part[last_close + 1:]
+ selector_part = selector_part[last_close + 1 :]
# Clean up
selector = selector_part.strip()
# Handle multi-line selectors
- selector = ' '.join(selector.split())
+ selector = " ".join(selector.split())
return selector
@@ -188,16 +189,10 @@ class CSSInlineExtractor:
"""
# Patterns for extracting inline styles
- STYLE_ATTR_PATTERN = re.compile(
- r'style\s*=\s*["\']([^"\']+)["\']',
- re.IGNORECASE
- )
+ STYLE_ATTR_PATTERN = re.compile(r'style\s*=\s*["\']([^"\']+)["\']', re.IGNORECASE)
# JSX style object pattern
- JSX_STYLE_PATTERN = re.compile(
- r'style\s*=\s*\{\{([^}]+)\}\}',
- re.MULTILINE
- )
+ JSX_STYLE_PATTERN = re.compile(r"style\s*=\s*\{\{([^}]+)\}\}", re.MULTILINE)
async def extract_candidates(self, source: str) -> List[Tuple[str, str, int]]:
"""
@@ -213,7 +208,7 @@ class CSSInlineExtractor:
else:
content = source
- lines = content.split('\n')
+ lines = content.split("\n")
for i, line in enumerate(lines, 1):
# Check HTML style attribute
@@ -235,9 +230,9 @@ class CSSInlineExtractor:
def _parse_style_string(self, style: str) -> List[Tuple[str, str]]:
"""Parse CSS style string into property-value pairs."""
pairs = []
- for declaration in style.split(';'):
- if ':' in declaration:
- prop, value = declaration.split(':', 1)
+ for declaration in style.split(";"):
+ if ":" in declaration:
+ prop, value = declaration.split(":", 1)
pairs.append((prop.strip(), value.strip()))
return pairs
@@ -245,13 +240,13 @@ class CSSInlineExtractor:
"""Parse JSX style object into property-value pairs."""
pairs = []
# Simple parsing for common cases
- for part in style.split(','):
- if ':' in part:
- prop, value = part.split(':', 1)
- prop = prop.strip().strip('"\'')
- value = value.strip().strip('"\'')
+ for part in style.split(","):
+ if ":" in part:
+ prop, value = part.split(":", 1)
+ prop = prop.strip().strip("\"'")
+ value = value.strip().strip("\"'")
# Convert camelCase to kebab-case
- prop = re.sub(r'([a-z])([A-Z])', r'\1-\2', prop).lower()
+ prop = re.sub(r"([a-z])([A-Z])", r"\1-\2", prop).lower()
pairs.append((prop, value))
return pairs
@@ -260,23 +255,23 @@ class CSSInlineExtractor:
value = value.strip().lower()
# Colors are always candidates
- if re.match(r'^#[0-9a-f]{3,8}$', value):
+ if re.match(r"^#[0-9a-f]{3,8}$", value):
return True
- if re.match(r'^rgb[a]?\s*\(', value):
+ if re.match(r"^rgb[a]?\s*\(", value):
return True
- if re.match(r'^hsl[a]?\s*\(', value):
+ if re.match(r"^hsl[a]?\s*\(", value):
return True
# Dimensions with common units
- if re.match(r'^\d+(\.\d+)?(px|rem|em|%)$', value):
+ if re.match(r"^\d+(\.\d+)?(px|rem|em|%)$", value):
return True
# Skip variable references
- if value.startswith('var('):
+ if value.startswith("var("):
return False
# Skip inherit/initial/etc
- if value in ('inherit', 'initial', 'unset', 'auto', 'none'):
+ if value in ("inherit", "initial", "unset", "auto", "none"):
return False
return False
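
A usage sketch for the CSS source, assuming `extract` is an async method that accepts either a file path or raw CSS content, as `_is_file_path` implies (its exact signature sits outside these hunks):

    import asyncio

    from dss.ingest import CSSTokenSource

    CSS = """
    :root {
      /* Brand primary */
      --color-primary: #3b82f6;
      --spacing-md: 1rem;
    }
    """

    async def main() -> None:
        collection = await CSSTokenSource().extract(CSS)  # raw content, not a path
        for token in collection.tokens:
            # --color-primary is normalized to "color.primary"
            print(token.name, token.value, token.description)

    asyncio.run(main())
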
diff --git a/dss/ingest/json_tokens.py b/dss/ingest/json_tokens.py
index 84e0f93..72b683b 100644
--- a/dss/ingest/json_tokens.py
+++ b/dss/ingest/json_tokens.py
@@ -1,15 +1,15 @@
"""
-JSON Token Source
+JSON Token Source.
Extracts design tokens from JSON/YAML files.
Supports W3C Design Tokens format and Style Dictionary format.
"""
import json
-import re
from pathlib import Path
-from typing import List, Dict, Any, Optional
-from .base import DesignToken, TokenCollection, TokenSource, TokenType, TokenCategory
+from typing import Any, Dict, List
+
+from .base import DesignToken, TokenCollection, TokenSource, TokenType
class JSONTokenSource(TokenSource):
@@ -65,9 +65,9 @@ class JSONTokenSource(TokenSource):
def _is_file_path(self, source: str) -> bool:
"""Check if source looks like a file path."""
- if source.strip().startswith('{'):
+ if source.strip().startswith("{"):
return False
- if source.endswith('.json') or source.endswith('.tokens.json'):
+ if source.endswith(".json") or source.endswith(".tokens.json"):
return True
return Path(source).exists()
@@ -90,82 +90,74 @@ class JSONTokenSource(TokenSource):
def _is_w3c_format(self, data: Dict) -> bool:
"""Check if data follows W3C Design Tokens format."""
+
# W3C format uses $value and $type
def check_node(node: Any) -> bool:
if isinstance(node, dict):
- if '$value' in node:
+ if "$value" in node:
return True
return any(check_node(v) for v in node.values())
return False
+
return check_node(data)
def _is_style_dictionary_format(self, data: Dict) -> bool:
"""Check if data follows Style Dictionary format."""
+
# Style Dictionary uses 'value' without $
def check_node(node: Any) -> bool:
if isinstance(node, dict):
- if 'value' in node and '$value' not in node:
+ if "value" in node and "$value" not in node:
return True
return any(check_node(v) for v in node.values())
return False
+
return check_node(data)
def _is_tokens_studio_format(self, data: Dict) -> bool:
"""Check if data follows Tokens Studio format."""
# Tokens Studio has specific structure with sets
- return '$themes' in data or '$metadata' in data
+ return "$themes" in data or "$metadata" in data
def _extract_w3c_tokens(
- self,
- data: Dict,
- source_file: str,
- prefix: str = ""
+ self, data: Dict, source_file: str, prefix: str = ""
) -> List[DesignToken]:
"""Extract tokens in W3C Design Tokens format."""
tokens = []
for key, value in data.items():
# Skip metadata keys
- if key.startswith('$'):
+ if key.startswith("$"):
continue
current_path = f"{prefix}.{key}" if prefix else key
if isinstance(value, dict):
- if '$value' in value:
+ if "$value" in value:
# This is a token
- token = self._create_w3c_token(
- current_path, value, source_file
- )
+ token = self._create_w3c_token(current_path, value, source_file)
tokens.append(token)
else:
# Nested group
- tokens.extend(
- self._extract_w3c_tokens(value, source_file, current_path)
- )
+ tokens.extend(self._extract_w3c_tokens(value, source_file, current_path))
return tokens
- def _create_w3c_token(
- self,
- name: str,
- data: Dict,
- source_file: str
- ) -> DesignToken:
+ def _create_w3c_token(self, name: str, data: Dict, source_file: str) -> DesignToken:
"""Create token from W3C format node."""
- value = data.get('$value')
- token_type = self._parse_w3c_type(data.get('$type', ''))
- description = data.get('$description', '')
+ value = data.get("$value")
+ token_type = self._parse_w3c_type(data.get("$type", ""))
+ description = data.get("$description", "")
# Handle aliases/references
- if isinstance(value, str) and value.startswith('{') and value.endswith('}'):
+ if isinstance(value, str) and value.startswith("{") and value.endswith("}"):
# This is a reference like {colors.primary}
pass # Keep as-is for now
# Get extensions
extensions = {}
- if '$extensions' in data:
- extensions = data['$extensions']
+ if "$extensions" in data:
+ extensions = data["$extensions"]
token = DesignToken(
name=name,
@@ -178,34 +170,31 @@ class JSONTokenSource(TokenSource):
)
# Check for deprecated
- if extensions.get('deprecated'):
+ if extensions.get("deprecated"):
token.deprecated = True
- token.deprecated_message = extensions.get('deprecatedMessage', '')
+ token.deprecated_message = extensions.get("deprecatedMessage", "")
return token
def _parse_w3c_type(self, type_str: str) -> TokenType:
"""Convert W3C type string to TokenType."""
type_map = {
- 'color': TokenType.COLOR,
- 'dimension': TokenType.DIMENSION,
- 'fontFamily': TokenType.FONT_FAMILY,
- 'fontWeight': TokenType.FONT_WEIGHT,
- 'duration': TokenType.DURATION,
- 'cubicBezier': TokenType.CUBIC_BEZIER,
- 'number': TokenType.NUMBER,
- 'shadow': TokenType.SHADOW,
- 'border': TokenType.BORDER,
- 'gradient': TokenType.GRADIENT,
- 'transition': TokenType.TRANSITION,
+ "color": TokenType.COLOR,
+ "dimension": TokenType.DIMENSION,
+ "fontFamily": TokenType.FONT_FAMILY,
+ "fontWeight": TokenType.FONT_WEIGHT,
+ "duration": TokenType.DURATION,
+ "cubicBezier": TokenType.CUBIC_BEZIER,
+ "number": TokenType.NUMBER,
+ "shadow": TokenType.SHADOW,
+ "border": TokenType.BORDER,
+ "gradient": TokenType.GRADIENT,
+ "transition": TokenType.TRANSITION,
}
return type_map.get(type_str, TokenType.UNKNOWN)
def _extract_style_dictionary_tokens(
- self,
- data: Dict,
- source_file: str,
- prefix: str = ""
+ self, data: Dict, source_file: str, prefix: str = ""
) -> List[DesignToken]:
"""Extract tokens in Style Dictionary format."""
tokens = []
@@ -214,20 +203,20 @@ class JSONTokenSource(TokenSource):
current_path = f"{prefix}.{key}" if prefix else key
if isinstance(value, dict):
- if 'value' in value:
+ if "value" in value:
# This is a token
token = DesignToken(
name=current_path,
- value=value['value'],
- description=value.get('comment', value.get('description', '')),
+ value=value["value"],
+ description=value.get("comment", value.get("description", "")),
source=self._create_source_id(source_file),
source_file=source_file,
)
# Handle attributes
- if 'attributes' in value:
- attrs = value['attributes']
- if 'category' in attrs:
+ if "attributes" in value:
+ attrs = value["attributes"]
+ if "category" in attrs:
token.tags.append(f"category:{attrs['category']}")
token.tags.append("style-dictionary")
@@ -235,31 +224,23 @@ class JSONTokenSource(TokenSource):
else:
# Nested group
tokens.extend(
- self._extract_style_dictionary_tokens(
- value, source_file, current_path
- )
+ self._extract_style_dictionary_tokens(value, source_file, current_path)
)
return tokens
- def _extract_tokens_studio(
- self,
- data: Dict,
- source_file: str
- ) -> List[DesignToken]:
+ def _extract_tokens_studio(self, data: Dict, source_file: str) -> List[DesignToken]:
"""Extract tokens from Tokens Studio format."""
tokens = []
# Tokens Studio has token sets as top-level keys
# Skip metadata keys
for set_name, set_data in data.items():
- if set_name.startswith('$'):
+ if set_name.startswith("$"):
continue
if isinstance(set_data, dict):
- set_tokens = self._extract_tokens_studio_set(
- set_data, source_file, set_name
- )
+ set_tokens = self._extract_tokens_studio_set(set_data, source_file, set_name)
for token in set_tokens:
token.group = set_name
tokens.extend(set_tokens)
@@ -267,10 +248,7 @@ class JSONTokenSource(TokenSource):
return tokens
def _extract_tokens_studio_set(
- self,
- data: Dict,
- source_file: str,
- prefix: str = ""
+ self, data: Dict, source_file: str, prefix: str = ""
) -> List[DesignToken]:
"""Extract tokens from a Tokens Studio set."""
tokens = []
@@ -279,13 +257,13 @@ class JSONTokenSource(TokenSource):
current_path = f"{prefix}.{key}" if prefix else key
if isinstance(value, dict):
- if 'value' in value and 'type' in value:
+ if "value" in value and "type" in value:
# This is a token
token = DesignToken(
name=current_path,
- value=value['value'],
- type=self._parse_tokens_studio_type(value.get('type', '')),
- description=value.get('description', ''),
+ value=value["value"],
+ type=self._parse_tokens_studio_type(value.get("type", "")),
+ description=value.get("description", ""),
source=self._create_source_id(source_file),
source_file=source_file,
)
@@ -293,41 +271,34 @@ class JSONTokenSource(TokenSource):
tokens.append(token)
else:
# Nested group
- tokens.extend(
- self._extract_tokens_studio_set(
- value, source_file, current_path
- )
- )
+ tokens.extend(self._extract_tokens_studio_set(value, source_file, current_path))
return tokens
def _parse_tokens_studio_type(self, type_str: str) -> TokenType:
"""Convert Tokens Studio type to TokenType."""
type_map = {
- 'color': TokenType.COLOR,
- 'sizing': TokenType.DIMENSION,
- 'spacing': TokenType.DIMENSION,
- 'borderRadius': TokenType.DIMENSION,
- 'borderWidth': TokenType.DIMENSION,
- 'fontFamilies': TokenType.FONT_FAMILY,
- 'fontWeights': TokenType.FONT_WEIGHT,
- 'fontSizes': TokenType.FONT_SIZE,
- 'lineHeights': TokenType.LINE_HEIGHT,
- 'letterSpacing': TokenType.LETTER_SPACING,
- 'paragraphSpacing': TokenType.DIMENSION,
- 'boxShadow': TokenType.SHADOW,
- 'opacity': TokenType.NUMBER,
- 'dimension': TokenType.DIMENSION,
- 'text': TokenType.STRING,
- 'other': TokenType.STRING,
+ "color": TokenType.COLOR,
+ "sizing": TokenType.DIMENSION,
+ "spacing": TokenType.DIMENSION,
+ "borderRadius": TokenType.DIMENSION,
+ "borderWidth": TokenType.DIMENSION,
+ "fontFamilies": TokenType.FONT_FAMILY,
+ "fontWeights": TokenType.FONT_WEIGHT,
+ "fontSizes": TokenType.FONT_SIZE,
+ "lineHeights": TokenType.LINE_HEIGHT,
+ "letterSpacing": TokenType.LETTER_SPACING,
+ "paragraphSpacing": TokenType.DIMENSION,
+ "boxShadow": TokenType.SHADOW,
+ "opacity": TokenType.NUMBER,
+ "dimension": TokenType.DIMENSION,
+ "text": TokenType.STRING,
+ "other": TokenType.STRING,
}
return type_map.get(type_str, TokenType.UNKNOWN)
def _extract_nested_tokens(
- self,
- data: Dict,
- source_file: str,
- prefix: str = ""
+ self, data: Dict, source_file: str, prefix: str = ""
) -> List[DesignToken]:
"""Extract tokens from generic nested JSON."""
tokens = []
@@ -341,40 +312,38 @@ class JSONTokenSource(TokenSource):
if not has_nested and len(value) <= 3:
# Might be a simple token object
- if 'value' in value:
- tokens.append(DesignToken(
- name=current_path,
- value=value['value'],
- source=self._create_source_id(source_file),
- source_file=source_file,
- ))
+ if "value" in value:
+ tokens.append(
+ DesignToken(
+ name=current_path,
+ value=value["value"],
+ source=self._create_source_id(source_file),
+ source_file=source_file,
+ )
+ )
else:
# Recurse
- tokens.extend(
- self._extract_nested_tokens(value, source_file, current_path)
- )
+ tokens.extend(self._extract_nested_tokens(value, source_file, current_path))
else:
# Recurse into nested object
- tokens.extend(
- self._extract_nested_tokens(value, source_file, current_path)
- )
+ tokens.extend(self._extract_nested_tokens(value, source_file, current_path))
elif isinstance(value, (str, int, float, bool)):
# Simple value - treat as token
- tokens.append(DesignToken(
- name=current_path,
- value=value,
- source=self._create_source_id(source_file),
- source_file=source_file,
- ))
+ tokens.append(
+ DesignToken(
+ name=current_path,
+ value=value,
+ source=self._create_source_id(source_file),
+ source_file=source_file,
+ )
+ )
return tokens
class TokenExporter:
- """
- Export tokens to various JSON formats.
- """
+ """Export tokens to various JSON formats."""
@staticmethod
def to_w3c(collection: TokenCollection) -> str:
@@ -382,7 +351,7 @@ class TokenExporter:
result = {}
for token in collection.tokens:
- parts = token.normalize_name().split('.')
+ parts = token.normalize_name().split(".")
current = result
for part in parts[:-1]:
@@ -406,7 +375,7 @@ class TokenExporter:
result = {}
for token in collection.tokens:
- parts = token.normalize_name().split('.')
+ parts = token.normalize_name().split(".")
current = result
for part in parts[:-1]:
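
The three format detectors above key on structure alone. A sketch of the W3C path, under the same assumption of an async `extract` that accepts raw JSON (per `_is_file_path`):

    import asyncio

    from dss.ingest import JSONTokenSource

    # "$value"/"$type" nodes mark the W3C Design Tokens format.
    W3C = """
    {
      "color": {
        "primary": {
          "$value": "#3b82f6",
          "$type": "color",
          "$description": "Brand primary"
        }
      }
    }
    """

    async def main() -> None:
        collection = await JSONTokenSource().extract(W3C)
        token = collection.tokens[0]
        print(token.name, token.type)  # expected: color.primary TokenType.COLOR

    asyncio.run(main())
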
diff --git a/dss/ingest/merge.py b/dss/ingest/merge.py
index fb4728e..a81abba 100644
--- a/dss/ingest/merge.py
+++ b/dss/ingest/merge.py
@@ -1,5 +1,5 @@
"""
-Token Merge Module
+Token Merge Module.
Merge tokens from multiple sources with conflict resolution strategies.
"""
@@ -7,31 +7,33 @@ Merge tokens from multiple sources with conflict resolution strategies.
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
-from typing import List, Dict, Optional, Callable, Tuple
-from .base import DesignToken, TokenCollection, TokenCategory
+from typing import Callable, Dict, List, Optional
+
+from .base import DesignToken, TokenCollection
class MergeStrategy(str, Enum):
"""Token merge conflict resolution strategies."""
# Simple strategies
- FIRST = "first" # Keep first occurrence
- LAST = "last" # Keep last occurrence (override)
- ERROR = "error" # Raise error on conflict
+ FIRST = "first" # Keep first occurrence
+ LAST = "last" # Keep last occurrence (override)
+ ERROR = "error" # Raise error on conflict
# Value-based strategies
- PREFER_FIGMA = "prefer_figma" # Prefer Figma source
- PREFER_CODE = "prefer_code" # Prefer code sources (CSS, SCSS)
+ PREFER_FIGMA = "prefer_figma" # Prefer Figma source
+ PREFER_CODE = "prefer_code" # Prefer code sources (CSS, SCSS)
PREFER_SPECIFIC = "prefer_specific" # Prefer more specific values
# Smart strategies
- MERGE_METADATA = "merge_metadata" # Merge metadata, keep latest value
- INTERACTIVE = "interactive" # Require user decision
+ MERGE_METADATA = "merge_metadata" # Merge metadata, keep latest value
+ INTERACTIVE = "interactive" # Require user decision
@dataclass
class MergeConflict:
"""Represents a token name conflict during merge."""
+
token_name: str
existing: DesignToken
incoming: DesignToken
@@ -42,6 +44,7 @@ class MergeConflict:
@dataclass
class MergeResult:
"""Result of a token merge operation."""
+
collection: TokenCollection
conflicts: List[MergeConflict] = field(default_factory=list)
stats: Dict[str, int] = field(default_factory=dict)
@@ -79,7 +82,7 @@ class TokenMerger:
def __init__(
self,
strategy: MergeStrategy = MergeStrategy.LAST,
- custom_resolver: Optional[Callable[[MergeConflict], DesignToken]] = None
+ custom_resolver: Optional[Callable[[MergeConflict], DesignToken]] = None,
):
"""
Initialize merger.
@@ -92,9 +95,7 @@ class TokenMerger:
self.custom_resolver = custom_resolver
def merge(
- self,
- collections: List[TokenCollection],
- normalize_names: bool = True
+ self, collections: List[TokenCollection], normalize_names: bool = True
) -> MergeResult:
"""
Merge multiple token collections.
@@ -142,9 +143,7 @@ class TokenMerger:
result.stats["updated_tokens"] += 1
else:
result.stats["conflicts_unresolved"] += 1
- result.warnings.append(
- f"Unresolved conflict for token: {name}"
- )
+ result.warnings.append(f"Unresolved conflict for token: {name}")
result.conflicts.append(conflict)
else:
@@ -201,11 +200,7 @@ class TokenMerger:
return conflict.incoming
- def _update_token(
- self,
- source: DesignToken,
- base: DesignToken
- ) -> DesignToken:
+ def _update_token(self, source: DesignToken, base: DesignToken) -> DesignToken:
"""Create updated token preserving some base metadata."""
# Create new token with source's value but enhanced metadata
updated = DesignToken(
@@ -228,14 +223,10 @@ class TokenMerger:
)
return updated
- def _prefer_source(
- self,
- conflict: MergeConflict,
- preferred_source: str
- ) -> DesignToken:
+ def _prefer_source(self, conflict: MergeConflict, preferred_source: str) -> DesignToken:
"""Prefer token from specific source type."""
- existing_source = conflict.existing.source.split(':')[0]
- incoming_source = conflict.incoming.source.split(':')[0]
+ existing_source = conflict.existing.source.split(":")[0]
+ incoming_source = conflict.incoming.source.split(":")[0]
if incoming_source == preferred_source:
conflict.resolution = f"preferred_{preferred_source}"
@@ -252,8 +243,8 @@ class TokenMerger:
"""Prefer code sources (CSS, SCSS) over design sources."""
code_sources = {"css", "scss", "tailwind"}
- existing_source = conflict.existing.source.split(':')[0]
- incoming_source = conflict.incoming.source.split(':')[0]
+ existing_source = conflict.existing.source.split(":")[0]
+ incoming_source = conflict.incoming.source.split(":")[0]
existing_is_code = existing_source in code_sources
incoming_is_code = incoming_source in code_sources
@@ -270,8 +261,8 @@ class TokenMerger:
def _prefer_by_priority(self, conflict: MergeConflict) -> DesignToken:
"""Choose based on source priority."""
- existing_source = conflict.existing.source.split(':')[0]
- incoming_source = conflict.incoming.source.split(':')[0]
+ existing_source = conflict.existing.source.split(":")[0]
+ incoming_source = conflict.incoming.source.split(":")[0]
existing_priority = self.SOURCE_PRIORITY.get(existing_source, 0)
incoming_priority = self.SOURCE_PRIORITY.get(incoming_source, 0)
@@ -289,12 +280,16 @@ class TokenMerger:
incoming_value = str(conflict.incoming.value).lower()
# Prefer concrete values over variables/references
- existing_is_var = existing_value.startswith('var(') or \
- existing_value.startswith('$') or \
- existing_value.startswith('{')
- incoming_is_var = incoming_value.startswith('var(') or \
- incoming_value.startswith('$') or \
- incoming_value.startswith('{')
+ existing_is_var = (
+ existing_value.startswith("var(")
+ or existing_value.startswith("$")
+ or existing_value.startswith("{")
+ )
+ incoming_is_var = (
+ incoming_value.startswith("var(")
+ or incoming_value.startswith("$")
+ or incoming_value.startswith("{")
+ )
if incoming_is_var and not existing_is_var:
conflict.resolution = "kept_concrete"
@@ -304,8 +299,8 @@ class TokenMerger:
return self._update_token(conflict.incoming, conflict.existing)
# Prefer hex colors over named colors
- existing_is_hex = existing_value.startswith('#')
- incoming_is_hex = incoming_value.startswith('#')
+ existing_is_hex = existing_value.startswith("#")
+ incoming_is_hex = incoming_value.startswith("#")
if incoming_is_hex and not existing_is_hex:
conflict.resolution = "preferred_hex"
@@ -323,20 +318,15 @@ class TokenMerger:
conflict.resolution = "merged_metadata"
# Use incoming value but merge all metadata
- merged_tags = list(set(
- conflict.existing.tags + conflict.incoming.tags
- ))
+ merged_tags = list(set(conflict.existing.tags + conflict.incoming.tags))
- merged_extensions = {
- **conflict.existing.extensions,
- **conflict.incoming.extensions
- }
+ merged_extensions = {**conflict.existing.extensions, **conflict.incoming.extensions}
# Track both sources
- merged_extensions['dss'] = merged_extensions.get('dss', {})
- merged_extensions['dss']['previousSources'] = [
+ merged_extensions["dss"] = merged_extensions.get("dss", {})
+ merged_extensions["dss"]["previousSources"] = [
conflict.existing.source,
- conflict.incoming.source
+ conflict.incoming.source,
]
return DesignToken(
@@ -352,7 +342,8 @@ class TokenMerger:
category=conflict.incoming.category or conflict.existing.category,
tags=merged_tags,
deprecated=conflict.incoming.deprecated or conflict.existing.deprecated,
- deprecated_message=conflict.incoming.deprecated_message or conflict.existing.deprecated_message,
+ deprecated_message=conflict.incoming.deprecated_message
+ or conflict.existing.deprecated_message,
version=conflict.incoming.version,
updated_at=datetime.now(),
extensions=merged_extensions,
@@ -360,15 +351,10 @@ class TokenMerger:
class TokenDiff:
- """
- Compare two token collections and find differences.
- """
+ """Compare two token collections and find differences."""
@staticmethod
- def diff(
- source: TokenCollection,
- target: TokenCollection
- ) -> Dict[str, List]:
+ def diff(source: TokenCollection, target: TokenCollection) -> Dict[str, List]:
"""
Compare two token collections.
@@ -382,19 +368,19 @@ class TokenDiff:
target_names = set(target_by_name.keys())
result = {
- 'added': [], # In target but not source
- 'removed': [], # In source but not target
- 'changed': [], # In both but different value
- 'unchanged': [], # In both with same value
+ "added": [], # In target but not source
+ "removed": [], # In source but not target
+ "changed": [], # In both but different value
+ "unchanged": [], # In both with same value
}
# Find added (in target, not in source)
for name in target_names - source_names:
- result['added'].append(target_by_name[name])
+ result["added"].append(target_by_name[name])
# Find removed (in source, not in target)
for name in source_names - target_names:
- result['removed'].append(source_by_name[name])
+ result["removed"].append(source_by_name[name])
# Find changed/unchanged (in both)
for name in source_names & target_names:
@@ -402,15 +388,17 @@ class TokenDiff:
target_token = target_by_name[name]
if str(source_token.value) != str(target_token.value):
- result['changed'].append({
- 'name': name,
- 'old_value': source_token.value,
- 'new_value': target_token.value,
- 'source_token': source_token,
- 'target_token': target_token,
- })
+ result["changed"].append(
+ {
+ "name": name,
+ "old_value": source_token.value,
+ "new_value": target_token.value,
+ "source_token": source_token,
+ "target_token": target_token,
+ }
+ )
else:
- result['unchanged'].append(source_token)
+ result["unchanged"].append(source_token)
return result
@@ -419,27 +407,25 @@ class TokenDiff:
"""Generate human-readable diff summary."""
lines = ["Token Diff Summary:", "=" * 40]
- if diff_result['added']:
+ if diff_result["added"]:
lines.append(f"\n+ Added ({len(diff_result['added'])}):")
- for token in diff_result['added'][:10]:
+ for token in diff_result["added"][:10]:
lines.append(f" + {token.name}: {token.value}")
- if len(diff_result['added']) > 10:
+ if len(diff_result["added"]) > 10:
lines.append(f" ... and {len(diff_result['added']) - 10} more")
- if diff_result['removed']:
+ if diff_result["removed"]:
lines.append(f"\n- Removed ({len(diff_result['removed'])}):")
- for token in diff_result['removed'][:10]:
+ for token in diff_result["removed"][:10]:
lines.append(f" - {token.name}: {token.value}")
- if len(diff_result['removed']) > 10:
+ if len(diff_result["removed"]) > 10:
lines.append(f" ... and {len(diff_result['removed']) - 10} more")
- if diff_result['changed']:
+ if diff_result["changed"]:
lines.append(f"\n~ Changed ({len(diff_result['changed'])}):")
- for change in diff_result['changed'][:10]:
- lines.append(
- f" ~ {change['name']}: {change['old_value']} → {change['new_value']}"
- )
- if len(diff_result['changed']) > 10:
+ for change in diff_result["changed"][:10]:
+ lines.append(f" ~ {change['name']}: {change['old_value']} → {change['new_value']}")
+ if len(diff_result["changed"]) > 10:
lines.append(f" ... and {len(diff_result['changed']) - 10} more")
lines.append(f"\n Unchanged: {len(diff_result['unchanged'])}")
diff --git a/dss/ingest/scss.py b/dss/ingest/scss.py
index dfd5b3a..b4a016f 100644
--- a/dss/ingest/scss.py
+++ b/dss/ingest/scss.py
@@ -1,5 +1,5 @@
"""
-SCSS Token Source
+SCSS Token Source.
Extracts design tokens from SCSS/Sass variables.
Supports $variable declarations and @use module variables.
@@ -7,7 +7,8 @@ Supports $variable declarations and @use module variables.
import re
from pathlib import Path
-from typing import List, Dict, Optional
+from typing import Dict, List, Optional
+
from .base import DesignToken, TokenCollection, TokenSource
@@ -63,25 +64,25 @@ class SCSSTokenSource(TokenSource):
def _is_file_path(self, source: str) -> bool:
"""Check if source looks like a file path."""
- if '$' in source and ':' in source:
+ if "$" in source and ":" in source:
return False
- if source.endswith('.scss') or source.endswith('.sass'):
+ if source.endswith(".scss") or source.endswith(".sass"):
return True
return Path(source).exists()
def _parse_variables(self, content: str, source_file: str) -> List[DesignToken]:
"""Parse simple $variable declarations."""
tokens = []
- lines = content.split('\n')
+ lines = content.split("\n")
# Pattern for variable declarations
var_pattern = re.compile(
- r'^\s*'
- r'(\$[\w-]+)\s*:\s*' # Variable name
- r'([^;!]+)' # Value
- r'(\s*!default)?' # Optional !default
- r'\s*;',
- re.MULTILINE
+ r"^\s*"
+ r"(\$[\w-]+)\s*:\s*" # Variable name
+ r"([^;!]+)" # Value
+ r"(\s*!default)?" # Optional !default
+ r"\s*;",
+ re.MULTILINE,
)
# Track comments for descriptions
@@ -89,7 +90,7 @@ class SCSSTokenSource(TokenSource):
for i, line in enumerate(lines, 1):
# Check for comment
- comment_match = re.match(r'^\s*//\s*(.+)$', line)
+ comment_match = re.match(r"^\s*//\s*(.+)$", line)
if comment_match:
prev_comment = comment_match.group(1).strip()
continue
@@ -102,12 +103,12 @@ class SCSSTokenSource(TokenSource):
is_default = bool(var_match.group(3))
# Skip if value is a map (handled separately)
- if var_value.startswith('(') and var_value.endswith(')'):
+ if var_value.startswith("(") and var_value.endswith(")"):
prev_comment = ""
continue
# Skip if value references another variable that we can't resolve
- if var_value.startswith('$') and '(' not in var_value:
+ if var_value.startswith("$") and "(" not in var_value:
# It's a simple variable reference, try to extract
pass
@@ -129,7 +130,7 @@ class SCSSTokenSource(TokenSource):
prev_comment = ""
else:
# Reset comment if line doesn't match
- if line.strip() and not line.strip().startswith('//'):
+ if line.strip() and not line.strip().startswith("//"):
prev_comment = ""
return tokens
@@ -139,17 +140,14 @@ class SCSSTokenSource(TokenSource):
tokens = []
# Pattern for map declarations (handles multi-line)
- map_pattern = re.compile(
- r'\$(\w[\w-]*)\s*:\s*\(([\s\S]*?)\)\s*;',
- re.MULTILINE
- )
+ map_pattern = re.compile(r"\$(\w[\w-]*)\s*:\s*\(([\s\S]*?)\)\s*;", re.MULTILINE)
for match in map_pattern.finditer(content):
map_name = match.group(1)
map_content = match.group(2)
# Get line number
- line_num = content[:match.start()].count('\n') + 1
+ line_num = content[: match.start()].count("\n") + 1
# Parse map entries
entries = self._parse_map_entries(map_content)
@@ -177,7 +175,7 @@ class SCSSTokenSource(TokenSource):
# This is a simplified parser for common cases
# Remove comments
- map_content = re.sub(r'//[^\n]*', '', map_content)
+ map_content = re.sub(r"//[^\n]*", "", map_content)
# Split by comma (not inside parentheses)
depth = 0
@@ -185,13 +183,13 @@ class SCSSTokenSource(TokenSource):
parts = []
for char in map_content:
- if char == '(':
+ if char == "(":
depth += 1
current += char
- elif char == ')':
+ elif char == ")":
depth -= 1
current += char
- elif char == ',' and depth == 0:
+ elif char == "," and depth == 0:
parts.append(current.strip())
current = ""
else:
@@ -202,9 +200,9 @@ class SCSSTokenSource(TokenSource):
# Parse each part
for part in parts:
- if ':' in part:
- key, value = part.split(':', 1)
- key = key.strip().strip('"\'')
+ if ":" in part:
+ key, value = part.split(":", 1)
+ key = key.strip().strip("\"'")
value = value.strip()
entries[key] = value
@@ -213,9 +211,9 @@ class SCSSTokenSource(TokenSource):
def _normalize_var_name(self, var_name: str) -> str:
"""Convert SCSS variable name to token name."""
# Remove $ prefix
- name = var_name.lstrip('$')
+ name = var_name.lstrip("$")
# Convert kebab-case and underscores to dots
- name = re.sub(r'[-_]', '.', name)
+ name = re.sub(r"[-_]", ".", name)
return name.lower()
def _process_value(self, value: str) -> str:
@@ -223,12 +221,13 @@ class SCSSTokenSource(TokenSource):
value = value.strip()
# Handle function calls (keep as-is for now)
- if '(' in value and ')' in value:
+ if "(" in value and ")" in value:
return value
# Handle quotes
- if (value.startswith('"') and value.endswith('"')) or \
- (value.startswith("'") and value.endswith("'")):
+ if (value.startswith('"') and value.endswith('"')) or (
+ value.startswith("'") and value.endswith("'")
+ ):
return value[1:-1]
return value
@@ -259,7 +258,7 @@ class SCSSVariableResolver:
return None
# Check if value references other variables
- if '$' in value:
+ if "$" in value:
resolved_value = self._resolve_references(value)
self.resolved[name] = resolved_value
return resolved_value
@@ -273,11 +272,11 @@ class SCSSVariableResolver:
return value
# Find variable references
- var_pattern = re.compile(r'\$[\w-]+')
+ var_pattern = re.compile(r"\$[\w-]+")
def replace_var(match):
var_name = match.group(0)
- resolved = self.resolve(var_name.lstrip('$'))
+ resolved = self.resolve(var_name.lstrip("$"))
return resolved if resolved else var_name
return var_pattern.sub(replace_var, value)
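
A sketch for the SCSS source under the same assumption (async `extract`, raw content accepted); `//` comments become descriptions, and map entries are emitted as separate tokens:

    import asyncio

    from dss.ingest import SCSSTokenSource

    SCSS = """
    // Brand primary
    $color-primary: #3b82f6 !default;

    $spacing: (
      sm: 0.5rem,
      md: 1rem,
    );
    """

    async def main() -> None:
        collection = await SCSSTokenSource().extract(SCSS)
        for token in collection.tokens:
            # expected: color.primary plus one token per $spacing entry
            print(token.name, token.value, token.description)

    asyncio.run(main())
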
diff --git a/dss/ingest/sources/figma.py b/dss/ingest/sources/figma.py
index c0bb8e3..f8c2312 100644
--- a/dss/ingest/sources/figma.py
+++ b/dss/ingest/sources/figma.py
@@ -1,24 +1,19 @@
# dss/ingest/sources/figma.py
"""
-Figma Token Ingestion Source
+Figma Token Ingestion Source.
Extracts design tokens and components from a Figma file.
"""
import asyncio
-from dataclasses import dataclass, field
-from datetime import datetime, timedelta
-import json
-import os
-import re
-from pathlib import Path
+from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
import aiohttp
+from ...models.component import AtomicType, Component
from ..base import DesignToken, TokenCollection, TokenSource, TokenType
-from ...models.component import Component, AtomicType
# Re-using some of the data classes and constants from the original script
# In a real-world scenario, these might be moved to a more central location.
@@ -38,22 +33,25 @@ BOOLEAN_PROPS = {"Checked?", "Selected", "Open", "Expanded", "Loading", "Flip Ic
# DATA CLASSES (from original script)
# =============================================================================
+
@dataclass
class ValidationIssue:
- """Design validation issue"""
+ """Design validation issue."""
+
severity: str
component: str
message: str
suggestion: str = ""
+
# =============================================================================
# RATE LIMITER (from original script)
# =============================================================================
+
class RateLimiter:
- """
- Manages API request rate limiting and exponential backoff.
- """
+ """Manages API request rate limiting and exponential backoff."""
+
def __init__(self, max_per_minute: int = MAX_REQUESTS_PER_MINUTE):
self.max_per_minute = max_per_minute
self.requests: List[float] = []
@@ -63,7 +61,7 @@ class RateLimiter:
async def acquire(self):
"""
- Acquire a slot for an API request, waiting if rate limit is reached
+        Acquire a slot for an API request, waiting if the rate limit is reached
or if an exponential backoff is active.
"""
async with self._lock:
@@ -81,28 +80,27 @@
def handle_429(self):
"""
- Handles a 429 (Too Many Requests) response by initiating an
+        Handle a 429 (Too Many Requests) response by initiating an
exponential backoff.
"""
self.consecutive_429s += 1
- backoff = min(INITIAL_BACKOFF_SECONDS * (2 ** self.consecutive_429s), MAX_BACKOFF_SECONDS)
+ backoff = min(INITIAL_BACKOFF_SECONDS * (2**self.consecutive_429s), MAX_BACKOFF_SECONDS)
self.backoff_until = asyncio.get_event_loop().time() + backoff
return backoff
def reset_backoff(self):
- """
- Resets the exponential backoff counter.
- """
+ """Resets the exponential backoff counter."""
self.consecutive_429s = 0
+
# =============================================================================
# FIGMA API CLIENT (from original script)
# =============================================================================
+
class IntelligentFigmaClient:
- """
- Figma API client with intelligent rate limiting and retry logic.
- """
+ """Figma API client with intelligent rate limiting and retry logic."""
+
def __init__(self, token: str, verbose: bool = False):
self.token = token
self.verbose = verbose
@@ -137,9 +136,9 @@ class IntelligentFigmaClient:
text = await resp.text()
raise Exception(f"API error {resp.status}: {text[:200]}")
return await resp.json()
- except aiohttp.ClientError as e:
+ except aiohttp.ClientError:
if attempt < MAX_RETRIES - 1:
- wait = INITIAL_BACKOFF_SECONDS * (2 ** attempt)
+ wait = INITIAL_BACKOFF_SECONDS * (2**attempt)
await asyncio.sleep(wait)
continue
raise
@@ -151,27 +150,32 @@ class IntelligentFigmaClient:
async def get_file_variables(self, file_key: str) -> Dict:
return await self._request(f"files/{file_key}/variables/local")
+
# =============================================================================
# DESIGN VALIDATOR (stub, from original script)
# =============================================================================
+
class DesignValidator:
"""
A stub validator for design components.
+
In a full implementation, this would validate components against
design system rules.
"""
+
def validate_component(self, component: Dict) -> List[ValidationIssue]:
- return [] # Dummy implementation for now
+ return [] # Dummy implementation for now
+
# =============================================================================
# TOKEN EXTRACTORS (adapted from original script)
# =============================================================================
+
class VariableExtractor:
- """
- Extracts design tokens from Figma variables.
- """
+ """Extracts design tokens from Figma variables."""
+
def extract(self, variables_data: Dict, file_key: str) -> List[DesignToken]:
tokens = []
meta = variables_data.get("meta", {})
@@ -195,7 +199,7 @@ class VariableExtractor:
value = values_by_mode.get(first_mode_id)
if value is None:
continue
-
+
token = self._create_design_token(token_path, resolved_type, value, var_id, file_key)
if token:
tokens.append(token)
@@ -204,7 +208,9 @@ class VariableExtractor:
def _sanitize_path(self, path: str) -> str:
return path.lower().replace(" ", "-").replace("--", "-").strip("-.")
- def _create_design_token(self, name: str, resolved_type: str, value: Any, var_id: str, file_key: str) -> Optional[DesignToken]:
+ def _create_design_token(
+ self, name: str, resolved_type: str, value: Any, var_id: str, file_key: str
+ ) -> Optional[DesignToken]:
extensions = {"figma": {"variableId": var_id, "fileKey": file_key}}
token_type = TokenType.UNKNOWN
final_value = value
@@ -217,48 +223,70 @@ class VariableExtractor:
else:
final_value = self._rgba_to_css(value)
elif resolved_type == "FLOAT":
- token_type = TokenType.DIMENSION if any(x in name.lower() for x in ["spacing", "size", "width", "height", "radius", "gap"]) else TokenType.NUMBER
+ token_type = (
+ TokenType.DIMENSION
+ if any(
+ x in name.lower()
+ for x in ["spacing", "size", "width", "height", "radius", "gap"]
+ )
+ else TokenType.NUMBER
+ )
final_value = f"{value}px" if token_type == TokenType.DIMENSION else value
elif resolved_type == "STRING":
token_type = TokenType.STRING
final_value = str(value)
-
+
if token_type != TokenType.UNKNOWN:
- return DesignToken(name=name, value=final_value, type=token_type, source=f"figma:{file_key}:{var_id}", extensions=extensions)
+ return DesignToken(
+ name=name,
+ value=final_value,
+ type=token_type,
+ source=f"figma:{file_key}:{var_id}",
+ extensions=extensions,
+ )
return None
def _rgba_to_css(self, color: Dict) -> str:
- r, g, b, a = int(color.get("r", 0) * 255), int(color.get("g", 0) * 255), int(color.get("b", 0) * 255), round(color.get("a", 1), 3)
+ r, g, b, a = (
+ int(color.get("r", 0) * 255),
+ int(color.get("g", 0) * 255),
+ int(color.get("b", 0) * 255),
+ round(color.get("a", 1), 3),
+ )
return f"#{r:02x}{g:02x}{b:02x}" if a == 1 else f"rgba({r}, {g}, {b}, {a})"
+
class StyleExtractor:
def extract(self, file_data: Dict) -> List[DesignToken]:
# This is a simplified version for brevity. A full implementation
# would be more robust like the original script.
return []
+
class ComponentExtractor:
def __init__(self, validator: DesignValidator, verbose: bool = False):
self.validator = validator
self.verbose = verbose
def _find_all_components_recursive(self, node: Dict, components: Dict, component_sets: Dict):
- if node.get('type') == 'COMPONENT':
- if node.get('id') not in components:
- components[node.get('id')] = node
- if node.get('type') == 'COMPONENT_SET':
- if node.get('id') not in component_sets:
- component_sets[node.get('id')] = node
+ if node.get("type") == "COMPONENT":
+ if node.get("id") not in components:
+ components[node.get("id")] = node
+ if node.get("type") == "COMPONENT_SET":
+ if node.get("id") not in component_sets:
+ component_sets[node.get("id")] = node
for child in node.get("children", []):
self._find_all_components_recursive(child, components, component_sets)
def extract(self, file_data: Dict) -> List[Component]:
raw_components = {}
raw_component_sets = {}
- self._find_all_components_recursive(file_data['document'], raw_components, raw_component_sets)
-
+ self._find_all_components_recursive(
+ file_data["document"], raw_components, raw_component_sets
+ )
+
component_models: List[Component] = []
-
+
# Temporary map to hold component set data
set_map = {}
for set_id, set_data in raw_component_sets.items():
@@ -268,7 +296,7 @@ class ComponentExtractor:
"key": set_data.get("key", ""),
"description": set_data.get("description", ""),
"variants": [],
- "children_ids": [child.get("id") for child in set_data.get("children", [])]
+ "children_ids": [child.get("id") for child in set_data.get("children", [])],
}
# Process individual components (variants)
@@ -277,52 +305,60 @@ class ComponentExtractor:
if set_id and set_id in set_map:
variant_name = comp_data.get("name", "")
variant_props = self._parse_variant_name(variant_name)
- set_map[set_id]["variants"].append({
- "id": comp_id,
- "name": variant_name,
- "props": variant_props,
- "figma_node_id": comp_id,
- })
+ set_map[set_id]["variants"].append(
+ {
+ "id": comp_id,
+ "name": variant_name,
+ "props": variant_props,
+ "figma_node_id": comp_id,
+ }
+ )
# Create Component models from the processed sets
for set_id, set_data in set_map.items():
-
# Classify the component
classification = self._classify_component(set_data)
-
+
# Get variant names
- variant_names = [v['name'] for v in set_data['variants']]
+ variant_names = [v["name"] for v in set_data["variants"]]
# Create the component model
component_model = Component(
figma_node_id=set_id,
- name=set_data['name'],
+ name=set_data["name"],
source="figma",
- description=set_data.get('description', ''),
+ description=set_data.get("description", ""),
classification=classification,
variants=variant_names,
- props={}, # Prop schema can be enriched later
- dependencies=[], # Dependencies can be determined later
- sub_components=set_data.get('children_ids', [])
+ props={}, # Prop schema can be enriched later
+ dependencies=[], # Dependencies can be determined later
+ sub_components=set_data.get("children_ids", []),
)
component_models.append(component_model)
-
+
return component_models
def _classify_component(self, set_data: Dict) -> AtomicType:
- """
- Classify a component as a PRIMITIVE_COMPONENT or COMPOSITE_COMPONENT based on heuristics.
- """
- name = set_data.get('name', '').lower()
- num_children = len(set_data.get('children_ids', []))
+ """Classify a component as a PRIMITIVE_COMPONENT or COMPOSITE_COMPONENT based on heuristics."""
+ name = set_data.get("name", "").lower()
+ num_children = len(set_data.get("children_ids", []))
# Heuristics for Primitive Components
- primitive_keywords = ['icon', 'button', 'input', 'text', 'avatar', 'checkbox', 'radio', 'switch']
+ primitive_keywords = [
+ "icon",
+ "button",
+ "input",
+ "text",
+ "avatar",
+ "checkbox",
+ "radio",
+ "switch",
+ ]
if any(keyword in name for keyword in primitive_keywords):
return AtomicType.PRIMITIVE_COMPONENT
-
+
# Heuristics for Composite Components
- composite_keywords = ['card', 'modal', 'navbar', 'sidebar', 'form']
+ composite_keywords = ["card", "modal", "navbar", "sidebar", "form"]
if any(keyword in name for keyword in composite_keywords):
return AtomicType.COMPOSITE_COMPONENT
@@ -331,23 +367,36 @@ class ComponentExtractor:
return AtomicType.PRIMITIVE_COMPONENT
elif num_children > 0:
return AtomicType.COMPOSITE_COMPONENT
-
+
return AtomicType.UNKNOWN
+
def _parse_variant_name(self, name: str) -> Dict[str, str]:
- return {key.strip(): value.strip() for part in name.split(", ") if "=" in part for key, value in [part.split("=", 1)]}
+ return {
+ key.strip(): value.strip()
+ for part in name.split(", ")
+ if "=" in part
+ for key, value in [part.split("=", 1)]
+ }
def _get_css_pseudo(self, state_name: str) -> str:
- return {"Hover": ":hover", "Focused": ":focus", "Focus": ":focus", "Pressed": ":active", "Active": ":active", "Disabled": ":disabled"}.get(state_name, "")
+ return {
+ "Hover": ":hover",
+ "Focused": ":focus",
+ "Focus": ":focus",
+ "Pressed": ":active",
+ "Active": ":active",
+ "Disabled": ":disabled",
+ }.get(state_name, "")
# =============================================================================
# FIGMA TOKEN SOURCE
# =============================================================================
+
class FigmaTokenSource(TokenSource):
- """
- Extracts design tokens and components from a Figma file.
- """
+ """Extracts design tokens and components from a Figma file."""
+
def __init__(self, figma_token: str, verbose: bool = False):
self.figma_token = figma_token
self.verbose = verbose
@@ -371,17 +420,19 @@ class FigmaTokenSource(TokenSource):
validator = DesignValidator()
async with IntelligentFigmaClient(self.figma_token, self.verbose) as client:
- if self.verbose: print(f"Fetching Figma file: {file_key}")
+ if self.verbose:
+ print(f"Fetching Figma file: {file_key}")
file_task = client.get_file(file_key)
vars_task = client.get_file_variables(file_key)
-
+
file_data = await file_task
try:
vars_data = await vars_task
except Exception:
vars_data = {"meta": {"variables": {}, "variableCollections": {}}}
- if self.verbose: print("Extracting tokens and components...")
+ if self.verbose:
+ print("Extracting tokens and components...")
var_extractor = VariableExtractor()
style_extractor = StyleExtractor()
comp_extractor = ComponentExtractor(validator, self.verbose)
@@ -391,14 +442,16 @@ class FigmaTokenSource(TokenSource):
components = comp_extractor.extract(file_data)
all_tokens = variable_tokens + style_tokens
-
+
token_collection = TokenCollection(
name=f"Figma Tokens for {file_data.get('name', file_key)}",
tokens=all_tokens,
- sources=[f"figma:{file_key}"]
+ sources=[f"figma:{file_key}"],
)
-
+
if self.verbose:
- print(f"Extraction complete. Found {len(token_collection)} tokens and {len(components)} components.")
+ print(
+ f"Extraction complete. Found {len(token_collection)} tokens and {len(components)} components."
+ )
return token_collection, components
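
End-to-end, the Figma source is driven like the others, just with credentials; a sketch assuming `extract` takes the Figma file key (the env var and key below are placeholders):

    import asyncio
    import os

    from dss.ingest.sources.figma import FigmaTokenSource

    async def main() -> None:
        source = FigmaTokenSource(figma_token=os.environ["FIGMA_TOKEN"], verbose=True)
        tokens, components = await source.extract("FILE_KEY")  # placeholder key
        print(f"{len(tokens)} tokens, {len(components)} components")

    asyncio.run(main())
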
diff --git a/dss/ingest/tailwind.py b/dss/ingest/tailwind.py
index 370b609..e90fe40 100644
--- a/dss/ingest/tailwind.py
+++ b/dss/ingest/tailwind.py
@@ -1,15 +1,15 @@
"""
-Tailwind Token Source
+Tailwind Token Source.
Extracts design tokens from Tailwind CSS configuration files.
Supports tailwind.config.js/ts and CSS-based Tailwind v4 configurations.
"""
import re
-import json
from pathlib import Path
-from typing import List, Dict, Any, Optional
-from .base import DesignToken, TokenCollection, TokenSource, TokenCategory
+from typing import Dict, List, Optional
+
+from .base import DesignToken, TokenCategory, TokenCollection, TokenSource
class TailwindTokenSource(TokenSource):
@@ -24,35 +24,35 @@ class TailwindTokenSource(TokenSource):
# Tailwind category mappings
TAILWIND_CATEGORIES = {
- 'colors': TokenCategory.COLORS,
- 'backgroundColor': TokenCategory.COLORS,
- 'textColor': TokenCategory.COLORS,
- 'borderColor': TokenCategory.COLORS,
- 'spacing': TokenCategory.SPACING,
- 'padding': TokenCategory.SPACING,
- 'margin': TokenCategory.SPACING,
- 'gap': TokenCategory.SPACING,
- 'fontSize': TokenCategory.TYPOGRAPHY,
- 'fontFamily': TokenCategory.TYPOGRAPHY,
- 'fontWeight': TokenCategory.TYPOGRAPHY,
- 'lineHeight': TokenCategory.TYPOGRAPHY,
- 'letterSpacing': TokenCategory.TYPOGRAPHY,
- 'width': TokenCategory.SIZING,
- 'height': TokenCategory.SIZING,
- 'maxWidth': TokenCategory.SIZING,
- 'maxHeight': TokenCategory.SIZING,
- 'minWidth': TokenCategory.SIZING,
- 'minHeight': TokenCategory.SIZING,
- 'borderRadius': TokenCategory.BORDERS,
- 'borderWidth': TokenCategory.BORDERS,
- 'boxShadow': TokenCategory.SHADOWS,
- 'dropShadow': TokenCategory.SHADOWS,
- 'opacity': TokenCategory.OPACITY,
- 'zIndex': TokenCategory.Z_INDEX,
- 'transitionDuration': TokenCategory.MOTION,
- 'transitionTimingFunction': TokenCategory.MOTION,
- 'animation': TokenCategory.MOTION,
- 'screens': TokenCategory.BREAKPOINTS,
+ "colors": TokenCategory.COLORS,
+ "backgroundColor": TokenCategory.COLORS,
+ "textColor": TokenCategory.COLORS,
+ "borderColor": TokenCategory.COLORS,
+ "spacing": TokenCategory.SPACING,
+ "padding": TokenCategory.SPACING,
+ "margin": TokenCategory.SPACING,
+ "gap": TokenCategory.SPACING,
+ "fontSize": TokenCategory.TYPOGRAPHY,
+ "fontFamily": TokenCategory.TYPOGRAPHY,
+ "fontWeight": TokenCategory.TYPOGRAPHY,
+ "lineHeight": TokenCategory.TYPOGRAPHY,
+ "letterSpacing": TokenCategory.TYPOGRAPHY,
+ "width": TokenCategory.SIZING,
+ "height": TokenCategory.SIZING,
+ "maxWidth": TokenCategory.SIZING,
+ "maxHeight": TokenCategory.SIZING,
+ "minWidth": TokenCategory.SIZING,
+ "minHeight": TokenCategory.SIZING,
+ "borderRadius": TokenCategory.BORDERS,
+ "borderWidth": TokenCategory.BORDERS,
+ "boxShadow": TokenCategory.SHADOWS,
+ "dropShadow": TokenCategory.SHADOWS,
+ "opacity": TokenCategory.OPACITY,
+ "zIndex": TokenCategory.Z_INDEX,
+ "transitionDuration": TokenCategory.MOTION,
+ "transitionTimingFunction": TokenCategory.MOTION,
+ "animation": TokenCategory.MOTION,
+ "screens": TokenCategory.BREAKPOINTS,
}
@property
@@ -77,9 +77,9 @@ class TailwindTokenSource(TokenSource):
source_file = str(config_path.absolute())
# Parse based on file type
- if config_path.suffix in ('.js', '.cjs', '.mjs', '.ts'):
+ if config_path.suffix in (".js", ".cjs", ".mjs", ".ts"):
tokens = self._parse_js_config(content, source_file)
- elif config_path.suffix == '.css':
+ elif config_path.suffix == ".css":
tokens = self._parse_css_config(content, source_file)
else:
tokens = []
@@ -101,10 +101,10 @@ class TailwindTokenSource(TokenSource):
# If it's a directory, look for config files
if path.is_dir():
config_names = [
- 'tailwind.config.js',
- 'tailwind.config.cjs',
- 'tailwind.config.mjs',
- 'tailwind.config.ts',
+ "tailwind.config.js",
+ "tailwind.config.cjs",
+ "tailwind.config.mjs",
+ "tailwind.config.ts",
]
for name in config_names:
config_path = path / name
@@ -121,15 +121,9 @@ class TailwindTokenSource(TokenSource):
# This handles common patterns but may not cover all edge cases
# Look for theme: { ... } or theme.extend: { ... }
- theme_match = re.search(
- r'theme\s*:\s*\{([\s\S]*?)\n\s*\}(?=\s*[,}])',
- content
- )
+ theme_match = re.search(r"theme\s*:\s*\{([\s\S]*?)\n\s*\}(?=\s*[,}])", content)
- extend_match = re.search(
- r'extend\s*:\s*\{([\s\S]*?)\n\s{4}\}',
- content
- )
+ extend_match = re.search(r"extend\s*:\s*\{([\s\S]*?)\n\s{4}\}", content)
if extend_match:
theme_content = extend_match.group(1)
@@ -146,37 +140,23 @@ class TailwindTokenSource(TokenSource):
tokens = []
# Find property blocks like: colors: { primary: '#3B82F6', ... }
- prop_pattern = re.compile(
- r"(\w+)\s*:\s*\{([^{}]*(?:\{[^{}]*\}[^{}]*)*)\}",
- re.MULTILINE
- )
+ prop_pattern = re.compile(r"(\w+)\s*:\s*\{([^{}]*(?:\{[^{}]*\}[^{}]*)*)\}", re.MULTILINE)
for match in prop_pattern.finditer(content):
category_name = match.group(1)
category_content = match.group(2)
- category = self.TAILWIND_CATEGORIES.get(
- category_name, TokenCategory.OTHER
- )
+ category = self.TAILWIND_CATEGORIES.get(category_name, TokenCategory.OTHER)
# Parse values in this category
tokens.extend(
- self._parse_category_values(
- category_name,
- category_content,
- source_file,
- category
- )
+ self._parse_category_values(category_name, category_content, source_file, category)
)
return tokens
def _parse_category_values(
- self,
- category_name: str,
- content: str,
- source_file: str,
- category: TokenCategory
+ self, category_name: str, content: str, source_file: str, category: TokenCategory
) -> List[DesignToken]:
"""Parse values within a category."""
tokens = []
@@ -192,11 +172,11 @@ class TailwindTokenSource(TokenSource):
value = match.group(2).strip()
# Skip function calls and complex values for now
- if '(' in value or '{' in value:
+ if "(" in value or "{" in value:
continue
# Skip references to other values
- if value.startswith('colors.') or value.startswith('theme('):
+ if value.startswith("colors.") or value.startswith("theme("):
continue
token = DesignToken(
@@ -218,18 +198,13 @@ class TailwindTokenSource(TokenSource):
tokens = []
# Tailwind v4 uses @theme directive
- theme_match = re.search(
- r'@theme\s*\{([\s\S]*?)\}',
- content
- )
+ theme_match = re.search(r"@theme\s*\{([\s\S]*?)\}", content)
if theme_match:
theme_content = theme_match.group(1)
# Parse CSS custom properties
- var_pattern = re.compile(
- r'(--[\w-]+)\s*:\s*([^;]+);'
- )
+ var_pattern = re.compile(r"(--[\w-]+)\s*:\s*([^;]+);")
for match in var_pattern.finditer(theme_content):
var_name = match.group(1)
@@ -254,23 +229,23 @@ class TailwindTokenSource(TokenSource):
def _normalize_var_name(self, var_name: str) -> str:
"""Convert CSS variable name to token name."""
- name = var_name.lstrip('-')
- name = name.replace('-', '.')
+ name = var_name.lstrip("-")
+ name = name.replace("-", ".")
return name.lower()
def _category_from_var_name(self, var_name: str) -> TokenCategory:
"""Determine category from variable name."""
name_lower = var_name.lower()
- if 'color' in name_lower or 'bg' in name_lower:
+ if "color" in name_lower or "bg" in name_lower:
return TokenCategory.COLORS
- if 'spacing' in name_lower or 'gap' in name_lower:
+ if "spacing" in name_lower or "gap" in name_lower:
return TokenCategory.SPACING
- if 'font' in name_lower or 'text' in name_lower:
+ if "font" in name_lower or "text" in name_lower:
return TokenCategory.TYPOGRAPHY
- if 'radius' in name_lower or 'border' in name_lower:
+ if "radius" in name_lower or "border" in name_lower:
return TokenCategory.BORDERS
- if 'shadow' in name_lower:
+ if "shadow" in name_lower:
return TokenCategory.SHADOWS
return TokenCategory.OTHER
@@ -285,14 +260,44 @@ class TailwindClassExtractor:
# Common Tailwind class prefixes
TAILWIND_PREFIXES = [
- 'bg-', 'text-', 'border-', 'ring-',
- 'p-', 'px-', 'py-', 'pt-', 'pr-', 'pb-', 'pl-',
- 'm-', 'mx-', 'my-', 'mt-', 'mr-', 'mb-', 'ml-',
- 'w-', 'h-', 'min-w-', 'min-h-', 'max-w-', 'max-h-',
- 'flex-', 'grid-', 'gap-',
- 'font-', 'text-', 'leading-', 'tracking-',
- 'rounded-', 'shadow-', 'opacity-',
- 'z-', 'transition-', 'duration-', 'ease-',
+ "bg-",
+ "text-",
+ "border-",
+ "ring-",
+ "p-",
+ "px-",
+ "py-",
+ "pt-",
+ "pr-",
+ "pb-",
+ "pl-",
+ "m-",
+ "mx-",
+ "my-",
+ "mt-",
+ "mr-",
+ "mb-",
+ "ml-",
+ "w-",
+ "h-",
+ "min-w-",
+ "min-h-",
+ "max-w-",
+ "max-h-",
+ "flex-",
+ "grid-",
+ "gap-",
+ "font-",
+ "text-",
+ "leading-",
+ "tracking-",
+ "rounded-",
+ "shadow-",
+ "opacity-",
+ "z-",
+ "transition-",
+ "duration-",
+ "ease-",
]
async def extract_usage(self, source: str) -> Dict[str, List[str]]:
@@ -309,9 +314,7 @@ class TailwindClassExtractor:
usage: Dict[str, List[str]] = {}
# Find className or class attributes
- class_pattern = re.compile(
- r'(?:className|class)\s*=\s*["\']([^"\']+)["\']'
- )
+ class_pattern = re.compile(r'(?:className|class)\s*=\s*["\']([^"\']+)["\']')
for match in class_pattern.finditer(content):
classes = match.group(1).split()
@@ -320,7 +323,7 @@ class TailwindClassExtractor:
# Check if it's a Tailwind class
for prefix in self.TAILWIND_PREFIXES:
if cls.startswith(prefix):
- category = prefix.rstrip('-')
+ category = prefix.rstrip("-")
if category not in usage:
usage[category] = []
if cls not in usage[category]:
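
To make the extractor concrete, here is a self-contained run of the same className regex and prefix matching over a JSX-style snippet. The prefix list is abridged from the table above, and the first-matching-prefix break is an assumption about the elided end of the hunk:

```python
import re
from typing import Dict, List

# Same pattern as TailwindClassExtractor
CLASS_PATTERN = re.compile(r'(?:className|class)\s*=\s*["\']([^"\']+)["\']')

# Abridged; the real extractor uses the full TAILWIND_PREFIXES table
PREFIXES = ["bg-", "text-", "p-", "rounded-", "shadow-"]

source = '<div className="bg-blue-500 p-4 rounded-lg custom-widget">'

usage: Dict[str, List[str]] = {}
for match in CLASS_PATTERN.finditer(source):
    for cls in match.group(1).split():
        for prefix in PREFIXES:
            if cls.startswith(prefix):
                category = prefix.rstrip("-")
                if cls not in usage.setdefault(category, []):
                    usage[category].append(cls)
                break  # assumption: first matching prefix wins

print(usage)
# {'bg': ['bg-blue-500'], 'p': ['p-4'], 'rounded': ['rounded-lg']}
```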
diff --git a/dss/mcp/__init__.py b/dss/mcp/__init__.py
index 28fbddb..a0f3ea8 100644
--- a/dss/mcp/__init__.py
+++ b/dss/mcp/__init__.py
@@ -1,5 +1,5 @@
"""
-DSS MCP Server
+DSS MCP Server.
Model Context Protocol server for Design System Server.
Provides project-isolated context and tools to Claude chat instances.
diff --git a/dss/mcp/audit.py b/dss/mcp/audit.py
index a11f932..01a9685 100644
--- a/dss/mcp/audit.py
+++ b/dss/mcp/audit.py
@@ -1,5 +1,5 @@
"""
-DSS MCP Audit Module
+DSS MCP Audit Module.
Tracks all operations for compliance, debugging, and audit trails.
Maintains immutable logs of all state-changing operations with before/after snapshots.
@@ -7,15 +7,14 @@ Maintains immutable logs of all state-changing operations with before/after snap
import json
import uuid
-from typing import Optional, Dict, Any
from datetime import datetime
from enum import Enum
-
-from dss.storage.json_store import ActivityLog, append_jsonl, read_jsonl, SYSTEM_DIR # JSON storage
+from typing import Any, Dict, Optional
class AuditEventType(Enum):
- """Types of auditable events"""
+ """Types of auditable events."""
+
TOOL_CALL = "tool_call"
CREDENTIAL_ACCESS = "credential_access"
CREDENTIAL_CREATE = "credential_create"
@@ -53,7 +52,7 @@ class AuditLog:
result: Optional[Dict[str, Any]] = None,
error: Optional[str] = None,
before_state: Optional[Dict[str, Any]] = None,
- after_state: Optional[Dict[str, Any]] = None
+ after_state: Optional[Dict[str, Any]] = None,
) -> str:
"""
Log an operation to the audit trail.
@@ -79,26 +78,29 @@ class AuditLog:
scrubbed_args = AuditLog._scrub_sensitive_data(args)
with get_connection() as conn:
- conn.execute("""
+ conn.execute(
+ """
INSERT INTO audit_log (
id, event_type, operation_name, operation_id, user_id,
project_id, args, result, error, before_state, after_state,
created_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- """, (
- audit_id,
- event_type.value,
- operation_name,
- operation_id,
- user_id,
- project_id,
- json.dumps(scrubbed_args),
- json.dumps(result) if result else None,
- error,
- json.dumps(before_state) if before_state else None,
- json.dumps(after_state) if after_state else None,
- datetime.utcnow().isoformat()
- ))
+ """,
+ (
+ audit_id,
+ event_type.value,
+ operation_name,
+ operation_id,
+ user_id,
+ project_id,
+ json.dumps(scrubbed_args),
+ json.dumps(result) if result else None,
+ error,
+ json.dumps(before_state) if before_state else None,
+ json.dumps(after_state) if after_state else None,
+ datetime.utcnow().isoformat(),
+ ),
+ )
return audit_id
@@ -108,7 +110,7 @@ class AuditLog:
user_id: Optional[str] = None,
operation_name: Optional[str] = None,
limit: int = 100,
- offset: int = 0
+ offset: int = 0,
) -> list:
"""
Get operation history with optional filtering.
@@ -149,9 +151,7 @@ class AuditLog:
@staticmethod
def get_audit_trail(
- start_date: datetime,
- end_date: datetime,
- event_type: Optional[str] = None
+ start_date: datetime, end_date: datetime, event_type: Optional[str] = None
) -> list:
"""
Get audit trail for a date range.
@@ -185,10 +185,7 @@ class AuditLog:
return [dict(row) for row in cursor.fetchall()]
@staticmethod
- def get_user_activity(
- user_id: str,
- days: int = 30
- ) -> Dict[str, Any]:
+ def get_user_activity(user_id: str, days: int = 30) -> Dict[str, Any]:
"""
Get user activity summary for the past N days.
@@ -207,34 +204,46 @@ class AuditLog:
cursor = conn.cursor()
# Get total operations
- cursor.execute("""
+ cursor.execute(
+ """
SELECT COUNT(*) FROM audit_log
WHERE user_id = ? AND created_at >= ?
- """, (user_id, start_date.isoformat()))
+ """,
+ (user_id, start_date.isoformat()),
+ )
total_ops = cursor.fetchone()[0]
# Get operations by type
- cursor.execute("""
+ cursor.execute(
+ """
SELECT event_type, COUNT(*) as count
FROM audit_log
WHERE user_id = ? AND created_at >= ?
GROUP BY event_type
ORDER BY count DESC
- """, (user_id, start_date.isoformat()))
+ """,
+ (user_id, start_date.isoformat()),
+ )
ops_by_type = {row[0]: row[1] for row in cursor.fetchall()}
# Get error count
- cursor.execute("""
+ cursor.execute(
+ """
SELECT COUNT(*) FROM audit_log
WHERE user_id = ? AND created_at >= ? AND error IS NOT NULL
- """, (user_id, start_date.isoformat()))
+ """,
+ (user_id, start_date.isoformat()),
+ )
errors = cursor.fetchone()[0]
# Get unique projects
- cursor.execute("""
+ cursor.execute(
+ """
SELECT COUNT(DISTINCT project_id) FROM audit_log
WHERE user_id = ? AND created_at >= ?
- """, (user_id, start_date.isoformat()))
+ """,
+ (user_id, start_date.isoformat()),
+ )
projects = cursor.fetchone()[0]
return {
@@ -244,14 +253,11 @@ class AuditLog:
"operations_by_type": ops_by_type,
"errors": errors,
"projects_touched": projects,
- "average_ops_per_day": round(total_ops / days, 2) if days > 0 else 0
+ "average_ops_per_day": round(total_ops / days, 2) if days > 0 else 0,
}
@staticmethod
- def search_audit_log(
- search_term: str,
- limit: int = 50
- ) -> list:
+ def search_audit_log(search_term: str, limit: int = 50) -> list:
"""
Search audit log by operation name or error message.
@@ -265,12 +271,15 @@ class AuditLog:
with get_connection() as conn:
cursor = conn.cursor()
- cursor.execute("""
+ cursor.execute(
+ """
SELECT * FROM audit_log
WHERE operation_name LIKE ? OR error LIKE ?
ORDER BY created_at DESC
LIMIT ?
- """, (f"%{search_term}%", f"%{search_term}%", limit))
+ """,
+ (f"%{search_term}%", f"%{search_term}%", limit),
+ )
return [dict(row) for row in cursor.fetchall()]
@@ -282,8 +291,14 @@ class AuditLog:
Removes API tokens, passwords, and other secrets.
"""
sensitive_keys = {
- 'token', 'api_key', 'secret', 'password',
- 'credential', 'auth', 'figma_token', 'encrypted_data'
+ "token",
+ "api_key",
+ "secret",
+ "password",
+ "credential",
+ "auth",
+ "figma_token",
+ "encrypted_data",
}
scrubbed = {}
@@ -294,8 +309,7 @@ class AuditLog:
scrubbed[key] = AuditLog._scrub_sensitive_data(value)
elif isinstance(value, list):
scrubbed[key] = [
- AuditLog._scrub_sensitive_data(item)
- if isinstance(item, dict) else item
+ AuditLog._scrub_sensitive_data(item) if isinstance(item, dict) else item
for item in value
]
else:
@@ -305,9 +319,10 @@ class AuditLog:
@staticmethod
def ensure_audit_log_table():
- """Ensure audit_log table exists"""
+ """Ensure audit_log table exists."""
with get_connection() as conn:
- conn.execute("""
+ conn.execute(
+ """
CREATE TABLE IF NOT EXISTS audit_log (
id TEXT PRIMARY KEY,
event_type TEXT NOT NULL,
@@ -322,19 +337,12 @@ class AuditLog:
after_state TEXT,
created_at TEXT DEFAULT CURRENT_TIMESTAMP
)
- """)
- conn.execute(
- "CREATE INDEX IF NOT EXISTS idx_audit_user ON audit_log(user_id)"
- )
- conn.execute(
- "CREATE INDEX IF NOT EXISTS idx_audit_project ON audit_log(project_id)"
- )
- conn.execute(
- "CREATE INDEX IF NOT EXISTS idx_audit_type ON audit_log(event_type)"
- )
- conn.execute(
- "CREATE INDEX IF NOT EXISTS idx_audit_date ON audit_log(created_at)"
+ """
)
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_user ON audit_log(user_id)")
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_project ON audit_log(project_id)")
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_type ON audit_log(event_type)")
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_date ON audit_log(created_at)")
# Initialize table on import
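
The recursive scrubber is easiest to understand from its input/output behaviour. A hedged re-implementation follows; the key names come from the sensitive_keys set in the hunk, but the matching rule (substring test on the lowercased key) is an assumption, since the actual comparison line falls outside the hunk:

```python
def scrub(data: dict, sensitive: set) -> dict:
    # Assumed rule: a key is sensitive if any marker appears in its name
    out = {}
    for key, value in data.items():
        if any(marker in key.lower() for marker in sensitive):
            out[key] = "***REDACTED***"
        elif isinstance(value, dict):
            out[key] = scrub(value, sensitive)
        elif isinstance(value, list):
            out[key] = [scrub(v, sensitive) if isinstance(v, dict) else v for v in value]
        else:
            out[key] = value
    return out


print(scrub(
    {"figma_token": "fig_abc", "args": {"api_key": "k", "file": "x.json"}},
    {"token", "api_key", "secret", "password"},
))
# {'figma_token': '***REDACTED***', 'args': {'api_key': '***REDACTED***', 'file': 'x.json'}}
```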
diff --git a/dss/mcp/config.py b/dss/mcp/config.py
index 5a1b242..8d33ba2 100644
--- a/dss/mcp/config.py
+++ b/dss/mcp/config.py
@@ -1,5 +1,5 @@
"""
-MCP Server Configuration
+MCP Server Configuration.
Loads configuration from environment variables and provides settings
for the MCP server, integrations, and security.
@@ -8,8 +8,9 @@ for the MCP server, integrations, and security.
import os
from pathlib import Path
from typing import Optional
-from dotenv import load_dotenv
+
from cryptography.fernet import Fernet
+from dotenv import load_dotenv
# Load environment variables
load_dotenv()
@@ -22,17 +23,14 @@ CACHE_DIR = PROJECT_ROOT / os.getenv("DSS_CACHE_DIR", ".dss/cache")
class MCPConfig:
- """MCP Server Configuration"""
+ """MCP Server Configuration."""
# Server Settings
HOST: str = os.getenv("DSS_MCP_HOST", "127.0.0.1")
PORT: int = int(os.getenv("DSS_MCP_PORT", "3457"))
# Database
- DATABASE_PATH: str = os.getenv(
- "DATABASE_PATH",
- str(STORAGE_DIR / "dss.db")
- )
+ DATABASE_PATH: str = os.getenv("DATABASE_PATH", str(STORAGE_DIR / "dss.db"))
# Context Caching
CONTEXT_CACHE_TTL: int = int(os.getenv("DSS_CONTEXT_CACHE_TTL", "300")) # 5 minutes
@@ -42,14 +40,14 @@ class MCPConfig:
@classmethod
def get_cipher(cls) -> Optional[Fernet]:
- """Get Fernet cipher for encryption/decryption"""
+ """Get Fernet cipher for encryption/decryption."""
if not cls.ENCRYPTION_KEY:
return None
return Fernet(cls.ENCRYPTION_KEY.encode())
@classmethod
def generate_encryption_key(cls) -> str:
- """Generate a new encryption key"""
+ """Generate a new encryption key."""
return Fernet.generate_key().decode()
# Redis/Celery for worker pool
@@ -61,16 +59,14 @@ class MCPConfig:
CIRCUIT_BREAKER_FAILURE_THRESHOLD: int = int(
os.getenv("CIRCUIT_BREAKER_FAILURE_THRESHOLD", "5")
)
- CIRCUIT_BREAKER_TIMEOUT_SECONDS: int = int(
- os.getenv("CIRCUIT_BREAKER_TIMEOUT_SECONDS", "60")
- )
+ CIRCUIT_BREAKER_TIMEOUT_SECONDS: int = int(os.getenv("CIRCUIT_BREAKER_TIMEOUT_SECONDS", "60"))
# Logging
LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO").upper()
class IntegrationConfig:
- """External Integration Configuration"""
+ """External Integration Configuration."""
# Figma
FIGMA_TOKEN: Optional[str] = os.getenv("FIGMA_TOKEN")
@@ -107,11 +103,13 @@ def validate_config() -> list[str]:
if not mcp_config.ENCRYPTION_KEY:
warnings.append(
"DSS_ENCRYPTION_KEY not set. Integration credentials will not be encrypted. "
- f"Generate one with: python -c \"from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())\""
+ 'Generate one with: python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"'
)
if not integration_config.ANTHROPIC_API_KEY:
- warnings.append("ANTHROPIC_API_KEY not set. Sequential Thinking tools will not be available.")
+ warnings.append(
+ "ANTHROPIC_API_KEY not set. Sequential Thinking tools will not be available."
+ )
if not integration_config.FIGMA_TOKEN:
warnings.append("FIGMA_TOKEN not set. Figma tools will not be available.")
@@ -126,11 +124,11 @@ if __name__ == "__main__":
print(f"Context Cache TTL: {mcp_config.CONTEXT_CACHE_TTL}s")
print(f"Encryption Key: {'✓ Set' if mcp_config.ENCRYPTION_KEY else '✗ Not Set'}")
print(f"Redis URL: {mcp_config.REDIS_URL}")
- print(f"\nCircuit Breaker:")
+ print("\nCircuit Breaker:")
print(f" Failure Threshold: {mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD}")
print(f" Timeout: {mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS}s")
- print(f"\n=== Integration Configuration ===\n")
+ print("\n=== Integration Configuration ===\n")
print(f"Figma Token: {'✓ Set' if integration_config.FIGMA_TOKEN else '✗ Not Set'}")
print(f"Anthropic API Key: {'✓ Set' if integration_config.ANTHROPIC_API_KEY else '✗ Not Set'}")
print(f"Jira URL: {integration_config.JIRA_URL or '✗ Not Set'}")
@@ -138,8 +136,8 @@ if __name__ == "__main__":
warnings = validate_config()
if warnings:
- print(f"\n⚠️ Warnings:")
+ print("\n⚠️ Warnings:")
for warning in warnings:
print(f" - {warning}")
else:
- print(f"\n✓ Configuration is valid")
+ print("\n✓ Configuration is valid")
diff --git a/dss/mcp/context/project_context.py b/dss/mcp/context/project_context.py
index fd912d4..5fffa20 100644
--- a/dss/mcp/context/project_context.py
+++ b/dss/mcp/context/project_context.py
@@ -1,30 +1,31 @@
"""
-Project Context Manager
+Project Context Manager.
Provides cached, project-isolated context for Claude MCP sessions.
Loads all relevant project data (components, tokens, config, health, etc.)
and caches it for performance.
"""
-import json
import asyncio
-from datetime import datetime, timedelta
-from dataclasses import dataclass, asdict
-from typing import Dict, Any, Optional, List
-from pathlib import Path
+import json
# Import from existing DSS modules
import sys
-sys.path.insert(0, str(Path(__file__).parent.parent.parent))
+from dataclasses import asdict, dataclass
+from datetime import datetime, timedelta
+from pathlib import Path
+from typing import Any, Dict, List, Optional
-from dss.storage.json_store import Projects, Components, Tokens
from analyze.scanner import ProjectScanner
+
from ..config import mcp_config
+sys.path.insert(0, str(Path(__file__).parent.parent.parent))
+
@dataclass
class ProjectContext:
- """Complete project context for MCP sessions"""
+ """Complete project context for MCP sessions."""
project_id: str
name: str
@@ -57,16 +58,16 @@ class ProjectContext:
cache_expires_at: datetime
def to_dict(self) -> Dict[str, Any]:
- """Convert to dictionary for JSON serialization"""
+ """Convert to dictionary for JSON serialization."""
data = asdict(self)
- data['loaded_at'] = self.loaded_at.isoformat()
- data['cache_expires_at'] = self.cache_expires_at.isoformat()
+ data["loaded_at"] = self.loaded_at.isoformat()
+ data["cache_expires_at"] = self.cache_expires_at.isoformat()
if self.path:
- data['path'] = str(self.path)
+ data["path"] = str(self.path)
return data
def is_expired(self) -> bool:
- """Check if cache has expired"""
+ """Check if cache has expired."""
return datetime.now() >= self.cache_expires_at
@@ -83,10 +84,7 @@ class ProjectContextManager:
self._cache_ttl = timedelta(seconds=mcp_config.CONTEXT_CACHE_TTL)
async def get_context(
- self,
- project_id: str,
- user_id: Optional[int] = None,
- force_refresh: bool = False
+ self, project_id: str, user_id: Optional[int] = None, force_refresh: bool = False
) -> Optional[ProjectContext]:
"""
Get project context, using cache if available.
@@ -114,11 +112,9 @@ class ProjectContextManager:
return context
async def _load_context(
- self,
- project_id: str,
- user_id: Optional[int] = None
+ self, project_id: str, user_id: Optional[int] = None
) -> Optional[ProjectContext]:
- """Load complete project context from database and filesystem"""
+ """Load complete project context from database and filesystem."""
# Run database queries in thread pool to avoid blocking
loop = asyncio.get_event_loop()
@@ -142,7 +138,7 @@ class ProjectContextManager:
# Load tokens from filesystem if project has a path
tokens = {}
project_path = None
- if project.get('figma_file_key'):
+ if project.get("figma_file_key"):
# Try to find project path based on naming convention
# (This can be enhanced based on actual project structure)
project_path = Path.cwd()
@@ -158,35 +154,32 @@ class ProjectContextManager:
now = datetime.now()
context = ProjectContext(
project_id=project_id,
- name=project['name'],
- description=project.get('description'),
+ name=project["name"],
+ description=project.get("description"),
path=project_path,
components=components,
component_count=len(components),
tokens=tokens,
styles=styles,
config={
- 'figma_file_key': project.get('figma_file_key'),
- 'status': project.get('status', 'active')
+ "figma_file_key": project.get("figma_file_key"),
+ "status": project.get("status", "active"),
},
integrations=integrations,
health=health,
stats=stats,
discovery=discovery,
loaded_at=now,
- cache_expires_at=now + self._cache_ttl
+ cache_expires_at=now + self._cache_ttl,
)
return context
def _load_project(self, project_id: str) -> Optional[Dict[str, Any]]:
- """Load project metadata from database"""
+ """Load project metadata from database."""
try:
with get_connection() as conn:
- row = conn.execute(
- "SELECT * FROM projects WHERE id = ?",
- (project_id,)
- ).fetchone()
+ row = conn.execute("SELECT * FROM projects WHERE id = ?", (project_id,)).fetchone()
if row:
return dict(row)
@@ -196,7 +189,7 @@ class ProjectContextManager:
return None
def _load_components(self, project_id: str) -> List[Dict[str, Any]]:
- """Load all components for project"""
+ """Load all components for project."""
try:
with get_connection() as conn:
rows = conn.execute(
@@ -208,17 +201,17 @@ class ProjectContextManager:
WHERE project_id = ?
ORDER BY name
""",
- (project_id,)
+ (project_id,),
).fetchall()
components = []
for row in rows:
comp = dict(row)
# Parse JSON fields
- if comp.get('properties'):
- comp['properties'] = json.loads(comp['properties'])
- if comp.get('variants'):
- comp['variants'] = json.loads(comp['variants'])
+ if comp.get("properties"):
+ comp["properties"] = json.loads(comp["properties"])
+ if comp.get("variants"):
+ comp["variants"] = json.loads(comp["variants"])
components.append(comp)
return components
@@ -227,7 +220,7 @@ class ProjectContextManager:
return []
def _load_styles(self, project_id: str) -> List[Dict[str, Any]]:
- """Load all styles for project"""
+ """Load all styles for project."""
try:
with get_connection() as conn:
rows = conn.execute(
@@ -237,14 +230,14 @@ class ProjectContextManager:
WHERE project_id = ?
ORDER BY type, name
""",
- (project_id,)
+ (project_id,),
).fetchall()
styles = []
for row in rows:
style = dict(row)
- if style.get('properties'):
- style['properties'] = json.loads(style['properties'])
+ if style.get("properties"):
+ style["properties"] = json.loads(style["properties"])
styles.append(style)
return styles
@@ -253,7 +246,7 @@ class ProjectContextManager:
return []
def _load_stats(self, project_id: str) -> Dict[str, Any]:
- """Load project statistics"""
+ """Load project statistics."""
try:
with get_connection() as conn:
# Component count by type
@@ -264,7 +257,7 @@ class ProjectContextManager:
FROM components
WHERE project_id = ?
""",
- (project_id,)
+ (project_id,),
).fetchone()
# Style count by type
@@ -275,19 +268,21 @@ class ProjectContextManager:
WHERE project_id = ?
GROUP BY type
""",
- (project_id,)
+ (project_id,),
).fetchall()
return {
- 'components': dict(component_stats) if component_stats else {'total': 0, 'generated': 0},
- 'styles': {row['type']: row['count'] for row in style_stats}
+ "components": dict(component_stats)
+ if component_stats
+ else {"total": 0, "generated": 0},
+ "styles": {row["type"]: row["count"] for row in style_stats},
}
except Exception as e:
print(f"Error loading stats: {e}")
- return {'components': {'total': 0, 'generated': 0}, 'styles': {}}
+ return {"components": {"total": 0, "generated": 0}, "styles": {}}
def _load_integrations(self, project_id: str, user_id: Optional[int]) -> Dict[str, Any]:
- """Load user's enabled integrations for this project"""
+ """Load user's enabled integrations for this project."""
if not user_id:
return {}
@@ -299,7 +294,7 @@ class ProjectContextManager:
FROM project_integrations
WHERE project_id = ? AND user_id = ? AND enabled = 1
""",
- (project_id, user_id)
+ (project_id, user_id),
).fetchall()
# Return decrypted config for each integration
@@ -307,8 +302,8 @@ class ProjectContextManager:
cipher = mcp_config.get_cipher()
for row in rows:
- integration_type = row['integration_type']
- encrypted_config = row['config']
+ integration_type = row["integration_type"]
+ encrypted_config = row["config"]
# Decrypt config
if cipher:
@@ -326,9 +321,9 @@ class ProjectContextManager:
config = {}
integrations[integration_type] = {
- 'enabled': True,
- 'config': config,
- 'last_used_at': row['last_used_at']
+ "enabled": True,
+ "config": config,
+ "last_used_at": row["last_used_at"],
}
return integrations
@@ -337,12 +332,12 @@ class ProjectContextManager:
return {}
def _load_tokens(self, project_path: Optional[Path]) -> Dict[str, Any]:
- """Load design tokens from filesystem"""
+ """Load design tokens from filesystem."""
if not project_path:
return {}
tokens = {}
- token_files = ['tokens.json', 'design-tokens.json', 'variables.json']
+ token_files = ["tokens.json", "design-tokens.json", "variables.json"]
for token_file in token_files:
token_path = project_path / token_file
@@ -357,7 +352,7 @@ class ProjectContextManager:
return tokens
def _load_discovery(self, project_path: Optional[Path]) -> Dict[str, Any]:
- """Load project discovery data"""
+ """Load project discovery data."""
if not project_path:
return {}
@@ -369,18 +364,13 @@ class ProjectContextManager:
print(f"Error running discovery scan: {e}")
return {}
- def _compute_health(
- self,
- components: List[Dict],
- tokens: Dict,
- stats: Dict
- ) -> Dict[str, Any]:
- """Compute project health score"""
+ def _compute_health(self, components: List[Dict], tokens: Dict, stats: Dict) -> Dict[str, Any]:
+ """Compute project health score."""
score = 100
issues = []
# Deduct points for missing components
- if stats['components']['total'] == 0:
+ if stats["components"]["total"] == 0:
score -= 30
issues.append("No components defined")
@@ -390,8 +380,8 @@ class ProjectContextManager:
issues.append("No design tokens defined")
# Deduct points for ungenerated components
- total = stats['components']['total']
- generated = stats['components']['generated']
+ total = stats["components"]["total"]
+ generated = stats["components"]["generated"]
if total > 0 and generated < total:
percentage = (generated / total) * 100
if percentage < 50:
@@ -403,24 +393,20 @@ class ProjectContextManager:
# Compute grade
if score >= 90:
- grade = 'A'
+ grade = "A"
elif score >= 80:
- grade = 'B'
+ grade = "B"
elif score >= 70:
- grade = 'C'
+ grade = "C"
elif score >= 60:
- grade = 'D'
+ grade = "D"
else:
- grade = 'F'
+ grade = "F"
- return {
- 'score': max(0, score),
- 'grade': grade,
- 'issues': issues
- }
+ return {"score": max(0, score), "grade": grade, "issues": issues}
def clear_cache(self, project_id: Optional[str] = None):
- """Clear cache for specific project or all projects"""
+ """Clear cache for specific project or all projects."""
if project_id:
# Clear all cache entries for this project
keys_to_remove = [k for k in self._cache.keys() if k.startswith(f"{project_id}:")]
@@ -436,7 +422,7 @@ _context_manager = None
def get_context_manager() -> ProjectContextManager:
- """Get singleton context manager instance"""
+ """Get singleton context manager instance."""
global _context_manager
if _context_manager is None:
_context_manager = ProjectContextManager()
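
The context manager's caching discipline — serve from cache unless expired or force-refreshed, keyed by project and user — reduces to a few lines. A sketch under those assumptions (Entry, TTLCache, and loader are illustrative names, not the real classes):

```python
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Any, Callable, Dict


@dataclass
class Entry:
    value: Any
    expires_at: datetime

    def is_expired(self) -> bool:
        # Same check as ProjectContext.is_expired
        return datetime.now() >= self.expires_at


class TTLCache:
    def __init__(self, ttl_seconds: int = 300):  # default mirrors CONTEXT_CACHE_TTL
        self._ttl = timedelta(seconds=ttl_seconds)
        self._cache: Dict[str, Entry] = {}

    def get(self, key: str, loader: Callable[[], Any], force_refresh: bool = False) -> Any:
        entry = self._cache.get(key)
        if entry and not entry.is_expired() and not force_refresh:
            return entry.value
        value = loader()
        self._cache[key] = Entry(value, datetime.now() + self._ttl)
        return value


cache = TTLCache(ttl_seconds=300)
print(cache.get("project-1:42", lambda: {"name": "demo"}))
```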
diff --git a/dss/mcp/handler.py b/dss/mcp/handler.py
index 05d3cb4..1483331 100644
--- a/dss/mcp/handler.py
+++ b/dss/mcp/handler.py
@@ -1,5 +1,5 @@
"""
-Unified MCP Handler
+Unified MCP Handler.
Central handler for all MCP tool execution. Used by:
- Direct API calls (/api/mcp/tools/{name}/execute)
@@ -10,34 +10,31 @@ This module ensures all MCP requests go through a single code path
for consistent logging, error handling, and security.
"""
-import json
import asyncio
-from typing import Dict, Any, List, Optional, Tuple
+import json
+from dataclasses import asdict, dataclass
from datetime import datetime
-from dataclasses import dataclass, asdict
+from typing import Any, Dict, List, Optional
-import sys
-from pathlib import Path
+from .config import integration_config, mcp_config
+from .context.project_context import ProjectContext, get_context_manager
+from .integrations.base import CircuitBreakerOpen
+from .integrations.confluence import CONFLUENCE_TOOLS, ConfluenceTools
+from .integrations.figma import FIGMA_TOOLS, FigmaTools
+from .integrations.jira import JIRA_TOOLS, JiraTools
+from .integrations.storybook import STORYBOOK_TOOLS, StorybookTools
+from .integrations.translations import TRANSLATION_TOOLS, TranslationTools
+from .tools.analysis_tools import ANALYSIS_TOOLS, AnalysisTools
+from .tools.project_tools import PROJECT_TOOLS, ProjectTools
# Note: sys.path is set up by the importing module (server.py)
# Do NOT modify sys.path here as it causes relative import issues
-from dss.storage.json_store import Projects, ActivityLog
-from .config import mcp_config, integration_config
-from .context.project_context import get_context_manager, ProjectContext
-from .tools.project_tools import PROJECT_TOOLS, ProjectTools
-from .tools.analysis_tools import ANALYSIS_TOOLS, AnalysisTools
-from .integrations.figma import FIGMA_TOOLS, FigmaTools
-from .integrations.storybook import STORYBOOK_TOOLS, StorybookTools
-from .integrations.jira import JIRA_TOOLS, JiraTools
-from .integrations.confluence import CONFLUENCE_TOOLS, ConfluenceTools
-from .integrations.translations import TRANSLATION_TOOLS, TranslationTools
-from .integrations.base import CircuitBreakerOpen
-
@dataclass
class ToolResult:
- """Result of a tool execution"""
+ """Result of a tool execution."""
+
tool_name: str
success: bool
result: Any
@@ -55,7 +52,8 @@ class ToolResult:
@dataclass
class MCPContext:
- """Context for MCP operations"""
+ """Context for MCP operations."""
+
project_id: str
user_id: Optional[int] = None
session_id: Optional[str] = None
@@ -78,13 +76,13 @@ class MCPHandler:
self._initialize_tools()
def _initialize_tools(self):
- """Initialize tool registry with all available tools"""
+ """Initialize tool registry with all available tools."""
# Register base project tools
for tool in PROJECT_TOOLS:
self._tool_registry[tool.name] = {
"tool": tool,
"category": "project",
- "requires_integration": False
+ "requires_integration": False,
}
# Register analysis tools
@@ -92,7 +90,7 @@ class MCPHandler:
self._tool_registry[tool.name] = {
"tool": tool,
"category": "analysis",
- "requires_integration": False
+ "requires_integration": False,
}
# Register Figma tools
@@ -101,7 +99,7 @@ class MCPHandler:
"tool": tool,
"category": "figma",
"requires_integration": True,
- "integration_type": "figma"
+ "integration_type": "figma",
}
# Register Storybook tools
@@ -109,7 +107,7 @@ class MCPHandler:
self._tool_registry[tool.name] = {
"tool": tool,
"category": "storybook",
- "requires_integration": False
+ "requires_integration": False,
}
# Register Jira tools
@@ -118,7 +116,7 @@ class MCPHandler:
"tool": tool,
"category": "jira",
"requires_integration": True,
- "integration_type": "jira"
+ "integration_type": "jira",
}
# Register Confluence tools
@@ -127,7 +125,7 @@ class MCPHandler:
"tool": tool,
"category": "confluence",
"requires_integration": True,
- "integration_type": "confluence"
+ "integration_type": "confluence",
}
# Register Translation tools
@@ -135,7 +133,7 @@ class MCPHandler:
self._tool_registry[tool.name] = {
"tool": tool,
"category": "translations",
- "requires_integration": False
+ "requires_integration": False,
}
def list_tools(self, include_details: bool = False) -> Dict[str, Any]:
@@ -158,7 +156,7 @@ class MCPHandler:
tool_info = {
"name": name,
"description": info["tool"].description,
- "requires_integration": info.get("requires_integration", False)
+ "requires_integration": info.get("requires_integration", False),
}
if include_details:
@@ -166,13 +164,10 @@ class MCPHandler:
tools_by_category[category].append(tool_info)
- return {
- "tools": tools_by_category,
- "total_count": len(self._tool_registry)
- }
+ return {"tools": tools_by_category, "total_count": len(self._tool_registry)}
def get_tool_info(self, tool_name: str) -> Optional[Dict[str, Any]]:
- """Get information about a specific tool"""
+ """Get information about a specific tool."""
if tool_name not in self._tool_registry:
return None
@@ -183,14 +178,11 @@ class MCPHandler:
"category": info["category"],
"input_schema": info["tool"].inputSchema,
"requires_integration": info.get("requires_integration", False),
- "integration_type": info.get("integration_type")
+ "integration_type": info.get("integration_type"),
}
async def execute_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> ToolResult:
"""
Execute an MCP tool.
@@ -208,10 +200,7 @@ class MCPHandler:
# Check if tool exists
if tool_name not in self._tool_registry:
return ToolResult(
- tool_name=tool_name,
- success=False,
- result=None,
- error=f"Unknown tool: {tool_name}"
+ tool_name=tool_name, success=False, result=None, error=f"Unknown tool: {tool_name}"
)
tool_info = self._tool_registry[tool_name]
@@ -251,7 +240,7 @@ class MCPHandler:
user_id=context.user_id,
success=success,
duration_ms=duration_ms,
- error=error
+ error=error,
)
return ToolResult(
@@ -259,7 +248,7 @@ class MCPHandler:
success=success,
result=result if success else None,
error=error,
- duration_ms=duration_ms
+ duration_ms=duration_ms,
)
except CircuitBreakerOpen as e:
@@ -269,7 +258,7 @@ class MCPHandler:
success=False,
result=None,
error=str(e),
- duration_ms=duration_ms
+ duration_ms=duration_ms,
)
except Exception as e:
duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
@@ -280,23 +269,20 @@ class MCPHandler:
user_id=context.user_id,
success=False,
duration_ms=duration_ms,
- error=str(e)
+ error=str(e),
)
return ToolResult(
tool_name=tool_name,
success=False,
result=None,
error=str(e),
- duration_ms=duration_ms
+ duration_ms=duration_ms,
)
async def _execute_project_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> Dict[str, Any]:
- """Execute a project tool"""
+ """Execute a project tool."""
# Ensure project_id is set
if "project_id" not in arguments:
arguments["project_id"] = context.project_id
@@ -305,26 +291,20 @@ class MCPHandler:
return await project_tools.execute_tool(tool_name, arguments)
async def _execute_analysis_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> Dict[str, Any]:
- """Execute an analysis tool"""
+ """Execute an analysis tool."""
# Ensure project_id is set for context if needed, though project_path is explicit
if "project_id" not in arguments:
arguments["project_id"] = context.project_id
-
+
analysis_tools = AnalysisTools(context.user_id)
return await analysis_tools.execute_tool(tool_name, arguments)
async def _execute_figma_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> Dict[str, Any]:
- """Execute a Figma tool"""
+ """Execute a Figma tool."""
# Get Figma config
config = await self._get_integration_config("figma", context)
if not config:
@@ -338,12 +318,9 @@ class MCPHandler:
return await figma_tools.execute_tool(tool_name, arguments)
async def _execute_storybook_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> Dict[str, Any]:
- """Execute a Storybook tool"""
+ """Execute a Storybook tool."""
# Ensure project_id is set
if "project_id" not in arguments:
arguments["project_id"] = context.project_id
@@ -352,12 +329,9 @@ class MCPHandler:
return await storybook_tools.execute_tool(tool_name, arguments)
async def _execute_jira_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> Dict[str, Any]:
- """Execute a Jira tool"""
+ """Execute a Jira tool."""
config = await self._get_integration_config("jira", context)
if not config:
return {"error": "Jira not configured. Please configure Jira integration."}
@@ -366,12 +340,9 @@ class MCPHandler:
return await jira_tools.execute_tool(tool_name, arguments)
async def _execute_confluence_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> Dict[str, Any]:
- """Execute a Confluence tool"""
+ """Execute a Confluence tool."""
config = await self._get_integration_config("confluence", context)
if not config:
return {"error": "Confluence not configured. Please configure Confluence integration."}
@@ -380,12 +351,9 @@ class MCPHandler:
return await confluence_tools.execute_tool(tool_name, arguments)
async def _execute_translations_tool(
- self,
- tool_name: str,
- arguments: Dict[str, Any],
- context: MCPContext
+ self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
) -> Dict[str, Any]:
- """Execute a Translation tool"""
+ """Execute a Translation tool."""
# Ensure project_id is set
if "project_id" not in arguments:
arguments["project_id"] = context.project_id
@@ -394,11 +362,9 @@ class MCPHandler:
return await translation_tools.execute_tool(tool_name, arguments)
async def _get_integration_config(
- self,
- integration_type: str,
- context: MCPContext
+ self, integration_type: str, context: MCPContext
) -> Optional[Dict[str, Any]]:
- """Get decrypted integration config for user/project"""
+ """Get decrypted integration config for user/project."""
if not context.user_id or not context.project_id:
return None
@@ -412,7 +378,7 @@ class MCPHandler:
SELECT config FROM project_integrations
WHERE project_id = ? AND user_id = ? AND integration_type = ? AND enabled = 1
""",
- (context.project_id, context.user_id, integration_type)
+ (context.project_id, context.user_id, integration_type),
).fetchone()
if not row:
@@ -447,9 +413,9 @@ class MCPHandler:
user_id: Optional[int],
success: bool,
duration_ms: int,
- error: Optional[str] = None
+ error: Optional[str] = None,
):
- """Log tool execution to database"""
+ """Log tool execution to database."""
loop = asyncio.get_event_loop()
def log():
@@ -461,7 +427,7 @@ class MCPHandler:
(project_id, user_id, tool_name, tool_category, duration_ms, success, error_message)
VALUES (?, ?, ?, ?, ?, ?, ?)
""",
- (project_id, user_id, tool_name, category, duration_ms, success, error)
+ (project_id, user_id, tool_name, category, duration_ms, success, error),
)
except:
pass # Don't fail on logging errors
@@ -469,11 +435,9 @@ class MCPHandler:
await loop.run_in_executor(None, log)
async def get_project_context(
- self,
- project_id: str,
- user_id: Optional[int] = None
+ self, project_id: str, user_id: Optional[int] = None
) -> Optional[ProjectContext]:
- """Get project context for Claude system prompt"""
+ """Get project context for Claude system prompt."""
return await self.context_manager.get_context(project_id, user_id)
def get_tools_for_claude(self) -> List[Dict[str, Any]]:
@@ -485,11 +449,13 @@ class MCPHandler:
"""
tools = []
for name, info in self._tool_registry.items():
- tools.append({
- "name": name,
- "description": info["tool"].description,
- "input_schema": info["tool"].inputSchema
- })
+ tools.append(
+ {
+ "name": name,
+ "description": info["tool"].description,
+ "input_schema": info["tool"].inputSchema,
+ }
+ )
return tools
@@ -498,7 +464,7 @@ _mcp_handler: Optional[MCPHandler] = None
def get_mcp_handler() -> MCPHandler:
- """Get singleton MCP handler instance"""
+ """Get singleton MCP handler instance."""
global _mcp_handler
if _mcp_handler is None:
_mcp_handler = MCPHandler()
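
A usage sketch of the single code path the handler enforces. The tool name and arguments are illustrative (real names come from list_tools()), and running this requires the DSS package plus a configured Figma integration:

```python
import asyncio

from dss.mcp.handler import MCPContext, get_mcp_handler


async def main() -> None:
    handler = get_mcp_handler()  # singleton; registry is built once
    context = MCPContext(project_id="demo-project", user_id=1)

    result = await handler.execute_tool(
        tool_name="figma_get_file",
        arguments={"file_key": "abc123"},
        context=context,
    )
    if result.success:
        print(result.result, f"({result.duration_ms} ms)")
    else:
        print("failed:", result.error)


asyncio.run(main())
```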
diff --git a/dss/mcp/integrations/base.py b/dss/mcp/integrations/base.py
index 390d381..cf28ec1 100644
--- a/dss/mcp/integrations/base.py
+++ b/dss/mcp/integrations/base.py
@@ -1,22 +1,22 @@
"""
-Base Integration Classes
+Base Integration Classes.
Provides circuit breaker pattern and base classes for external integrations.
"""
-import time
import asyncio
-from typing import Callable, Any, Optional, Dict
+import time
from dataclasses import dataclass
-from datetime import datetime, timedelta
+from datetime import datetime
from enum import Enum
+from typing import Any, Callable, Dict, Optional
from ..config import mcp_config
-from dss.storage.json_store import Cache, read_json, write_json, SYSTEM_DIR
class CircuitState(Enum):
- """Circuit breaker states"""
+ """Circuit breaker states."""
+
CLOSED = "closed" # Normal operation
OPEN = "open" # Failing, reject requests
HALF_OPEN = "half_open" # Testing if service recovered
@@ -24,7 +24,8 @@ class CircuitState(Enum):
@dataclass
class CircuitBreakerStats:
- """Circuit breaker statistics"""
+ """Circuit breaker statistics."""
+
state: CircuitState
failure_count: int
success_count: int
@@ -35,7 +36,8 @@ class CircuitBreakerStats:
class CircuitBreakerOpen(Exception):
- """Exception raised when circuit breaker is open"""
+ """Exception raised when circuit breaker is open."""
+
pass
@@ -52,10 +54,11 @@ class CircuitBreaker:
integration_type: str,
failure_threshold: int = None,
timeout_seconds: int = None,
- half_open_max_calls: int = 3
+ half_open_max_calls: int = 3,
):
"""
Args:
+
integration_type: Type of integration (figma, jira, confluence, etc.)
failure_threshold: Number of failures before opening circuit
timeout_seconds: Seconds to wait before trying again
@@ -134,7 +137,7 @@ class CircuitBreaker:
raise
async def _record_success(self):
- """Record successful call"""
+ """Record successful call."""
self.success_count += 1
self.last_success_time = time.time()
@@ -142,7 +145,7 @@ class CircuitBreaker:
await self._update_health_db(is_healthy=True, error=None)
async def _record_failure(self, error_message: str, db_only: bool = False):
- """Record failed call"""
+ """Record failed call."""
if not db_only:
self.failure_count += 1
self.last_failure_time = time.time()
@@ -151,7 +154,7 @@ class CircuitBreaker:
await self._update_health_db(is_healthy=False, error=error_message)
async def _update_health_db(self, is_healthy: bool, error: Optional[str]):
- """Update integration health in database"""
+ """Update integration health in database."""
loop = asyncio.get_event_loop()
def update_db():
@@ -174,7 +177,7 @@ class CircuitBreaker:
updated_at = CURRENT_TIMESTAMP
WHERE integration_type = ?
""",
- (self.integration_type,)
+ (self.integration_type,),
)
else:
conn.execute(
@@ -187,7 +190,7 @@ class CircuitBreaker:
updated_at = CURRENT_TIMESTAMP
WHERE integration_type = ?
""",
- (self.failure_count, circuit_open_until, self.integration_type)
+ (self.failure_count, circuit_open_until, self.integration_type),
)
except Exception as e:
print(f"Error updating integration health: {e}")
@@ -195,7 +198,7 @@ class CircuitBreaker:
await loop.run_in_executor(None, update_db)
def _seconds_until_retry(self) -> float:
- """Get seconds until circuit can be retried"""
+ """Get seconds until circuit can be retried."""
if self.state != CircuitState.OPEN or not self.opened_at:
return 0
elapsed = time.time() - self.opened_at
@@ -203,7 +206,7 @@ class CircuitBreaker:
return max(0, remaining)
def get_stats(self) -> CircuitBreakerStats:
- """Get current circuit breaker statistics"""
+ """Get current circuit breaker statistics."""
next_retry_time = None
if self.state == CircuitState.OPEN and self.opened_at:
next_retry_time = self.opened_at + self.timeout_seconds
@@ -215,16 +218,17 @@ class CircuitBreaker:
last_failure_time=self.last_failure_time,
last_success_time=self.last_success_time,
opened_at=self.opened_at,
- next_retry_time=next_retry_time
+ next_retry_time=next_retry_time,
)
class BaseIntegration:
- """Base class for all external integrations"""
+ """Base class for all external integrations."""
def __init__(self, integration_type: str, config: Dict[str, Any]):
"""
Args:
+
integration_type: Type of integration (figma, jira, etc.)
config: Integration configuration (decrypted)
"""
@@ -250,7 +254,7 @@ class BaseIntegration:
return await self.circuit_breaker.call(func, *args, **kwargs)
def get_health(self) -> Dict[str, Any]:
- """Get integration health status"""
+ """Get integration health status."""
stats = self.circuit_breaker.get_stats()
return {
"integration_type": self.integration_type,
@@ -260,5 +264,5 @@ class BaseIntegration:
"success_count": stats.success_count,
"last_failure_time": stats.last_failure_time,
"last_success_time": stats.last_success_time,
- "next_retry_time": stats.next_retry_time
+ "next_retry_time": stats.next_retry_time,
}
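
From the caller's side, the breaker's contract is: failures are re-raised and counted, and once the threshold is hit further calls are rejected with CircuitBreakerOpen until the timeout lapses. An illustrative sketch under those assumptions — it needs the DSS package, and the real breaker also records health to the integration store as a side effect:

```python
import asyncio

from dss.mcp.integrations.base import CircuitBreaker, CircuitBreakerOpen


async def flaky_call() -> dict:
    raise ConnectionError("upstream down")


async def main() -> None:
    breaker = CircuitBreaker("figma", failure_threshold=2, timeout_seconds=60)

    for attempt in range(3):
        try:
            await breaker.call(flaky_call)
        except CircuitBreakerOpen as exc:
            # Expected on the third attempt: circuit is open, call rejected
            print("rejected:", exc)
        except ConnectionError:
            print("failure", attempt + 1, "recorded")

    print(breaker.get_stats().state)  # expected: CircuitState.OPEN


asyncio.run(main())
```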
diff --git a/dss/mcp/integrations/confluence.py b/dss/mcp/integrations/confluence.py
index a18ca4a..fc859f5 100644
--- a/dss/mcp/integrations/confluence.py
+++ b/dss/mcp/integrations/confluence.py
@@ -1,16 +1,16 @@
"""
-Confluence Integration for MCP
+Confluence Integration for MCP.
Provides Confluence API tools for documentation and knowledge base.
"""
-from typing import Dict, Any, List, Optional
+from typing import Any, Dict, Optional
+
from atlassian import Confluence
from mcp import types
from .base import BaseIntegration
-
# Confluence MCP Tool Definitions
CONFLUENCE_TOOLS = [
types.Tool(
@@ -19,25 +19,13 @@ CONFLUENCE_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "space_key": {
- "type": "string",
- "description": "Confluence space key"
- },
- "title": {
- "type": "string",
- "description": "Page title"
- },
- "body": {
- "type": "string",
- "description": "Page content (HTML or wiki markup)"
- },
- "parent_id": {
- "type": "string",
- "description": "Optional parent page ID"
- }
+ "space_key": {"type": "string", "description": "Confluence space key"},
+ "title": {"type": "string", "description": "Page title"},
+ "body": {"type": "string", "description": "Page content (HTML or wiki markup)"},
+ "parent_id": {"type": "string", "description": "Optional parent page ID"},
},
- "required": ["space_key", "title", "body"]
- }
+ "required": ["space_key", "title", "body"],
+ },
),
types.Tool(
name="confluence_get_page",
@@ -45,25 +33,19 @@ CONFLUENCE_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "page_id": {
- "type": "string",
- "description": "Page ID (use this OR title)"
- },
+ "page_id": {"type": "string", "description": "Page ID (use this OR title)"},
"space_key": {
"type": "string",
- "description": "Space key (required if using title)"
- },
- "title": {
- "type": "string",
- "description": "Page title (use this OR page_id)"
+ "description": "Space key (required if using title)",
},
+ "title": {"type": "string", "description": "Page title (use this OR page_id)"},
"expand": {
"type": "string",
"description": "Comma-separated list of expansions (body.storage, version, etc.)",
- "default": "body.storage,version"
- }
- }
- }
+ "default": "body.storage,version",
+ },
+ },
+ },
),
types.Tool(
name="confluence_update_page",
@@ -71,21 +53,12 @@ CONFLUENCE_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "page_id": {
- "type": "string",
- "description": "Page ID to update"
- },
- "title": {
- "type": "string",
- "description": "New page title"
- },
- "body": {
- "type": "string",
- "description": "New page content"
- }
+ "page_id": {"type": "string", "description": "Page ID to update"},
+ "title": {"type": "string", "description": "New page title"},
+ "body": {"type": "string", "description": "New page content"},
},
- "required": ["page_id", "title", "body"]
- }
+ "required": ["page_id", "title", "body"],
+ },
),
types.Tool(
name="confluence_search",
@@ -95,36 +68,31 @@ CONFLUENCE_TOOLS = [
"properties": {
"cql": {
"type": "string",
- "description": "CQL query (e.g., 'space=DSS AND type=page')"
+ "description": "CQL query (e.g., 'space=DSS AND type=page')",
},
"limit": {
"type": "integer",
"description": "Maximum number of results",
- "default": 25
- }
+ "default": 25,
+ },
},
- "required": ["cql"]
- }
+ "required": ["cql"],
+ },
),
types.Tool(
name="confluence_get_space",
description="Get Confluence space details",
inputSchema={
"type": "object",
- "properties": {
- "space_key": {
- "type": "string",
- "description": "Space key"
- }
- },
- "required": ["space_key"]
- }
- )
+ "properties": {"space_key": {"type": "string", "description": "Space key"}},
+ "required": ["space_key"],
+ },
+ ),
]
class ConfluenceIntegration(BaseIntegration):
- """Confluence API integration with circuit breaker"""
+ """Confluence API integration with circuit breaker."""
def __init__(self, config: Dict[str, Any]):
"""
@@ -140,30 +108,24 @@ class ConfluenceIntegration(BaseIntegration):
api_token = config.get("api_token")
if not all([url, username, api_token]):
- raise ValueError("Confluence configuration incomplete: url, username, api_token required")
+ raise ValueError(
+ "Confluence configuration incomplete: url, username, api_token required"
+ )
- self.confluence = Confluence(
- url=url,
- username=username,
- password=api_token,
- cloud=True
- )
+ self.confluence = Confluence(url=url, username=username, password=api_token, cloud=True)
async def create_page(
- self,
- space_key: str,
- title: str,
- body: str,
- parent_id: Optional[str] = None
+ self, space_key: str, title: str, body: str, parent_id: Optional[str] = None
) -> Dict[str, Any]:
- """Create a new page"""
+ """Create a new page."""
+
def _create():
return self.confluence.create_page(
space=space_key,
title=title,
body=body,
parent_id=parent_id,
- representation="storage"
+ representation="storage",
)
return await self.call_api(_create)
@@ -173,33 +135,25 @@ class ConfluenceIntegration(BaseIntegration):
page_id: Optional[str] = None,
space_key: Optional[str] = None,
title: Optional[str] = None,
- expand: str = "body.storage,version"
+ expand: str = "body.storage,version",
) -> Dict[str, Any]:
- """Get page by ID or title"""
+ """Get page by ID or title."""
+
def _get():
if page_id:
- return self.confluence.get_page_by_id(
- page_id=page_id,
- expand=expand
- )
+ return self.confluence.get_page_by_id(page_id=page_id, expand=expand)
elif space_key and title:
return self.confluence.get_page_by_title(
- space=space_key,
- title=title,
- expand=expand
+ space=space_key, title=title, expand=expand
)
else:
raise ValueError("Must provide either page_id or (space_key + title)")
return await self.call_api(_get)
- async def update_page(
- self,
- page_id: str,
- title: str,
- body: str
- ) -> Dict[str, Any]:
- """Update an existing page"""
+ async def update_page(self, page_id: str, title: str, body: str) -> Dict[str, Any]:
+ """Update an existing page."""
+
def _update():
# Get current version
page = self.confluence.get_page_by_id(page_id, expand="version")
@@ -214,20 +168,22 @@ class ConfluenceIntegration(BaseIntegration):
representation="storage",
minor_edit=False,
version_comment="Updated via DSS MCP",
- version_number=current_version + 1
+ version_number=current_version + 1,
)
return await self.call_api(_update)
async def search(self, cql: str, limit: int = 25) -> Dict[str, Any]:
- """Search pages using CQL"""
+ """Search pages using CQL."""
+
def _search():
return self.confluence.cql(cql, limit=limit)
return await self.call_api(_search)
async def get_space(self, space_key: str) -> Dict[str, Any]:
- """Get space details"""
+ """Get space details."""
+
def _get():
return self.confluence.get_space(space_key)
@@ -235,19 +191,19 @@ class ConfluenceIntegration(BaseIntegration):
class ConfluenceTools:
- """MCP tool executor for Confluence integration"""
+ """MCP tool executor for Confluence integration."""
def __init__(self, config: Dict[str, Any]):
self.confluence = ConfluenceIntegration(config)
async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
- """Execute Confluence tool"""
+ """Execute Confluence tool."""
handlers = {
"confluence_create_page": self.confluence.create_page,
"confluence_get_page": self.confluence.get_page,
"confluence_update_page": self.confluence.update_page,
"confluence_search": self.confluence.search,
- "confluence_get_space": self.confluence.get_space
+ "confluence_get_space": self.confluence.get_space,
}
handler = handlers.get(tool_name)
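
As with the other integrations, the tools class is a thin dispatcher over the integration methods. A hedged usage sketch — credentials are placeholders, and the call needs network access to a real Confluence site:

```python
import asyncio

from dss.mcp.integrations.confluence import ConfluenceTools

config = {
    "url": "https://example.atlassian.net/wiki",  # placeholder
    "username": "bot@example.com",                # placeholder
    "api_token": "atl-token-placeholder",         # placeholder
}


async def main() -> None:
    tools = ConfluenceTools(config)
    page = await tools.execute_tool(
        "confluence_get_page",
        {"space_key": "DSS", "title": "Design Tokens"},
    )
    print(page)


asyncio.run(main())
```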
diff --git a/dss/mcp/integrations/figma.py b/dss/mcp/integrations/figma.py
index 7b2938c..b0e7e36 100644
--- a/dss/mcp/integrations/figma.py
+++ b/dss/mcp/integrations/figma.py
@@ -1,16 +1,16 @@
"""
-Figma Integration for MCP
+Figma Integration for MCP.
Provides Figma API tools through circuit breaker pattern.
"""
+from typing import Any, Dict
+
import httpx
-from typing import Dict, Any, List, Optional
from mcp import types
-from .base import BaseIntegration
from ..config import integration_config
-
+from .base import BaseIntegration
# Figma MCP Tool Definitions
FIGMA_TOOLS = [
@@ -19,56 +19,36 @@ FIGMA_TOOLS = [
description="Get Figma file metadata and structure",
inputSchema={
"type": "object",
- "properties": {
- "file_key": {
- "type": "string",
- "description": "Figma file key"
- }
- },
- "required": ["file_key"]
- }
+ "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
+ "required": ["file_key"],
+ },
),
types.Tool(
name="figma_get_styles",
description="Get design styles (colors, text, effects) from Figma file",
inputSchema={
"type": "object",
- "properties": {
- "file_key": {
- "type": "string",
- "description": "Figma file key"
- }
- },
- "required": ["file_key"]
- }
+ "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
+ "required": ["file_key"],
+ },
),
types.Tool(
name="figma_get_components",
description="Get component definitions from Figma file",
inputSchema={
"type": "object",
- "properties": {
- "file_key": {
- "type": "string",
- "description": "Figma file key"
- }
- },
- "required": ["file_key"]
- }
+ "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
+ "required": ["file_key"],
+ },
),
types.Tool(
name="figma_extract_tokens",
description="Extract design tokens (variables) from Figma file",
inputSchema={
"type": "object",
- "properties": {
- "file_key": {
- "type": "string",
- "description": "Figma file key"
- }
- },
- "required": ["file_key"]
- }
+ "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
+ "required": ["file_key"],
+ },
),
types.Tool(
name="figma_get_node",
@@ -76,23 +56,17 @@ FIGMA_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "file_key": {
- "type": "string",
- "description": "Figma file key"
- },
- "node_id": {
- "type": "string",
- "description": "Node ID to fetch"
- }
+ "file_key": {"type": "string", "description": "Figma file key"},
+ "node_id": {"type": "string", "description": "Node ID to fetch"},
},
- "required": ["file_key", "node_id"]
- }
- )
+ "required": ["file_key", "node_id"],
+ },
+ ),
]
class FigmaIntegration(BaseIntegration):
- """Figma API integration with circuit breaker"""
+ """Figma API integration with circuit breaker."""
FIGMA_API_BASE = "https://api.figma.com/v1"
@@ -109,9 +83,7 @@ class FigmaIntegration(BaseIntegration):
if not self.api_token:
raise ValueError("Figma API token not configured")
- self.headers = {
- "X-Figma-Token": self.api_token
- }
+ self.headers = {"X-Figma-Token": self.api_token}
async def get_file(self, file_key: str) -> Dict[str, Any]:
"""
@@ -123,12 +95,11 @@ class FigmaIntegration(BaseIntegration):
Returns:
File data
"""
+
async def _fetch():
async with httpx.AsyncClient() as client:
response = await client.get(
- f"{self.FIGMA_API_BASE}/files/{file_key}",
- headers=self.headers,
- timeout=30.0
+ f"{self.FIGMA_API_BASE}/files/{file_key}", headers=self.headers, timeout=30.0
)
response.raise_for_status()
return response.json()
@@ -145,12 +116,13 @@ class FigmaIntegration(BaseIntegration):
Returns:
Styles data
"""
+
async def _fetch():
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.FIGMA_API_BASE}/files/{file_key}/styles",
headers=self.headers,
- timeout=30.0
+ timeout=30.0,
)
response.raise_for_status()
return response.json()
@@ -167,12 +139,13 @@ class FigmaIntegration(BaseIntegration):
Returns:
Components data
"""
+
async def _fetch():
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.FIGMA_API_BASE}/files/{file_key}/components",
headers=self.headers,
- timeout=30.0
+ timeout=30.0,
)
response.raise_for_status()
return response.json()
@@ -189,13 +162,14 @@ class FigmaIntegration(BaseIntegration):
Returns:
Variables/tokens data
"""
+
async def _fetch():
async with httpx.AsyncClient() as client:
# Get local variables
response = await client.get(
f"{self.FIGMA_API_BASE}/files/{file_key}/variables/local",
headers=self.headers,
- timeout=30.0
+ timeout=30.0,
)
response.raise_for_status()
return response.json()
@@ -213,13 +187,14 @@ class FigmaIntegration(BaseIntegration):
Returns:
Node data
"""
+
async def _fetch():
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.FIGMA_API_BASE}/files/{file_key}/nodes",
headers=self.headers,
params={"ids": node_id},
- timeout=30.0
+ timeout=30.0,
)
response.raise_for_status()
return response.json()
@@ -228,23 +203,24 @@ class FigmaIntegration(BaseIntegration):
class FigmaTools:
- """MCP tool executor for Figma integration"""
+ """MCP tool executor for Figma integration."""
def __init__(self, config: Dict[str, Any]):
"""
Args:
+
config: Figma configuration (with api_token)
"""
self.figma = FigmaIntegration(config)
async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
- """Execute Figma tool"""
+ """Execute Figma tool."""
handlers = {
"figma_get_file": self.figma.get_file,
"figma_get_styles": self.figma.get_styles,
"figma_get_components": self.figma.get_components,
"figma_extract_tokens": self.figma.extract_tokens,
- "figma_get_node": self.figma.get_node
+ "figma_get_node": self.figma.get_node,
}
handler = handlers.get(tool_name)
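Each FigmaIntegration method builds a local _fetch closure and hands it to the inherited call_api, so the circuit breaker handles one opaque awaitable per request. A self-contained sketch of that pattern, with call_api reduced to a stub -- the real breaker logic lives in .base.BaseIntegration, which this diff does not show:

import httpx


class FigmaSketch:
    """Hypothetical, trimmed-down version of the closure-plus-call_api pattern."""

    FIGMA_API_BASE = "https://api.figma.com/v1"

    def __init__(self, api_token: str):
        self.headers = {"X-Figma-Token": api_token}

    async def call_api(self, func):
        # Stand-in for BaseIntegration.call_api: the real one wraps the call
        # in circuit-breaker state; this stub just awaits the closure.
        return await func()

    async def get_file(self, file_key: str) -> dict:
        async def _fetch():
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.FIGMA_API_BASE}/files/{file_key}",
                    headers=self.headers,
                    timeout=30.0,
                )
                response.raise_for_status()
                return response.json()

        return await self.call_api(_fetch)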
diff --git a/dss/mcp/integrations/jira.py b/dss/mcp/integrations/jira.py
index 7351945..202ffb7 100644
--- a/dss/mcp/integrations/jira.py
+++ b/dss/mcp/integrations/jira.py
@@ -1,16 +1,16 @@
"""
-Jira Integration for MCP
+Jira Integration for MCP.
Provides Jira API tools for issue tracking and project management.
"""
-from typing import Dict, Any, List, Optional
+from typing import Any, Dict
+
from atlassian import Jira
from mcp import types
from .base import BaseIntegration
-
# Jira MCP Tool Definitions
JIRA_TOOLS = [
types.Tool(
@@ -19,26 +19,17 @@ JIRA_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_key": {
- "type": "string",
- "description": "Jira project key (e.g., 'DSS')"
- },
- "summary": {
- "type": "string",
- "description": "Issue summary/title"
- },
- "description": {
- "type": "string",
- "description": "Issue description"
- },
+ "project_key": {"type": "string", "description": "Jira project key (e.g., 'DSS')"},
+ "summary": {"type": "string", "description": "Issue summary/title"},
+ "description": {"type": "string", "description": "Issue description"},
"issue_type": {
"type": "string",
"description": "Issue type (Story, Task, Bug, etc.)",
- "default": "Task"
- }
+ "default": "Task",
+ },
},
- "required": ["project_key", "summary"]
- }
+ "required": ["project_key", "summary"],
+ },
),
types.Tool(
name="jira_get_issue",
@@ -46,13 +37,10 @@ JIRA_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "issue_key": {
- "type": "string",
- "description": "Issue key (e.g., 'DSS-123')"
- }
+ "issue_key": {"type": "string", "description": "Issue key (e.g., 'DSS-123')"}
},
- "required": ["issue_key"]
- }
+ "required": ["issue_key"],
+ },
),
types.Tool(
name="jira_search_issues",
@@ -62,16 +50,16 @@ JIRA_TOOLS = [
"properties": {
"jql": {
"type": "string",
- "description": "JQL query (e.g., 'project=DSS AND status=Open')"
+ "description": "JQL query (e.g., 'project=DSS AND status=Open')",
},
"max_results": {
"type": "integer",
"description": "Maximum number of results",
- "default": 50
- }
+ "default": 50,
+ },
},
- "required": ["jql"]
- }
+ "required": ["jql"],
+ },
),
types.Tool(
name="jira_update_issue",
@@ -79,17 +67,14 @@ JIRA_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "issue_key": {
- "type": "string",
- "description": "Issue key to update"
- },
+ "issue_key": {"type": "string", "description": "Issue key to update"},
"fields": {
"type": "object",
- "description": "Fields to update (summary, description, status, etc.)"
- }
+ "description": "Fields to update (summary, description, status, etc.)",
+ },
},
- "required": ["issue_key", "fields"]
- }
+ "required": ["issue_key", "fields"],
+ },
),
types.Tool(
name="jira_add_comment",
@@ -97,23 +82,17 @@ JIRA_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "issue_key": {
- "type": "string",
- "description": "Issue key"
- },
- "comment": {
- "type": "string",
- "description": "Comment text"
- }
+ "issue_key": {"type": "string", "description": "Issue key"},
+ "comment": {"type": "string", "description": "Comment text"},
},
- "required": ["issue_key", "comment"]
- }
- )
+ "required": ["issue_key", "comment"],
+ },
+ ),
]
class JiraIntegration(BaseIntegration):
- """Jira API integration with circuit breaker"""
+ """Jira API integration with circuit breaker."""
def __init__(self, config: Dict[str, Any]):
"""
@@ -131,48 +110,43 @@ class JiraIntegration(BaseIntegration):
if not all([url, username, api_token]):
raise ValueError("Jira configuration incomplete: url, username, api_token required")
- self.jira = Jira(
- url=url,
- username=username,
- password=api_token,
- cloud=True
- )
+ self.jira = Jira(url=url, username=username, password=api_token, cloud=True)
async def create_issue(
- self,
- project_key: str,
- summary: str,
- description: str = "",
- issue_type: str = "Task"
+ self, project_key: str, summary: str, description: str = "", issue_type: str = "Task"
) -> Dict[str, Any]:
- """Create a new Jira issue"""
+ """Create a new Jira issue."""
+
def _create():
fields = {
"project": {"key": project_key},
"summary": summary,
"description": description,
- "issuetype": {"name": issue_type}
+ "issuetype": {"name": issue_type},
}
return self.jira.create_issue(fields)
return await self.call_api(_create)
async def get_issue(self, issue_key: str) -> Dict[str, Any]:
- """Get issue details"""
+ """Get issue details."""
+
def _get():
return self.jira.get_issue(issue_key)
return await self.call_api(_get)
async def search_issues(self, jql: str, max_results: int = 50) -> Dict[str, Any]:
- """Search issues with JQL"""
+ """Search issues with JQL."""
+
def _search():
return self.jira.jql(jql, limit=max_results)
return await self.call_api(_search)
async def update_issue(self, issue_key: str, fields: Dict[str, Any]) -> Dict[str, Any]:
- """Update issue fields"""
+ """Update issue fields."""
+
def _update():
self.jira.update_issue_field(issue_key, fields)
return {"status": "updated", "issue_key": issue_key}
@@ -180,7 +154,8 @@ class JiraIntegration(BaseIntegration):
return await self.call_api(_update)
async def add_comment(self, issue_key: str, comment: str) -> Dict[str, Any]:
- """Add comment to issue"""
+ """Add comment to issue."""
+
def _comment():
return self.jira.issue_add_comment(issue_key, comment)
@@ -188,19 +163,19 @@ class JiraIntegration(BaseIntegration):
class JiraTools:
- """MCP tool executor for Jira integration"""
+ """MCP tool executor for Jira integration."""
def __init__(self, config: Dict[str, Any]):
self.jira = JiraIntegration(config)
async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
- """Execute Jira tool"""
+ """Execute Jira tool."""
handlers = {
"jira_create_issue": self.jira.create_issue,
"jira_get_issue": self.jira.get_issue,
"jira_search_issues": self.jira.search_issues,
"jira_update_issue": self.jira.update_issue,
- "jira_add_comment": self.jira.add_comment
+ "jira_add_comment": self.jira.add_comment,
}
handler = handlers.get(tool_name)
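Unlike the Figma closures, the Jira closures are plain synchronous defs around the blocking atlassian.Jira client, yet they are still awaited through call_api. That implies the base class can offload synchronous callables; one plausible shape for that branch (an assumption, since base.py is not in this diff) is:

import asyncio
import inspect


async def call_api(func):
    # Hypothetical sketch of how call_api could accept both closure styles:
    # await async closures directly, push blocking ones onto a worker thread.
    if inspect.iscoroutinefunction(func):
        return await func()
    return await asyncio.to_thread(func)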
diff --git a/dss/mcp/integrations/storybook.py b/dss/mcp/integrations/storybook.py
index 1686742..2236557 100644
--- a/dss/mcp/integrations/storybook.py
+++ b/dss/mcp/integrations/storybook.py
@@ -1,16 +1,16 @@
"""
-Storybook Integration for MCP
+Storybook Integration for MCP.
Provides Storybook tools for scanning, generating stories, creating themes, and configuration.
"""
-from typing import Dict, Any, Optional, List
from pathlib import Path
+from typing import Any, Dict, Optional
+
from mcp import types
-from .base import BaseIntegration
from ..context.project_context import get_context_manager
-
+from .base import BaseIntegration
# Storybook MCP Tool Definitions
STORYBOOK_TOOLS = [
@@ -20,17 +20,14 @@ STORYBOOK_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"path": {
"type": "string",
- "description": "Optional: Specific path to scan (defaults to project root)"
- }
+ "description": "Optional: Specific path to scan (defaults to project root)",
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="storybook_generate_stories",
@@ -38,33 +35,30 @@ STORYBOOK_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"component_path": {
"type": "string",
- "description": "Path to component file or directory"
+ "description": "Path to component file or directory",
},
"template": {
"type": "string",
"description": "Story format template",
"enum": ["csf3", "csf2", "mdx"],
- "default": "csf3"
+ "default": "csf3",
},
"include_variants": {
"type": "boolean",
"description": "Generate variant stories (default: true)",
- "default": True
+ "default": True,
},
"dry_run": {
"type": "boolean",
"description": "Preview without writing files (default: true)",
- "default": True
- }
+ "default": True,
+ },
},
- "required": ["project_id", "component_path"]
- }
+ "required": ["project_id", "component_path"],
+ },
),
types.Tool(
name="storybook_generate_theme",
@@ -72,47 +66,39 @@ STORYBOOK_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"brand_title": {
"type": "string",
"description": "Brand title for Storybook UI",
- "default": "Design System"
+ "default": "Design System",
},
"base_theme": {
"type": "string",
"description": "Base theme (light or dark)",
"enum": ["light", "dark"],
- "default": "light"
+ "default": "light",
},
"output_dir": {
"type": "string",
- "description": "Output directory (default: .storybook)"
+ "description": "Output directory (default: .storybook)",
},
"write_files": {
"type": "boolean",
"description": "Write files to disk (default: false - preview only)",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="storybook_get_status",
description="Get Storybook installation and configuration status for a project.",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="storybook_configure",
@@ -120,42 +106,31 @@ STORYBOOK_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"action": {
"type": "string",
"description": "Configuration action",
"enum": ["init", "update", "add_theme"],
- "default": "init"
+ "default": "init",
},
"options": {
"type": "object",
"description": "Configuration options",
"properties": {
- "framework": {
- "type": "string",
- "enum": ["react", "vue", "angular"]
- },
- "builder": {
- "type": "string",
- "enum": ["vite", "webpack5"]
- },
- "typescript": {
- "type": "boolean"
- }
- }
- }
+ "framework": {"type": "string", "enum": ["react", "vue", "angular"]},
+ "builder": {"type": "string", "enum": ["vite", "webpack5"]},
+ "typescript": {"type": "boolean"},
+ },
+ },
},
- "required": ["project_id"]
- }
- )
+ "required": ["project_id"],
+ },
+ ),
]
class StorybookIntegration(BaseIntegration):
- """Storybook integration wrapper for DSS tools"""
+ """Storybook integration wrapper for DSS tools."""
def __init__(self, config: Optional[Dict[str, Any]] = None):
"""
@@ -208,23 +183,26 @@ class StorybookIntegration(BaseIntegration):
scan_path = project_path
scanner = StorybookScanner(str(scan_path))
- result = await scanner.scan() if hasattr(scanner.scan, '__await__') else scanner.scan()
- coverage = await scanner.get_story_coverage() if hasattr(scanner.get_story_coverage, '__await__') else scanner.get_story_coverage()
+ result = await scanner.scan() if hasattr(scanner.scan, "__await__") else scanner.scan()
+ coverage = (
+ await scanner.get_story_coverage()
+ if hasattr(scanner.get_story_coverage, "__await__")
+ else scanner.get_story_coverage()
+ )
return {
"project_id": project_id,
"path": str(scan_path),
"config": result.get("config") if isinstance(result, dict) else None,
"stories_count": result.get("stories_count", 0) if isinstance(result, dict) else 0,
- "components_with_stories": result.get("components_with_stories", []) if isinstance(result, dict) else [],
+ "components_with_stories": result.get("components_with_stories", [])
+ if isinstance(result, dict)
+ else [],
"stories": result.get("stories", []) if isinstance(result, dict) else [],
- "coverage": coverage if coverage else {}
+ "coverage": coverage if coverage else {},
}
except Exception as e:
- return {
- "error": f"Failed to scan Storybook: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to scan Storybook: {str(e)}", "project_id": project_id}
async def generate_stories(
self,
@@ -232,7 +210,7 @@ class StorybookIntegration(BaseIntegration):
component_path: str,
template: str = "csf3",
include_variants: bool = True,
- dry_run: bool = True
+ dry_run: bool = True,
) -> Dict[str, Any]:
"""
Generate stories for components.
@@ -257,49 +235,40 @@ class StorybookIntegration(BaseIntegration):
# Check if path exists and is directory or file
if not full_path.exists():
- return {
- "error": f"Path not found: {component_path}",
- "project_id": project_id
- }
+ return {"error": f"Path not found: {component_path}", "project_id": project_id}
if full_path.is_dir():
# Generate for directory
func = generator.generate_stories_for_directory
- if hasattr(func, '__await__'):
- results = await func(
- component_path,
- template=template.upper(),
- dry_run=dry_run
- )
+ if hasattr(func, "__await__"):
+ results = await func(component_path, template=template.upper(), dry_run=dry_run)
else:
- results = func(
- component_path,
- template=template.upper(),
- dry_run=dry_run
- )
+ results = func(component_path, template=template.upper(), dry_run=dry_run)
return {
"project_id": project_id,
"path": component_path,
- "generated_count": len([r for r in (results if isinstance(results, list) else []) if "story" in str(r)]),
+ "generated_count": len(
+ [
+ r
+ for r in (results if isinstance(results, list) else [])
+ if "story" in str(r)
+ ]
+ ),
"results": results if isinstance(results, list) else [],
"dry_run": dry_run,
- "template": template
+ "template": template,
}
else:
# Generate for single file
func = generator.generate_story
- if hasattr(func, '__await__'):
+ if hasattr(func, "__await__"):
story = await func(
- component_path,
- template=template.upper(),
- include_variants=include_variants
+ component_path, template=template.upper(), include_variants=include_variants
)
else:
story = func(
- component_path,
- template=template.upper(),
- include_variants=include_variants
+ component_path, template=template.upper(), include_variants=include_variants
)
return {
@@ -308,14 +277,14 @@ class StorybookIntegration(BaseIntegration):
"story": story,
"template": template,
"include_variants": include_variants,
- "dry_run": dry_run
+ "dry_run": dry_run,
}
except Exception as e:
return {
"error": f"Failed to generate stories: {str(e)}",
"project_id": project_id,
- "component_path": component_path
+ "component_path": component_path,
}
async def generate_theme(
@@ -324,7 +293,7 @@ class StorybookIntegration(BaseIntegration):
brand_title: str = "Design System",
base_theme: str = "light",
output_dir: Optional[str] = None,
- write_files: bool = False
+ write_files: bool = False,
) -> Dict[str, Any]:
"""
Generate Storybook theme from design tokens.
@@ -341,7 +310,7 @@ class StorybookIntegration(BaseIntegration):
"""
try:
from dss.storybook.theme import ThemeGenerator
- from dss.themes import get_default_light_theme, get_default_dark_theme
+ from dss.themes import get_default_dark_theme, get_default_light_theme
# Get project tokens from context
context = await self.context_manager.get_context(project_id)
@@ -351,7 +320,9 @@ class StorybookIntegration(BaseIntegration):
# Convert tokens to list format for ThemeGenerator
tokens_list = [
{"name": name, "value": token.get("value") if isinstance(token, dict) else token}
- for name, token in (context.tokens.items() if hasattr(context, 'tokens') else {}.items())
+ for name, token in (
+ context.tokens.items() if hasattr(context, "tokens") else {}.items()
+ )
]
generator = ThemeGenerator()
@@ -359,45 +330,43 @@ class StorybookIntegration(BaseIntegration):
if write_files and output_dir:
# Generate and write files
func = generator.generate_full_config
- if hasattr(func, '__await__'):
+ if hasattr(func, "__await__"):
files = await func(
- tokens=tokens_list,
- brand_title=brand_title,
- output_dir=output_dir
+ tokens=tokens_list, brand_title=brand_title, output_dir=output_dir
)
else:
- files = func(
- tokens=tokens_list,
- brand_title=brand_title,
- output_dir=output_dir
- )
+ files = func(tokens=tokens_list, brand_title=brand_title, output_dir=output_dir)
return {
"project_id": project_id,
"files_written": list(files.keys()) if isinstance(files, dict) else [],
"output_dir": output_dir,
- "brand_title": brand_title
+ "brand_title": brand_title,
}
else:
# Preview mode - generate file contents
try:
func = generator.generate_from_tokens
- if hasattr(func, '__await__'):
+ if hasattr(func, "__await__"):
theme = await func(tokens_list, brand_title, base_theme)
else:
theme = func(tokens_list, brand_title, base_theme)
except Exception:
# Fallback to default theme
- theme_obj = get_default_light_theme() if base_theme == "light" else get_default_dark_theme()
+ theme_obj = (
+ get_default_light_theme()
+ if base_theme == "light"
+ else get_default_dark_theme()
+ )
theme = {
- "name": theme_obj.name if hasattr(theme_obj, 'name') else "Default",
- "colors": {}
+ "name": theme_obj.name if hasattr(theme_obj, "name") else "Default",
+ "colors": {},
}
# Generate theme file content
theme_file = f"// Storybook theme for {brand_title}\nexport default {str(theme)};"
- manager_file = f"import addons from '@storybook/addons';\nimport theme from './dss-theme';\naddons.setConfig({{ theme }});"
- preview_file = f"import '../dss-theme';\nexport default {{ parameters: {{ actions: {{ argTypesRegex: '^on[A-Z].*' }} }} }};"
+ manager_file = "import addons from '@storybook/addons';\nimport theme from './dss-theme';\naddons.setConfig({ theme });"
+ preview_file = "import '../dss-theme';\nexport default { parameters: { actions: { argTypesRegex: '^on[A-Z].*' } } };"
return {
"project_id": project_id,
@@ -407,16 +376,13 @@ class StorybookIntegration(BaseIntegration):
"files": {
"dss-theme.ts": theme_file,
"manager.ts": manager_file,
- "preview.ts": preview_file
+ "preview.ts": preview_file,
},
- "token_count": len(tokens_list)
+ "token_count": len(tokens_list),
}
except Exception as e:
- return {
- "error": f"Failed to generate theme: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to generate theme: {str(e)}", "project_id": project_id}
async def get_status(self, project_id: str) -> Dict[str, Any]:
"""
@@ -434,7 +400,7 @@ class StorybookIntegration(BaseIntegration):
project_path = await self._get_project_path(project_id)
func = get_storybook_status
- if hasattr(func, '__await__'):
+ if hasattr(func, "__await__"):
status = await func(str(project_path))
else:
status = func(str(project_path))
@@ -442,21 +408,18 @@ class StorybookIntegration(BaseIntegration):
return {
"project_id": project_id,
"path": str(project_path),
- **(status if isinstance(status, dict) else {})
+ **(status if isinstance(status, dict) else {}),
}
except Exception as e:
return {
"error": f"Failed to get Storybook status: {str(e)}",
"project_id": project_id,
- "installed": False
+ "installed": False,
}
async def configure(
- self,
- project_id: str,
- action: str = "init",
- options: Optional[Dict[str, Any]] = None
+ self, project_id: str, action: str = "init", options: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Configure or update Storybook for project.
@@ -480,11 +443,11 @@ class StorybookIntegration(BaseIntegration):
"action": action,
"framework": options.get("framework", "react"),
"builder": options.get("builder", "vite"),
- "typescript": options.get("typescript", True)
+ "typescript": options.get("typescript", True),
}
func = write_storybook_config_file
- if hasattr(func, '__await__'):
+ if hasattr(func, "__await__"):
result = await func(str(project_path), config)
else:
result = func(str(project_path), config)
@@ -495,7 +458,7 @@ class StorybookIntegration(BaseIntegration):
"success": True,
"path": str(project_path),
"config_path": str(project_path / ".storybook"),
- "options": config
+ "options": config,
}
except Exception as e:
@@ -503,16 +466,17 @@ class StorybookIntegration(BaseIntegration):
"error": f"Failed to configure Storybook: {str(e)}",
"project_id": project_id,
"action": action,
- "success": False
+ "success": False,
}
class StorybookTools:
- """MCP tool executor for Storybook integration"""
+ """MCP tool executor for Storybook integration."""
def __init__(self, config: Optional[Dict[str, Any]] = None):
"""
Args:
+
config: Optional Storybook configuration
"""
self.storybook = StorybookIntegration(config)
@@ -533,7 +497,7 @@ class StorybookTools:
"storybook_generate_stories": self.storybook.generate_stories,
"storybook_generate_theme": self.storybook.generate_theme,
"storybook_get_status": self.storybook.get_status,
- "storybook_configure": self.storybook.configure
+ "storybook_configure": self.storybook.configure,
}
handler = handlers.get(tool_name)
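The Storybook wrapper repeatedly probes hasattr(func, "__await__") to pick between awaiting and calling. Worth noting: that test inspects the function object, and a plain async def function does not itself define __await__ (only the coroutine it returns does), so the probe only fires for objects that are directly awaitable. A sketch of a helper that tolerates both sync and async callables, offered as an alternative reading of the intent rather than the project's actual code:

import inspect


async def maybe_await(func, *args, **kwargs):
    # Works for async def functions, sync functions, and sync functions that
    # happen to return an awaitable.
    if inspect.iscoroutinefunction(func):
        return await func(*args, **kwargs)
    result = func(*args, **kwargs)
    return await result if inspect.isawaitable(result) else result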
diff --git a/dss/mcp/integrations/translations.py b/dss/mcp/integrations/translations.py
index e8fca6c..143a564 100644
--- a/dss/mcp/integrations/translations.py
+++ b/dss/mcp/integrations/translations.py
@@ -1,5 +1,5 @@
"""
-Translation Dictionary Integration for MCP
+Translation Dictionary Integration for MCP.
Provides tools for managing translation dictionaries, theme configuration,
and code generation for design system tokens.
@@ -10,14 +10,13 @@ capabilities through MCP tools.
import asyncio
import json
-from typing import Dict, Any, Optional, List
from pathlib import Path
-from datetime import datetime
+from typing import Any, Dict, List, Optional
+
from mcp import types
-from .base import BaseIntegration
from ..context.project_context import get_context_manager
-
+from .base import BaseIntegration
# =============================================================================
# MCP Tool Definitions
@@ -31,18 +30,15 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"include_stats": {
"type": "boolean",
"description": "Include mapping statistics (default: true)",
- "default": True
- }
+ "default": True,
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="translation_get_dictionary",
@@ -50,23 +46,29 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"source": {
"type": "string",
"description": "Dictionary source type",
- "enum": ["figma", "css", "scss", "heroui", "shadcn", "tailwind", "json", "custom"]
+ "enum": [
+ "figma",
+ "css",
+ "scss",
+ "heroui",
+ "shadcn",
+ "tailwind",
+ "json",
+ "custom",
+ ],
},
"include_unmapped": {
"type": "boolean",
"description": "Include list of unmapped source tokens (default: true)",
- "default": True
- }
+ "default": True,
+ },
},
- "required": ["project_id", "source"]
- }
+ "required": ["project_id", "source"],
+ },
),
types.Tool(
name="translation_create_dictionary",
@@ -74,42 +76,44 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"source": {
"type": "string",
"description": "Source type for the dictionary",
- "enum": ["figma", "css", "scss", "heroui", "shadcn", "tailwind", "json", "custom"]
+ "enum": [
+ "figma",
+ "css",
+ "scss",
+ "heroui",
+ "shadcn",
+ "tailwind",
+ "json",
+ "custom",
+ ],
},
"token_mappings": {
"type": "object",
"description": "Token mappings: source_token -> DSS canonical path",
- "additionalProperties": {
- "type": "string"
- }
+ "additionalProperties": {"type": "string"},
},
"component_mappings": {
"type": "object",
"description": "Component mappings: source_component -> DSS component",
- "additionalProperties": {
- "type": "string"
- }
+ "additionalProperties": {"type": "string"},
},
"custom_props": {
"type": "object",
"description": "Custom properties (must use DSS namespace like 'color.brand.myproject.primary')",
- "additionalProperties": {}
+ "additionalProperties": {},
},
"notes": {
"type": "array",
"items": {"type": "string"},
- "description": "Human-readable notes"
- }
+ "description": "Human-readable notes",
+ },
},
- "required": ["project_id", "source"]
- }
+ "required": ["project_id", "source"],
+ },
),
types.Tool(
name="translation_update_dictionary",
@@ -117,43 +121,49 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"source": {
"type": "string",
"description": "Dictionary source type to update",
- "enum": ["figma", "css", "scss", "heroui", "shadcn", "tailwind", "json", "custom"]
+ "enum": [
+ "figma",
+ "css",
+ "scss",
+ "heroui",
+ "shadcn",
+ "tailwind",
+ "json",
+ "custom",
+ ],
},
"token_mappings": {
"type": "object",
"description": "Token mappings to add/update",
- "additionalProperties": {"type": "string"}
+ "additionalProperties": {"type": "string"},
},
"component_mappings": {
"type": "object",
"description": "Component mappings to add/update",
- "additionalProperties": {"type": "string"}
+ "additionalProperties": {"type": "string"},
},
"custom_props": {
"type": "object",
"description": "Custom props to add/update",
- "additionalProperties": {}
+ "additionalProperties": {},
},
"remove_tokens": {
"type": "array",
"items": {"type": "string"},
- "description": "Source tokens to remove from mappings"
+ "description": "Source tokens to remove from mappings",
},
"notes": {
"type": "array",
"items": {"type": "string"},
- "description": "Notes to append"
- }
+ "description": "Notes to append",
+ },
},
- "required": ["project_id", "source"]
- }
+ "required": ["project_id", "source"],
+ },
),
types.Tool(
name="translation_validate_dictionary",
@@ -161,39 +171,39 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"source": {
"type": "string",
"description": "Dictionary source type to validate",
- "enum": ["figma", "css", "scss", "heroui", "shadcn", "tailwind", "json", "custom"]
+ "enum": [
+ "figma",
+ "css",
+ "scss",
+ "heroui",
+ "shadcn",
+ "tailwind",
+ "json",
+ "custom",
+ ],
},
"strict": {
"type": "boolean",
"description": "Strict mode - unknown tokens are errors (default: false)",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["project_id", "source"]
- }
+ "required": ["project_id", "source"],
+ },
),
-
# Category 2: Theme Configuration (4 tools)
types.Tool(
name="theme_get_config",
description="Get project theme configuration including base theme, loaded dictionaries, and custom props summary.",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="theme_resolve",
@@ -201,24 +211,21 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"base_theme": {
"type": "string",
"description": "Base theme to use",
"enum": ["light", "dark"],
- "default": "light"
+ "default": "light",
},
"include_provenance": {
"type": "boolean",
"description": "Include token resolution provenance chain (default: false)",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="theme_add_custom_prop",
@@ -226,24 +233,19 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"prop_name": {
"type": "string",
- "description": "Property name using DSS namespace (e.g., 'color.brand.acme.primary')"
- },
- "prop_value": {
- "description": "Property value (string, number, or object)"
+ "description": "Property name using DSS namespace (e.g., 'color.brand.acme.primary')",
},
+ "prop_value": {"description": "Property value (string, number, or object)"},
"description": {
"type": "string",
- "description": "Optional description of the custom prop"
- }
+ "description": "Optional description of the custom prop",
+ },
},
- "required": ["project_id", "prop_name", "prop_value"]
- }
+ "required": ["project_id", "prop_name", "prop_value"],
+ },
),
types.Tool(
name="theme_get_canonical_tokens",
@@ -254,23 +256,32 @@ TRANSLATION_TOOLS = [
"category": {
"type": "string",
"description": "Filter by category (optional)",
- "enum": ["color", "spacing", "typography", "border", "shadow", "motion", "zIndex", "opacity", "breakpoint"]
+ "enum": [
+ "color",
+ "spacing",
+ "typography",
+ "border",
+ "shadow",
+ "motion",
+ "zIndex",
+ "opacity",
+ "breakpoint",
+ ],
},
"include_aliases": {
"type": "boolean",
"description": "Include token aliases (default: true)",
- "default": True
+ "default": True,
},
"include_components": {
"type": "boolean",
"description": "Include canonical components (default: false)",
- "default": False
- }
+ "default": False,
+ },
},
- "required": []
- }
+ "required": [],
+ },
),
-
# Category 3: Code Generation (3 tools)
types.Tool(
name="codegen_export_css",
@@ -278,38 +289,35 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"base_theme": {
"type": "string",
"description": "Base theme to use",
"enum": ["light", "dark"],
- "default": "light"
+ "default": "light",
},
"selector": {
"type": "string",
"description": "CSS selector for variables (default: ':root')",
- "default": ":root"
+ "default": ":root",
},
"prefix": {
"type": "string",
"description": "CSS variable prefix (default: 'dss')",
- "default": "dss"
+ "default": "dss",
},
"include_comments": {
"type": "boolean",
"description": "Include provenance comments (default: true)",
- "default": True
+ "default": True,
},
"output_path": {
"type": "string",
- "description": "Optional: Write to file instead of returning content"
- }
+ "description": "Optional: Write to file instead of returning content",
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="codegen_export_scss",
@@ -317,33 +325,30 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"base_theme": {
"type": "string",
"description": "Base theme to use",
"enum": ["light", "dark"],
- "default": "light"
+ "default": "light",
},
"prefix": {
"type": "string",
"description": "SCSS variable prefix (default: 'dss')",
- "default": "dss"
+ "default": "dss",
},
"generate_map": {
"type": "boolean",
"description": "Generate SCSS map in addition to variables (default: true)",
- "default": True
+ "default": True,
},
"output_path": {
"type": "string",
- "description": "Optional: Write to file instead of returning content"
- }
+ "description": "Optional: Write to file instead of returning content",
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="codegen_export_json",
@@ -351,34 +356,31 @@ TRANSLATION_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"base_theme": {
"type": "string",
"description": "Base theme to use",
"enum": ["light", "dark"],
- "default": "light"
+ "default": "light",
},
"format": {
"type": "string",
"description": "JSON structure format",
"enum": ["flat", "nested", "style-dictionary"],
- "default": "flat"
+ "default": "flat",
},
"include_metadata": {
"type": "boolean",
"description": "Include resolution metadata (default: true)",
- "default": True
+ "default": True,
},
"output_path": {
"type": "string",
- "description": "Optional: Write to file instead of returning content"
- }
+ "description": "Optional: Write to file instead of returning content",
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
]
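Every types.Tool above declares its arguments as plain JSON Schema in inputSchema, which means they can be checked generically before dispatch. A minimal sketch using the third-party jsonschema package; whether this project validates here or leaves it to the MCP client is not shown, so treat the wiring as an assumption:

from typing import Optional

from jsonschema import ValidationError, validate


def check_arguments(tool, arguments: dict) -> Optional[str]:
    # Returns an error message when arguments violate the tool's declared
    # inputSchema, or None when they conform.
    try:
        validate(instance=arguments, schema=tool.inputSchema)
    except ValidationError as e:
        return f"Invalid arguments for {tool.name}: {e.message}"
    return None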
@@ -387,8 +389,9 @@ TRANSLATION_TOOLS = [
# Integration Class
# =============================================================================
+
class TranslationIntegration(BaseIntegration):
- """Translation dictionary integration wrapper for DSS tools"""
+ """Translation dictionary integration wrapper for DSS tools."""
def __init__(self, config: Optional[Dict[str, Any]] = None):
"""
@@ -420,9 +423,7 @@ class TranslationIntegration(BaseIntegration):
# =========================================================================
async def list_dictionaries(
- self,
- project_id: str,
- include_stats: bool = True
+ self, project_id: str, include_stats: bool = True
) -> Dict[str, Any]:
"""
List all translation dictionaries for project.
@@ -449,17 +450,14 @@ class TranslationIntegration(BaseIntegration):
"project_id": project_id,
"dictionaries": [],
"has_translations": bool(available_files),
- "translations_dir": str(loader.get_translations_dir())
+ "translations_dir": str(loader.get_translations_dir()),
}
if include_stats and available_files:
# Load registry to get statistics
registry = await loader.load_all()
for source in available_files:
- dict_info = {
- "source": source,
- "file": f"{source}.json"
- }
+ dict_info = {"source": source, "file": f"{source}.json"}
if source in registry.dictionaries:
d = registry.dictionaries[source]
dict_info["token_count"] = len(d.mappings.tokens)
@@ -475,16 +473,10 @@ class TranslationIntegration(BaseIntegration):
return result
except Exception as e:
- return {
- "error": f"Failed to list dictionaries: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to list dictionaries: {str(e)}", "project_id": project_id}
async def get_dictionary(
- self,
- project_id: str,
- source: str,
- include_unmapped: bool = True
+ self, project_id: str, source: str, include_unmapped: bool = True
) -> Dict[str, Any]:
"""
Get translation dictionary details.
@@ -505,11 +497,15 @@ class TranslationIntegration(BaseIntegration):
dictionary = await loader.load_dictionary(source)
if not dictionary:
- available = [f.stem for f in loader.translations_dir.glob("*.json")] if loader.translations_dir.exists() else []
+ available = (
+ [f.stem for f in loader.translations_dir.glob("*.json")]
+ if loader.translations_dir.exists()
+ else []
+ )
return {
"error": f"Dictionary not found: {source}",
"project_id": project_id,
- "available": available
+ "available": available,
}
result = {
@@ -517,15 +513,19 @@ class TranslationIntegration(BaseIntegration):
"source": source,
"uuid": str(dictionary.uuid),
"version": dictionary.version,
- "created_at": dictionary.created_at.isoformat() if hasattr(dictionary.created_at, 'isoformat') else str(dictionary.created_at),
- "updated_at": dictionary.updated_at.isoformat() if hasattr(dictionary.updated_at, 'isoformat') else str(dictionary.updated_at),
+ "created_at": dictionary.created_at.isoformat()
+ if hasattr(dictionary.created_at, "isoformat")
+ else str(dictionary.created_at),
+ "updated_at": dictionary.updated_at.isoformat()
+ if hasattr(dictionary.updated_at, "isoformat")
+ else str(dictionary.updated_at),
"mappings": {
"tokens": dictionary.mappings.tokens,
"components": dictionary.mappings.components,
- "patterns": dictionary.mappings.patterns
+ "patterns": dictionary.mappings.patterns,
},
"custom_props": dictionary.custom_props,
- "notes": dictionary.notes or []
+ "notes": dictionary.notes or [],
}
if include_unmapped:
@@ -537,7 +537,7 @@ class TranslationIntegration(BaseIntegration):
return {
"error": f"Failed to get dictionary: {str(e)}",
"project_id": project_id,
- "source": source
+ "source": source,
}
async def create_dictionary(
@@ -547,7 +547,7 @@ class TranslationIntegration(BaseIntegration):
token_mappings: Optional[Dict[str, str]] = None,
component_mappings: Optional[Dict[str, str]] = None,
custom_props: Optional[Dict[str, Any]] = None,
- notes: Optional[List[str]] = None
+ notes: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""
Create new translation dictionary.
@@ -564,8 +564,8 @@ class TranslationIntegration(BaseIntegration):
Creation result
"""
try:
- from dss.translations.writer import TranslationDictionaryWriter
from dss.translations.validator import TranslationValidator
+ from dss.translations.writer import TranslationDictionaryWriter
project_path = await self._get_project_path(project_id)
writer = TranslationDictionaryWriter(str(project_path))
@@ -578,9 +578,9 @@ class TranslationIntegration(BaseIntegration):
"source": source,
"mappings": {
"tokens": token_mappings or {},
- "components": component_mappings or {}
+ "components": component_mappings or {},
},
- "custom_props": custom_props or {}
+ "custom_props": custom_props or {},
}
validation_result = validator.validate_dictionary(test_data)
@@ -588,7 +588,7 @@ class TranslationIntegration(BaseIntegration):
return {
"error": "Validation failed",
"errors": [str(e) for e in validation_result.errors],
- "warnings": [str(w) for w in validation_result.warnings]
+ "warnings": [str(w) for w in validation_result.warnings],
}
# Create the dictionary
@@ -598,7 +598,7 @@ class TranslationIntegration(BaseIntegration):
token_mappings=token_mappings,
component_mappings=component_mappings,
custom_props=custom_props,
- notes=notes
+ notes=notes,
)
return {
@@ -610,14 +610,16 @@ class TranslationIntegration(BaseIntegration):
"token_count": len(dictionary.mappings.tokens),
"component_count": len(dictionary.mappings.components),
"custom_prop_count": len(dictionary.custom_props),
- "warnings": [str(w) for w in validation_result.warnings] if validation_result.warnings else []
+ "warnings": [str(w) for w in validation_result.warnings]
+ if validation_result.warnings
+ else [],
}
except Exception as e:
return {
"error": f"Failed to create dictionary: {str(e)}",
"project_id": project_id,
- "source": source
+ "source": source,
}
async def update_dictionary(
@@ -628,7 +630,7 @@ class TranslationIntegration(BaseIntegration):
component_mappings: Optional[Dict[str, str]] = None,
custom_props: Optional[Dict[str, Any]] = None,
remove_tokens: Optional[List[str]] = None,
- notes: Optional[List[str]] = None
+ notes: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""
Update existing translation dictionary.
@@ -656,10 +658,7 @@ class TranslationIntegration(BaseIntegration):
# Load existing dictionary
existing = await loader.load_dictionary(source)
if not existing:
- return {
- "error": f"Dictionary not found: {source}",
- "project_id": project_id
- }
+ return {"error": f"Dictionary not found: {source}", "project_id": project_id}
# Merge updates
updated_tokens = dict(existing.mappings.tokens)
@@ -688,7 +687,7 @@ class TranslationIntegration(BaseIntegration):
token_mappings=updated_tokens,
component_mappings=updated_components,
custom_props=updated_custom,
- notes=updated_notes
+ notes=updated_notes,
)
return {
@@ -700,21 +699,18 @@ class TranslationIntegration(BaseIntegration):
"token_count": len(dictionary.mappings.tokens),
"component_count": len(dictionary.mappings.components),
"custom_prop_count": len(dictionary.custom_props),
- "notes_count": len(dictionary.notes or [])
+ "notes_count": len(dictionary.notes or []),
}
except Exception as e:
return {
"error": f"Failed to update dictionary: {str(e)}",
"project_id": project_id,
- "source": source
+ "source": source,
}
async def validate_dictionary(
- self,
- project_id: str,
- source: str,
- strict: bool = False
+ self, project_id: str, source: str, strict: bool = False
) -> Dict[str, Any]:
"""
Validate a translation dictionary.
@@ -739,7 +735,7 @@ class TranslationIntegration(BaseIntegration):
return {
"error": f"Dictionary not found: {source}",
"project_id": project_id,
- "valid": False
+ "valid": False,
}
validator = TranslationValidator()
@@ -751,16 +747,20 @@ class TranslationIntegration(BaseIntegration):
"project": project_id,
"source": source,
"version": dictionary.version,
- "created_at": dictionary.created_at.isoformat() if hasattr(dictionary.created_at, 'isoformat') else str(dictionary.created_at),
- "updated_at": dictionary.updated_at.isoformat() if hasattr(dictionary.updated_at, 'isoformat') else str(dictionary.updated_at),
+ "created_at": dictionary.created_at.isoformat()
+ if hasattr(dictionary.created_at, "isoformat")
+ else str(dictionary.created_at),
+ "updated_at": dictionary.updated_at.isoformat()
+ if hasattr(dictionary.updated_at, "isoformat")
+ else str(dictionary.updated_at),
"mappings": {
"tokens": dictionary.mappings.tokens,
"components": dictionary.mappings.components,
- "patterns": dictionary.mappings.patterns
+ "patterns": dictionary.mappings.patterns,
},
"custom_props": dictionary.custom_props,
"unmapped": dictionary.unmapped,
- "notes": dictionary.notes or []
+ "notes": dictionary.notes or [],
}
result = validator.validate_dictionary(dict_data, strict=strict)
@@ -770,7 +770,7 @@ class TranslationIntegration(BaseIntegration):
"source": source,
"valid": result.is_valid,
"errors": [str(e) for e in (result.errors or [])],
- "warnings": [str(w) for w in (result.warnings or [])]
+ "warnings": [str(w) for w in (result.warnings or [])],
}
except Exception as e:
@@ -778,7 +778,7 @@ class TranslationIntegration(BaseIntegration):
"error": f"Failed to validate dictionary: {str(e)}",
"project_id": project_id,
"source": source,
- "valid": False
+ "valid": False,
}
# =========================================================================
@@ -812,20 +812,14 @@ class TranslationIntegration(BaseIntegration):
"total_component_mappings": len(registry.combined_component_map),
"total_custom_props": len(registry.all_custom_props),
"conflicts": registry.conflicts,
- "has_config": loader.has_translations()
+ "has_config": loader.has_translations(),
}
except Exception as e:
- return {
- "error": f"Failed to get config: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to get config: {str(e)}", "project_id": project_id}
async def resolve_theme(
- self,
- project_id: str,
- base_theme: str = "light",
- include_provenance: bool = False
+ self, project_id: str, base_theme: str = "light", include_provenance: bool = False
) -> Dict[str, Any]:
"""
Resolve complete project theme.
@@ -850,10 +844,7 @@ class TranslationIntegration(BaseIntegration):
# Create merger and resolve
merger = ThemeMerger(registry)
- resolved = await merger.merge(
- base_theme=base_theme,
- project_name=project_id
- )
+ resolved = await merger.merge(base_theme=base_theme, project_name=project_id)
# Format tokens for output
tokens = {}
@@ -861,18 +852,16 @@ class TranslationIntegration(BaseIntegration):
token_data = {
"value": str(resolved_token.value),
"source_token": resolved_token.source_token,
- "is_custom": resolved_token.is_custom
+ "is_custom": resolved_token.is_custom,
}
- if include_provenance and hasattr(resolved_token, 'provenance'):
+ if include_provenance and hasattr(resolved_token, "provenance"):
token_data["provenance"] = resolved_token.provenance
tokens[dss_path] = token_data
custom_props = {}
for dss_path, resolved_token in resolved.custom_props.items():
- prop_data = {
- "value": str(resolved_token.value)
- }
- if include_provenance and hasattr(resolved_token, 'provenance'):
+ prop_data = {"value": str(resolved_token.value)}
+ if include_provenance and hasattr(resolved_token, "provenance"):
prop_data["provenance"] = resolved_token.provenance
custom_props[dss_path] = prop_data
@@ -881,26 +870,21 @@ class TranslationIntegration(BaseIntegration):
"name": resolved.name,
"base_theme": resolved.base_theme,
"version": resolved.version,
- "resolved_at": resolved.resolved_at.isoformat() if hasattr(resolved.resolved_at, 'isoformat') else str(resolved.resolved_at),
+ "resolved_at": resolved.resolved_at.isoformat()
+ if hasattr(resolved.resolved_at, "isoformat")
+ else str(resolved.resolved_at),
"translations_applied": resolved.translations_applied,
"token_count": len(tokens),
"custom_prop_count": len(custom_props),
"tokens": tokens,
- "custom_props": custom_props
+ "custom_props": custom_props,
}
except Exception as e:
- return {
- "error": f"Failed to resolve theme: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to resolve theme: {str(e)}", "project_id": project_id}
async def add_custom_prop(
- self,
- project_id: str,
- prop_name: str,
- prop_value: Any,
- description: Optional[str] = None
+ self, project_id: str, prop_name: str, prop_value: Any, description: Optional[str] = None
) -> Dict[str, Any]:
"""
Add custom property to custom.json.
@@ -937,7 +921,7 @@ class TranslationIntegration(BaseIntegration):
source="custom",
project=project_id,
custom_props=custom_props,
- notes=[f"Added {prop_name}"] if not description else [description]
+ notes=[f"Added {prop_name}"] if not description else [description],
)
return {
@@ -945,21 +929,21 @@ class TranslationIntegration(BaseIntegration):
"prop_name": prop_name,
"prop_value": prop_value,
"added": True,
- "custom_prop_count": len(dictionary.custom_props)
+ "custom_prop_count": len(dictionary.custom_props),
}
except Exception as e:
return {
"error": f"Failed to add custom prop: {str(e)}",
"project_id": project_id,
- "prop_name": prop_name
+ "prop_name": prop_name,
}
async def get_canonical_tokens(
self,
category: Optional[str] = None,
include_aliases: bool = True,
- include_components: bool = False
+ include_components: bool = False,
) -> Dict[str, Any]:
"""
Get DSS canonical token structure.
@@ -974,15 +958,13 @@ class TranslationIntegration(BaseIntegration):
"""
try:
from dss.translations.canonical import (
+ DSS_CANONICAL_COMPONENTS,
DSS_CANONICAL_TOKENS,
DSS_TOKEN_ALIASES,
- DSS_CANONICAL_COMPONENTS,
- get_canonical_token_categories
+ get_canonical_token_categories,
)
- result = {
- "total_tokens": len(DSS_CANONICAL_TOKENS)
- }
+ result = {"total_tokens": len(DSS_CANONICAL_TOKENS)}
if category:
# Filter by category
@@ -1005,9 +987,7 @@ class TranslationIntegration(BaseIntegration):
return result
except Exception as e:
- return {
- "error": f"Failed to get canonical tokens: {str(e)}"
- }
+ return {"error": f"Failed to get canonical tokens: {str(e)}"}
# =========================================================================
# Category 3: Code Generation
@@ -1020,7 +1000,7 @@ class TranslationIntegration(BaseIntegration):
selector: str = ":root",
prefix: str = "dss",
include_comments: bool = True,
- output_path: Optional[str] = None
+ output_path: Optional[str] = None,
) -> Dict[str, Any]:
"""
Generate CSS variables from resolved theme.
@@ -1039,9 +1019,7 @@ class TranslationIntegration(BaseIntegration):
try:
# Resolve theme first
resolved_result = await self.resolve_theme(
- project_id,
- base_theme,
- include_provenance=include_comments
+ project_id, base_theme, include_provenance=include_comments
)
if "error" in resolved_result:
@@ -1051,8 +1029,12 @@ class TranslationIntegration(BaseIntegration):
css_lines = []
if include_comments:
css_lines.append(f"/* DSS Theme: {resolved_result['name']} */")
- css_lines.append(f"/* Base: {base_theme} | Generated: {resolved_result['resolved_at']} */")
- css_lines.append(f"/* Translations: {', '.join(resolved_result['translations_applied'])} */")
+ css_lines.append(
+ f"/* Base: {base_theme} | Generated: {resolved_result['resolved_at']} */"
+ )
+ css_lines.append(
+ f"/* Translations: {', '.join(resolved_result['translations_applied'])} */"
+ )
css_lines.append("")
css_lines.append(f"{selector} {{")
@@ -1089,7 +1071,7 @@ class TranslationIntegration(BaseIntegration):
except ValueError:
return {
"error": "Output path must be within project directory",
- "project_id": project_id
+ "project_id": project_id,
}
full_path.parent.mkdir(parents=True, exist_ok=True)
@@ -1101,21 +1083,18 @@ class TranslationIntegration(BaseIntegration):
"output_path": str(full_path),
"written": True,
"token_count": resolved_result["token_count"],
- "custom_prop_count": resolved_result["custom_prop_count"]
+ "custom_prop_count": resolved_result["custom_prop_count"],
}
return {
"project_id": project_id,
"content": css_content,
"token_count": resolved_result["token_count"],
- "custom_prop_count": resolved_result["custom_prop_count"]
+ "custom_prop_count": resolved_result["custom_prop_count"],
}
except Exception as e:
- return {
- "error": f"Failed to export CSS: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to export CSS: {str(e)}", "project_id": project_id}
async def export_scss(
self,
@@ -1123,7 +1102,7 @@ class TranslationIntegration(BaseIntegration):
base_theme: str = "light",
prefix: str = "dss",
generate_map: bool = True,
- output_path: Optional[str] = None
+ output_path: Optional[str] = None,
) -> Dict[str, Any]:
"""
Generate SCSS variables from resolved theme.
@@ -1140,7 +1119,9 @@ class TranslationIntegration(BaseIntegration):
"""
try:
# Resolve theme first
- resolved_result = await self.resolve_theme(project_id, base_theme, include_provenance=False)
+ resolved_result = await self.resolve_theme(
+ project_id, base_theme, include_provenance=False
+ )
if "error" in resolved_result:
return resolved_result
@@ -1148,8 +1129,12 @@ class TranslationIntegration(BaseIntegration):
# Generate SCSS
scss_lines = []
scss_lines.append(f"// DSS Theme: {resolved_result['name']}")
- scss_lines.append(f"// Base: {base_theme} | Generated: {resolved_result['resolved_at']}")
- scss_lines.append(f"// Translations: {', '.join(resolved_result['translations_applied'])}")
+ scss_lines.append(
+ f"// Base: {base_theme} | Generated: {resolved_result['resolved_at']}"
+ )
+ scss_lines.append(
+ f"// Translations: {', '.join(resolved_result['translations_applied'])}"
+ )
scss_lines.append("")
# Variables
@@ -1173,7 +1158,7 @@ class TranslationIntegration(BaseIntegration):
# Add all tokens to map
token_list = list(resolved_result["tokens"].items())
for i, (dss_path, token_data) in enumerate(token_list):
- key = dss_path.replace('.', '-')
+ key = dss_path.replace(".", "-")
value = token_data["value"]
comma = "," if i < len(token_list) - 1 else ""
scss_lines.append(f' "{key}": {value}{comma}')
@@ -1183,7 +1168,7 @@ class TranslationIntegration(BaseIntegration):
if custom_list and token_list:
scss_lines[-1] = scss_lines[-1] + ","
for i, (dss_path, prop_data) in enumerate(custom_list):
- key = dss_path.replace('.', '-')
+ key = dss_path.replace(".", "-")
value = prop_data["value"]
comma = "," if i < len(custom_list) - 1 else ""
scss_lines.append(f' "{key}": {value}{comma}')
@@ -1202,7 +1187,7 @@ class TranslationIntegration(BaseIntegration):
except ValueError:
return {
"error": "Output path must be within project directory",
- "project_id": project_id
+ "project_id": project_id,
}
full_path.parent.mkdir(parents=True, exist_ok=True)
@@ -1214,21 +1199,18 @@ class TranslationIntegration(BaseIntegration):
"output_path": str(full_path),
"written": True,
"token_count": resolved_result["token_count"],
- "custom_prop_count": resolved_result["custom_prop_count"]
+ "custom_prop_count": resolved_result["custom_prop_count"],
}
return {
"project_id": project_id,
"content": scss_content,
"token_count": resolved_result["token_count"],
- "custom_prop_count": resolved_result["custom_prop_count"]
+ "custom_prop_count": resolved_result["custom_prop_count"],
}
except Exception as e:
- return {
- "error": f"Failed to export SCSS: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to export SCSS: {str(e)}", "project_id": project_id}
async def export_json(
self,
@@ -1236,7 +1218,7 @@ class TranslationIntegration(BaseIntegration):
base_theme: str = "light",
format: str = "flat",
include_metadata: bool = True,
- output_path: Optional[str] = None
+ output_path: Optional[str] = None,
) -> Dict[str, Any]:
"""
Export resolved theme as JSON.
@@ -1253,7 +1235,9 @@ class TranslationIntegration(BaseIntegration):
"""
try:
# Resolve theme first
- resolved_result = await self.resolve_theme(project_id, base_theme, include_provenance=False)
+ resolved_result = await self.resolve_theme(
+ project_id, base_theme, include_provenance=False
+ )
if "error" in resolved_result:
return resolved_result
@@ -1269,7 +1253,7 @@ class TranslationIntegration(BaseIntegration):
# Flat format (default)
json_data = {
"tokens": resolved_result["tokens"],
- "customProps": resolved_result["custom_props"]
+ "customProps": resolved_result["custom_props"],
}
if include_metadata:
@@ -1279,7 +1263,7 @@ class TranslationIntegration(BaseIntegration):
"generated_at": resolved_result["resolved_at"],
"token_count": resolved_result["token_count"],
"custom_prop_count": resolved_result["custom_prop_count"],
- "translations": resolved_result["translations_applied"]
+ "translations": resolved_result["translations_applied"],
}
json_content = json.dumps(json_data, indent=2)
@@ -1294,7 +1278,7 @@ class TranslationIntegration(BaseIntegration):
except ValueError:
return {
"error": "Output path must be within project directory",
- "project_id": project_id
+ "project_id": project_id,
}
full_path.parent.mkdir(parents=True, exist_ok=True)
@@ -1306,28 +1290,25 @@ class TranslationIntegration(BaseIntegration):
"output_path": str(full_path),
"written": True,
"token_count": resolved_result["token_count"],
- "custom_prop_count": resolved_result["custom_prop_count"]
+ "custom_prop_count": resolved_result["custom_prop_count"],
}
return {
"project_id": project_id,
"content": json_data,
"token_count": resolved_result["token_count"],
- "custom_prop_count": resolved_result["custom_prop_count"]
+ "custom_prop_count": resolved_result["custom_prop_count"],
}
except Exception as e:
- return {
- "error": f"Failed to export JSON: {str(e)}",
- "project_id": project_id
- }
+ return {"error": f"Failed to export JSON: {str(e)}", "project_id": project_id}
def _build_nested_tokens(self, resolved_result: Dict[str, Any]) -> Dict[str, Any]:
"""Build nested token structure from flat tokens."""
nested = {}
for dss_path, token_data in resolved_result["tokens"].items():
- parts = dss_path.split('.')
+ parts = dss_path.split(".")
current = nested
for part in parts[:-1]:
if part not in current:
@@ -1339,7 +1320,7 @@ class TranslationIntegration(BaseIntegration):
if "customProps" not in nested:
nested["customProps"] = {}
for dss_path, prop_data in resolved_result["custom_props"].items():
- parts = dss_path.split('.')
+ parts = dss_path.split(".")
current = nested["customProps"]
for part in parts[:-1]:
if part not in current:
@@ -1354,7 +1335,7 @@ class TranslationIntegration(BaseIntegration):
style_dict = {}
for dss_path, token_data in resolved_result["tokens"].items():
- parts = dss_path.split('.')
+ parts = dss_path.split(".")
current = style_dict
for part in parts[:-1]:
if part not in current:
@@ -1363,14 +1344,14 @@ class TranslationIntegration(BaseIntegration):
current[parts[-1]] = {
"value": token_data["value"],
"type": self._infer_token_type(dss_path),
- "description": f"DSS token {dss_path}"
+ "description": f"DSS token {dss_path}",
}
# Add custom props
if "custom" not in style_dict:
style_dict["custom"] = {}
for dss_path, prop_data in resolved_result["custom_props"].items():
- parts = dss_path.split('.')
+ parts = dss_path.split(".")
current = style_dict["custom"]
for part in parts[:-1]:
if part not in current:
@@ -1379,7 +1360,7 @@ class TranslationIntegration(BaseIntegration):
current[parts[-1]] = {
"value": prop_data["value"],
"type": "custom",
- "description": f"Custom token {dss_path}"
+ "description": f"Custom token {dss_path}",
}
return style_dict
@@ -1404,8 +1385,9 @@ class TranslationIntegration(BaseIntegration):
# MCP Tool Executor
# =============================================================================
+
class TranslationTools:
- """MCP tool executor for translation integration"""
+ """MCP tool executor for translation integration."""
def __init__(self, config: Optional[Dict[str, Any]] = None):
"""
diff --git a/dss/mcp/operations.py b/dss/mcp/operations.py
index 3af95ef..7e806bc 100644
--- a/dss/mcp/operations.py
+++ b/dss/mcp/operations.py
@@ -1,5 +1,5 @@
"""
-DSS MCP Operations Module
+DSS MCP Operations Module.
Handles long-running operations with status tracking, result storage, and cancellation support.
Operations are queued and executed asynchronously with persistent state.
@@ -8,16 +8,14 @@ Operations are queued and executed asynchronously with persistent state.
import asyncio
import json
import uuid
-from typing import Optional, Dict, Any, Callable
from datetime import datetime
from enum import Enum
-
-from .config import mcp_config
-from dss.storage.json_store import ActivityLog, read_json, write_json, DATA_DIR # JSON storage
+from typing import Any, Dict, Optional
class OperationStatus(Enum):
- """Operation execution status"""
+ """Operation execution status."""
+
PENDING = "pending"
RUNNING = "running"
COMPLETED = "completed"
@@ -26,14 +24,9 @@ class OperationStatus(Enum):
class Operation:
- """Represents a single operation"""
+ """Represents a single operation."""
- def __init__(
- self,
- operation_type: str,
- args: Dict[str, Any],
- user_id: Optional[str] = None
- ):
+ def __init__(self, operation_type: str, args: Dict[str, Any], user_id: Optional[str] = None):
self.id = str(uuid.uuid4())
self.operation_type = operation_type
self.args = args
@@ -47,7 +40,7 @@ class Operation:
self.completed_at = None
def to_dict(self) -> Dict[str, Any]:
- """Convert to dictionary for storage"""
+ """Convert to dictionary for storage."""
return {
"id": self.id,
"operation_type": self.operation_type,
@@ -59,16 +52,16 @@ class Operation:
"progress": self.progress,
"created_at": self.created_at.isoformat(),
"started_at": self.started_at.isoformat() if self.started_at else None,
- "completed_at": self.completed_at.isoformat() if self.completed_at else None
+ "completed_at": self.completed_at.isoformat() if self.completed_at else None,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "Operation":
- """Reconstruct from dictionary"""
+ """Reconstruct from dictionary."""
op = cls(
operation_type=data["operation_type"],
args=json.loads(data["args"]),
- user_id=data.get("user_id")
+ user_id=data.get("user_id"),
)
op.id = data["id"]
op.status = OperationStatus(data["status"])
@@ -99,7 +92,7 @@ class OperationQueue:
@classmethod
async def initialize(cls, num_workers: int = 4):
- """Initialize operation queue with worker pool"""
+ """Initialize operation queue with worker pool."""
cls._queue = asyncio.Queue()
cls._workers = []
@@ -109,10 +102,7 @@ class OperationQueue:
@classmethod
async def enqueue(
- cls,
- operation_type: str,
- args: Dict[str, Any],
- user_id: Optional[str] = None
+ cls, operation_type: str, args: Dict[str, Any], user_id: Optional[str] = None
) -> str:
"""
Enqueue a new operation.
@@ -140,7 +130,7 @@ class OperationQueue:
@classmethod
def get_status(cls, operation_id: str) -> Optional[Dict[str, Any]]:
- """Get operation status and result"""
+ """Get operation status and result."""
# Check in-memory first
if operation_id in cls._active_operations:
op = cls._active_operations[operation_id]
@@ -149,7 +139,7 @@ class OperationQueue:
"status": op.status.value,
"progress": op.progress,
"result": op.result,
- "error": op.error
+ "error": op.error,
}
# Check database for completed operations
@@ -167,12 +157,12 @@ class OperationQueue:
"status": op.status.value,
"progress": op.progress,
"result": op.result,
- "error": op.error
+ "error": op.error,
}
@classmethod
def get_result(cls, operation_id: str) -> Optional[Any]:
- """Get operation result (blocks if still running)"""
+ """Get operation result (blocks if still running)."""
status = cls.get_status(operation_id)
if not status:
raise ValueError(f"Operation not found: {operation_id}")
@@ -182,13 +172,11 @@ class OperationQueue:
elif status["status"] == OperationStatus.FAILED.value:
raise RuntimeError(f"Operation failed: {status['error']}")
else:
- raise RuntimeError(
- f"Operation still {status['status']}: {operation_id}"
- )
+ raise RuntimeError(f"Operation still {status['status']}: {operation_id}")
@classmethod
def cancel(cls, operation_id: str) -> bool:
- """Cancel a pending operation"""
+ """Cancel a pending operation."""
if operation_id not in cls._active_operations:
return False
@@ -208,9 +196,9 @@ class OperationQueue:
operation_type: Optional[str] = None,
status: Optional[str] = None,
user_id: Optional[str] = None,
- limit: int = 100
+ limit: int = 100,
) -> list:
- """List operations with optional filtering"""
+ """List operations with optional filtering."""
with get_connection() as conn:
cursor = conn.cursor()
@@ -239,20 +227,23 @@ class OperationQueue:
@classmethod
def _save_operation(cls, operation: Operation):
- """Save operation to database"""
+ """Save operation to database."""
data = operation.to_dict()
with get_connection() as conn:
- conn.execute("""
+ conn.execute(
+ """
INSERT OR REPLACE INTO operations (
id, operation_type, args, user_id, status, result,
error, progress, created_at, started_at, completed_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- """, tuple(data.values()))
+ """,
+ tuple(data.values()),
+ )
@classmethod
async def _worker(cls, worker_id: int):
- """Worker coroutine that processes operations from queue"""
+ """Worker coroutine that processes operations from queue."""
while True:
try:
operation = await cls._queue.get()
@@ -292,9 +283,10 @@ class OperationQueue:
@classmethod
def ensure_operations_table(cls):
- """Ensure operations table exists"""
+ """Ensure operations table exists."""
with get_connection() as conn:
- conn.execute("""
+ conn.execute(
+ """
CREATE TABLE IF NOT EXISTS operations (
id TEXT PRIMARY KEY,
operation_type TEXT NOT NULL,
@@ -308,16 +300,13 @@ class OperationQueue:
started_at TEXT,
completed_at TEXT
)
- """)
+ """
+ )
conn.execute(
"CREATE INDEX IF NOT EXISTS idx_operations_type ON operations(operation_type)"
)
- conn.execute(
- "CREATE INDEX IF NOT EXISTS idx_operations_status ON operations(status)"
- )
- conn.execute(
- "CREATE INDEX IF NOT EXISTS idx_operations_user ON operations(user_id)"
- )
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_operations_status ON operations(status)")
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_operations_user ON operations(user_id)")
# Initialize table on import
diff --git a/dss/mcp/plugin_registry.py b/dss/mcp/plugin_registry.py
index aaf1406..32bf063 100644
--- a/dss/mcp/plugin_registry.py
+++ b/dss/mcp/plugin_registry.py
@@ -1,16 +1,17 @@
"""
-Dynamic Plugin Registry for DSS MCP Server
+Dynamic Plugin Registry for DSS MCP Server.
Automatically discovers and registers MCP tools from the plugins/ directory.
Plugins follow a simple contract: export a TOOLS list and a handler class with an execute_tool() method.
"""
-import pkgutil
import importlib
import inspect
import logging
+import pkgutil
import types as python_types
-from typing import List, Dict, Any, Optional
+from typing import Any, Dict, List, Optional
+
from mcp import types
logger = logging.getLogger("dss.mcp.plugins")
@@ -80,8 +81,8 @@ class PluginRegistry:
continue
# Skip template and private modules
- module_basename = name.split('.')[-1]
- if module_basename.startswith('_'):
+ module_basename = name.split(".")[-1]
+ if module_basename.startswith("_"):
logger.debug(f"Skipping private module: {module_basename}")
continue
@@ -110,7 +111,7 @@ class PluginRegistry:
return
# Contract Check 1: Must export TOOLS list
- if not hasattr(module, 'TOOLS'):
+ if not hasattr(module, "TOOLS"):
logger.debug(f"Module {module_name} has no TOOLS export, skipping")
return
@@ -129,7 +130,7 @@ class PluginRegistry:
return
# Contract Check 3: execute_tool must be async (coroutine)
- execute_tool_method = getattr(handler_instance, 'execute_tool', None)
+ execute_tool_method = getattr(handler_instance, "execute_tool", None)
if execute_tool_method and not inspect.iscoroutinefunction(execute_tool_method):
logger.error(
f"Plugin '{module_name}' is invalid: 'PluginTools.execute_tool' must be "
@@ -138,14 +139,14 @@ class PluginRegistry:
return
# Extract metadata
- metadata = getattr(module, 'PLUGIN_METADATA', {})
- plugin_name = metadata.get('name', module_name.split('.')[-1])
- plugin_version = metadata.get('version', 'unknown')
+ metadata = getattr(module, "PLUGIN_METADATA", {})
+ plugin_name = metadata.get("name", module_name.split(".")[-1])
+ plugin_version = metadata.get("version", "unknown")
# Validate tools and check for name collisions
registered_count = 0
for tool in module.TOOLS:
- if not hasattr(tool, 'name'):
+ if not hasattr(tool, "name"):
logger.error(f"Tool in {module_name} missing 'name' attribute")
continue
@@ -164,13 +165,15 @@ class PluginRegistry:
logger.debug(f"Registered tool: {tool.name}")
# Track plugin metadata
- self.plugins.append({
- "name": plugin_name,
- "version": plugin_version,
- "module": module_name,
- "tools_count": registered_count,
- "author": metadata.get('author', 'unknown')
- })
+ self.plugins.append(
+ {
+ "name": plugin_name,
+ "version": plugin_version,
+ "module": module_name,
+ "tools_count": registered_count,
+ "author": metadata.get("author", "unknown"),
+ }
+ )
self._loaded_modules.add(module_name)
@@ -195,7 +198,7 @@ class PluginRegistry:
continue
# Look for execute_tool method
- if hasattr(obj, 'execute_tool'):
+ if hasattr(obj, "execute_tool"):
try:
# Try to instantiate with no args
instance = obj()
@@ -213,9 +216,7 @@ class PluginRegistry:
)
return None
except Exception as e:
- logger.error(
- f"Failed to instantiate handler {name} in {module.__name__}: {e}"
- )
+ logger.error(f"Failed to instantiate handler {name} in {module.__name__}: {e}")
return None
return None
@@ -246,16 +247,17 @@ class PluginRegistry:
return handler.execute_tool(name, arguments)
def get_all_tools(self) -> List[types.Tool]:
- """Get merged list of all plugin tools"""
+ """Get merged list of all plugin tools."""
return self.tools.copy()
def get_plugin_info(self) -> List[Dict[str, Any]]:
- """Get metadata for all loaded plugins"""
+ """Get metadata for all loaded plugins."""
return self.plugins.copy()
def reload_plugins(self, plugins_package_name: str = "dss_mcp.plugins"):
"""
Reload all plugins (useful for development).
+
WARNING: This clears all registered plugins and reloads from scratch.
Args:
@@ -272,4 +274,6 @@ class PluginRegistry:
# Reload
self.load_plugins(plugins_package_name)
- logger.info(f"Plugin reload complete. Loaded {len(self.plugins)} plugins, {len(self.tools)} tools")
+ logger.info(
+ f"Plugin reload complete. Loaded {len(self.plugins)} plugins, {len(self.tools)} tools"
+ )
diff --git a/dss/mcp/plugins/__init__.py b/dss/mcp/plugins/__init__.py
index ff86d80..ee7937f 100644
--- a/dss/mcp/plugins/__init__.py
+++ b/dss/mcp/plugins/__init__.py
@@ -1,5 +1,5 @@
"""
-DSS MCP Server Plugins
+DSS MCP Server Plugins.
This directory contains dynamically loaded plugins for the DSS MCP server.
diff --git a/dss/mcp/plugins/_template.py b/dss/mcp/plugins/_template.py
index 9fd7030..8146161 100644
--- a/dss/mcp/plugins/_template.py
+++ b/dss/mcp/plugins/_template.py
@@ -1,5 +1,5 @@
"""
-Plugin Template for DSS MCP Server
+Plugin Template for DSS MCP Server.
This file serves as both documentation and a starting point for new plugins.
@@ -13,9 +13,9 @@ To create a new plugin:
The plugin will be automatically discovered and registered.
"""
-from typing import Dict, Any, List
-from mcp import types
+from typing import Any, Dict, List
+
+from mcp import types
# =============================================================================
# 1. PLUGIN METADATA (Optional but recommended)
@@ -25,7 +25,7 @@ PLUGIN_METADATA = {
"name": "Template Plugin",
"version": "1.0.0",
"author": "DSS Team",
- "description": "Template plugin demonstrating the plugin contract"
+ "description": "Template plugin demonstrating the plugin contract",
}
@@ -43,10 +43,10 @@ TOOLS = [
"name": {
"type": "string",
"description": "Name to greet (optional)",
- "default": "World"
+ "default": "World",
}
- }
- }
+ },
+ },
),
types.Tool(
name="template_echo",
@@ -54,19 +54,16 @@ TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "message": {
- "type": "string",
- "description": "Message to echo back"
- },
+ "message": {"type": "string", "description": "Message to echo back"},
"uppercase": {
"type": "boolean",
"description": "Convert to uppercase (optional)",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["message"]
- }
- )
+ "required": ["message"],
+ },
+ ),
]
@@ -74,6 +71,7 @@ TOOLS = [
# 3. PLUGIN TOOLS HANDLER (Required)
# =============================================================================
+
class PluginTools:
"""
Handler class for plugin tools.
@@ -95,9 +93,9 @@ class PluginTools:
**kwargs: Optional context/dependencies (context_manager, user_id, etc.)
"""
# Extract any dependencies you need
- self.context_manager = kwargs.get('context_manager')
- self.user_id = kwargs.get('user_id')
- self.audit_log = kwargs.get('audit_log')
+ self.context_manager = kwargs.get("context_manager")
+ self.user_id = kwargs.get("user_id")
+ self.audit_log = kwargs.get("audit_log")
# Initialize any plugin-specific state
self.call_count = 0
@@ -140,12 +138,7 @@ class PluginTools:
message = f"Hello, {name}! The plugin system is operational. (Call #{self.call_count})"
- return [
- types.TextContent(
- type="text",
- text=message
- )
- ]
+ return [types.TextContent(type="text", text=message)]
async def _handle_echo(self, arguments: Dict[str, Any]) -> List[types.TextContent]:
"""
@@ -163,12 +156,7 @@ class PluginTools:
if uppercase:
message = message.upper()
- return [
- types.TextContent(
- type="text",
- text=f"Echo: {message}"
- )
- ]
+ return [types.TextContent(type="text", text=f"Echo: {message}")]
# =============================================================================
diff --git a/dss/mcp/plugins/hello_world.py b/dss/mcp/plugins/hello_world.py
index 344f959..92d2371 100644
--- a/dss/mcp/plugins/hello_world.py
+++ b/dss/mcp/plugins/hello_world.py
@@ -1,18 +1,18 @@
"""
-Hello World Plugin - Test Plugin for DSS MCP Server
+Hello World Plugin - Test Plugin for DSS MCP Server.
Simple plugin to validate the plugin loading system is working correctly.
"""
-from typing import Dict, Any, List
-from mcp import types
+from typing import Any, Dict, List
+
+from mcp import types
PLUGIN_METADATA = {
"name": "Hello World Plugin",
"version": "1.0.0",
"author": "DSS Team",
- "description": "Simple test plugin to validate plugin system"
+ "description": "Simple test plugin to validate plugin system",
}
@@ -23,33 +23,26 @@ TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "name": {
- "type": "string",
- "description": "Name to greet",
- "default": "World"
- }
- }
- }
+ "name": {"type": "string", "description": "Name to greet", "default": "World"}
+ },
+ },
),
types.Tool(
name="plugin_status",
description="Get status of the plugin system",
- inputSchema={
- "type": "object",
- "properties": {}
- }
- )
+ inputSchema={"type": "object", "properties": {}},
+ ),
]
class PluginTools:
- """Handler for hello world plugin tools"""
+ """Handler for hello world plugin tools."""
def __init__(self, **kwargs):
self.call_count = 0
async def execute_tool(self, name: str, arguments: Dict[str, Any]) -> List:
- """Execute tool by name"""
+ """Execute tool by name."""
self.call_count += 1
if name == "hello_world":
@@ -60,7 +53,7 @@ class PluginTools:
raise ValueError(f"Unknown tool: {name}")
async def _hello_world(self, arguments: Dict[str, Any]) -> List[types.TextContent]:
- """Simple hello world implementation"""
+ """Simple hello world implementation."""
name = arguments.get("name", "World")
message = (
@@ -71,28 +64,19 @@ class PluginTools:
f"✓ Call count: {self.call_count}"
)
- return [
- types.TextContent(
- type="text",
- text=message
- )
- ]
+ return [types.TextContent(type="text", text=message)]
async def _plugin_status(self, arguments: Dict[str, Any]) -> List[types.TextContent]:
- """Return plugin system status"""
+ """Return plugin system status."""
status = {
"status": "operational",
"plugin_name": PLUGIN_METADATA["name"],
"plugin_version": PLUGIN_METADATA["version"],
"tools_count": len(TOOLS),
"call_count": self.call_count,
- "tools": [tool.name for tool in TOOLS]
+ "tools": [tool.name for tool in TOOLS],
}
import json
- return [
- types.TextContent(
- type="text",
- text=json.dumps(status, indent=2)
- )
- ]
+
+ return [types.TextContent(type="text", text=json.dumps(status, indent=2))]
diff --git a/dss/mcp/security.py b/dss/mcp/security.py
index bda6583..27bed3b 100644
--- a/dss/mcp/security.py
+++ b/dss/mcp/security.py
@@ -1,22 +1,20 @@
"""
-DSS MCP Security Module
+DSS MCP Security Module.
Handles encryption, decryption, and secure storage of sensitive credentials.
Uses the cryptography library's Fernet recipe (AES-128-CBC with HMAC-SHA256) and a PBKDF2-derived key with a per-credential salt.
"""
-import os
import json
+import os
import secrets
-from typing import Optional, Dict, Any
from datetime import datetime
+from typing import Any, Dict, Optional
+
from cryptography.fernet import Fernet
+from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
-from cryptography.hazmat.backends import default_backend
-
-from .config import mcp_config
-from dss.storage.json_store import read_json, write_json, SYSTEM_DIR # JSON storage
class CredentialVault:
@@ -28,11 +26,11 @@ class CredentialVault:
"""
# Master encryption key (should be set via environment variable)
- MASTER_KEY = os.environ.get('DSS_ENCRYPTION_KEY', '').encode()
+ MASTER_KEY = os.environ.get("DSS_ENCRYPTION_KEY", "").encode()
@classmethod
def _get_cipher_suite(cls, salt: bytes) -> Fernet:
- """Derive encryption cipher from master key and salt"""
+ """Derive encryption cipher from master key and salt."""
if not cls.MASTER_KEY:
raise ValueError(
"DSS_ENCRYPTION_KEY environment variable not set. "
@@ -45,21 +43,19 @@ class CredentialVault:
length=32,
salt=salt,
iterations=100000,
- backend=default_backend()
+ backend=default_backend(),
)
key = kdf.derive(cls.MASTER_KEY)
# Encode key for Fernet
import base64
+
key_b64 = base64.urlsafe_b64encode(key)
return Fernet(key_b64)
@classmethod
def encrypt_credential(
- cls,
- credential_type: str,
- credential_data: Dict[str, Any],
- user_id: Optional[str] = None
+ cls, credential_type: str, credential_data: Dict[str, Any], user_id: Optional[str] = None
) -> str:
"""
Encrypt and store a credential.
@@ -72,8 +68,8 @@ class CredentialVault:
Returns:
Credential ID for later retrieval
"""
- import uuid
import base64
+ import uuid
credential_id = str(uuid.uuid4())
salt = secrets.token_bytes(16) # 128-bit salt
@@ -87,26 +83,26 @@ class CredentialVault:
# Store in database
with get_connection() as conn:
- conn.execute("""
+ conn.execute(
+ """
INSERT INTO credentials (
id, credential_type, encrypted_data, salt, user_id, created_at
) VALUES (?, ?, ?, ?, ?, ?)
- """, (
- credential_id,
- credential_type,
- encrypted.decode(),
- base64.b64encode(salt).decode(),
- user_id,
- datetime.utcnow().isoformat()
- ))
+ """,
+ (
+ credential_id,
+ credential_type,
+ encrypted.decode(),
+ base64.b64encode(salt).decode(),
+ user_id,
+ datetime.utcnow().isoformat(),
+ ),
+ )
return credential_id
@classmethod
- def decrypt_credential(
- cls,
- credential_id: str
- ) -> Optional[Dict[str, Any]]:
+ def decrypt_credential(cls, credential_id: str) -> Optional[Dict[str, Any]]:
"""
Decrypt and retrieve a credential.
@@ -120,9 +116,12 @@ class CredentialVault:
with get_connection() as conn:
cursor = conn.cursor()
- cursor.execute("""
+ cursor.execute(
+ """
SELECT encrypted_data, salt FROM credentials WHERE id = ?
- """, (credential_id,))
+ """,
+ (credential_id,),
+ )
row = cursor.fetchone()
if not row:
@@ -139,7 +138,7 @@ class CredentialVault:
@classmethod
def delete_credential(cls, credential_id: str) -> bool:
- """Delete a credential"""
+ """Delete a credential."""
with get_connection() as conn:
cursor = conn.cursor()
cursor.execute("DELETE FROM credentials WHERE id = ?", (credential_id,))
@@ -147,11 +146,9 @@ class CredentialVault:
@classmethod
def list_credentials(
- cls,
- credential_type: Optional[str] = None,
- user_id: Optional[str] = None
+ cls, credential_type: Optional[str] = None, user_id: Optional[str] = None
) -> list:
- """List credentials (metadata only, not decrypted)"""
+ """List credentials (metadata only, not decrypted)."""
with get_connection() as conn:
cursor = conn.cursor()
@@ -177,11 +174,9 @@ class CredentialVault:
This re-encrypts all credentials with a new master key.
Requires new key to be set in DSS_ENCRYPTION_KEY_NEW environment variable.
"""
- new_key = os.environ.get('DSS_ENCRYPTION_KEY_NEW', '').encode()
+ new_key = os.environ.get("DSS_ENCRYPTION_KEY_NEW", "").encode()
if not new_key:
- raise ValueError(
- "DSS_ENCRYPTION_KEY_NEW environment variable not set for key rotation"
- )
+ raise ValueError("DSS_ENCRYPTION_KEY_NEW environment variable not set for key rotation")
try:
with get_connection() as conn:
@@ -213,13 +208,13 @@ class CredentialVault:
# Update database
conn.execute(
"UPDATE credentials SET encrypted_data = ? WHERE id = ?",
- (new_encrypted.decode(), credential_id)
+ (new_encrypted.decode(), credential_id),
)
finally:
cls.MASTER_KEY = old_master
# Update environment
- os.environ['DSS_ENCRYPTION_KEY'] = new_key.decode()
+ os.environ["DSS_ENCRYPTION_KEY"] = new_key.decode()
return True
@@ -228,9 +223,10 @@ class CredentialVault:
@classmethod
def ensure_credentials_table(cls):
- """Ensure credentials table exists"""
+ """Ensure credentials table exists."""
with get_connection() as conn:
- conn.execute("""
+ conn.execute(
+ """
CREATE TABLE IF NOT EXISTS credentials (
id TEXT PRIMARY KEY,
credential_type TEXT NOT NULL,
@@ -240,13 +236,12 @@ class CredentialVault:
created_at TEXT DEFAULT CURRENT_TIMESTAMP,
updated_at TEXT DEFAULT CURRENT_TIMESTAMP
)
- """)
+ """
+ )
conn.execute(
"CREATE INDEX IF NOT EXISTS idx_credentials_type ON credentials(credential_type)"
)
- conn.execute(
- "CREATE INDEX IF NOT EXISTS idx_credentials_user ON credentials(user_id)"
- )
+ conn.execute("CREATE INDEX IF NOT EXISTS idx_credentials_user ON credentials(user_id)")
# Initialize table on import
diff --git a/dss/mcp/server.py b/dss/mcp/server.py
index bf2fa64..77e2263 100644
--- a/dss/mcp/server.py
+++ b/dss/mcp/server.py
@@ -1,5 +1,5 @@
"""
-DSS MCP Server
+DSS MCP Server.
SSE-based Model Context Protocol server for Claude.
Provides project-isolated context and tools with user-scoped integrations.
@@ -8,27 +8,27 @@ Provides project-isolated context and tools with user-scoped integrations.
import asyncio
import json
import logging
+from typing import Any, Dict, Optional
+
import structlog
-from typing import Optional, Dict, Any
-from fastapi import FastAPI, Query, HTTPException
+from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
-from sse_starlette.sse import EventSourceResponse
-from mcp.server import Server
from mcp import types
+from mcp.server import Server
+from sse_starlette.sse import EventSourceResponse
from .config import mcp_config, validate_config
from .context.project_context import get_context_manager
-from .tools.project_tools import PROJECT_TOOLS, ProjectTools
-from .tools.workflow_tools import WORKFLOW_TOOLS, WorkflowTools
-from .tools.debug_tools import DEBUG_TOOLS, DebugTools
from .integrations.storybook import STORYBOOK_TOOLS
from .integrations.translations import TRANSLATION_TOOLS
from .plugin_registry import PluginRegistry
+from .tools.debug_tools import DEBUG_TOOLS, DebugTools
+from .tools.project_tools import PROJECT_TOOLS, ProjectTools
+from .tools.workflow_tools import WORKFLOW_TOOLS, WorkflowTools
# Configure logging
logging.basicConfig(
- level=mcp_config.LOG_LEVEL,
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ level=mcp_config.LOG_LEVEL, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = structlog.get_logger()
@@ -36,7 +36,7 @@ logger = structlog.get_logger()
app = FastAPI(
title="DSS MCP Server",
description="Model Context Protocol server for Design System Server",
- version="0.8.0"
+ version="0.8.0",
)
# CORS configuration
@@ -60,13 +60,13 @@ _active_sessions: Dict[str, Dict[str, Any]] = {}
def get_session_key(project_id: str, user_id: Optional[int] = None) -> str:
- """Generate session key for caching"""
+ """Generate session key for caching."""
return f"{project_id}:{user_id or 'anonymous'}"
@app.on_event("startup")
async def startup():
- """Startup tasks"""
+ """Startup tasks."""
logger.info("Starting DSS MCP Server")
# Validate configuration
@@ -75,36 +75,32 @@ async def startup():
for warning in warnings:
logger.warning(warning)
- logger.info(
- "DSS MCP Server started",
- host=mcp_config.HOST,
- port=mcp_config.PORT
- )
+ logger.info("DSS MCP Server started", host=mcp_config.HOST, port=mcp_config.PORT)
@app.on_event("shutdown")
async def shutdown():
- """Cleanup on shutdown"""
+ """Cleanup on shutdown."""
logger.info("Shutting down DSS MCP Server")
@app.get("/health")
async def health_check():
- """Health check endpoint"""
+ """Health check endpoint."""
context_manager = get_context_manager()
return {
"status": "healthy",
"server": "dss-mcp",
"version": "0.8.0",
"cache_size": len(context_manager._cache),
- "active_sessions": len(_active_sessions)
+ "active_sessions": len(_active_sessions),
}
@app.get("/sse")
async def sse_endpoint(
project_id: str = Query(..., description="Project ID for context isolation"),
- user_id: Optional[int] = Query(None, description="User ID for user-scoped integrations")
+ user_id: Optional[int] = Query(None, description="User ID for user-scoped integrations"),
):
"""
Server-Sent Events endpoint for MCP communication.
@@ -118,7 +114,7 @@ async def sse_endpoint(
"SSE connection established",
project_id=project_id,
user_id=user_id,
- session_key=session_key
+ session_key=session_key,
)
# Load project context
@@ -139,21 +135,23 @@ async def sse_endpoint(
"project_id": project_id,
"user_id": user_id,
"connected_at": asyncio.get_event_loop().time(),
- "project_tools": project_tools
+ "project_tools": project_tools,
}
async def event_generator():
- """Generate SSE events for MCP communication"""
+ """Generate SSE events for MCP communication."""
try:
# Send initial connection confirmation
yield {
"event": "connected",
- "data": json.dumps({
- "project_id": project_id,
- "project_name": project_context.name,
- "available_tools": len(PROJECT_TOOLS),
- "integrations_enabled": list(project_context.integrations.keys())
- })
+ "data": json.dumps(
+ {
+ "project_id": project_id,
+ "project_name": project_context.name,
+ "available_tools": len(PROJECT_TOOLS),
+ "integrations_enabled": list(project_context.integrations.keys()),
+ }
+ ),
}
# Keep connection alive
@@ -161,7 +159,7 @@ async def sse_endpoint(
await asyncio.sleep(30) # Heartbeat every 30 seconds
yield {
"event": "heartbeat",
- "data": json.dumps({"timestamp": asyncio.get_event_loop().time()})
+ "data": json.dumps({"timestamp": asyncio.get_event_loop().time()}),
}
except asyncio.CancelledError:
@@ -230,10 +228,7 @@ async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
project_id = arguments.get("project_id")
if not project_id:
return [
- types.TextContent(
- type="text",
- text=json.dumps({"error": "project_id is required"})
- )
+ types.TextContent(type="text", text=json.dumps({"error": "project_id is required"}))
]
# Find active session for this project
@@ -262,6 +257,7 @@ async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
if name in workflow_tool_names:
# Handle workflow orchestration tools
from .audit import AuditLog
+
audit_log = AuditLog()
workflow_tools = WorkflowTools(audit_log)
result = await workflow_tools.handle_tool_call(name, arguments)
@@ -272,11 +268,13 @@ async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
elif name in storybook_tool_names:
# Handle Storybook tools
from .integrations.storybook import StorybookTools
+
storybook_tools = StorybookTools()
result = await storybook_tools.execute_tool(name, arguments)
elif name in translation_tool_names:
# Handle Translation tools
from .integrations.translations import TranslationTools
+
translation_tools = TranslationTools()
result = await translation_tools.execute_tool(name, arguments)
elif name in plugin_registry.handlers:
@@ -289,20 +287,10 @@ async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
# Handle regular project tools
result = await project_tools.execute_tool(name, arguments)
- return [
- types.TextContent(
- type="text",
- text=json.dumps(result, indent=2)
- )
- ]
+ return [types.TextContent(type="text", text=json.dumps(result, indent=2))]
except Exception as e:
logger.error("Tool execution failed", tool_name=name, error=str(e))
- return [
- types.TextContent(
- type="text",
- text=json.dumps({"error": str(e)})
- )
- ]
+ return [types.TextContent(type="text", text=json.dumps({"error": str(e)}))]
@mcp_server.list_resources()
@@ -359,10 +347,7 @@ async def get_prompt(name: str, arguments: dict) -> types.GetPromptResult:
Prompt content
"""
# TODO: Implement prompt templates
- return types.GetPromptResult(
- description="Prompt not found",
- messages=[]
- )
+ return types.GetPromptResult(description="Prompt not found", messages=[])
# API endpoint to call MCP tools directly (for testing/debugging)
@@ -381,46 +366,39 @@ async def call_tool_api(tool_name: str, arguments: Dict[str, Any]):
# API endpoint to list active sessions
@app.get("/api/sessions")
async def list_sessions():
- """List all active SSE sessions"""
+ """List all active SSE sessions."""
return {
"active_sessions": len(_active_sessions),
"sessions": [
{
"project_id": session["project_id"],
"user_id": session["user_id"],
- "connected_at": session["connected_at"]
+ "connected_at": session["connected_at"],
}
for session in _active_sessions.values()
- ]
+ ],
}
# API endpoint to clear context cache
@app.post("/api/cache/clear")
async def clear_cache(project_id: Optional[str] = None):
- """Clear context cache for a project or all projects"""
+ """Clear context cache for a project or all projects."""
context_manager = get_context_manager()
context_manager.clear_cache(project_id)
- return {
- "status": "cache_cleared",
- "project_id": project_id or "all"
- }
+ return {"status": "cache_cleared", "project_id": project_id or "all"}
if __name__ == "__main__":
import uvicorn
- logger.info(
- "Starting DSS MCP Server",
- host=mcp_config.HOST,
- port=mcp_config.PORT
- )
+ logger.info("Starting DSS MCP Server", host=mcp_config.HOST, port=mcp_config.PORT)
uvicorn.run(
"server:app",
host=mcp_config.HOST,
port=mcp_config.PORT,
reload=True,
- log_level=mcp_config.LOG_LEVEL.lower()
+ log_level=mcp_config.LOG_LEVEL.lower(),
)
diff --git a/dss/mcp/tools/analysis_tools.py b/dss/mcp/tools/analysis_tools.py
index e194aba..ea2819f 100644
--- a/dss/mcp/tools/analysis_tools.py
+++ b/dss/mcp/tools/analysis_tools.py
@@ -1,20 +1,22 @@
-"""
-DSS MCP - Code Analysis Tools
-"""
+"""DSS MCP - Code Analysis Tools."""
+
import asyncio
-from typing import Dict, Any
+from typing import Any, Dict
# Adjust the import path to find the project_analyzer
# This assumes the script is run from the project root.
from tools.analysis.project_analyzer import analyze_react_project, save_analysis
+
class Tool:
- """Basic tool definition for MCP"""
+ """Basic tool definition for MCP."""
+
def __init__(self, name: str, description: str, input_schema: Dict[str, Any]):
self.name = name
self.description = description
self.inputSchema = input_schema
+
# Define the new tool
analyze_project_tool = Tool(
name="analyze_project",
@@ -24,17 +26,17 @@ analyze_project_tool = Tool(
"properties": {
"project_path": {
"type": "string",
- "description": "The absolute path to the project to be analyzed."
+ "description": "The absolute path to the project to be analyzed.",
}
},
- "required": ["project_path"]
- }
+ "required": ["project_path"],
+ },
)
+
class AnalysisTools:
- """
- A wrapper class for analysis-related tools.
- """
+ """A wrapper class for analysis-related tools."""
+
def __init__(self, user_id: str = None):
self.user_id = user_id
@@ -45,9 +47,7 @@ class AnalysisTools:
return {"error": f"Analysis tool '{tool_name}' not found."}
async def analyze_project(self, project_path: str) -> Dict[str, Any]:
- """
- Triggers the analysis of a project.
- """
+ """Triggers the analysis of a project."""
if not project_path:
return {"error": "project_path is a required argument."}
@@ -56,27 +56,22 @@ class AnalysisTools:
# In a real scenario, this should be offloaded to a background worker.
# For now, we run it asynchronously.
loop = asyncio.get_event_loop()
-
+
# Run the analysis in a separate thread to avoid blocking the event loop
- analysis_data = await loop.run_in_executor(
- None, analyze_react_project, project_path
- )
-
+ analysis_data = await loop.run_in_executor(None, analyze_react_project, project_path)
+
# Save the analysis data
- await loop.run_in_executor(
- None, save_analysis, project_path, analysis_data
- )
+ await loop.run_in_executor(None, save_analysis, project_path, analysis_data)
return {
"status": "success",
"message": f"Analysis complete for project at {project_path}.",
"graph_nodes": len(analysis_data.get("nodes", [])),
- "graph_edges": len(analysis_data.get("links", []))
+ "graph_edges": len(analysis_data.get("links", [])),
}
except Exception as e:
return {"error": f"An error occurred during project analysis: {str(e)}"}
+
# A list of all tools in this module
-ANALYSIS_TOOLS = [
- analyze_project_tool
-]
+ANALYSIS_TOOLS = [analyze_project_tool]
diff --git a/dss/mcp/tools/debug_tools.py b/dss/mcp/tools/debug_tools.py
index 5f6d34d..04f3d1c 100644
--- a/dss/mcp/tools/debug_tools.py
+++ b/dss/mcp/tools/debug_tools.py
@@ -1,5 +1,5 @@
"""
-DSS Debug Tools for MCP
+DSS Debug Tools for MCP.
This module implements the MCP tool layer that bridges Claude Code to the DSS Debug API.
It allows the LLM to inspect browser sessions, check server health, and run debug workflows.
@@ -8,12 +8,13 @@ Configuration:
DSS_DEBUG_API_URL: Base URL for the DSS Debug API (default: http://localhost:3456)
"""
-import os
import json
import logging
-from pathlib import Path
-from typing import Dict, Any, List, Optional
+import os
from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional
+
from mcp import types
try:
@@ -34,11 +35,7 @@ DEBUG_TOOLS = [
types.Tool(
name="dss_list_browser_sessions",
description="List all browser log sessions that have been captured. Use this to find session IDs for detailed analysis.",
- inputSchema={
- "type": "object",
- "properties": {},
- "required": []
- }
+ inputSchema={"type": "object", "properties": {}, "required": []},
),
types.Tool(
name="dss_get_browser_diagnostic",
@@ -48,11 +45,11 @@ DEBUG_TOOLS = [
"properties": {
"session_id": {
"type": "string",
- "description": "Session ID to inspect. If omitted, uses the most recent session."
+ "description": "Session ID to inspect. If omitted, uses the most recent session.",
}
},
- "required": []
- }
+ "required": [],
+ },
),
types.Tool(
name="dss_get_browser_errors",
@@ -62,16 +59,16 @@ DEBUG_TOOLS = [
"properties": {
"session_id": {
"type": "string",
- "description": "Session ID. Defaults to most recent if omitted."
+ "description": "Session ID. Defaults to most recent if omitted.",
},
"limit": {
"type": "integer",
"description": "Maximum number of errors to retrieve (default: 50)",
- "default": 50
- }
+ "default": 50,
+ },
},
- "required": []
- }
+ "required": [],
+ },
),
types.Tool(
name="dss_get_browser_network",
@@ -81,43 +78,31 @@ DEBUG_TOOLS = [
"properties": {
"session_id": {
"type": "string",
- "description": "Session ID. Defaults to most recent if omitted."
+ "description": "Session ID. Defaults to most recent if omitted.",
},
"limit": {
"type": "integer",
"description": "Maximum number of entries to retrieve (default: 50)",
- "default": 50
- }
+ "default": 50,
+ },
},
- "required": []
- }
+ "required": [],
+ },
),
types.Tool(
name="dss_get_server_status",
description="Quick check if the DSS Debug Server is up and running. Returns simple UP/DOWN status from health check.",
- inputSchema={
- "type": "object",
- "properties": {},
- "required": []
- }
+ inputSchema={"type": "object", "properties": {}, "required": []},
),
types.Tool(
name="dss_get_server_diagnostic",
description="Get detailed server health diagnostics including memory usage, database size, process info, and recent errors. Use for deep debugging of infrastructure.",
- inputSchema={
- "type": "object",
- "properties": {},
- "required": []
- }
+ inputSchema={"type": "object", "properties": {}, "required": []},
),
types.Tool(
name="dss_list_workflows",
description="List available debug workflows that can be executed. Workflows are predefined diagnostic procedures.",
- inputSchema={
- "type": "object",
- "properties": {},
- "required": []
- }
+ inputSchema={"type": "object", "properties": {}, "required": []},
),
types.Tool(
name="dss_run_workflow",
@@ -127,24 +112,24 @@ DEBUG_TOOLS = [
"properties": {
"workflow_id": {
"type": "string",
- "description": "The ID of the workflow to run (see dss_list_workflows for available IDs)"
+ "description": "The ID of the workflow to run (see dss_list_workflows for available IDs)",
}
},
- "required": ["workflow_id"]
- }
- )
+ "required": ["workflow_id"],
+ },
+ ),
]
class DebugTools:
- """Debug tool implementations"""
+ """Debug tool implementations."""
def __init__(self):
self.api_base = DSS_API_URL
self.browser_logs_dir = None
def _get_browser_logs_dir(self) -> Path:
- """Get the browser logs directory path"""
+ """Get the browser logs directory path."""
if self.browser_logs_dir is None:
# Assuming we're in tools/dss_mcp/tools/debug_tools.py
# Root is 3 levels up
@@ -157,11 +142,9 @@ class DebugTools:
method: str,
endpoint: str,
params: Optional[Dict[str, Any]] = None,
- json_data: Optional[Dict[str, Any]] = None
+ json_data: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
- """
- Internal helper to make safe HTTP requests to the DSS Debug API.
- """
+ """Internal helper to make safe HTTP requests to the DSS Debug API."""
if httpx is None:
return {"error": "httpx library not installed. Run: pip install httpx"}
@@ -179,7 +162,7 @@ class DebugTools:
error_detail = response.text
return {
"error": f"API returned status {response.status_code}",
- "detail": error_detail
+ "detail": error_detail,
}
# Return JSON if possible
@@ -191,7 +174,7 @@ class DebugTools:
except httpx.ConnectError:
return {
"error": f"Could not connect to DSS Debug API at {self.api_base}",
- "suggestion": "Please ensure the debug server is running (cd tools/api && python3 -m uvicorn server:app --port 3456)"
+ "suggestion": "Please ensure the debug server is running (cd tools/api && python3 -m uvicorn server:app --port 3456)",
}
except httpx.TimeoutException:
return {"error": f"Request to DSS Debug API timed out ({url})"}
@@ -200,7 +183,7 @@ class DebugTools:
return {"error": f"Unexpected error: {str(e)}"}
def _get_latest_session_id(self) -> Optional[str]:
- """Get the most recent browser session ID from filesystem"""
+ """Get the most recent browser session ID from filesystem."""
logs_dir = self._get_browser_logs_dir()
if not logs_dir.exists():
@@ -219,7 +202,7 @@ class DebugTools:
return json_files[0].stem
async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
- """Execute a tool by name"""
+ """Execute a tool by name."""
handlers = {
"dss_list_browser_sessions": self.list_browser_sessions,
"dss_get_browser_diagnostic": self.get_browser_diagnostic,
@@ -228,7 +211,7 @@ class DebugTools:
"dss_get_server_status": self.get_server_status,
"dss_get_server_diagnostic": self.get_server_diagnostic,
"dss_list_workflows": self.list_workflows,
- "dss_run_workflow": self.run_workflow
+ "dss_run_workflow": self.run_workflow,
}
handler = handlers.get(tool_name)
@@ -243,14 +226,14 @@ class DebugTools:
return {"error": str(e)}
async def list_browser_sessions(self) -> Dict[str, Any]:
- """List all browser log sessions"""
+ """List all browser log sessions."""
logs_dir = self._get_browser_logs_dir()
if not logs_dir.exists():
return {
"sessions": [],
"count": 0,
- "message": "No browser logs directory found. Browser logger may not have captured any sessions yet."
+ "message": "No browser logs directory found. Browser logger may not have captured any sessions yet.",
}
# Get all .json files
@@ -260,7 +243,7 @@ class DebugTools:
return {
"sessions": [],
"count": 0,
- "message": "No sessions found in browser logs directory."
+ "message": "No sessions found in browser logs directory.",
}
# Sort by modification time, most recent first
@@ -270,31 +253,30 @@ class DebugTools:
for json_file in json_files:
try:
# Read session metadata
- with open(json_file, 'r') as f:
+ with open(json_file, "r") as f:
data = json.load(f)
- sessions.append({
- "session_id": json_file.stem,
- "exported_at": data.get("exportedAt", "unknown"),
- "log_count": len(data.get("logs", [])),
- "file_size_bytes": json_file.stat().st_size,
- "modified_at": datetime.fromtimestamp(json_file.stat().st_mtime).isoformat()
- })
+ sessions.append(
+ {
+ "session_id": json_file.stem,
+ "exported_at": data.get("exportedAt", "unknown"),
+ "log_count": len(data.get("logs", [])),
+ "file_size_bytes": json_file.stat().st_size,
+ "modified_at": datetime.fromtimestamp(
+ json_file.stat().st_mtime
+ ).isoformat(),
+ }
+ )
except Exception as e:
logger.warning(f"Could not read session file {json_file}: {e}")
- sessions.append({
- "session_id": json_file.stem,
- "error": f"Could not parse: {str(e)}"
- })
+ sessions.append(
+ {"session_id": json_file.stem, "error": f"Could not parse: {str(e)}"}
+ )
- return {
- "sessions": sessions,
- "count": len(sessions),
- "directory": str(logs_dir)
- }
+ return {"sessions": sessions, "count": len(sessions), "directory": str(logs_dir)}
async def get_browser_diagnostic(self, session_id: Optional[str] = None) -> Dict[str, Any]:
- """Get diagnostic summary for a browser session"""
+ """Get diagnostic summary for a browser session."""
# Resolve session_id
if not session_id:
session_id = self._get_latest_session_id()
@@ -320,15 +302,13 @@ class DebugTools:
"total_logs": len(logs),
"error_count": error_count,
"diagnostic": diagnostic,
- "summary": f"Session {session_id}: {len(logs)} logs, {error_count} errors/warnings"
+ "summary": f"Session {session_id}: {len(logs)} logs, {error_count} errors/warnings",
}
async def get_browser_errors(
- self,
- session_id: Optional[str] = None,
- limit: int = DEFAULT_LOG_LIMIT
+ self, session_id: Optional[str] = None, limit: int = DEFAULT_LOG_LIMIT
) -> Dict[str, Any]:
- """Get console errors from a browser session"""
+ """Get console errors from a browser session."""
# Resolve session_id
if not session_id:
session_id = self._get_latest_session_id()
@@ -343,10 +323,7 @@ class DebugTools:
# Filter for errors and warnings
logs = response.get("logs", [])
- errors = [
- log for log in logs
- if log.get("level") in ["error", "warn"]
- ]
+ errors = [log for log in logs if log.get("level") in ["error", "warn"]]
# Apply limit
errors = errors[:limit] if limit else errors
@@ -356,22 +333,20 @@ class DebugTools:
"session_id": session_id,
"errors": [],
"count": 0,
- "message": "No errors or warnings found in this session"
+ "message": "No errors or warnings found in this session",
}
return {
"session_id": session_id,
"errors": errors,
"count": len(errors),
- "total_logs": len(logs)
+ "total_logs": len(logs),
}
async def get_browser_network(
- self,
- session_id: Optional[str] = None,
- limit: int = DEFAULT_LOG_LIMIT
+ self, session_id: Optional[str] = None, limit: int = DEFAULT_LOG_LIMIT
) -> Dict[str, Any]:
- """Get network logs from a browser session"""
+ """Get network logs from a browser session."""
# Resolve session_id
if not session_id:
session_id = self._get_latest_session_id()
@@ -392,9 +367,12 @@ class DebugTools:
# Fallback: look for logs that mention network/fetch/xhr
logs = response.get("logs", [])
network_logs = [
- log for log in logs
- if any(keyword in str(log.get("message", "")).lower()
- for keyword in ["fetch", "xhr", "request", "response", "http"])
+ log
+ for log in logs
+ if any(
+ keyword in str(log.get("message", "")).lower()
+ for keyword in ["fetch", "xhr", "request", "response", "http"]
+ )
]
# Apply limit
@@ -405,25 +383,17 @@ class DebugTools:
"session_id": session_id,
"network_logs": [],
"count": 0,
- "message": "No network logs recorded in this session"
+ "message": "No network logs recorded in this session",
}
- return {
- "session_id": session_id,
- "network_logs": network_logs,
- "count": len(network_logs)
- }
+ return {"session_id": session_id, "network_logs": network_logs, "count": len(network_logs)}
async def get_server_status(self) -> Dict[str, Any]:
- """Quick health check of the debug server"""
+ """Quick health check of the debug server."""
response = await self._request("GET", "/api/debug/diagnostic")
if "error" in response:
- return {
- "status": "DOWN",
- "error": response["error"],
- "detail": response.get("detail")
- }
+ return {"status": "DOWN", "error": response["error"], "detail": response.get("detail")}
# Extract just the status
status = response.get("status", "unknown")
@@ -433,11 +403,11 @@ class DebugTools:
"status": status.upper(),
"health_status": health.get("status"),
"timestamp": response.get("timestamp"),
- "message": f"Server is {status}"
+ "message": f"Server is {status}",
}
async def get_server_diagnostic(self) -> Dict[str, Any]:
- """Get detailed server diagnostics"""
+ """Get detailed server diagnostics."""
response = await self._request("GET", "/api/debug/diagnostic")
if "error" in response:
@@ -446,7 +416,7 @@ class DebugTools:
return response
async def list_workflows(self) -> Dict[str, Any]:
- """List available debug workflows"""
+ """List available debug workflows."""
response = await self._request("GET", "/api/debug/workflows")
if "error" in response:
@@ -455,7 +425,7 @@ class DebugTools:
return response
async def run_workflow(self, workflow_id: str) -> Dict[str, Any]:
- """Execute a debug workflow"""
+ """Execute a debug workflow."""
# For now, read the workflow markdown and return its content
# In the future, this could actually execute the workflow steps
@@ -470,23 +440,20 @@ class DebugTools:
if not workflow:
return {
"error": f"Workflow not found: {workflow_id}",
- "available_workflows": [w.get("id") for w in workflows]
+ "available_workflows": [w.get("id") for w in workflows],
}
# Read workflow file
workflow_path = workflow.get("path")
if workflow_path and Path(workflow_path).exists():
- with open(workflow_path, 'r') as f:
+ with open(workflow_path, "r") as f:
content = f.read()
return {
"workflow_id": workflow_id,
"title": workflow.get("title"),
"content": content,
- "message": "Workflow loaded. Follow the steps in the content."
+ "message": "Workflow loaded. Follow the steps in the content.",
}
- return {
- "error": "Workflow file not found",
- "workflow": workflow
- }
+ return {"error": "Workflow file not found", "workflow": workflow}
diff --git a/dss/mcp/tools/project_tools.py b/dss/mcp/tools/project_tools.py
index 9907ab3..d0eed2e 100644
--- a/dss/mcp/tools/project_tools.py
+++ b/dss/mcp/tools/project_tools.py
@@ -1,5 +1,5 @@
"""
-DSS Project Tools for MCP
+DSS Project Tools for MCP.
Base tools that Claude can use to interact with DSS projects.
All tools are project-scoped and context-aware.
@@ -13,16 +13,14 @@ Tools include:
"""
import uuid
-from typing import Dict, Any, List, Optional
-from datetime import datetime
+from typing import Any, Dict, Optional
+
from mcp import types
-from ..context.project_context import get_context_manager
-from ..security import CredentialVault
-from ..audit import AuditLog, AuditEventType
-from dss.storage.json_store import Projects, Components, Tokens, ActivityLog # JSON storage
-from ..handler import get_mcp_handler, MCPContext
+from dss.storage.json_store import Projects # JSON storage
+from ..context.project_context import get_context_manager
+from ..handler import MCPContext, get_mcp_handler
# Tool definitions (metadata for Claude)
PROJECT_TOOLS = [
@@ -32,18 +30,15 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID to query"
- },
+ "project_id": {"type": "string", "description": "Project ID to query"},
"include_components": {
"type": "boolean",
"description": "Include full component list (default: false)",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_list_components",
@@ -51,22 +46,19 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"filter_name": {
"type": "string",
- "description": "Optional: Filter by component name (partial match)"
+ "description": "Optional: Filter by component name (partial match)",
},
"code_generated_only": {
"type": "boolean",
"description": "Optional: Only show components with generated code",
- "default": False
- }
+ "default": False,
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_get_component",
@@ -74,17 +66,11 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
- "component_name": {
- "type": "string",
- "description": "Component name (exact match)"
- }
+ "project_id": {"type": "string", "description": "Project ID"},
+ "component_name": {"type": "string", "description": "Component name (exact match)"},
},
- "required": ["project_id", "component_name"]
- }
+ "required": ["project_id", "component_name"],
+ },
),
types.Tool(
name="dss_get_design_tokens",
@@ -92,32 +78,24 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"token_category": {
"type": "string",
"description": "Optional: Filter by token category (colors, typography, spacing, etc.)",
- "enum": ["colors", "typography", "spacing", "shadows", "borders", "all"]
- }
+ "enum": ["colors", "typography", "spacing", "shadows", "borders", "all"],
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_get_project_health",
description="Get project health score, grade, and list of issues",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_list_styles",
@@ -125,32 +103,24 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"style_type": {
"type": "string",
"description": "Optional: Filter by style type",
- "enum": ["TEXT", "FILL", "EFFECT", "GRID", "all"]
- }
+ "enum": ["TEXT", "FILL", "EFFECT", "GRID", "all"],
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_get_discovery_data",
description="Get project discovery/scan data (file counts, technologies detected, etc.)",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
# === Project Management Tools ===
types.Tool(
@@ -159,21 +129,15 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "name": {
- "type": "string",
- "description": "Project name"
- },
- "description": {
- "type": "string",
- "description": "Project description"
- },
+ "name": {"type": "string", "description": "Project name"},
+ "description": {"type": "string", "description": "Project description"},
"root_path": {
"type": "string",
- "description": "Root directory path for the project. Can be a git URL or a local folder path."
- }
+ "description": "Root directory path for the project. Can be a git URL or a local folder path.",
+ },
},
- "required": ["name", "root_path"]
- }
+ "required": ["name", "root_path"],
+ },
),
types.Tool(
name="dss_list_projects",
@@ -184,24 +148,19 @@ PROJECT_TOOLS = [
"filter_status": {
"type": "string",
"description": "Optional: Filter by project status (active, archived)",
- "enum": ["active", "archived", "all"]
+ "enum": ["active", "archived", "all"],
}
- }
- }
+ },
+ },
),
types.Tool(
name="dss_get_project",
description="Get detailed information about a specific project",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_update_project",
@@ -209,17 +168,14 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID to update"
- },
+ "project_id": {"type": "string", "description": "Project ID to update"},
"updates": {
"type": "object",
- "description": "Fields to update (name, description, etc.)"
- }
+ "description": "Fields to update (name, description, etc.)",
+ },
},
- "required": ["project_id", "updates"]
- }
+ "required": ["project_id", "updates"],
+ },
),
types.Tool(
name="dss_delete_project",
@@ -227,17 +183,14 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID to delete"
- },
+ "project_id": {"type": "string", "description": "Project ID to delete"},
"confirm": {
"type": "boolean",
- "description": "Confirmation to delete (must be true)"
- }
+ "description": "Confirmation to delete (must be true)",
+ },
},
- "required": ["project_id", "confirm"]
- }
+ "required": ["project_id", "confirm"],
+ },
),
# === Figma Integration Tools ===
types.Tool(
@@ -246,31 +199,20 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
- "api_token": {
- "type": "string",
- "description": "Figma API token"
- }
+ "project_id": {"type": "string", "description": "Project ID"},
+ "api_token": {"type": "string", "description": "Figma API token"},
},
- "required": ["project_id", "api_token"]
- }
+ "required": ["project_id", "api_token"],
+ },
),
types.Tool(
name="dss_discover_figma_files",
description="Discover Figma files accessible with current credentials",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_add_figma_file",
@@ -278,35 +220,21 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
- "file_key": {
- "type": "string",
- "description": "Figma file key"
- },
- "file_name": {
- "type": "string",
- "description": "Display name for the file"
- }
+ "project_id": {"type": "string", "description": "Project ID"},
+ "file_key": {"type": "string", "description": "Figma file key"},
+ "file_name": {"type": "string", "description": "Display name for the file"},
},
- "required": ["project_id", "file_key", "file_name"]
- }
+ "required": ["project_id", "file_key", "file_name"],
+ },
),
types.Tool(
name="dss_list_figma_files",
description="List all Figma files linked to a project",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
# === Token Management Tools ===
types.Tool(
@@ -315,18 +243,15 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
+ "project_id": {"type": "string", "description": "Project ID"},
"output_format": {
"type": "string",
"description": "Output format for tokens (css, json, tailwind)",
- "enum": ["css", "json", "tailwind", "figma-tokens"]
- }
+ "enum": ["css", "json", "tailwind", "figma-tokens"],
+ },
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_extract_tokens",
@@ -334,45 +259,29 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
- "file_key": {
- "type": "string",
- "description": "Figma file key"
- }
+ "project_id": {"type": "string", "description": "Project ID"},
+ "file_key": {"type": "string", "description": "Figma file key"},
},
- "required": ["project_id", "file_key"]
- }
+ "required": ["project_id", "file_key"],
+ },
),
types.Tool(
name="dss_validate_tokens",
description="Validate design tokens for consistency and completeness",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_detect_token_drift",
description="Detect inconsistencies between Figma and project tokens",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
# === Component Analysis Tools ===
types.Tool(
@@ -381,31 +290,20 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
- "path": {
- "type": "string",
- "description": "Optional: Specific path to scan"
- }
+ "project_id": {"type": "string", "description": "Project ID"},
+ "path": {"type": "string", "description": "Optional: Specific path to scan"},
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_analyze_components",
description="Analyze components for design system alignment and quality",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_get_quick_wins",
@@ -413,17 +311,11 @@ PROJECT_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- },
- "path": {
- "type": "string",
- "description": "Optional: Specific path to analyze"
- }
+ "project_id": {"type": "string", "description": "Project ID"},
+ "path": {"type": "string", "description": "Optional: Specific path to analyze"},
},
- "required": ["project_id"]
- }
+ "required": ["project_id"],
+ },
),
# === Status & Info Tools ===
types.Tool(
@@ -431,29 +323,21 @@ PROJECT_TOOLS = [
description="Get current project status and progress",
inputSchema={
"type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "description": "Project ID"
- }
- },
- "required": ["project_id"]
- }
+ "properties": {"project_id": {"type": "string", "description": "Project ID"}},
+ "required": ["project_id"],
+ },
),
types.Tool(
name="dss_get_system_health",
description="Get overall system health and statistics",
- inputSchema={
- "type": "object",
- "properties": {}
- }
- )
+ inputSchema={"type": "object", "properties": {}},
+ ),
]
# Tool implementations
class ProjectTools:
- """Project tool implementations"""
+ """Project tool implementations."""
def __init__(self, user_id: Optional[int] = None):
self.context_manager = get_context_manager()
@@ -461,7 +345,7 @@ class ProjectTools:
self.projects_db = Projects()
async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
- """Execute a tool by name"""
+ """Execute a tool by name."""
handlers = {
# Project Management
"dss_create_project": self.create_project,
@@ -474,7 +358,7 @@ class ProjectTools:
"dss_get_design_tokens": self.get_design_tokens,
"dss_get_project_health": self.get_project_health,
"dss_list_styles": self.list_styles,
- "dss_get_discovery_.dat": self.get_discovery_data
+ "dss_get_discovery_.dat": self.get_discovery_data,
}
handler = handlers.get(tool_name)
@@ -487,25 +371,22 @@ class ProjectTools:
except Exception as e:
return {"error": str(e)}
- async def create_project(self, name: str, root_path: str, description: str = "") -> Dict[str, Any]:
+ async def create_project(
+ self, name: str, root_path: str, description: str = ""
+ ) -> Dict[str, Any]:
"""Create a new project and trigger initial analysis."""
project_id = str(uuid.uuid4())
-
+
# The `create` method in json_store handles the creation of the manifest
- self.projects_db.create(
- id=project_id,
- name=name,
- description=description
- )
-
+ self.projects_db.create(id=project_id, name=name, description=description)
+
# We may still want to update the root_path if it's not part of the manifest
self.projects_db.update(project_id, root_path=root_path)
-
# Trigger the analysis as a background task
# We don't want to block the creation call
mcp_handler = get_mcp_handler()
-
+
# Create a context for the tool call
# The user_id might be important for permissions later
mcp_context = MCPContext(project_id=project_id, user_id=self.user_id)
@@ -515,14 +396,14 @@ class ProjectTools:
mcp_handler.execute_tool(
tool_name="analyze_project",
arguments={"project_path": root_path},
- context=mcp_context
+ context=mcp_context,
)
)
return {
"status": "success",
"message": "Project created successfully. Analysis has been started in the background.",
- "project_id": project_id
+ "project_id": project_id,
}
async def list_projects(self, filter_status: Optional[str] = None) -> Dict[str, Any]:
@@ -538,11 +419,9 @@ class ProjectTools:
return {"project": project}
async def get_project_summary(
- self,
- project_id: str,
- include_components: bool = False
+ self, project_id: str, include_components: bool = False
) -> Dict[str, Any]:
- """Get comprehensive project summary"""
+ """Get comprehensive project summary."""
context = await self.context_manager.get_context(project_id, self.user_id)
if not context:
return {"error": f"Project not found: {project_id}"}
@@ -556,7 +435,7 @@ class ProjectTools:
"stats": context.stats,
"config": context.config,
"integrations_enabled": list(context.integrations.keys()),
- "loaded_at": context.loaded_at.isoformat()
+ "loaded_at": context.loaded_at.isoformat(),
}
if include_components:
@@ -565,12 +444,9 @@ class ProjectTools:
return summary
async def list_components(
- self,
- project_id: str,
- filter_name: Optional[str] = None,
- code_generated_only: bool = False
+ self, project_id: str, filter_name: Optional[str] = None, code_generated_only: bool = False
) -> Dict[str, Any]:
- """List components with optional filtering"""
+ """List components with optional filtering."""
context = await self.context_manager.get_context(project_id, self.user_id)
if not context:
return {"error": f"Project not found: {project_id}"}
@@ -579,50 +455,31 @@ class ProjectTools:
# Apply filters
if filter_name:
- components = [
- c for c in components
- if filter_name.lower() in c['name'].lower()
- ]
+ components = [c for c in components if filter_name.lower() in c["name"].lower()]
if code_generated_only:
- components = [c for c in components if c.get('code_generated')]
+ components = [c for c in components if c.get("code_generated")]
- return {
- "project_id": project_id,
- "total_count": len(components),
- "components": components
- }
+ return {"project_id": project_id, "total_count": len(components), "components": components}
- async def get_component(
- self,
- project_id: str,
- component_name: str
- ) -> Dict[str, Any]:
- """Get detailed component information"""
+ async def get_component(self, project_id: str, component_name: str) -> Dict[str, Any]:
+ """Get detailed component information."""
context = await self.context_manager.get_context(project_id, self.user_id)
if not context:
return {"error": f"Project not found: {project_id}"}
# Find component by name
- component = next(
- (c for c in context.components if c['name'] == component_name),
- None
- )
+ component = next((c for c in context.components if c["name"] == component_name), None)
if not component:
return {"error": f"Component not found: {component_name}"}
- return {
- "project_id": project_id,
- "component": component
- }
+ return {"project_id": project_id, "component": component}
async def get_design_tokens(
- self,
- project_id: str,
- token_category: Optional[str] = None
+ self, project_id: str, token_category: Optional[str] = None
) -> Dict[str, Any]:
- """Get design tokens, optionally filtered by category"""
+ """Get design tokens, optionally filtered by category."""
context = await self.context_manager.get_context(project_id, self.user_id)
if not context:
return {"error": f"Project not found: {project_id}"}
@@ -636,29 +493,20 @@ class ProjectTools:
else:
tokens = {}
- return {
- "project_id": project_id,
- "tokens": tokens,
- "categories": list(tokens.keys())
- }
+ return {"project_id": project_id, "tokens": tokens, "categories": list(tokens.keys())}
async def get_project_health(self, project_id: str) -> Dict[str, Any]:
- """Get project health information"""
+ """Get project health information."""
context = await self.context_manager.get_context(project_id, self.user_id)
if not context:
return {"error": f"Project not found: {project_id}"}
- return {
- "project_id": project_id,
- "health": context.health
- }
+ return {"project_id": project_id, "health": context.health}
async def list_styles(
- self,
- project_id: str,
- style_type: Optional[str] = None
+ self, project_id: str, style_type: Optional[str] = None
) -> Dict[str, Any]:
- """List design styles with optional type filter"""
+ """List design styles with optional type filter."""
context = await self.context_manager.get_context(project_id, self.user_id)
if not context:
return {"error": f"Project not found: {project_id}"}
@@ -666,21 +514,14 @@ class ProjectTools:
styles = context.styles
if style_type and style_type != "all":
- styles = [s for s in styles if s['type'] == style_type]
+ styles = [s for s in styles if s["type"] == style_type]
- return {
- "project_id": project_id,
- "total_count": len(styles),
- "styles": styles
- }
+ return {"project_id": project_id, "total_count": len(styles), "styles": styles}
async def get_discovery_data(self, project_id: str) -> Dict[str, Any]:
- """Get project discovery/scan data"""
+ """Get project discovery/scan data."""
context = await self.context_manager.get_context(project_id, self.user_id)
if not context:
return {"error": f"Project not found: {project_id}"}
- return {
- "project_id": project_id,
- "discovery": context.discovery
- }
+ return {"project_id": project_id, "discovery": context.discovery}
diff --git a/dss/mcp/tools/workflow_tools.py b/dss/mcp/tools/workflow_tools.py
index 8c29f42..3e0a5dd 100644
--- a/dss/mcp/tools/workflow_tools.py
+++ b/dss/mcp/tools/workflow_tools.py
@@ -1,18 +1,16 @@
"""
-DSS Workflow Orchestration Tools
+DSS Workflow Orchestration Tools.
(This file has been modified to remove the AI orchestration logic
as per user request. The original file contained complex, multi-step
workflows that have now been stubbed out.)
"""
-import json
-from typing import Dict, Any, List, Optional
-from datetime import datetime
+from typing import Any, Dict
+
from mcp import types
-from ..audit import AuditLog, AuditEventType
-
+from ..audit import AuditLog
# Workflow tool definitions
WORKFLOW_TOOLS = [
@@ -22,28 +20,23 @@ WORKFLOW_TOOLS = [
inputSchema={
"type": "object",
"properties": {
- "workflow_id": {
- "type": "string",
- "description": "Workflow execution ID"
- }
+ "workflow_id": {"type": "string", "description": "Workflow execution ID"}
},
- "required": ["workflow_id"]
- }
+ "required": ["workflow_id"],
+ },
)
]
class WorkflowOrchestrator:
- """
- (This class has been stubbed out.)
- """
+ """(This class has been stubbed out.)."""
def __init__(self, audit_log: AuditLog):
self.audit_log = audit_log
self.active_workflows = {} # workflow_id -> state
def get_workflow_status(self, workflow_id: str) -> Dict[str, Any]:
- """Get current status of a workflow"""
+ """Get current status of a workflow."""
workflow = self.active_workflows.get(workflow_id)
if not workflow:
return {"error": "Workflow not found", "workflow_id": workflow_id}
@@ -56,16 +49,16 @@ class WorkflowOrchestrator:
# Handler class that MCP server will use
class WorkflowTools:
- """Handler for workflow orchestration tools"""
+ """Handler for workflow orchestration tools."""
def __init__(self, audit_log: AuditLog):
self.orchestrator = WorkflowOrchestrator(audit_log)
async def handle_tool_call(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
- """Route tool calls to appropriate handlers"""
+ """Route tool calls to appropriate handlers."""
if tool_name == "dss_workflow_status":
return self.orchestrator.get_workflow_status(arguments["workflow_id"])
else:
- return {"error": f"Unknown or deprecated workflow tool: {tool_name}"}
\ No newline at end of file
+ return {"error": f"Unknown or deprecated workflow tool: {tool_name}"}
diff --git a/dss/models/__init__.py b/dss/models/__init__.py
index 64c6abb..4c905dd 100644
--- a/dss/models/__init__.py
+++ b/dss/models/__init__.py
@@ -1,8 +1,8 @@
-"""Pydantic models for DSS domain objects"""
+"""Pydantic models for DSS domain objects."""
-from .project import Project, ProjectMetadata
from .component import Component, ComponentVariant
-from .theme import Theme, DesignToken, TokenCategory
+from .project import Project, ProjectMetadata
+from .theme import DesignToken, Theme, TokenCategory
__all__ = [
"Project",
diff --git a/dss/models/component.py b/dss/models/component.py
index adc8d89..0ecb1d7 100644
--- a/dss/models/component.py
+++ b/dss/models/component.py
@@ -1,19 +1,24 @@
-"""Component models"""
+"""Component models."""
+from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import uuid4
-from pydantic import BaseModel, Field, ConfigDict
-from enum import Enum
+
+from pydantic import BaseModel, ConfigDict, Field
class AtomicType(str, Enum):
"""
Classification of components based on their composition.
+
- PRIMITIVE_COMPONENT: Fundamental UI elements (e.g., Button, Icon).
- - COMPOSITE_COMPONENT: Composed of multiple primitive or other composite components (e.g., Card, NavBar).
- - TEMPLATE: Page-level structures, arranging composite components to show underlying page structure.
+ - COMPOSITE_COMPONENT: Composed of multiple primitive or other composite components
+ (e.g., Card, NavBar).
+ - TEMPLATE: Page-level structures, arranging composite components to show underlying
+ page structure.
- PAGE: Instances of templates, with real content in place.
"""
+
PRIMITIVE_COMPONENT = "primitive_component"
COMPOSITE_COMPONENT = "composite_component"
TEMPLATE = "template"
@@ -30,6 +35,7 @@ class ComponentVariant(BaseModel):
name (str): Variant name.
props (Dict[str, Any]): Variant-specific props.
"""
+
model_config = ConfigDict(arbitrary_types_allowed=True)
uuid: str = Field(default_factory=lambda: str(uuid4()), description="UUID for export/import")
@@ -50,9 +56,12 @@ class Component(BaseModel):
classification (AtomicType): Atomic design classification.
variants (List[str]): Available variants.
props (Dict[str, Any]): Component props schema.
- dependencies (List[str]): UUIDs of components this component depends on (e.g., a composite component depends on primitive components).
- sub_components (List[str]): UUIDs of components that are children of this component in the atomic hierarchy.
+ dependencies (List[str]): UUIDs of components this component depends on
+ (e.g., a composite component depends on primitive components).
+ sub_components (List[str]): UUIDs of components that are children of this
+ component in the atomic hierarchy.
"""
+
model_config = ConfigDict(arbitrary_types_allowed=True)
uuid: str = Field(default_factory=lambda: str(uuid4()), description="UUID for export/import")
@@ -60,12 +69,14 @@ class Component(BaseModel):
name: str = Field(..., description="Component name (e.g., 'Button')")
source: str = Field(..., description="Component source (e.g., shadcn, custom, figma)")
description: Optional[str] = Field(None, description="Component description")
-
- classification: AtomicType = Field(default=AtomicType.UNKNOWN, description="Atomic design classification")
-
+ classification: AtomicType = Field(
+ default=AtomicType.UNKNOWN, description="Atomic design classification"
+ )
variants: List[str] = Field(default_factory=list, description="Available variants")
props: Dict[str, Any] = Field(default_factory=dict, description="Component props schema")
-
- dependencies: List[str] = Field(default_factory=list, description="UUIDs of components this component depends on (e.g., a composite component depends on primitive components)")
- sub_components: List[str] = Field(default_factory=list, description="UUIDs of components that are children of this component in the atomic hierarchy")
-
+ dependencies: List[str] = Field(
+ default_factory=list, description="UUIDs of components this component depends on"
+ )
+ sub_components: List[str] = Field(
+ default_factory=list, description="UUIDs of components that are children of this component"
+ )
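A short sketch of the classification and dependency fields in use, assuming only the fields visible in this diff (name and source are the required ones):

from dss.models.component import AtomicType, Component

button = Component(name="Button", source="shadcn", classification=AtomicType.PRIMITIVE_COMPONENT)
card = Component(
    name="Card",
    source="shadcn",
    classification=AtomicType.COMPOSITE_COMPONENT,
    dependencies=[button.uuid],    # a composite depends on a primitive
    sub_components=[button.uuid],  # and lists it as a child in the hierarchy
)
print(card.model_dump(mode="json")["classification"])  # "composite_component"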
diff --git a/dss/models/project.py b/dss/models/project.py
index be39819..8fc26db 100644
--- a/dss/models/project.py
+++ b/dss/models/project.py
@@ -1,15 +1,18 @@
-"""Project models"""
+"""Project models."""
from datetime import datetime
-from typing import Dict, List, Optional
+from typing import List, Optional
from uuid import uuid4
-from pydantic import BaseModel, Field, ConfigDict
-from .theme import Theme
+
+from pydantic import BaseModel, ConfigDict, Field
+
from .component import Component
+from .theme import Theme
class ProjectMetadata(BaseModel):
- """Project metadata"""
+ """Project metadata."""
+
created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
author: Optional[str] = None
@@ -18,7 +21,8 @@ class ProjectMetadata(BaseModel):
class Project(BaseModel):
- """A design system project"""
+ """A design system project."""
+
model_config = ConfigDict(arbitrary_types_allowed=True)
id: str = Field(..., description="Unique project ID")
@@ -31,7 +35,7 @@ class Project(BaseModel):
metadata: ProjectMetadata = Field(default_factory=ProjectMetadata)
def get_component(self, name: str) -> Optional[Component]:
- """Get component by name"""
+ """Get component by name."""
for component in self.components:
if component.name == name:
return component
diff --git a/dss/models/team_dashboard.py b/dss/models/team_dashboard.py
index 5d9f2bd..36a362d 100644
--- a/dss/models/team_dashboard.py
+++ b/dss/models/team_dashboard.py
@@ -1,5 +1,5 @@
"""
-Team Dashboard Models - Component-Centric Architecture
+Team Dashboard Models - Component-Centric Architecture.
Following expert recommendation: Component is the central entity,
with team-specific views as relationships.
@@ -7,14 +7,16 @@ with team-specific views as relationships.
Expert insight: "Teams are *views*; Components are the *truth*."
"""
-from pydantic import BaseModel, Field
-from typing import Optional, List, Dict, Any
from datetime import datetime
from enum import Enum
+from typing import Any, Dict, List, Optional
+
+from pydantic import BaseModel, Field
class TeamRole(str, Enum):
- """Team roles for dashboard views"""
+ """Team roles for dashboard views."""
+
QA = "qa"
UI = "ui"
UX = "ux"
@@ -22,7 +24,8 @@ class TeamRole(str, Enum):
class TokenSource(str, Enum):
- """Source of design tokens"""
+ """Source of design tokens."""
+
FIGMA = "figma"
CSS = "css"
SCSS = "scss"
@@ -32,7 +35,8 @@ class TokenSource(str, Enum):
class ComplianceStatus(str, Enum):
- """Compliance check status"""
+ """Compliance check status."""
+
PASS = "pass"
FAIL = "fail"
WARNING = "warning"
@@ -40,7 +44,8 @@ class ComplianceStatus(str, Enum):
class Severity(str, Enum):
- """Issue severity levels"""
+ """Issue severity levels."""
+
CRITICAL = "critical"
HIGH = "high"
MEDIUM = "medium"
@@ -52,14 +57,16 @@ class Severity(str, Enum):
# Component-Centric Core Models
# ============================================================================
+
class ComponentToken(BaseModel):
"""
- Tracks which design tokens a component uses (UX Team View)
+ Tracks which design tokens a component uses (UX Team View).
Enables queries like:
- "Which components use the old 'blue-500' token?"
- "Show me all components using color tokens from Figma"
"""
+
id: Optional[int] = None
component_id: int
token_name: str = Field(..., description="e.g., 'color-primary-500'")
@@ -74,12 +81,13 @@ class ComponentToken(BaseModel):
class CodeMetric(BaseModel):
"""
- Tracks implementation details (UI Team View)
+ Tracks implementation details (UI Team View).
Enables queries like:
- "Which components have high complexity but low test coverage?"
- "Show me components with the most props"
"""
+
id: Optional[int] = None
component_id: int
file_path: str
@@ -95,12 +103,13 @@ class CodeMetric(BaseModel):
class TestResult(BaseModel):
"""
- Tracks compliance and regression tests (QA Team View)
+ Tracks compliance and regression tests (QA Team View).
Enables queries like:
- "Which components failed the last ESRE check?"
- "Show me components with regressions"
"""
+
id: Optional[int] = None
component_id: int
test_type: str = Field(..., description="'esre', 'regression', 'visual', 'unit'")
@@ -118,12 +127,14 @@ class TestResult(BaseModel):
# Supporting Models for Team Dashboards
# ============================================================================
+
class FigmaFile(BaseModel):
"""
- Figma file tracking (UX Dashboard)
+ Figma file tracking (UX Dashboard).
Supports multiple Figma files per project
"""
+
id: Optional[int] = None
project_id: int
figma_url: str
@@ -137,11 +148,11 @@ class FigmaFile(BaseModel):
class ImplementationSnapshot(BaseModel):
- """
- Implementation snapshot for regression testing (UI Dashboard)
+ """Implementation snapshot for regression testing (UI Dashboard).
"Golden Master" approach for comparison
"""
+
id: Optional[int] = None
project_id: int
snapshot_name: str
@@ -158,10 +169,11 @@ class ImplementationSnapshot(BaseModel):
class ESREDefinition(BaseModel):
"""
- ESRE (Expected System Response Evaluation) Definition (QA Dashboard)
+ ESRE (Expected System Response Evaluation) Definition (QA Dashboard).
Natural language requirements that should be validated
"""
+
id: Optional[int] = None
project_id: int
name: str = Field(..., description="Requirement name, e.g., 'Primary Button Color'")
@@ -178,10 +190,11 @@ class ESREDefinition(BaseModel):
class TokenDrift(BaseModel):
"""
- Token drift detection result (UI Dashboard)
+ Token drift detection result (UI Dashboard).
Tracks when code uses values that differ from design tokens
"""
+
id: Optional[int] = None
component_id: int
property_name: str = Field(..., description="CSS property or prop name")
@@ -200,12 +213,14 @@ class TokenDrift(BaseModel):
# Dashboard View Models (API Responses)
# ============================================================================
+
class DashboardSummary(BaseModel):
"""
- Summary for dashboard overview
+ Summary for dashboard overview.
This is the "thin slice" endpoint response
"""
+
project_id: int
project_name: str
total_components: int
@@ -230,7 +245,8 @@ class DashboardSummary(BaseModel):
class QADashboardView(BaseModel):
- """QA Dashboard data"""
+ """QA Dashboard data."""
+
esre_definitions: List[ESREDefinition]
failed_tests: List[TestResult]
compliance_rate: float
@@ -238,7 +254,8 @@ class QADashboardView(BaseModel):
class UIDashboardView(BaseModel):
- """UI Dashboard data"""
+ """UI Dashboard data."""
+
token_drifts: List[TokenDrift]
high_complexity_components: List[Dict[str, Any]]
recent_snapshots: List[ImplementationSnapshot]
@@ -246,7 +263,8 @@ class UIDashboardView(BaseModel):
class UXDashboardView(BaseModel):
- """UX Dashboard data"""
+ """UX Dashboard data."""
+
figma_files: List[FigmaFile]
component_tokens: List[ComponentToken]
recent_syncs: List[Dict[str, Any]]
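The docstrings above promise queries such as "which components use the old 'blue-500' token?". The storage layer is not part of this diff, so here is that query sketched over plain in-memory models, assuming the ComponentToken fields not visible in this hunk are optional:

from dss.models.team_dashboard import ComponentToken

usages = [
    ComponentToken(component_id=1, token_name="blue-500"),
    ComponentToken(component_id=2, token_name="color-primary-500"),
]

# Components still referencing the deprecated token.
stale = [u.component_id for u in usages if u.token_name == "blue-500"]
print(stale)  # [1]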
diff --git a/dss/models/theme.py b/dss/models/theme.py
index 243b781..c7c6a9f 100644
--- a/dss/models/theme.py
+++ b/dss/models/theme.py
@@ -1,14 +1,16 @@
-"""Theme and design token models"""
+"""Theme and design token models."""
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
from uuid import uuid4
-from pydantic import BaseModel, Field, ConfigDict
+
+from pydantic import BaseModel, ConfigDict, Field
class TokenCategory(str, Enum):
- """Categories of design tokens"""
+ """Categories of design tokens."""
+
COLOR = "color"
SPACING = "spacing"
TYPOGRAPHY = "typography"
@@ -19,7 +21,8 @@ class TokenCategory(str, Enum):
class DesignToken(BaseModel):
- """A single design token with value and metadata"""
+ """A single design token with value and metadata."""
+
model_config = ConfigDict(arbitrary_types_allowed=True)
uuid: str = Field(default_factory=lambda: str(uuid4()), description="UUID for export/import")
@@ -35,7 +38,8 @@ class DesignToken(BaseModel):
class Theme(BaseModel):
- """Complete theme configuration"""
+ """Complete theme configuration."""
+
model_config = ConfigDict(arbitrary_types_allowed=True)
uuid: str = Field(default_factory=lambda: str(uuid4()), description="UUID for export/import")
@@ -46,9 +50,5 @@ class Theme(BaseModel):
updated_at: datetime = Field(default_factory=datetime.utcnow, description="Update timestamp")
def get_tokens_by_category(self, category: TokenCategory) -> Dict[str, DesignToken]:
- """Filter tokens by category"""
- return {
- name: token
- for name, token in self.tokens.items()
- if token.category == category
- }
+ """Filter tokens by category."""
+ return {name: token for name, token in self.tokens.items() if token.category == category}
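A sketch of the category filter above. Only get_tokens_by_category's behavior comes from this diff; the constructor arguments (name, value, tokens) are assumptions about fields not shown here:

from dss.models.theme import DesignToken, Theme, TokenCategory

theme = Theme(
    name="light",  # assumed field
    tokens={
        "primary": DesignToken(name="primary", value="#09090b", category=TokenCategory.COLOR),
        "space-4": DesignToken(name="space-4", value="1rem", category=TokenCategory.SPACING),
    },
)
print(list(theme.get_tokens_by_category(TokenCategory.COLOR)))  # ["primary"]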
diff --git a/dss/project/__init__.py b/dss/project/__init__.py
index d9a875e..cd2adf5 100644
--- a/dss/project/__init__.py
+++ b/dss/project/__init__.py
@@ -1,5 +1,5 @@
"""
-DSS Project Management Module
+DSS Project Management Module.
Handles project lifecycle: initialization, configuration, syncing, and building.
@@ -7,45 +7,33 @@ DSS "eats its own dog food" - the shadcn/ui Figma (team 857274453634536756)
is the canonical base layer. All skins and projects inherit from it.
"""
-from dss.project.models import (
- DSSProject,
- ProjectConfig,
- FigmaSource,
- FigmaFile,
- OutputConfig,
- ProjectStatus,
-)
-
-from dss.project.manager import (
- ProjectManager,
- ProjectRegistry,
-)
-
-from dss.project.figma import (
- FigmaProjectSync,
- FigmaRateLimitError,
- RateLimitConfig,
- RateLimitState,
-)
-
from dss.project.core import (
- DSS_FIGMA_REFERENCE,
- DSSFigmaReference,
- DSS_CORE_TOKEN_CATEGORIES,
DSS_CORE_COMPONENTS,
DSS_CORE_THEMES,
- get_dss_figma_reference,
+ DSS_CORE_TOKEN_CATEGORIES,
+ DSS_FIGMA_REFERENCE,
+ DSSFigmaReference,
ensure_dss_directories,
- is_dss_core_component,
get_component_variants,
+ get_dss_figma_reference,
+ is_dss_core_component,
+)
+from dss.project.figma import FigmaProjectSync, FigmaRateLimitError, RateLimitConfig, RateLimitState
+from dss.project.manager import ProjectManager, ProjectRegistry
+from dss.project.models import (
+ DSSProject,
+ FigmaFile,
+ FigmaSource,
+ OutputConfig,
+ ProjectConfig,
+ ProjectStatus,
)
-
from dss.project.sync import (
DSSCoreSync,
- sync_dss_core,
get_dss_core_status,
- get_dss_core_tokens,
get_dss_core_themes,
+ get_dss_core_tokens,
+ sync_dss_core,
)
__all__ = [
diff --git a/dss/project/core.py b/dss/project/core.py
index 6636088..8f7e071 100644
--- a/dss/project/core.py
+++ b/dss/project/core.py
@@ -1,5 +1,5 @@
"""
-DSS Core Configuration
+DSS Core Configuration.
Defines the canonical DSS design system reference.
DSS "eats its own dog food" - using shadcn/ui as the base layer.
@@ -14,14 +14,15 @@ from dataclasses import dataclass
from pathlib import Path
from typing import Optional
-
# =============================================================================
# DSS CANONICAL FIGMA REFERENCE
# =============================================================================
+
@dataclass(frozen=True)
class DSSFigmaReference:
"""Immutable reference to DSS's canonical Figma source."""
+
team_id: str = "857274453634536756"
team_name: str = "bruno.sarlo.uy"
project_id: str = "10864574"
@@ -57,30 +58,46 @@ DSS_CORE_TOKEN_CATEGORIES = {
"colors": {
"description": "Color palette based on shadcn/ui",
"includes": [
- "background", "foreground", "card", "popover", "primary",
- "secondary", "muted", "accent", "destructive", "border",
- "input", "ring", "chart"
- ]
+ "background",
+ "foreground",
+ "card",
+ "popover",
+ "primary",
+ "secondary",
+ "muted",
+ "accent",
+ "destructive",
+ "border",
+ "input",
+ "ring",
+ "chart",
+ ],
},
"typography": {
"description": "Typography scale from shadcn/ui",
"includes": [
- "heading-1", "heading-2", "heading-3", "heading-4",
- "paragraph-large", "paragraph-small", "label", "caption"
- ]
+ "heading-1",
+ "heading-2",
+ "heading-3",
+ "heading-4",
+ "paragraph-large",
+ "paragraph-small",
+ "label",
+ "caption",
+ ],
},
"spacing": {
"description": "Spacing scale",
- "includes": ["0", "1", "2", "3", "4", "5", "6", "8", "10", "12", "16", "20", "24"]
+ "includes": ["0", "1", "2", "3", "4", "5", "6", "8", "10", "12", "16", "20", "24"],
},
"radius": {
"description": "Border radius values",
- "includes": ["none", "sm", "md", "lg", "xl", "full"]
+ "includes": ["none", "sm", "md", "lg", "xl", "full"],
},
"shadows": {
"description": "Shadow/elevation scale",
- "includes": ["none", "sm", "md", "lg", "xl", "2xl", "inner"]
- }
+ "includes": ["none", "sm", "md", "lg", "xl", "2xl", "inner"],
+ },
}
@@ -99,18 +116,15 @@ DSS_CORE_COMPONENTS = {
"Switch": {"variants": ["default"]},
"Slider": {"variants": ["default"]},
"Toggle": {"variants": ["default", "outline"]},
-
# Layout
"Card": {"variants": ["default"]},
"Separator": {"variants": ["default"]},
"AspectRatio": {"variants": ["default"]},
"ScrollArea": {"variants": ["default"]},
-
# Data Display
"Avatar": {"variants": ["default"]},
"Badge": {"variants": ["default", "secondary", "destructive", "outline"]},
"Table": {"variants": ["default"]},
-
# Feedback
"Alert": {"variants": ["default", "destructive"]},
"AlertDialog": {"variants": ["default"]},
@@ -118,7 +132,6 @@ DSS_CORE_COMPONENTS = {
"Skeleton": {"variants": ["default"]},
"Toast": {"variants": ["default", "destructive"]},
"Tooltip": {"variants": ["default"]},
-
# Overlay
"Dialog": {"variants": ["default"]},
"Drawer": {"variants": ["default"]},
@@ -127,25 +140,21 @@ DSS_CORE_COMPONENTS = {
"ContextMenu": {"variants": ["default"]},
"Sheet": {"variants": ["default"]},
"HoverCard": {"variants": ["default"]},
-
# Navigation
"Tabs": {"variants": ["default"]},
"NavigationMenu": {"variants": ["default"]},
"Breadcrumb": {"variants": ["default"]},
"Pagination": {"variants": ["default"]},
"Menubar": {"variants": ["default"]},
-
# Form
"Form": {"variants": ["default"]},
"Label": {"variants": ["default"]},
"Calendar": {"variants": ["default"]},
"DatePicker": {"variants": ["default"]},
"Combobox": {"variants": ["default"]},
-
# Data
"DataTable": {"variants": ["default"]},
"Command": {"variants": ["default"]},
-
# Layout Containers
"Accordion": {"variants": ["default"]},
"Collapsible": {"variants": ["default"]},
@@ -181,7 +190,7 @@ DSS_CORE_THEMES = {
"border": "240 5.9% 90%",
"input": "240 5.9% 90%",
"ring": "240 5.9% 10%",
- }
+ },
},
"dark": {
"description": "Default dark theme based on shadcn/ui zinc",
@@ -205,8 +214,8 @@ DSS_CORE_THEMES = {
"border": "240 3.7% 15.9%",
"input": "240 3.7% 15.9%",
"ring": "240 4.9% 83.9%",
- }
- }
+ },
+ },
}
@@ -214,6 +223,7 @@ DSS_CORE_THEMES = {
# HELPER FUNCTIONS
# =============================================================================
+
def get_dss_figma_reference() -> DSSFigmaReference:
"""Get the canonical DSS Figma reference."""
return DSS_FIGMA_REFERENCE
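The constants above are plain data, so reading them needs no Figma access; the values below are taken from this diff:

from dss.project.core import DSS_CORE_TOKEN_CATEGORIES, get_dss_figma_reference

ref = get_dss_figma_reference()
print(ref.team_id)  # "857274453634536756"

print(sorted(DSS_CORE_TOKEN_CATEGORIES))  # ['colors', 'radius', 'shadows', 'spacing', 'typography']
print(DSS_CORE_TOKEN_CATEGORIES["radius"]["includes"])  # ['none', 'sm', 'md', 'lg', 'xl', 'full']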
diff --git a/dss/project/figma.py b/dss/project/figma.py
index 9201269..1744035 100644
--- a/dss/project/figma.py
+++ b/dss/project/figma.py
@@ -1,20 +1,21 @@
"""
-Figma Integration for DSS Projects
+Figma Integration for DSS Projects.
Handles Figma API communication, project/file listing, and token extraction.
Includes rate limit handling with exponential backoff.
"""
-import os
-import json
import asyncio
+import json
+import logging
+import os
import time
-import requests
+from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
-from dataclasses import dataclass, field
-import logging
+
+import requests
logger = logging.getLogger(__name__)
@@ -23,9 +24,11 @@ logger = logging.getLogger(__name__)
# RATE LIMIT CONFIGURATION
# =============================================================================
+
@dataclass
class RateLimitConfig:
"""Configuration for rate limit handling."""
+
max_retries: int = 5
initial_delay: float = 1.0 # seconds
max_delay: float = 60.0 # seconds
@@ -36,6 +39,7 @@ class RateLimitConfig:
@dataclass
class RateLimitState:
"""Track rate limit state across requests."""
+
remaining: Optional[int] = None
reset_time: Optional[float] = None
last_request_time: float = 0
@@ -43,10 +47,10 @@ class RateLimitState:
def update_from_headers(self, headers: Dict[str, str]):
"""Update state from Figma response headers."""
- if 'X-RateLimit-Remaining' in headers:
- self.remaining = int(headers['X-RateLimit-Remaining'])
- if 'X-RateLimit-Reset' in headers:
- self.reset_time = float(headers['X-RateLimit-Reset'])
+ if "X-RateLimit-Remaining" in headers:
+ self.remaining = int(headers["X-RateLimit-Remaining"])
+ if "X-RateLimit-Reset" in headers:
+ self.reset_time = float(headers["X-RateLimit-Reset"])
self.last_request_time = time.time()
def get_wait_time(self) -> float:
@@ -68,13 +72,16 @@ class RateLimitState:
class FigmaRateLimitError(Exception):
"""Raised when rate limit is exceeded after retries."""
+
def __init__(self, message: str, retry_after: Optional[float] = None):
super().__init__(message)
self.retry_after = retry_after
+
# Optional aiohttp import for async operations
try:
import aiohttp
+
AIOHTTP_AVAILABLE = True
except ImportError:
AIOHTTP_AVAILABLE = False
@@ -82,6 +89,7 @@ except ImportError:
# Fallback to requests for sync operations
try:
import requests
+
REQUESTS_AVAILABLE = True
except ImportError:
REQUESTS_AVAILABLE = False
@@ -90,6 +98,7 @@ except ImportError:
@dataclass
class FigmaAPIConfig:
"""Figma API configuration."""
+
token: str
base_url: str = "https://api.figma.com/v1"
timeout: int = 30
@@ -99,6 +108,7 @@ class FigmaAPIConfig:
@dataclass
class FigmaStyleData:
"""Extracted style data from Figma."""
+
colors: Dict[str, Any] = field(default_factory=dict)
typography: Dict[str, Any] = field(default_factory=dict)
effects: Dict[str, Any] = field(default_factory=dict)
@@ -117,7 +127,9 @@ class FigmaProjectSync:
- Converting to DSS token format
"""
- def __init__(self, token: Optional[str] = None, rate_limit_config: Optional[RateLimitConfig] = None):
+ def __init__(
+ self, token: Optional[str] = None, rate_limit_config: Optional[RateLimitConfig] = None
+ ):
"""
Initialize Figma sync.
@@ -127,11 +139,12 @@ class FigmaProjectSync:
"""
self.token = token or os.environ.get("FIGMA_TOKEN", "")
if not self.token:
- raise ValueError("Figma token required. Set FIGMA_TOKEN env var or pass token parameter.")
+ raise ValueError(
+ "Figma token required. Set FIGMA_TOKEN env var or pass token parameter."
+ )
self.config = FigmaAPIConfig(
- token=self.token,
- rate_limit=rate_limit_config or RateLimitConfig()
+ token=self.token, rate_limit=rate_limit_config or RateLimitConfig()
)
self._session: Optional[aiohttp.ClientSession] = None
self._rate_limit_state = RateLimitState()
@@ -155,7 +168,7 @@ class FigmaProjectSync:
if retry_after:
base_delay = retry_after
else:
- base_delay = config.initial_delay * (config.backoff_factor ** attempt)
+ base_delay = config.initial_delay * (config.backoff_factor**attempt)
# Cap at max delay
delay = min(base_delay, config.max_delay)
@@ -164,12 +177,7 @@ class FigmaProjectSync:
jitter = delay * config.jitter * random.random()
return delay + jitter
- def _request_with_retry(
- self,
- method: str,
- url: str,
- **kwargs
- ) -> requests.Response:
+ def _request_with_retry(self, method: str, url: str, **kwargs) -> requests.Response:
"""
Make HTTP request with rate limit retry logic.
@@ -201,11 +209,7 @@ class FigmaProjectSync:
try:
# Make request
response = requests.request(
- method,
- url,
- headers=self.headers,
- timeout=self.config.timeout,
- **kwargs
+ method, url, headers=self.headers, timeout=self.config.timeout, **kwargs
)
# Update rate limit state from headers
@@ -217,9 +221,9 @@ class FigmaProjectSync:
# Get retry-after from header
retry_after = None
- if 'Retry-After' in response.headers:
+ if "Retry-After" in response.headers:
try:
- retry_after = float(response.headers['Retry-After'])
+ retry_after = float(response.headers["Retry-After"])
except ValueError:
pass
@@ -234,7 +238,7 @@ class FigmaProjectSync:
else:
raise FigmaRateLimitError(
f"Rate limit exceeded after {config.max_retries} retries",
- retry_after=retry_after
+ retry_after=retry_after,
)
# Success
@@ -260,10 +264,7 @@ class FigmaProjectSync:
raise RuntimeError("Unexpected state in retry loop")
async def _request_with_retry_async(
- self,
- method: str,
- url: str,
- **kwargs
+ self, method: str, url: str, **kwargs
) -> Tuple[int, Dict[str, Any]]:
"""
Make async HTTP request with rate limit retry logic.
@@ -274,7 +275,6 @@ class FigmaProjectSync:
if not AIOHTTP_AVAILABLE:
raise ImportError("aiohttp library required for async operations")
- import random
config = self.config.rate_limit
session = await self._get_session()
last_error = None
@@ -296,9 +296,9 @@ class FigmaProjectSync:
self._rate_limit_state.record_429()
retry_after = None
- if 'Retry-After' in response.headers:
+ if "Retry-After" in response.headers:
try:
- retry_after = float(response.headers['Retry-After'])
+ retry_after = float(response.headers["Retry-After"])
except ValueError:
pass
@@ -313,7 +313,7 @@ class FigmaProjectSync:
else:
raise FigmaRateLimitError(
f"Rate limit exceeded after {config.max_retries} retries",
- retry_after=retry_after
+ retry_after=retry_after,
)
# Success
@@ -343,7 +343,9 @@ class FigmaProjectSync:
return {
"remaining": state.remaining,
"reset_time": state.reset_time,
- "reset_in_seconds": max(0, state.reset_time - time.time()) if state.reset_time else None,
+ "reset_in_seconds": max(0, state.reset_time - time.time())
+ if state.reset_time
+ else None,
"consecutive_429s": state.consecutive_429s,
"last_request_time": state.last_request_time,
}
@@ -376,7 +378,7 @@ class FigmaProjectSync:
"last_modified": f.get("last_modified"),
}
for f in data.get("files", [])
- ]
+ ],
}
def list_team_projects(self, team_id: str) -> Dict[str, Any]:
@@ -401,7 +403,7 @@ class FigmaProjectSync:
"name": p.get("name"),
}
for p in data.get("projects", [])
- ]
+ ],
}
def discover_team_structure(self, team_id: str) -> Dict[str, Any]:
@@ -447,16 +449,25 @@ class FigmaProjectSync:
for file in project_data["files"]:
file_name_lower = file.get("name", "").lower()
# Look for common UIKit naming patterns
- if any(pattern in file_name_lower for pattern in [
- "uikit", "ui-kit", "ui kit",
- "design system", "design-system",
- "tokens", "foundations",
- "core", "base"
- ]):
+ if any(
+ pattern in file_name_lower
+ for pattern in [
+ "uikit",
+ "ui-kit",
+ "ui kit",
+ "design system",
+ "design-system",
+ "tokens",
+ "foundations",
+ "core",
+ "base",
+ ]
+ ):
# Prefer exact "uikit" match
is_better_match = (
- result["uikit"] is None or
- "uikit" in file_name_lower and "uikit" not in result["uikit"]["name"].lower()
+ result["uikit"] is None
+ or "uikit" in file_name_lower
+ and "uikit" not in result["uikit"]["name"].lower()
)
if is_better_match:
result["uikit"] = {
@@ -533,10 +544,7 @@ class FigmaProjectSync:
if self._session is None or self._session.closed:
timeout = aiohttp.ClientTimeout(total=self.config.timeout)
- self._session = aiohttp.ClientSession(
- headers=self.headers,
- timeout=timeout
- )
+ self._session = aiohttp.ClientSession(headers=self.headers, timeout=timeout)
return self._session
async def close(self):
@@ -562,7 +570,7 @@ class FigmaProjectSync:
"last_modified": f.get("last_modified"),
}
for f in data.get("files", [])
- ]
+ ],
}
async def get_file_styles_async(self, file_key: str) -> FigmaStyleData:
@@ -599,9 +607,7 @@ class FigmaProjectSync:
return self._parse_styles(file_data, styles_data, variables)
async def sync_project_files_async(
- self,
- project_id: str,
- file_keys: Optional[List[str]] = None
+ self, project_id: str, file_keys: Optional[List[str]] = None
) -> Dict[str, FigmaStyleData]:
"""
Sync styles from multiple files in a project (async).
@@ -636,10 +642,7 @@ class FigmaProjectSync:
# =========================================================================
def _parse_styles(
- self,
- file_data: Dict[str, Any],
- styles_data: Dict[str, Any],
- variables: Dict[str, Any]
+ self, file_data: Dict[str, Any], styles_data: Dict[str, Any], variables: Dict[str, Any]
) -> FigmaStyleData:
"""Parse Figma API responses into FigmaStyleData."""
result = FigmaStyleData()
@@ -670,9 +673,7 @@ class FigmaProjectSync:
return result
def _extract_colors(
- self,
- global_styles: Dict[str, Any],
- document: Dict[str, Any]
+ self, global_styles: Dict[str, Any], document: Dict[str, Any]
) -> Dict[str, Any]:
"""Extract color styles."""
colors = {}
@@ -691,9 +692,7 @@ class FigmaProjectSync:
return colors
def _extract_typography(
- self,
- global_styles: Dict[str, Any],
- document: Dict[str, Any]
+ self, global_styles: Dict[str, Any], document: Dict[str, Any]
) -> Dict[str, Any]:
"""Extract typography styles."""
typography = {}
@@ -711,9 +710,7 @@ class FigmaProjectSync:
return typography
def _extract_effects(
- self,
- global_styles: Dict[str, Any],
- document: Dict[str, Any]
+ self, global_styles: Dict[str, Any], document: Dict[str, Any]
) -> Dict[str, Any]:
"""Extract effect styles (shadows, blurs)."""
effects = {}
@@ -773,11 +770,7 @@ class FigmaProjectSync:
Returns a dict compatible with DSS TokenCollection.
"""
- tokens = {
- "source": "figma",
- "timestamp": datetime.now().isoformat(),
- "tokens": {}
- }
+ tokens = {"source": "figma", "timestamp": datetime.now().isoformat(), "tokens": {}}
# Add color tokens
for path, data in style_data.colors.items():
@@ -828,10 +821,7 @@ class FigmaProjectSync:
return tokens
def save_tokens(
- self,
- style_data: FigmaStyleData,
- output_path: Path,
- format: str = "json"
+ self, style_data: FigmaStyleData, output_path: Path, format: str = "json"
) -> Path:
"""
Save extracted tokens to file.
@@ -855,12 +845,16 @@ class FigmaProjectSync:
elif format == "raw":
file_path = output_path / "figma-raw.json"
with open(file_path, "w") as f:
- json.dump({
- "colors": style_data.colors,
- "typography": style_data.typography,
- "effects": style_data.effects,
- "variables": style_data.variables,
- }, f, indent=2)
+ json.dump(
+ {
+ "colors": style_data.colors,
+ "typography": style_data.typography,
+ "effects": style_data.effects,
+ "variables": style_data.variables,
+ },
+ f,
+ indent=2,
+ )
else:
raise ValueError(f"Unknown format: {format}")
diff --git a/dss/project/manager.py b/dss/project/manager.py
index 7195e65..60bd328 100644
--- a/dss/project/manager.py
+++ b/dss/project/manager.py
@@ -1,5 +1,5 @@
"""
-DSS Project Manager
+DSS Project Manager.
Handles project lifecycle operations: init, sync, build, list.
@@ -7,32 +7,21 @@ Projects inherit from DSS core (shadcn/ui) as the base layer.
The hierarchy is: DSS Core → Skins → Project customizations.
"""
-import json
-import os
import asyncio
+import json
+import logging
+import os
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
-import logging
-from dss.project.models import (
- DSSProject,
- ProjectConfig,
- FigmaSource,
- FigmaFile,
- OutputConfig,
- ProjectStatus,
-)
-from dss.project.figma import FigmaProjectSync, FigmaStyleData, FigmaRateLimitError
-from dss.project.core import (
- DSS_FIGMA_REFERENCE,
- DSS_CORE_THEMES,
- DSS_CORE_COMPONENTS,
-)
-from dss.project.sync import DSSCoreSync, get_dss_core_tokens, get_dss_core_themes
-from dss.ingest.sources.figma import FigmaTokenSource
-from dss.ingest.merge import TokenMerger, MergeStrategy
from dss.ingest.base import TokenCollection
+from dss.ingest.merge import MergeStrategy, TokenMerger
+from dss.ingest.sources.figma import FigmaTokenSource
+from dss.project.core import DSS_CORE_THEMES
+from dss.project.figma import FigmaProjectSync
+from dss.project.models import DSSProject, FigmaSource, OutputConfig, ProjectConfig, ProjectStatus
+from dss.project.sync import get_dss_core_tokens
logger = logging.getLogger(__name__)
@@ -70,11 +59,15 @@ class ProjectRegistry:
"""Save registry to disk."""
self.registry_path.parent.mkdir(parents=True, exist_ok=True)
with open(self.registry_path, "w") as f:
- json.dump({
- "version": "1.0",
- "updated_at": datetime.now().isoformat(),
- "projects": self._projects,
- }, f, indent=2)
+ json.dump(
+ {
+ "version": "1.0",
+ "updated_at": datetime.now().isoformat(),
+ "projects": self._projects,
+ },
+ f,
+ indent=2,
+ )
def register(self, project: DSSProject):
"""
@@ -273,7 +266,9 @@ class ProjectManager:
uikit_info = team_structure.get("uikit")
if uikit_info:
project.config.figma.uikit_file_key = uikit_info["key"]
- logger.info(f"Found UIKit file: '{uikit_info['name']}' in project '{uikit_info['project_name']}'")
+ logger.info(
+ f"Found UIKit file: '{uikit_info['name']}' in project '{uikit_info['project_name']}'"
+ )
total_files = sum(len(p.get("files", [])) for p in team_structure.get("projects", []))
project.config.updated_at = datetime.now()
@@ -282,7 +277,9 @@ class ProjectManager:
self._save_config(project)
self.registry.update_status(project.config.name, project.status)
- logger.info(f"Added Figma team {team_id} with {len(team_structure.get('projects', []))} projects, {total_files} files")
+ logger.info(
+ f"Added Figma team {team_id} with {len(team_structure.get('projects', []))} projects, {total_files} files"
+ )
return project
def add_figma_project(
@@ -327,11 +324,18 @@ class ProjectManager:
# Look for UIKit file
if auto_find_uikit and uikit_key is None:
file_name_lower = file_data["name"].lower()
- if any(pattern in file_name_lower for pattern in [
- "uikit", "ui-kit", "ui kit",
- "design system", "design-system",
- "tokens", "foundations",
- ]):
+ if any(
+ pattern in file_name_lower
+ for pattern in [
+ "uikit",
+ "ui-kit",
+ "ui kit",
+ "design system",
+ "design-system",
+ "tokens",
+ "foundations",
+ ]
+ ):
uikit_key = file_data["key"]
logger.info(f"Found UIKit file: '{file_data['name']}'")
@@ -345,7 +349,9 @@ class ProjectManager:
self._save_config(project)
self.registry.update_status(project.config.name, project.status)
- logger.info(f"Added Figma project {figma_project_id} with {len(project_data['files'])} files")
+ logger.info(
+ f"Added Figma project {figma_project_id} with {len(project_data['files'])} files"
+ )
return project
def add_figma_file(
@@ -414,14 +420,14 @@ class ProjectManager:
raise ValueError("Figma token not provided and FIGMA_TOKEN env var is not set.")
source = FigmaTokenSource(figma_token=token, verbose=verbose)
-
+
# Determine which files to sync
files_to_sync = []
if file_keys:
files_to_sync = [f for f in project.config.figma.files if f.key in file_keys]
else:
files_to_sync = project.config.figma.files
-
+
if not files_to_sync:
logger.warning("No matching Figma files found to sync.")
return project
@@ -445,7 +451,9 @@ class ProjectManager:
all_collections.append(token_collection)
all_components.extend(extracted_components)
- logger.info(f"Synced {len(token_collection)} tokens and {len(extracted_components)} components from '{file_info.name}'")
+ logger.info(
+ f"Synced {len(token_collection)} tokens and {len(extracted_components)} components from '{file_info.name}'"
+ )
# Update sync timestamp
file_info.last_synced = datetime.now()
@@ -453,7 +461,9 @@ class ProjectManager:
# --- Merge Token Collections ---
if len(all_collections) > 1:
logger.info(f"Merging {len(all_collections)} token collections...")
- merger = TokenMerger(strategy=MergeStrategy.PREFER_FIGMA) # or another appropriate strategy
+ merger = TokenMerger(
+ strategy=MergeStrategy.PREFER_FIGMA
+ ) # or another appropriate strategy
merge_result = merger.merge(all_collections)
final_collection = merge_result.collection
logger.info(f"Merge complete. Total unique tokens: {len(final_collection)}")
@@ -466,24 +476,23 @@ class ProjectManager:
# --- Update Project Model ---
# Add extracted components to the project
project.components = all_components
-
+
# Associate tokens with components (basic example)
for component in project.components:
for token in final_collection.tokens:
if component.name.lower() in token.name.lower():
- if not hasattr(component, 'associated_tokens'):
+ if not hasattr(component, "associated_tokens"):
component.associated_tokens = []
component.associated_tokens.append(token.name)
-
# --- Save Final TokenCollection ---
cache_dir = project.path / ".dss" / "cache"
cache_dir.mkdir(parents=True, exist_ok=True)
output_path = cache_dir / "raw_figma_tokencollection.json"
-
+
with open(output_path, "w") as f:
f.write(final_collection.to_json())
-
+
logger.info(f"Raw TokenCollection saved to: {output_path}")
# Update project state
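The merge step above collapses per-file token collections, letting Figma-sourced values win. A sketch of that pattern, mirroring the calls in this diff (merger.merge(...) returning a result with a .collection attribute); TokenMerger's internals and the exact PREFER_FIGMA semantics are not shown here:

from dss.ingest.merge import MergeStrategy, TokenMerger

def merge_collections(collections):
    """Collapse per-file TokenCollections the way sync does above."""
    if len(collections) > 1:
        merger = TokenMerger(strategy=MergeStrategy.PREFER_FIGMA)
        return merger.merge(collections).collection
    return collections[0]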
diff --git a/dss/project/models.py b/dss/project/models.py
index 8a13adb..a42f137 100644
--- a/dss/project/models.py
+++ b/dss/project/models.py
@@ -1,5 +1,4 @@
-"""
-DSS Project Models
+"""DSS Project Models.
Pydantic models for project configuration and state.
"""
@@ -7,19 +6,24 @@ Pydantic models for project configuration and state.
from datetime import datetime
from enum import Enum
from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional
+
from pydantic import BaseModel, Field, field_validator
+from dss.models.component import Component
+
class ProjectStatus(str, Enum):
"""
Project lifecycle status.
+
- CREATED: Project initialized but not yet configured.
- CONFIGURED: Project configuration is complete.
- SYNCED: Project data has been synchronized with external sources (e.g., Figma).
- BUILT: Project output files have been generated.
- ERROR: An error occurred during a project operation.
"""
+
CREATED = "created"
CONFIGURED = "configured"
SYNCED = "synced"
@@ -37,12 +41,15 @@ class FigmaFile(BaseModel):
last_synced (Optional[datetime]): Last sync timestamp.
thumbnail_url (Optional[str]): Figma thumbnail URL.
"""
+
key: str = Field(..., description="Figma file key from URL")
name: str = Field(..., description="Human-readable file name")
last_synced: Optional[datetime] = Field(None, description="Last sync timestamp")
thumbnail_url: Optional[str] = Field(None, description="Figma thumbnail URL")
class Config:
+ """Pydantic configuration."""
+
json_encoders = {datetime: lambda v: v.isoformat() if v else None}
@@ -62,6 +69,7 @@ class FigmaSource(BaseModel):
uikit_file_key (Optional[str]): Key of the UIKit reference file.
auto_sync (bool): Enable automatic sync on changes.
"""
+
team_id: Optional[str] = Field(None, description="Figma team ID (main resource)")
project_id: Optional[str] = Field(None, description="Figma project ID within team")
project_name: Optional[str] = Field(None, description="Figma project name")
@@ -95,20 +103,18 @@ class OutputConfig(BaseModel):
components_dir (str): Directory for component files.
formats (List[str]): Output formats to generate (e.g., "css", "scss", "json").
"""
+
tokens_dir: str = Field("./tokens", description="Directory for token files")
themes_dir: str = Field("./themes", description="Directory for theme files")
components_dir: str = Field("./components", description="Directory for component files")
formats: List[str] = Field(
- default_factory=lambda: ["css", "scss", "json"],
- description="Output formats to generate"
+ default_factory=lambda: ["css", "scss", "json"], description="Output formats to generate"
)
@field_validator("formats")
@classmethod
def validate_formats(cls, v):
- """
- Validate that all specified output formats are supported.
- """
+ """Validate that all specified output formats are supported."""
valid = {"css", "scss", "json", "js", "ts"}
for fmt in v:
if fmt not in valid:
@@ -117,64 +123,117 @@ class OutputConfig(BaseModel):
class ProjectConfig(BaseModel):
- """
- Main project configuration (ds.config.json).
+
+ """Main project configuration (ds.config.json).
+
+
+
+
Attributes:
+
+
name (str): Project name.
+
+
version (str): Project version.
+
+
description (Optional[str]): Project description.
+
+
figma (Optional[FigmaSource]): Figma source configuration.
+
+
skin (Optional[str]): Base skin/theme to extend (e.g., 'shadcn', 'material').
+
+
base_theme (str): Default theme variant.
+
+
output (OutputConfig): Output settings.
+
+
created_at (datetime): Timestamp of project creation.
+
+
updated_at (datetime): Timestamp of last project update.
+
+
"""
+
name: str = Field(..., description="Project name")
+
version: str = Field("1.0.0", description="Project version")
+
description: Optional[str] = Field(None, description="Project description")
# Sources
+
figma: Optional[FigmaSource] = Field(None, description="Figma source configuration")
# Design system settings
- skin: Optional[str] = Field(None, description="Base skin/theme to extend (e.g., 'shadcn', 'material')")
+
+ skin: Optional[str] = Field(
+ None, description="Base skin/theme to extend " "(e.g., 'shadcn', 'material')"
+ )
+
base_theme: str = Field("light", description="Default theme variant")
# Output configuration
+
output: OutputConfig = Field(default_factory=OutputConfig, description="Output settings")
# Metadata
+
created_at: datetime = Field(default_factory=datetime.now)
+
updated_at: datetime = Field(default_factory=datetime.now)
class Config:
+ """Pydantic configuration."""
+
json_encoders = {datetime: lambda v: v.isoformat() if v else None}
-from dss.models.component import Component
-
class DSSProject(BaseModel):
- """
- Complete DSS Project representation.
+
+ """Complete DSS Project representation.
+
+
+
+
Combines configuration with runtime state.
+
+
"""
+
config: ProjectConfig = Field(..., description="Project configuration")
+
path: Path = Field(..., description="Absolute path to project directory")
+
status: ProjectStatus = Field(ProjectStatus.CREATED, description="Current project status")
# Runtime state
+
errors: List[str] = Field(default_factory=list, description="Error messages")
+
warnings: List[str] = Field(default_factory=list, description="Warning messages")
# Extracted data (populated after sync)
+
extracted_tokens: Optional[Dict[str, Any]] = Field(None, description="Tokens from sources")
- components: List[Component] = Field(default_factory=list, description="List of extracted components")
+
+ components: List[Component] = Field(
+ default_factory=list, description="List of extracted components"
+ )
class Config:
+ """Pydantic configuration."""
+
arbitrary_types_allowed = True
+
json_encoders = {
datetime: lambda v: v.isoformat() if v else None,
Path: str,
@@ -183,25 +242,30 @@ class DSSProject(BaseModel):
@property
def config_path(self) -> Path:
"""Path to ds.config.json."""
return self.path / "ds.config.json"
@property
def tokens_path(self) -> Path:
"""Path to tokens directory."""
return self.path / self.config.output.tokens_dir
@property
def themes_path(self) -> Path:
"""Path to themes directory."""
return self.path / self.config.output.themes_dir
def to_config_dict(self) -> Dict[str, Any]:
"""Export configuration for saving to ds.config.json."""
return self.config.model_dump(mode="json", exclude_none=True)
@classmethod
def from_config_file(cls, config_path: Path) -> "DSSProject":
"""Load project from ds.config.json file."""
import json
if not config_path.exists():
@@ -211,6 +275,7 @@ class DSSProject(BaseModel):
config_data = json.load(f)
config = ProjectConfig(**config_data)
project_path = config_path.parent
return cls(
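For readers skimming the hunk above: from_config_file reduces to "read JSON, validate through Pydantic, take the config file's parent directory as the project root". A minimal self-contained sketch of that pattern (MiniProjectConfig and load_project are illustrative stand-ins, not the real dss classes):

import json
from pathlib import Path
from typing import Optional, Tuple

from pydantic import BaseModel, Field


class MiniProjectConfig(BaseModel):
    """Illustrative stand-in for ProjectConfig; the real schema has more fields."""

    name: str = Field(..., description="Project name")
    version: str = "1.0.0"
    description: Optional[str] = None


def load_project(config_path: Path) -> Tuple[MiniProjectConfig, Path]:
    # Same shape as DSSProject.from_config_file: a missing file is an error,
    # everything else is validated by the Pydantic model.
    if not config_path.exists():
        raise FileNotFoundError(f"No config at {config_path}")
    config = MiniProjectConfig(**json.loads(config_path.read_text()))
    return config, config_path.parent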
diff --git a/dss/project/sync.py b/dss/project/sync.py
index 13ed527..79297c2 100644
--- a/dss/project/sync.py
+++ b/dss/project/sync.py
@@ -1,5 +1,5 @@
"""
-DSS Core Sync
+DSS Core Sync.
Syncs the canonical DSS Figma (shadcn/ui) to the DSS core tokens.
This is the base layer that all skins and projects inherit from.
@@ -13,10 +13,9 @@ from pathlib import Path
from typing import Any, Dict, Optional
from dss.project.core import (
- DSS_FIGMA_REFERENCE,
DSS_CORE_DIR,
- DSS_CACHE_DIR,
DSS_CORE_THEMES,
+ DSS_FIGMA_REFERENCE,
ensure_dss_directories,
)
from dss.project.figma import FigmaProjectSync, FigmaStyleData
@@ -86,7 +85,7 @@ class DSSCoreSync:
"tokens": self.core_tokens_path.exists(),
"themes": self.core_themes_path.exists(),
"components": self.core_components_path.exists(),
- }
+ },
}
def sync(self, force: bool = False) -> Dict[str, Any]:
@@ -102,7 +101,7 @@ class DSSCoreSync:
if not self.figma_token:
return {
"success": False,
- "error": "FIGMA_TOKEN not configured. Set env var or pass token."
+ "error": "FIGMA_TOKEN not configured. Set env var or pass token.",
}
# Check if sync needed
@@ -150,7 +149,7 @@ class DSSCoreSync:
str(self.core_tokens_path),
str(self.core_themes_path),
str(self.core_components_path),
- ]
+ ],
}
except Exception as e:
@@ -164,7 +163,7 @@ class DSSCoreSync:
"source": "figma",
"figma_file": self.reference.uikit_file_key,
"synced_at": datetime.now().isoformat(),
- "categories": {}
+ "categories": {},
}
# Colors
@@ -211,7 +210,7 @@ class DSSCoreSync:
"version": "1.0.0",
"source": "dss-core",
"synced_at": datetime.now().isoformat(),
- "themes": {}
+ "themes": {},
}
# Start with DSS core defaults
@@ -241,7 +240,7 @@ class DSSCoreSync:
"version": "1.0.0",
"source": "dss-core",
"synced_at": datetime.now().isoformat(),
- "components": {}
+ "components": {},
}
# Start with DSS core component definitions
@@ -281,7 +280,7 @@ class DSSCoreSync:
"typography": len(styles.typography),
"effects": len(styles.effects),
"variables": len(styles.variables),
- }
+ },
}
with open(self.core_manifest_path, "w") as f:
@@ -328,6 +327,7 @@ class DSSCoreSync:
# CONVENIENCE FUNCTIONS
# =============================================================================
+
def sync_dss_core(figma_token: Optional[str] = None, force: bool = False) -> Dict[str, Any]:
"""Sync DSS core from Figma."""
sync = DSSCoreSync(figma_token=figma_token)
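The convenience wrapper keeps the sync module's contract: entry points return plain result dicts instead of raising, so callers branch on "success". A hedged sketch of that calling convention (the body below is a stand-in, not DSSCoreSync internals):

from typing import Any, Dict, Optional


def sync_core(figma_token: Optional[str] = None) -> Dict[str, Any]:
    # Mirrors the error shape shown in the hunks above: missing credentials
    # yield an error dict rather than an exception.
    if not figma_token:
        return {
            "success": False,
            "error": "FIGMA_TOKEN not configured. Set env var or pass token.",
        }
    return {"success": True, "synced": ["tokens", "themes", "components"]}


result = sync_core()
if not result["success"]:
    print(f"Sync failed: {result['error']}")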
diff --git a/dss/services/__init__.py b/dss/services/__init__.py
index bad14e4..63636c1 100644
--- a/dss/services/__init__.py
+++ b/dss/services/__init__.py
@@ -1,5 +1,5 @@
"""
-DSS Services - Core business logic for the Design System Server
+DSS Services - Core business logic for the Design System Server.
Services:
- SandboxedFS: Secure file system operations within project boundaries
@@ -7,8 +7,8 @@ Services:
- ConfigService: Project configuration loading and saving
"""
-from .sandboxed_fs import SandboxedFS
-from .project_manager import ProjectManager
from .config_service import ConfigService, DSSConfig
+from .project_manager import ProjectManager
+from .sandboxed_fs import SandboxedFS
-__all__ = ['SandboxedFS', 'ProjectManager', 'ConfigService', 'DSSConfig']
+__all__ = ["SandboxedFS", "ProjectManager", "ConfigService", "DSSConfig"]
diff --git a/dss/services/config_service.py b/dss/services/config_service.py
index b7b5bbc..212afbd 100644
--- a/dss/services/config_service.py
+++ b/dss/services/config_service.py
@@ -1,36 +1,40 @@
"""
-ConfigService - Project Configuration Management
+ConfigService - Project Configuration Management.
Handles loading, saving, and validating project-specific .dss/config.json files.
Uses Pydantic for schema validation with sensible defaults.
"""
import json
-import os
-from pathlib import Path
-from typing import Optional, List, Dict, Any
-from pydantic import BaseModel, Field
import logging
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
# === Configuration Schema ===
+
class FigmaConfig(BaseModel):
"""Figma integration settings."""
+
file_id: Optional[str] = None
team_id: Optional[str] = None
class TokensConfig(BaseModel):
"""Design token export settings."""
+
output_path: str = "./tokens"
format: str = "css" # css | scss | json | js
class AIConfig(BaseModel):
"""AI assistant behavior settings."""
+
allowed_operations: List[str] = Field(default_factory=lambda: ["read", "write"])
context_files: List[str] = Field(default_factory=lambda: ["./README.md"])
max_file_size_kb: int = 500
@@ -42,6 +46,7 @@ class DSSConfig(BaseModel):
Stored in: [project_root]/.dss/config.json
"""
+
schema_version: str = "1.0"
figma: FigmaConfig = Field(default_factory=FigmaConfig)
tokens: TokensConfig = Field(default_factory=TokensConfig)
@@ -54,6 +59,7 @@ class DSSConfig(BaseModel):
# === Config Service ===
+
class ConfigService:
"""
Service for managing project configuration files.
@@ -112,7 +118,7 @@ class ConfigService:
# Ensure .dss directory exists
config_path.parent.mkdir(parents=True, exist_ok=True)
- with open(config_path, 'w') as f:
+ with open(config_path, "w") as f:
json.dump(config.dict(), f, indent=2)
logger.info(f"Saved config to {config_path}")
diff --git a/dss/services/project_manager.py b/dss/services/project_manager.py
index f58eef0..b691cd9 100644
--- a/dss/services/project_manager.py
+++ b/dss/services/project_manager.py
@@ -1,5 +1,5 @@
"""
-ProjectManager - Project Registry Service
+ProjectManager - Project Registry Service.
Manages the server-side registry of projects, including:
- Project registration with path validation
@@ -7,10 +7,10 @@ Manages the server-side registry of projects, including:
- Project lifecycle management
"""
+import logging
import os
from pathlib import Path
-from typing import Optional, List, Dict, Any
-import logging
+from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
@@ -36,11 +36,7 @@ class ProjectManager:
logger.info("ProjectManager initialized")
def register_project(
- self,
- name: str,
- root_path: str,
- description: str = "",
- figma_file_key: str = ""
+ self, name: str, root_path: str, description: str = "", figma_file_key: str = ""
) -> Dict[str, Any]:
"""
Register a new project with validated root path.
@@ -74,19 +70,17 @@ class ProjectManager:
# Generate project ID
import uuid
+
project_id = str(uuid.uuid4())[:8]
# Create project in database
project = self.db.create(
- id=project_id,
- name=name,
- description=description,
- figma_file_key=figma_file_key
+ id=project_id, name=name, description=description, figma_file_key=figma_file_key
)
# Update with root_path (need to add this column)
self._update_root_path(project_id, root_path)
- project['root_path'] = root_path
+ project["root_path"] = root_path
# Initialize .dss folder and config if config_service available
if self.config_service:
@@ -116,13 +110,13 @@ class ProjectManager:
if not project:
return None
- root_path = project.get('root_path')
+ root_path = project.get("root_path")
if root_path and not os.path.isdir(root_path):
logger.warning(f"Project path no longer exists: {root_path}")
# Don't raise, just mark it
- project['path_valid'] = False
+ project["path_valid"] = False
else:
- project['path_valid'] = True
+ project["path_valid"] = True
return project
@@ -141,11 +135,11 @@ class ProjectManager:
# Add path validation status
for project in projects:
- root_path = project.get('root_path')
- project['path_valid'] = bool(root_path and os.path.isdir(root_path))
+ root_path = project.get("root_path")
+ project["path_valid"] = bool(root_path and os.path.isdir(root_path))
if valid_only:
- projects = [p for p in projects if p.get('path_valid', False)]
+ projects = [p for p in projects if p.get("path_valid", False)]
return projects
@@ -163,7 +157,7 @@ class ProjectManager:
projects = self.list_projects()
for project in projects:
- if project.get('root_path') == root_path:
+ if project.get("root_path") == root_path:
return project
return None
@@ -175,7 +169,7 @@ class ProjectManager:
description: str = None,
root_path: str = None,
figma_file_key: str = None,
- status: str = None
+ status: str = None,
) -> Optional[Dict[str, Any]]:
"""
Update project fields.
@@ -207,13 +201,13 @@ class ProjectManager:
# Update other fields via existing update method
updates = {}
if name is not None:
- updates['name'] = name
+ updates["name"] = name
if description is not None:
- updates['description'] = description
+ updates["description"] = description
if figma_file_key is not None:
- updates['figma_file_key'] = figma_file_key
+ updates["figma_file_key"] = figma_file_key
if status is not None:
- updates['status'] = status
+ updates["status"] = status
if updates:
self.db.update(project_id, **updates)
@@ -235,9 +229,10 @@ class ProjectManager:
if not project:
return False
- if delete_config and project.get('root_path'):
+ if delete_config and project.get("root_path"):
import shutil
- dss_path = Path(project['root_path']) / '.dss'
+
+ dss_path = Path(project["root_path"]) / ".dss"
if dss_path.exists():
shutil.rmtree(dss_path)
logger.info(f"Deleted .dss folder at {dss_path}")
@@ -247,15 +242,14 @@ class ProjectManager:
return True
def _update_root_path(self, project_id: str, root_path: str) -> None:
- """
- Update root_path in JSON storage.
- """
+ """Update root_path in JSON storage."""
self.db.update(project_id, root_path=root_path)
@staticmethod
def ensure_schema():
"""
Legacy schema migration - no longer needed with JSON storage.
+
Kept for API compatibility.
"""
logger.debug("Schema check: Using JSON storage, no migration needed")
diff --git a/dss/services/sandboxed_fs.py b/dss/services/sandboxed_fs.py
index 38297bf..13948b4 100644
--- a/dss/services/sandboxed_fs.py
+++ b/dss/services/sandboxed_fs.py
@@ -1,5 +1,5 @@
"""
-SandboxedFS - Secure File System Operations
+SandboxedFS - Secure File System Operations.
This service restricts all file operations to within a project's root directory,
preventing path traversal attacks and ensuring AI operations are safely scoped.
@@ -10,10 +10,10 @@ Security Features:
- Read/write operation logging
"""
+import logging
import os
from pathlib import Path
-from typing import List, Dict, Optional
-import logging
+from typing import Dict, List
logger = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ class SandboxedFS:
if size_kb > max_size_kb:
raise ValueError(f"File too large: {size_kb:.1f}KB > {max_size_kb}KB limit")
- content = path.read_text(encoding='utf-8')
+ content = path.read_text(encoding="utf-8")
logger.debug(f"Read file: {relative_path} ({len(content)} chars)")
return content
@@ -115,7 +115,7 @@ class SandboxedFS:
# Create parent directories if needed
path.parent.mkdir(parents=True, exist_ok=True)
- path.write_text(content, encoding='utf-8')
+ path.write_text(content, encoding="utf-8")
logger.info(f"Wrote file: {relative_path} ({len(content)} chars)")
def delete_file(self, relative_path: str) -> None:
@@ -195,6 +195,7 @@ class SandboxedFS:
Returns:
Nested dict representing file tree with sizes
"""
+
def build_tree(path: Path, depth: int) -> Dict:
if depth > max_depth:
return {"...": "truncated"}
@@ -207,13 +208,13 @@ class SandboxedFS:
for item in items:
# Skip hidden files unless requested
- if not include_hidden and item.name.startswith('.'):
+ if not include_hidden and item.name.startswith("."):
# Always include .dss config folder
- if item.name != '.dss':
+ if item.name != ".dss":
continue
# Skip common non-essential directories
- if item.name in ('node_modules', '__pycache__', '.git', 'dist', 'build'):
+ if item.name in ("node_modules", "__pycache__", ".git", "dist", "build"):
result[item.name + "/"] = {"...": "skipped"}
continue
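The module docstring promises path-traversal prevention, but the guard itself sits outside these hunks, so the following is an assumption about the usual technique rather than a copy of SandboxedFS internals: resolve the candidate path, then require it to remain under the sandbox root.

from pathlib import Path


def resolve_in_sandbox(root: Path, relative_path: str) -> Path:
    # Resolving collapses ".." segments and symlinks before the containment
    # check, which is what defeats "../../etc/passwd"-style escapes.
    candidate = (root / relative_path).resolve()
    if not candidate.is_relative_to(root.resolve()):
        raise PermissionError(f"Path escapes sandbox: {relative_path}")
    return candidate


root = Path("/tmp/project")
resolve_in_sandbox(root, "src/app.py")        # stays inside -> returned
# resolve_in_sandbox(root, "../etc/passwd")   # raises PermissionError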
diff --git a/dss/settings.py b/dss/settings.py
index 3077e31..ba56654 100644
--- a/dss/settings.py
+++ b/dss/settings.py
@@ -1,24 +1,22 @@
"""
-DSS Settings and Configuration Management
+DSS Settings and Configuration Management.
+
Includes test utilities and reset functionality
"""
-import os
import shutil
import subprocess
from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, Optional
+
from pydantic import ConfigDict
from pydantic_settings import BaseSettings
class DSSSettings(BaseSettings):
- """DSS Configuration Settings"""
- model_config = ConfigDict(
- env_file=".env",
- case_sensitive=True,
- extra="ignore"
- )
+ """DSS Configuration Settings."""
+
+ model_config = ConfigDict(env_file=".env", case_sensitive=True, extra="ignore")
# Project paths
PROJECT_ROOT: Path = Path(__file__).parent.parent
@@ -77,7 +75,7 @@ class DSSManager:
test_path: Optional[str] = None,
verbose: bool = True,
coverage: bool = False,
- markers: Optional[str] = None
+ markers: Optional[str] = None,
) -> subprocess.CompletedProcess:
"""
Run pytest test suite.
@@ -119,22 +117,18 @@ class DSSManager:
return result
def run_unit_tests(self) -> subprocess.CompletedProcess:
- """Run only unit tests"""
+ """Run only unit tests."""
return self.run_tests(markers="unit", verbose=True)
def run_integration_tests(self) -> subprocess.CompletedProcess:
- """Run only integration tests"""
+ """Run only integration tests."""
return self.run_tests(markers="integration", verbose=True)
def run_all_tests_with_coverage(self) -> subprocess.CompletedProcess:
- """Run all tests with coverage report"""
+ """Run all tests with coverage report."""
return self.run_tests(coverage=True, verbose=True)
- def reset_dss(
- self,
- keep_structure: bool = True,
- confirm: bool = True
- ) -> Dict[str, any]:
+ def reset_dss(self, keep_structure: bool = True, confirm: bool = True) -> Dict[str, any]:
"""
Reset DSS to a fresh state.
@@ -156,12 +150,7 @@ class DSSManager:
if response != "RESET":
return {"status": "cancelled", "message": "Reset cancelled"}
- results = {
- "status": "success",
- "deleted": [],
- "kept": [],
- "errors": []
- }
+ results = {"status": "success", "deleted": [], "kept": [], "errors": []}
# Delete user-created themes
themes_dir = self.dss_dir / "themes"
@@ -250,7 +239,7 @@ class DSSManager:
"database_path": str(self.settings.DATABASE_PATH),
"has_anthropic_key": bool(self.settings.ANTHROPIC_API_KEY),
"has_figma_token": bool(self.settings.FIGMA_TOKEN),
- "use_mock_apis": self.settings.USE_MOCK_APIS
+ "use_mock_apis": self.settings.USE_MOCK_APIS,
}
def check_dependencies(self) -> Dict[str, bool]:
@@ -265,6 +254,7 @@ class DSSManager:
# Pydantic for data validation
try:
import pydantic
+
dependencies["pydantic"] = True
except ImportError:
dependencies["pydantic"] = False
@@ -272,6 +262,7 @@ class DSSManager:
# FastAPI for API framework
try:
import fastapi
+
dependencies["fastapi"] = True
except ImportError:
dependencies["fastapi"] = False
@@ -279,6 +270,7 @@ class DSSManager:
# Pytest for testing
try:
import pytest
+
dependencies["pytest"] = True
except ImportError:
dependencies["pytest"] = False
@@ -286,6 +278,7 @@ class DSSManager:
# Requests for HTTP operations
try:
import requests
+
dependencies["requests"] = True
except ImportError:
dependencies["requests"] = False
@@ -293,9 +286,7 @@ class DSSManager:
# Style Dictionary for token transformation
try:
result = subprocess.run(
- ["npx", "style-dictionary", "--version"],
- capture_output=True,
- timeout=5
+ ["npx", "style-dictionary", "--version"], capture_output=True, timeout=5
)
dependencies["style-dictionary"] = result.returncode == 0
except Exception:
@@ -314,7 +305,8 @@ if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
- print("""
+ print(
+ """
DSS Settings Management
Manage DSS configuration, testing, and system health.
@@ -332,7 +324,8 @@ Management Commands:
reset Reset DSS to fresh state
info Display system information and status
check-deps Verify all dependencies are installed
- """)
+ """
+ )
sys.exit(0)
command = sys.argv[1]
@@ -358,7 +351,7 @@ Management Commands:
print("\nReset complete:")
print(f" Deleted: {len(results.get('deleted', []))} items")
print(f" Preserved: {len(results.get('kept', []))} items")
- if results.get('errors'):
+ if results.get("errors"):
print(f" Errors: {len(results['errors'])} items failed")
elif command == "info":
@@ -369,7 +362,9 @@ Management Commands:
print(f" Tests directory: {info['tests_dir']}")
print(f" Cache directory: {info['cache_dir']}")
print(f" Database path: {info['database_path']}")
- print(f" Anthropic API: {'Configured' if info['has_anthropic_key'] else 'Not configured'}")
+ print(
+ f" Anthropic API: {'Configured' if info['has_anthropic_key'] else 'Not configured'}"
+ )
print(f" Figma token: {'Configured' if info['has_figma_token'] else 'Not configured'}")
print(f" API mode: {'Mock' if info['use_mock_apis'] else 'Live'}")
diff --git a/dss/status/__init__.py b/dss/status/__init__.py
index e0f1024..7a9d968 100644
--- a/dss/status/__init__.py
+++ b/dss/status/__init__.py
@@ -1,7 +1,5 @@
-"""
-DSS Status Module - Comprehensive system status visualization
-"""
+"""DSS Status Module - Comprehensive system status visualization."""
-from .dashboard import StatusDashboard, HealthMetric
+from .dashboard import HealthMetric, StatusDashboard
__all__ = ["StatusDashboard", "HealthMetric"]
diff --git a/dss/status/dashboard.py b/dss/status/dashboard.py
index 095f13d..a73c839 100644
--- a/dss/status/dashboard.py
+++ b/dss/status/dashboard.py
@@ -1,5 +1,5 @@
"""
-DSS Status Dashboard - Comprehensive system status visualization
+DSS Status Dashboard - Comprehensive system status visualization.
Provides a beautiful ASCII art dashboard that aggregates data from:
- DSSManager (system info, dependencies)
@@ -16,9 +16,9 @@ Expert-validated design with:
"""
import shutil
-from datetime import datetime, timedelta
-from typing import Dict, List, Optional, Any
from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Any, Dict, List, Optional
# Health score weight constants (expert recommendation)
HEALTH_WEIGHT_DEPENDENCIES = 0.40
@@ -30,6 +30,7 @@ HEALTH_WEIGHT_ACTIVITY = 0.15
@dataclass
class HealthMetric:
"""Individual health check result."""
+
name: str
status: str # ok, warning, error
value: str
@@ -40,6 +41,7 @@ class HealthMetric:
@dataclass
class StatusData:
"""Aggregated status data container."""
+
version: str = ""
healthy: bool = True
health_score: int = 0
@@ -95,7 +97,8 @@ class StatusDashboard:
def _ensure_initialized(self):
"""Lazy initialization of DSS components."""
if self._settings is None:
- from dss.settings import DSSSettings, DSSManager
+ from dss.settings import DSSManager, DSSSettings
+
self._settings = DSSSettings()
self._manager = DSSManager(self._settings)
@@ -123,25 +126,22 @@ class StatusDashboard:
"components": data.components_count,
"styles": data.styles_count,
"tokens": data.tokens_count,
- "adoption_percent": data.adoption_percent
+ "adoption_percent": data.adoption_percent,
},
"activity": {
"recent": data.recent_activity,
"total": data.total_activities,
- "recent_syncs": data.recent_syncs
- },
- "quick_wins": {
- "count": data.quick_wins_count,
- "items": data.quick_wins
+ "recent_syncs": data.recent_syncs,
},
+ "quick_wins": {"count": data.quick_wins_count, "items": data.quick_wins},
"configuration": {
"project_root": data.project_root,
"database": data.database_path,
"cache": data.cache_dir,
"figma_configured": data.figma_configured,
- "anthropic_configured": data.anthropic_configured
+ "anthropic_configured": data.anthropic_configured,
},
- "recommendations": data.recommendations
+ "recommendations": data.recommendations,
}
def _gather_data(self) -> StatusData:
@@ -152,6 +152,7 @@ class StatusDashboard:
# Version and timestamp
from dss import __version__
+
data.version = __version__
data.timestamp = datetime.now().isoformat()
@@ -167,30 +168,36 @@ class StatusDashboard:
# Dependencies health
deps = self._manager.check_dependencies()
for dep, ok in deps.items():
- data.health_metrics.append(HealthMetric(
- name=dep,
- status="ok" if ok else "error",
- value="Installed" if ok else "Missing",
- category="dependency"
- ))
+ data.health_metrics.append(
+ HealthMetric(
+ name=dep,
+ status="ok" if ok else "error",
+ value="Installed" if ok else "Missing",
+ category="dependency",
+ )
+ )
# Integration health
- data.health_metrics.append(HealthMetric(
- name="Figma",
- status="ok" if data.figma_configured else "warning",
- value="Connected" if data.figma_configured else "No token",
- category="integration"
- ))
- data.health_metrics.append(HealthMetric(
- name="Anthropic",
- status="ok" if data.anthropic_configured else "warning",
- value="Connected" if data.anthropic_configured else "No key",
- category="integration"
- ))
+ data.health_metrics.append(
+ HealthMetric(
+ name="Figma",
+ status="ok" if data.figma_configured else "warning",
+ value="Connected" if data.figma_configured else "No token",
+ category="integration",
+ )
+ )
+ data.health_metrics.append(
+ HealthMetric(
+ name="Anthropic",
+ status="ok" if data.anthropic_configured else "warning",
+ value="Connected" if data.anthropic_configured else "No key",
+ category="integration",
+ )
+ )
# Database stats
try:
- from dss.storage.json_store import get_stats, ActivityLog, SyncHistory, Projects, Components
+ from dss.storage.json_store import ActivityLog, Projects, SyncHistory, get_stats
stats = get_stats()
data.projects_count = stats.get("projects", 0)
@@ -199,12 +206,14 @@ class StatusDashboard:
# Database size metric
db_size = stats.get("db_size_mb", 0)
- data.health_metrics.append(HealthMetric(
- name="Database",
- status="ok" if db_size < 100 else "warning",
- value=f"{db_size} MB",
- category="database"
- ))
+ data.health_metrics.append(
+ HealthMetric(
+ name="Database",
+ status="ok" if db_size < 100 else "warning",
+ value=f"{db_size} MB",
+ category="database",
+ )
+ )
# Projects
projects = Projects.list()
@@ -218,7 +227,7 @@ class StatusDashboard:
"action": a.get("action", ""),
"description": a.get("description", ""),
"created_at": a.get("created_at", ""),
- "category": a.get("category", "")
+ "category": a.get("category", ""),
}
for a in activities
]
@@ -231,18 +240,20 @@ class StatusDashboard:
"sync_type": s.get("sync_type", ""),
"status": s.get("status", ""),
"items_synced": s.get("items_synced", 0),
- "started_at": s.get("started_at", "")
+ "started_at": s.get("started_at", ""),
}
for s in syncs
]
except Exception as e:
- data.health_metrics.append(HealthMetric(
- name="Database",
- status="error",
- value=f"Error: {str(e)[:30]}",
- category="database"
- ))
+ data.health_metrics.append(
+ HealthMetric(
+ name="Database",
+ status="error",
+ value=f"Error: {str(e)[:30]}",
+ category="database",
+ )
+ )
# Calculate health score
data.health_score = self._calculate_health_score(data)
@@ -289,10 +300,10 @@ class StatusDashboard:
# Weighted score using named constants
score = (
- deps_ok * HEALTH_WEIGHT_DEPENDENCIES +
- int_ok * HEALTH_WEIGHT_INTEGRATIONS +
- db_ok * HEALTH_WEIGHT_DATABASE +
- activity_ok * HEALTH_WEIGHT_ACTIVITY
+ deps_ok * HEALTH_WEIGHT_DEPENDENCIES
+ + int_ok * HEALTH_WEIGHT_INTEGRATIONS
+ + db_ok * HEALTH_WEIGHT_DATABASE
+ + activity_ok * HEALTH_WEIGHT_ACTIVITY
) * 100
return int(score)
@@ -354,7 +365,11 @@ class StatusDashboard:
def _render_header(self, data: StatusData, width: int) -> str:
"""Render the header section."""
health_icon = "\u2705" if data.healthy else "\u26a0\ufe0f"
- health_text = f"{health_icon} Healthy ({data.health_score}%)" if data.healthy else f"{health_icon} Issues ({data.health_score}%)"
+ health_text = (
+ f"{health_icon} Healthy ({data.health_score}%)"
+ if data.healthy
+ else f"{health_icon} Issues ({data.health_score}%)"
+ )
lines = []
lines.append("\u2554" + "\u2550" * width + "\u2557")
@@ -412,22 +427,40 @@ class StatusDashboard:
lines.append("\u2502" + " \U0001f4ca DESIGN SYSTEM METRICS".ljust(width) + "\u2502")
lines.append("\u251c" + "\u2500" * width + "\u2524")
- lines.append("\u2502" + f" Projects: {data.projects_count} total ({data.projects_active} active)".ljust(width) + "\u2502")
- lines.append("\u2502" + f" Components: {data.components_count} tracked".ljust(width) + "\u2502")
- lines.append("\u2502" + f" Styles: {data.styles_count} defined".ljust(width) + "\u2502")
+ lines.append(
+ "\u2502"
+ + f" Projects: {data.projects_count} total ({data.projects_active} active)".ljust(
+ width
+ )
+ + "\u2502"
+ )
+ lines.append(
+ "\u2502" + f" Components: {data.components_count} tracked".ljust(width) + "\u2502"
+ )
+ lines.append(
+ "\u2502" + f" Styles: {data.styles_count} defined".ljust(width) + "\u2502"
+ )
# Tokens
if data.tokens_count > 0:
- lines.append("\u2502" + f" Tokens: {data.tokens_count} extracted".ljust(width) + "\u2502")
+ lines.append(
+ "\u2502" + f" Tokens: {data.tokens_count} extracted".ljust(width) + "\u2502"
+ )
else:
- lines.append("\u2502" + " Tokens: -- (run dss_extract_tokens)".ljust(width) + "\u2502")
+ lines.append(
+ "\u2502" + " Tokens: -- (run dss_extract_tokens)".ljust(width) + "\u2502"
+ )
# Adoption progress bar
if data.adoption_percent > 0:
bar_width = 30
filled = int(bar_width * data.adoption_percent / 100)
bar = "\u2588" * filled + "\u2591" * (bar_width - filled)
- lines.append("\u2502" + f" Adoption: [{bar}] {data.adoption_percent}%".ljust(width) + "\u2502")
+ lines.append(
+ "\u2502"
+ + f" Adoption: [{bar}] {data.adoption_percent}%".ljust(width)
+ + "\u2502"
+ )
lines.append("\u2514" + "\u2500" * width + "\u2518")
@@ -451,7 +484,9 @@ class StatusDashboard:
lines.append("\u2502" + " No recent activity".ljust(width) + "\u2502")
lines.append("\u2502" + "".ljust(width) + "\u2502")
- lines.append("\u2502" + f" Total activities: {data.total_activities}".ljust(width) + "\u2502")
+ lines.append(
+ "\u2502" + f" Total activities: {data.total_activities}".ljust(width) + "\u2502"
+ )
lines.append("\u2514" + "\u2500" * width + "\u2518")
@@ -471,7 +506,7 @@ class StatusDashboard:
line = f" {i}. {rec}"
# Truncate if too long
if len(line) > width - 1:
- line = line[:width-4] + "..."
+ line = line[: width - 4] + "..."
lines.append("\u2502" + line.ljust(width) + "\u2502")
lines.append("\u2514" + "\u2500" * width + "\u2518")
@@ -493,6 +528,7 @@ if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "--json":
import json
+
print(json.dumps(dashboard.get_status(), indent=2))
else:
print(dashboard.render_text())
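The health score is the part of the dashboard worth restating numerically: four 0.0-1.0 ratios, weighted and scaled to 0-100. Only two of the four weights appear in these hunks, so the middle two values below are assumptions that merely sum correctly:

HEALTH_WEIGHT_DEPENDENCIES = 0.40  # from the diff
HEALTH_WEIGHT_INTEGRATIONS = 0.25  # assumed
HEALTH_WEIGHT_DATABASE = 0.20      # assumed
HEALTH_WEIGHT_ACTIVITY = 0.15      # from the diff


def health_score(deps_ok: float, int_ok: float, db_ok: float, activity_ok: float) -> int:
    # Same weighted sum as StatusDashboard._calculate_health_score.
    score = (
        deps_ok * HEALTH_WEIGHT_DEPENDENCIES
        + int_ok * HEALTH_WEIGHT_INTEGRATIONS
        + db_ok * HEALTH_WEIGHT_DATABASE
        + activity_ok * HEALTH_WEIGHT_ACTIVITY
    ) * 100
    return int(score)


print(health_score(1.0, 0.5, 1.0, 1.0))  # 87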
diff --git a/dss/storage/json_store.py b/dss/storage/json_store.py
index 5e3d81c..9759c24 100644
--- a/dss/storage/json_store.py
+++ b/dss/storage/json_store.py
@@ -1,5 +1,5 @@
"""
-DSS JSON Storage Layer
+DSS JSON Storage Layer.
Pure JSON file-based storage following DSS canonical structure.
No SQLite - everything is JSON for git-friendly diffs.
@@ -11,17 +11,15 @@ Structure:
└── teams/ # Team definitions
"""
+import fcntl
+import hashlib
import json
import time
-import hashlib
-import fcntl
-from pathlib import Path
-from datetime import datetime, date
-from typing import Optional, Dict, List, Any, Union
-from contextlib import contextmanager
-from dataclasses import dataclass, asdict, field
import uuid
-import os
+from contextlib import contextmanager
+from datetime import date, datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Union
# Base paths
DATA_DIR = Path(__file__).parent.parent.parent / ".dss" / "data"
@@ -30,19 +28,27 @@ PROJECTS_DIR = DATA_DIR / "projects"
TEAMS_DIR = DATA_DIR / "teams"
# Ensure directories exist
-for d in [DATA_DIR, SYSTEM_DIR, SYSTEM_DIR / "cache", SYSTEM_DIR / "activity", PROJECTS_DIR, TEAMS_DIR]:
+for d in [
+ DATA_DIR,
+ SYSTEM_DIR,
+ SYSTEM_DIR / "cache",
+ SYSTEM_DIR / "activity",
+ PROJECTS_DIR,
+ TEAMS_DIR,
+]:
d.mkdir(parents=True, exist_ok=True)
# === File Locking Utilities ===
+
@contextmanager
def file_lock(path: Path, exclusive: bool = True):
"""Context manager for file locking."""
lock_path = path.with_suffix(path.suffix + ".lock")
lock_path.parent.mkdir(parents=True, exist_ok=True)
- with open(lock_path, 'w') as lock_file:
+ with open(lock_path, "w") as lock_file:
try:
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
yield
@@ -75,8 +81,8 @@ def append_jsonl(path: Path, record: Dict) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with file_lock(path, exclusive=True):
- with open(path, 'a') as f:
- f.write(json.dumps(record, default=str) + '\n')
+ with open(path, "a") as f:
+ f.write(json.dumps(record, default=str) + "\n")
def read_jsonl(path: Path, limit: int = None, offset: int = 0) -> List[Dict]:
@@ -86,7 +92,7 @@ def read_jsonl(path: Path, limit: int = None, offset: int = 0) -> List[Dict]:
records = []
with file_lock(path, exclusive=False):
- with open(path, 'r') as f:
+ with open(path, "r") as f:
lines = f.readlines()
# Reverse for newest first
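file_lock and append_jsonl are the storage layer's whole concurrency story: an advisory fcntl lock on a sidecar .lock file (exclusive for writers, shared for readers) around plain JSON-lines appends. A self-contained sketch of the pair, noting that fcntl is Unix-only:

import fcntl
import json
from contextlib import contextmanager
from pathlib import Path
from typing import Dict


@contextmanager
def file_lock(path: Path, exclusive: bool = True):
    # The lock lives on a sidecar file so readers never contend with the
    # data file itself; flock also releases when the fd closes.
    lock_path = path.with_suffix(path.suffix + ".lock")
    lock_path.parent.mkdir(parents=True, exist_ok=True)
    with open(lock_path, "w") as lock_file:
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
        try:
            yield
        finally:
            fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)


def append_jsonl(path: Path, record: Dict) -> None:
    # One JSON object per line keeps appends cheap and diffs git-friendly.
    with file_lock(path, exclusive=True):
        with open(path, "a") as f:
            f.write(json.dumps(record, default=str) + "\n")


append_jsonl(Path("/tmp/demo.jsonl"), {"action": "project_created"})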
@@ -107,6 +113,7 @@ def read_jsonl(path: Path, limit: int = None, offset: int = 0) -> List[Dict]:
# === Cache (TTL-based) ===
+
class Cache:
"""TTL-based cache using JSON files."""
@@ -116,7 +123,7 @@ class Cache:
@staticmethod
def _key_to_path(key: str) -> Path:
"""Convert cache key to file path."""
- key_hash = hashlib.md5(key.encode()).hexdigest()
+ key_hash = hashlib.sha256(key.encode()).hexdigest()
return Cache.CACHE_DIR / f"{key_hash}.json"
@staticmethod
@@ -127,7 +134,7 @@ class Cache:
"key": key,
"value": value,
"created_at": int(time.time()),
- "expires_at": int(time.time()) + ttl
+ "expires_at": int(time.time()) + ttl,
}
write_json(Cache._key_to_path(key), data)
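Two details in the Cache hunks deserve a second look: keys are hashed (now with sha256 rather than md5) so arbitrary strings map to safe filenames, and every entry carries its own expires_at. A runnable sketch of both halves; the expiry check in cache_get is an assumption, since Cache.get is outside this hunk:

import hashlib
import json
import time
from pathlib import Path
from typing import Any, Optional

CACHE_DIR = Path("/tmp/dss-cache-demo")  # stand-in for SYSTEM_DIR / "cache"
CACHE_DIR.mkdir(parents=True, exist_ok=True)


def _key_to_path(key: str) -> Path:
    return CACHE_DIR / f"{hashlib.sha256(key.encode()).hexdigest()}.json"


def cache_set(key: str, value: Any, ttl: int = 3600) -> None:
    now = int(time.time())
    data = {"key": key, "value": value, "created_at": now, "expires_at": now + ttl}
    _key_to_path(key).write_text(json.dumps(data))


def cache_get(key: str) -> Optional[Any]:
    path = _key_to_path(key)
    if not path.exists():
        return None
    data = json.loads(path.read_text())
    return data["value"] if data["expires_at"] > int(time.time()) else None


cache_set("figma:file:abc123", {"name": "UIKit"}, ttl=60)
print(cache_get("figma:file:abc123"))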
@@ -174,6 +181,7 @@ class Cache:
# === Projects ===
+
class Projects:
"""Project CRUD operations using JSON files."""
@@ -205,7 +213,7 @@ class Projects:
"figma_file_key": figma_file_key,
"status": "active",
"created_at": now,
- "updated_at": now
+ "updated_at": now,
}
write_json(Projects._manifest_path(id), manifest)
@@ -264,12 +272,14 @@ class Projects:
archived_dir.mkdir(exist_ok=True)
import shutil
+
shutil.move(str(project_dir), str(archived_dir / f"{id}_{int(time.time())}"))
return True
# === Components ===
+
class Components:
"""Component operations using JSON files."""
@@ -304,7 +314,7 @@ class Components:
"variants": comp.get("variants", []),
"code_generated": comp.get("code_generated", False),
"created_at": existing.get("created_at", now) if existing else now,
- "updated_at": now
+ "updated_at": now,
}
write_json(Components._component_path(project_id, comp_id), component_data)
@@ -347,6 +357,7 @@ class Components:
# === Tokens ===
+
class Tokens:
"""Token operations following DSS canonical structure."""
@@ -380,11 +391,10 @@ class Tokens:
def set_by_type(project_id: str, token_type: str, tokens: Dict) -> None:
"""Set tokens of a specific type."""
path = Tokens._tokens_dir(project_id) / f"{token_type}.json"
- write_json(path, {
- "$type": token_type,
- "updated_at": datetime.utcnow().isoformat(),
- "tokens": tokens
- })
+ write_json(
+ path,
+ {"$type": token_type, "updated_at": datetime.utcnow().isoformat(), "tokens": tokens},
+ )
@staticmethod
def merge(project_id: str, token_type: str, new_tokens: Dict, strategy: str = "LAST") -> Dict:
@@ -414,6 +424,7 @@ class Tokens:
# === Styles ===
+
class Styles:
"""Style operations."""
@@ -438,11 +449,9 @@ class Styles:
style["created_at"] = now
existing_styles[style["id"]] = style
- write_json(path, {
- "$type": style_type,
- "updated_at": now,
- "styles": list(existing_styles.values())
- })
+ write_json(
+ path, {"$type": style_type, "updated_at": now, "styles": list(existing_styles.values())}
+ )
return len(styles)
@@ -455,7 +464,9 @@ class Styles:
if not styles_dir.exists():
return []
- types_to_check = [style_type.lower()] if style_type else [t.lower() for t in Styles.STYLE_TYPES]
+ types_to_check = (
+ [style_type.lower()] if style_type else [t.lower() for t in Styles.STYLE_TYPES]
+ )
for st in types_to_check:
path = styles_dir / f"{st}.json"
@@ -468,6 +479,7 @@ class Styles:
# === Sync History ===
+
class SyncHistory:
"""Sync history using JSON Lines."""
@@ -490,15 +502,21 @@ class SyncHistory:
"items_synced": 0,
"changes": None,
"error_message": None,
- "duration_ms": None
+ "duration_ms": None,
}
append_jsonl(SyncHistory._history_path(project_id), record)
return sync_id
@staticmethod
- def complete(project_id: str, sync_id: str, status: str, items_synced: int = 0,
- changes: Dict = None, error: str = None) -> None:
+ def complete(
+ project_id: str,
+ sync_id: str,
+ status: str,
+ items_synced: int = 0,
+ changes: Dict = None,
+ error: str = None,
+ ) -> None:
"""Complete a sync."""
path = SyncHistory._history_path(project_id)
records = read_jsonl(path, limit=1000)
@@ -522,7 +540,7 @@ class SyncHistory:
"items_synced": items_synced,
"changes": changes,
"error_message": error,
- "duration_ms": duration_ms
+ "duration_ms": duration_ms,
}
append_jsonl(path, completion)
break
@@ -547,16 +565,22 @@ class SyncHistory:
# === Activity Log ===
+
class ActivityLog:
"""Activity logging using daily JSON Lines files."""
CATEGORIES = {
- 'design_system': ['extract_tokens', 'extract_components', 'sync_tokens', 'validate_tokens'],
- 'code': ['analyze_components', 'find_inline_styles', 'generate_code', 'get_quick_wins'],
- 'configuration': ['config_updated', 'figma_token_updated', 'mode_changed', 'service_configured'],
- 'project': ['project_created', 'project_updated', 'project_deleted'],
- 'team': ['team_context_changed', 'project_context_changed'],
- 'storybook': ['scan_storybook', 'generate_story', 'generate_theme']
+ "design_system": ["extract_tokens", "extract_components", "sync_tokens", "validate_tokens"],
+ "code": ["analyze_components", "find_inline_styles", "generate_code", "get_quick_wins"],
+ "configuration": [
+ "config_updated",
+ "figma_token_updated",
+ "mode_changed",
+ "service_configured",
+ ],
+ "project": ["project_created", "project_updated", "project_deleted"],
+ "team": ["team_context_changed", "project_context_changed"],
+ "storybook": ["scan_storybook", "generate_story", "generate_theme"],
}
@staticmethod
@@ -565,20 +589,22 @@ class ActivityLog:
return SYSTEM_DIR / "activity" / f"{day.isoformat()}.jsonl"
@staticmethod
- def log(action: str,
- entity_type: str = None,
- entity_id: str = None,
- entity_name: str = None,
- project_id: str = None,
- user_id: str = None,
- user_name: str = None,
- team_context: str = None,
- description: str = None,
- category: str = None,
- severity: str = 'info',
- details: Dict = None,
- ip_address: str = None,
- user_agent: str = None) -> None:
+ def log(
+ action: str,
+ entity_type: str = None,
+ entity_id: str = None,
+ entity_name: str = None,
+ project_id: str = None,
+ user_id: str = None,
+ user_name: str = None,
+ team_context: str = None,
+ description: str = None,
+ category: str = None,
+ severity: str = "info",
+ details: Dict = None,
+ ip_address: str = None,
+ user_agent: str = None,
+ ) -> None:
"""Log an activity."""
# Auto-detect category
if not category:
@@ -586,11 +612,13 @@ class ActivityLog:
if action in actions:
category = cat
break
- category = category or 'other'
+ category = category or "other"
# Generate description if not provided
if not description:
- entity_str = f"{entity_type} '{entity_name}'" if entity_name else (entity_type or "item")
+ entity_str = (
+ f"{entity_type} '{entity_name}'" if entity_name else (entity_type or "item")
+ )
description = f"{action.replace('_', ' ').title()} {entity_str}"
record = {
@@ -609,19 +637,21 @@ class ActivityLog:
"description": description,
"details": details,
"ip_address": ip_address,
- "user_agent": user_agent
+ "user_agent": user_agent,
}
append_jsonl(ActivityLog._log_path(), record)
@staticmethod
- def recent(project_id: str = None, limit: int = 50, offset: int = 0, days: int = 7) -> List[Dict]:
+ def recent(
+ project_id: str = None, limit: int = 50, offset: int = 0, days: int = 7
+ ) -> List[Dict]:
"""Get recent activity."""
all_records = []
# Read from recent days
for i in range(days):
- day = date.today() - __import__('datetime').timedelta(days=i)
+ day = date.today() - __import__("datetime").timedelta(days=i)
records = read_jsonl(ActivityLog._log_path(day), limit=limit * 2)
if project_id:
@@ -632,17 +662,24 @@ class ActivityLog:
# Sort by timestamp descending
all_records.sort(key=lambda r: r.get("timestamp", ""), reverse=True)
- return all_records[offset:offset + limit]
+ return all_records[offset : offset + limit]
@staticmethod
- def search(project_id: str = None, user_id: str = None, action: str = None,
- category: str = None, severity: str = None, days: int = 30,
- limit: int = 100, offset: int = 0) -> List[Dict]:
+ def search(
+ project_id: str = None,
+ user_id: str = None,
+ action: str = None,
+ category: str = None,
+ severity: str = None,
+ days: int = 30,
+ limit: int = 100,
+ offset: int = 0,
+ ) -> List[Dict]:
"""Search activity logs."""
all_records = []
for i in range(days):
- day = date.today() - __import__('datetime').timedelta(days=i)
+ day = date.today() - __import__("datetime").timedelta(days=i)
records = read_jsonl(ActivityLog._log_path(day))
for r in records:
@@ -659,11 +696,12 @@ class ActivityLog:
all_records.append(r)
all_records.sort(key=lambda r: r.get("timestamp", ""), reverse=True)
- return all_records[offset:offset + limit]
+ return all_records[offset : offset + limit]
# === Teams ===
+
class Teams:
"""Team management using JSON files."""
@@ -687,7 +725,7 @@ class Teams:
"name": name,
"description": description,
"settings": {},
- "created_at": now
+ "created_at": now,
}
write_json(Teams._manifest_path(id), manifest)
@@ -729,11 +767,9 @@ class Teams:
m["updated_at"] = datetime.utcnow().isoformat()
break
else:
- members.append({
- "user_id": user_id,
- "role": role,
- "joined_at": datetime.utcnow().isoformat()
- })
+ members.append(
+ {"user_id": user_id, "role": role, "joined_at": datetime.utcnow().isoformat()}
+ )
data["members"] = members
write_json(path, data)
@@ -762,7 +798,7 @@ class Teams:
data["projects"][project_id] = {
"access_level": access_level,
- "granted_at": datetime.utcnow().isoformat()
+ "granted_at": datetime.utcnow().isoformat(),
}
write_json(path, data)
@@ -770,6 +806,7 @@ class Teams:
# === Figma Files ===
+
class FigmaFiles:
"""Figma file management."""
@@ -794,7 +831,7 @@ class FigmaFiles:
"file_key": file_key,
"sync_status": "pending",
"last_synced": None,
- "created_at": now
+ "created_at": now,
}
data["files"].append(new_file)
@@ -827,6 +864,7 @@ class FigmaFiles:
# === Metrics ===
+
class CodeMetrics:
"""Code metrics storage."""
@@ -863,8 +901,14 @@ class TestResults:
return PROJECTS_DIR / project_id / "metrics" / "tests.json"
@staticmethod
- def record(project_id: str, component_id: str, test_type: str,
- passed: bool, score: float = None, failures: List[str] = None) -> Dict:
+ def record(
+ project_id: str,
+ component_id: str,
+ test_type: str,
+ passed: bool,
+ score: float = None,
+ failures: List[str] = None,
+ ) -> Dict:
"""Record test result."""
path = TestResults._results_path(project_id)
data = read_json(path, {"results": []})
@@ -876,7 +920,7 @@ class TestResults:
"passed": passed,
"score": score,
"failures": failures or [],
- "run_at": datetime.utcnow().isoformat()
+ "run_at": datetime.utcnow().isoformat(),
}
data["results"].append(result)
@@ -907,9 +951,16 @@ class TokenDrift:
return PROJECTS_DIR / project_id / "metrics" / "drift.json"
@staticmethod
- def record(project_id: str, component_id: str, property_name: str,
- hardcoded_value: str, file_path: str, line_number: int,
- severity: str = "warning", suggested_token: str = None) -> Dict:
+ def record(
+ project_id: str,
+ component_id: str,
+ property_name: str,
+ hardcoded_value: str,
+ file_path: str,
+ line_number: int,
+ severity: str = "warning",
+ suggested_token: str = None,
+ ) -> Dict:
"""Record token drift."""
path = TokenDrift._drift_path(project_id)
data = read_json(path, {"drift": []})
@@ -924,7 +975,7 @@ class TokenDrift:
"file_path": file_path,
"line_number": line_number,
"status": "pending",
- "detected_at": datetime.utcnow().isoformat()
+ "detected_at": datetime.utcnow().isoformat(),
}
data["drift"].append(drift)
@@ -962,6 +1013,7 @@ class TokenDrift:
# === Integrations ===
+
class Integrations:
"""Project integration configuration storage."""
@@ -990,8 +1042,9 @@ class Integrations:
return None
@staticmethod
- def upsert(project_id: str, user_id: int, integration_type: str,
- config: str, enabled: bool = True) -> Dict:
+ def upsert(
+ project_id: str, user_id: int, integration_type: str, config: str, enabled: bool = True
+ ) -> Dict:
"""Create or update integration."""
path = Integrations._integrations_path(project_id)
data = read_json(path, {"integrations": []})
@@ -1016,7 +1069,7 @@ class Integrations:
"enabled": enabled,
"created_at": now,
"updated_at": now,
- "last_used_at": None
+ "last_used_at": None,
}
data["integrations"].append(new_integration)
@@ -1024,8 +1077,13 @@ class Integrations:
return new_integration
@staticmethod
- def update(project_id: str, user_id: int, integration_type: str,
- config: str = None, enabled: bool = None) -> Optional[Dict]:
+ def update(
+ project_id: str,
+ user_id: int,
+ integration_type: str,
+ config: str = None,
+ enabled: bool = None,
+ ) -> Optional[Dict]:
"""Update integration fields."""
path = Integrations._integrations_path(project_id)
data = read_json(path, {"integrations": []})
@@ -1050,7 +1108,8 @@ class Integrations:
original_len = len(data["integrations"])
data["integrations"] = [
- i for i in data["integrations"]
+ i
+ for i in data["integrations"]
if not (i.get("user_id") == user_id and i.get("integration_type") == integration_type)
]
@@ -1071,10 +1130,7 @@ class IntegrationHealth:
def list_all() -> List[Dict]:
"""List all integration health status."""
data = read_json(IntegrationHealth._health_path(), {"health": {}})
- return [
- {"integration_type": k, **v}
- for k, v in data.get("health", {}).items()
- ]
+ return [{"integration_type": k, **v} for k, v in data.get("health", {}).items()]
@staticmethod
def get(integration_type: str) -> Optional[Dict]:
@@ -1085,8 +1141,12 @@ class IntegrationHealth:
return None
@staticmethod
- def update(integration_type: str, is_healthy: bool = True,
- failure_count: int = None, circuit_open_until: str = None) -> Dict:
+ def update(
+ integration_type: str,
+ is_healthy: bool = True,
+ failure_count: int = None,
+ circuit_open_until: str = None,
+ ) -> Dict:
"""Update integration health."""
path = IntegrationHealth._health_path()
data = read_json(path, {"health": {}})
@@ -1097,7 +1157,7 @@ class IntegrationHealth:
"failure_count": 0,
"last_failure_at": None,
"last_success_at": None,
- "circuit_open_until": None
+ "circuit_open_until": None,
}
now = datetime.utcnow().isoformat()
@@ -1120,13 +1180,20 @@ class IntegrationHealth:
# === Stats ===
+
def get_stats() -> Dict:
"""Get storage statistics."""
stats = {
- "projects": len(list(PROJECTS_DIR.iterdir())) - 1 if PROJECTS_DIR.exists() else 0, # -1 for _archived
+ "projects": len(list(PROJECTS_DIR.iterdir())) - 1
+ if PROJECTS_DIR.exists()
+ else 0, # -1 for _archived
"teams": len(list(TEAMS_DIR.iterdir())) if TEAMS_DIR.exists() else 0,
- "cache_files": len(list((SYSTEM_DIR / "cache").glob("*.json"))) if (SYSTEM_DIR / "cache").exists() else 0,
- "activity_days": len(list((SYSTEM_DIR / "activity").glob("*.jsonl"))) if (SYSTEM_DIR / "activity").exists() else 0,
+ "cache_files": len(list((SYSTEM_DIR / "cache").glob("*.json")))
+ if (SYSTEM_DIR / "cache").exists()
+ else 0,
+ "activity_days": len(list((SYSTEM_DIR / "activity").glob("*.jsonl")))
+ if (SYSTEM_DIR / "activity").exists()
+ else 0,
}
# Calculate total size
@@ -1142,9 +1209,17 @@ def get_stats() -> Dict:
# === Initialization ===
+
def init_storage() -> None:
"""Initialize storage directories."""
- for d in [DATA_DIR, SYSTEM_DIR, SYSTEM_DIR / "cache", SYSTEM_DIR / "activity", PROJECTS_DIR, TEAMS_DIR]:
+ for d in [
+ DATA_DIR,
+ SYSTEM_DIR,
+ SYSTEM_DIR / "cache",
+ SYSTEM_DIR / "activity",
+ PROJECTS_DIR,
+ TEAMS_DIR,
+ ]:
d.mkdir(parents=True, exist_ok=True)
print(f"[Storage] JSON storage initialized at {DATA_DIR}")
@@ -1171,7 +1246,7 @@ if __name__ == "__main__":
elif cmd == "cache-test":
Cache.set("test_key", {"foo": "bar"}, ttl=60)
- print(f"Set: test_key")
+ print("Set: test_key")
print(f"Get: {Cache.get('test_key')}")
elif cmd == "clear-cache":
diff --git a/dss/storybook/__init__.py b/dss/storybook/__init__.py
index f65c672..ebc1e1b 100644
--- a/dss/storybook/__init__.py
+++ b/dss/storybook/__init__.py
@@ -1,5 +1,5 @@
"""
-DSS Storybook Integration Module
+DSS Storybook Integration Module.
Provides tools for:
- Scanning existing Storybook stories
@@ -9,18 +9,18 @@ Provides tools for:
- Host configuration management (uses settings host, not localhost)
"""
-from .scanner import StorybookScanner, StoryInfo, StorybookConfig
-from .generator import StoryGenerator, StoryTemplate
-from .theme import ThemeGenerator, StorybookTheme
from .config import (
- get_storybook_host,
- get_storybook_port,
- get_storybook_url,
create_storybook_config,
generate_storybook_start_command,
- write_storybook_config_file,
+ get_storybook_host,
+ get_storybook_port,
get_storybook_status,
+ get_storybook_url,
+ write_storybook_config_file,
)
+from .generator import StoryGenerator, StoryTemplate
+from .scanner import StorybookConfig, StorybookScanner, StoryInfo
+from .theme import StorybookTheme, ThemeGenerator
__all__ = [
# Scanner
diff --git a/dss/storybook/config.py b/dss/storybook/config.py
index 2cb263c..04f277f 100644
--- a/dss/storybook/config.py
+++ b/dss/storybook/config.py
@@ -1,13 +1,14 @@
"""
-Storybook Configuration Management
+Storybook Configuration Management.
Ensures Storybook uses project host settings instead of localhost.
"""
-import os
import json
+import os
from pathlib import Path
-from typing import Dict, Any, Optional
+from typing import Any, Dict, Optional
+
from dss.settings import settings
@@ -21,7 +22,9 @@ def get_storybook_host() -> str:
3. SERVER_HOST from settings
4. Fall back to 0.0.0.0
"""
- return os.getenv("STORYBOOK_HOST") or settings.STORYBOOK_HOST or settings.SERVER_HOST or "0.0.0.0"
+ return (
+ os.getenv("STORYBOOK_HOST") or settings.STORYBOOK_HOST or settings.SERVER_HOST or "0.0.0.0"
+ )
def get_storybook_port() -> int:
@@ -55,28 +58,23 @@ def create_storybook_config(project_path: Path) -> Dict[str, Any]:
config = {
"stories": [
"../src/**/*.stories.@(js|jsx|ts|tsx|mdx)",
- "../components/**/*.stories.@(js|jsx|ts|tsx|mdx)"
+ "../components/**/*.stories.@(js|jsx|ts|tsx|mdx)",
],
"addons": [
"@storybook/addon-links",
"@storybook/addon-essentials",
- "@storybook/addon-interactions"
+ "@storybook/addon-interactions",
],
- "framework": {
- "name": "@storybook/react-vite",
- "options": {}
- },
- "core": {
- "builder": "@storybook/builder-vite"
- },
+ "framework": {"name": "@storybook/react-vite", "options": {}},
+ "core": {"builder": "@storybook/builder-vite"},
"viteFinal": {
"server": {
"host": host,
"port": port,
"strictPort": False,
- "open": settings.STORYBOOK_AUTO_OPEN
+ "open": settings.STORYBOOK_AUTO_OPEN,
}
- }
+ },
}
return config
@@ -218,5 +216,5 @@ def get_storybook_status(project_path: Path) -> Dict[str, Any]:
"url": get_storybook_url(project_path),
"host": get_storybook_host(),
"port": get_storybook_port(),
- "start_command": generate_storybook_start_command(project_path)
+ "start_command": generate_storybook_start_command(project_path),
}
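The host lookup above is a four-step fallback chain; binding to 0.0.0.0 as the last resort is what lets Storybook serve on all interfaces instead of localhost only. A standalone restaging (FakeSettings stands in for dss.settings.settings):

import os


class FakeSettings:
    """Stand-in with the two fields the chain consults."""

    STORYBOOK_HOST = ""
    SERVER_HOST = "192.168.1.10"


settings = FakeSettings()


def get_storybook_host() -> str:
    # Priority: env var, then settings.STORYBOOK_HOST, then SERVER_HOST,
    # finally 0.0.0.0 -- empty strings are falsy, so `or` walks the chain.
    return (
        os.getenv("STORYBOOK_HOST") or settings.STORYBOOK_HOST or settings.SERVER_HOST or "0.0.0.0"
    )


print(get_storybook_host())  # "192.168.1.10" unless STORYBOOK_HOST is set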
diff --git a/dss/storybook/generator.py b/dss/storybook/generator.py
index 7136eeb..596a6b3 100644
--- a/dss/storybook/generator.py
+++ b/dss/storybook/generator.py
@@ -1,5 +1,5 @@
"""
-Storybook Story Generator for Design System Components
+Storybook Story Generator for Design System Components.
Generates interactive Storybook stories for design system components,
creating comprehensive documentation that showcases component usage,
@@ -11,33 +11,30 @@ for how components should be used in applications.
Supports both React components and Web Components with JSDoc annotations.
"""
-import re
import json
-import subprocess
import logging
-from pathlib import Path
-from typing import List, Dict, Any, Optional, Tuple
+import re
from dataclasses import dataclass, field
from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
log = logging.getLogger(__name__)
class StoryTemplate(str, Enum):
- """
- Available story format templates for component documentation.
- """
- CSF3 = "csf3" # Component Story Format 3 (latest, recommended)
- CSF2 = "csf2" # Component Story Format 2 (legacy)
- MDX = "mdx" # MDX format (documentation + interactive)
+ """Available story format templates for component documentation."""
+
+ CSF3 = "csf3" # Component Story Format 3 (latest, recommended)
+ CSF2 = "csf2" # Component Story Format 2 (legacy)
+ MDX = "mdx" # MDX format (documentation + interactive)
class ComponentType(str, Enum):
- """
- Type of component detected during parsing.
- """
- REACT = "react" # React functional or class component
- WEB_COMPONENT = "web" # Custom Element / Web Component
+ """Type of component detected during parsing."""
+
+ REACT = "react" # React functional or class component
+ WEB_COMPONENT = "web" # Custom Element / Web Component
UNKNOWN = "unknown"
@@ -49,6 +46,7 @@ class PropInfo:
Captures prop name, type, required status, default value,
description, and valid options for code generation.
"""
+
name: str
type: str = "unknown"
required: bool = False
@@ -65,6 +63,7 @@ class ComponentMeta:
Describes component name, file path, props, description,
and whether it accepts children for story creation.
"""
+
name: str
path: str
props: List[PropInfo] = field(default_factory=list)
@@ -88,9 +87,11 @@ class StoryGenerator:
def __init__(self, root_path: str):
self.root = Path(root_path).resolve()
# Path to the Babel parser script (shared with project_analyzer)
- self._parser_script = Path(__file__).parent.parent / 'analyze' / 'parser.js'
+ self._parser_script = Path(__file__).parent.parent / "analyze" / "parser.js"
- def generate(self, template: StoryTemplate = StoryTemplate.CSF3, dry_run: bool = True) -> List[Dict[str, str]]:
+ def generate(
+ self, template: StoryTemplate = StoryTemplate.CSF3, dry_run: bool = True
+ ) -> List[Dict[str, str]]:
"""
Generate stories for all components in the project.
@@ -108,13 +109,13 @@ class StoryGenerator:
# Common component directories to scan
component_dirs = [
- 'src/components',
- 'components',
- 'src/ui',
- 'ui',
- 'lib/components',
- 'packages/ui/src',
- 'app/components',
+ "src/components",
+ "components",
+ "src/ui",
+ "ui",
+ "lib/components",
+ "packages/ui/src",
+ "app/components",
]
results = []
@@ -143,7 +144,7 @@ class StoryGenerator:
asyncio.set_event_loop(loop)
results = loop.run_until_complete(
- self.generate_stories_for_directory('.', template, dry_run)
+ self.generate_stories_for_directory(".", template, dry_run)
)
return results
@@ -193,7 +194,11 @@ class StoryGenerator:
Supports both React components (TypeScript interfaces) and
Web Components (JSDoc annotations, observedAttributes, customElements.define).
"""
- path = self.root / component_path if not Path(component_path).is_absolute() else Path(component_path)
+ path = (
+ self.root / component_path
+ if not Path(component_path).is_absolute()
+ else Path(component_path)
+ )
content = path.read_text(encoding="utf-8", errors="ignore")
component_name = path.stem
@@ -205,10 +210,7 @@ class StoryGenerator:
has_children = False
# Detect Web Component patterns
- is_web_component = (
- 'extends HTMLElement' in content or
- 'customElements.define' in content
- )
+ is_web_component = "extends HTMLElement" in content or "customElements.define" in content
if is_web_component:
component_type = ComponentType.WEB_COMPONENT
@@ -222,15 +224,13 @@ class StoryGenerator:
props = self._parse_react_props(content)
# Check if component uses children (React)
- has_children = 'children' in content.lower() and (
- 'React.ReactNode' in content or
- 'ReactNode' in content or
- '{children}' in content
+ has_children = "children" in content.lower() and (
+ "React.ReactNode" in content or "ReactNode" in content or "{children}" in content
)
# Extract description from JSDoc if not already found
if not description:
- jsdoc_match = re.search(r'/\*\*\s*\n\s*\*\s*([^\n*]+)', content)
+ jsdoc_match = re.search(r"/\*\*\s*\n\s*\*\s*([^\n*]+)", content)
if jsdoc_match:
description = jsdoc_match.group(1).strip()
@@ -257,10 +257,7 @@ class StoryGenerator:
has_children = False
# Extract tag name from customElements.define('tag-name', ClassName)
- define_match = re.search(
- r"customElements\.define\s*\(\s*['\"]([^'\"]+)['\"]",
- content
- )
+ define_match = re.search(r"customElements\.define\s*\(\s*['\"]([^'\"]+)['\"]", content)
if define_match:
tag_name = define_match.group(1)
@@ -269,7 +266,7 @@ class StoryGenerator:
observed_match = re.search(
r"static\s+get\s+observedAttributes\s*\(\s*\)\s*\{\s*return\s*\[([^\]]+)\]",
content,
- re.DOTALL
+ re.DOTALL,
)
observed_attrs = []
if observed_match:
@@ -279,12 +276,12 @@ class StoryGenerator:
# Parse JSDoc to extract attribute info
# Look for @param or attribute descriptions in JSDoc
- jsdoc_match = re.search(r'/\*\*([^*]|\*(?!/))*\*/', content, re.DOTALL)
+ jsdoc_match = re.search(r"/\*\*([^*]|\*(?!/))*\*/", content, re.DOTALL)
if jsdoc_match:
jsdoc = jsdoc_match.group(0)
# Extract main description (first line after /**)
- desc_match = re.search(r'/\*\*\s*\n?\s*\*?\s*([^\n@*]+)', jsdoc)
+ desc_match = re.search(r"/\*\*\s*\n?\s*\*?\s*([^\n@*]+)", jsdoc)
if desc_match:
description = desc_match.group(1).strip()
@@ -293,57 +290,59 @@ class StoryGenerator:
# - variant: primary | secondary | ...
# - size: sm | default | lg
attrs_section = re.search(
- r'Attributes:\s*\n((?:\s*\*?\s*-[^\n]+\n?)+)',
- jsdoc,
- re.IGNORECASE
+ r"Attributes:\s*\n((?:\s*\*?\s*-[^\n]+\n?)+)", jsdoc, re.IGNORECASE
)
if attrs_section:
attr_lines = attrs_section.group(1)
- for line in attr_lines.split('\n'):
- line = line.strip().lstrip('*').strip()
- if not line.startswith('-'):
+ for line in attr_lines.split("\n"):
+ line = line.strip().lstrip("*").strip()
+ if not line.startswith("-"):
continue
# Parse: - attr_name: value1 | value2 | ...
- attr_match = re.match(r'-\s*(\w+):\s*(.+)', line)
+ attr_match = re.match(r"-\s*(\w+):\s*(.+)", line)
if attr_match:
attr_name = attr_match.group(1)
attr_values = attr_match.group(2).strip()
# Parse options from pipe-separated values
options = []
- if '|' in attr_values:
- options = [v.strip() for v in attr_values.split('|')]
+ if "|" in attr_values:
+ options = [v.strip() for v in attr_values.split("|")]
- prop_type = 'string'
- if attr_values == 'boolean':
- prop_type = 'boolean'
+ prop_type = "string"
+ if attr_values == "boolean":
+ prop_type = "boolean"
elif options:
- prop_type = ' | '.join(f"'{o}'" for o in options)
+ prop_type = " | ".join(f"'{o}'" for o in options)
- props.append(PropInfo(
- name=attr_name,
- type=prop_type,
- required=False,
- options=options,
- ))
+ props.append(
+ PropInfo(
+ name=attr_name,
+ type=prop_type,
+ required=False,
+ options=options,
+ )
+ )
# Add any observed attributes not found in JSDoc
jsdoc_attr_names = {p.name for p in props}
for attr in observed_attrs:
if attr not in jsdoc_attr_names:
# Skip ARIA attributes for story generation
- if attr.startswith('aria-') or attr == 'tabindex':
+ if attr.startswith("aria-") or attr == "tabindex":
continue
- props.append(PropInfo(
- name=attr,
- type='string',
- required=False,
- ))
+ props.append(
+ PropInfo(
+ name=attr,
+ type="string",
+ required=False,
+ )
+ )
# Check for slot usage (Web Component children)
- has_children = '<slot' in content
+ has_children = "<slot" in content
return props, tag_name, description, has_children
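_parse_react_props is worth seeing end to end, since these hunks only show it in fragments: one regex finds the *Props interface, a second splits each member into name/optional-flag/type, and union types of string literals become the options list. A compressed, runnable restaging:

import re
from dataclasses import dataclass, field
from typing import List


@dataclass
class PropInfo:
    name: str
    type: str = "unknown"
    required: bool = False
    options: List[str] = field(default_factory=list)


PROPS_PATTERN = re.compile(r"(?:interface|type)\s+\w*Props\s*(?:=\s*)?\{([^}]+)\}", re.DOTALL)

source = """
interface ButtonProps {
  variant?: 'primary' | 'secondary';
  disabled: boolean;
}
"""

props: List[PropInfo] = []
body = PROPS_PATTERN.search(source)
if body:
    for line in body.group(1).split("\n"):
        line = line.strip()
        if not line or line.startswith("//"):
            continue
        m = re.match(r"(\w+)(\?)?:\s*([^;/]+)", line)
        if not m:
            continue
        prop_type = m.group(3).strip()
        options = []
        if "|" in prop_type:
            # 'primary' | 'secondary' -> ["primary", "secondary"]
            options = [
                o.strip().strip("'\"")
                for o in prop_type.split("|")
                if o.strip().startswith(("'", '"'))
            ]
        props.append(PropInfo(m.group(1), prop_type, m.group(2) != "?", options))

print([(p.name, p.required, p.options) for p in props])
# [('variant', False, ['primary', 'secondary']), ('disabled', True, [])]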
@@ -354,8 +353,7 @@ class StoryGenerator:
# Extract props from interface/type
# interface ButtonProps { variant?: 'primary' | 'secondary'; ... }
props_pattern = re.compile(
- r'(?:interface|type)\s+\w*Props\s*(?:=\s*)?\{([^}]+)\}',
- re.DOTALL
+ r"(?:interface|type)\s+\w*Props\s*(?:=\s*)?\{([^}]+)\}", re.DOTALL
)
props_match = props_pattern.search(content)
@@ -363,44 +361,43 @@ class StoryGenerator:
props_content = props_match.group(1)
# Parse each prop line
- for line in props_content.split('\n'):
+ for line in props_content.split("\n"):
line = line.strip()
- if not line or line.startswith('//'):
+ if not line or line.startswith("//"):
continue
# Match: propName?: type; or propName: type;
- prop_match = re.match(
- r'(\w+)(\?)?:\s*([^;/]+)',
- line
- )
+ prop_match = re.match(r"(\w+)(\?)?:\s*([^;/]+)", line)
if prop_match:
prop_name = prop_match.group(1)
- is_optional = prop_match.group(2) == '?'
+ is_optional = prop_match.group(2) == "?"
prop_type = prop_match.group(3).strip()
# Extract options from union types
options = []
- if '|' in prop_type:
+ if "|" in prop_type:
# 'primary' | 'secondary' | 'ghost'
options = [
o.strip().strip("'\"")
- for o in prop_type.split('|')
+ for o in prop_type.split("|")
if o.strip().startswith(("'", '"'))
]
- props.append(PropInfo(
- name=prop_name,
- type=prop_type,
- required=not is_optional,
- options=options,
- ))
+ props.append(
+ PropInfo(
+ name=prop_name,
+ type=prop_type,
+ required=not is_optional,
+ options=options,
+ )
+ )
return props
@staticmethod
def _tag_to_pascal(tag_name: str) -> str:
"""Convert kebab-case tag name to PascalCase (e.g., 'ds-button' -> 'DsButton')."""
- return ''.join(word.capitalize() for word in tag_name.split('-'))
+ return "".join(word.capitalize() for word in tag_name.split("-"))
def _generate_csf3(self, meta: ComponentMeta, include_variants: bool) -> str:
"""Generate CSF3 format story."""
@@ -411,7 +408,7 @@ class StoryGenerator:
def _generate_csf3_react(self, meta: ComponentMeta, include_variants: bool) -> str:
"""Generate CSF3 format story for React components."""
lines = [
- f"import type {{ Meta, StoryObj }} from '@storybook/react';",
+ "import type { Meta, StoryObj } from '@storybook/react';",
f"import {{ {meta.name} }} from './{meta.name}';",
"",
f"const meta: Meta = {{",
@@ -439,79 +436,82 @@ class StoryGenerator:
lines.extend(arg_types)
lines.append(" },")
- lines.extend([
- "};",
- "",
- "export default meta;",
- f"type Story = StoryObj;",
- "",
- ])
+ lines.extend(
+ [
+ "};",
+ "",
+ "export default meta;",
+ f"type Story = StoryObj;",
+ "",
+ ]
+ )
# Generate default story
default_args = self._get_default_args(meta)
- lines.extend([
- "export const Default: Story = {",
- " args: {",
- ])
+ lines.extend(
+ [
+ "export const Default: Story = {",
+ " args: {",
+ ]
+ )
for key, value in default_args.items():
lines.append(f" {key}: {value},")
- lines.extend([
- " },",
- "};",
- ])
+ lines.extend(
+ [
+ " },",
+ "};",
+ ]
+ )
# Generate variant stories
if include_variants:
- variant_prop = next(
- (p for p in meta.props if p.name == 'variant' and p.options),
- None
- )
+ variant_prop = next((p for p in meta.props if p.name == "variant" and p.options), None)
if variant_prop:
for variant in variant_prop.options:
- story_name = variant.title().replace('-', '').replace('_', '')
- lines.extend([
- "",
- f"export const {story_name}: Story = {{",
- " args: {",
- f" ...Default.args,",
- f" variant: '{variant}',",
- " },",
- "};",
- ])
+ story_name = variant.title().replace("-", "").replace("_", "")
+ lines.extend(
+ [
+ "",
+ f"export const {story_name}: Story = {{",
+ " args: {",
+ " ...Default.args,",
+ f" variant: '{variant}',",
+ " },",
+ "};",
+ ]
+ )
# Size variants
- size_prop = next(
- (p for p in meta.props if p.name == 'size' and p.options),
- None
- )
+ size_prop = next((p for p in meta.props if p.name == "size" and p.options), None)
if size_prop:
for size in size_prop.options:
story_name = f"Size{size.title()}"
- lines.extend([
- "",
- f"export const {story_name}: Story = {{",
- " args: {",
- f" ...Default.args,",
- f" size: '{size}',",
- " },",
- "};",
- ])
+ lines.extend(
+ [
+ "",
+ f"export const {story_name}: Story = {{",
+ " args: {",
+ " ...Default.args,",
+ f" size: '{size}',",
+ " },",
+ "};",
+ ]
+ )
# Disabled state
- disabled_prop = next(
- (p for p in meta.props if p.name == 'disabled'),
- None
- )
+ disabled_prop = next((p for p in meta.props if p.name == "disabled"), None)
if disabled_prop:
- lines.extend([
- "",
- "export const Disabled: Story = {",
- " args: {",
- " ...Default.args,",
- " disabled: true,",
- " },",
- "};",
- ])
+ lines.extend(
+ [
+ "",
+ "export const Disabled: Story = {",
+ " args: {",
+ " ...Default.args,",
+ " disabled: true,",
+ " },",
+ "};",
+ ]
+ )
return "\n".join(lines)
@@ -522,7 +522,7 @@ class StoryGenerator:
lines = [
"/**",
f" * Storybook stories for <{tag_name}> Web Component",
- f" * Auto-generated by DSS Storybook Generator",
+ " * Auto-generated by DSS Storybook Generator",
" */",
"",
"// Import the component to ensure it's registered",
@@ -542,7 +542,7 @@ class StoryGenerator:
f" description: '{prop.description or prop.name + ' attribute'}',\n"
f" }}"
)
- elif prop.type == 'boolean':
+ elif prop.type == "boolean":
arg_types_entries.append(
f" {prop.name}: {{\n"
f" control: {{ type: 'boolean' }},\n"
@@ -557,137 +557,141 @@ class StoryGenerator:
f" }}"
)
- lines.extend([
- "export default {",
- f" title: 'Web Components/{meta.name}',",
- " parameters: {",
- " layout: 'centered',",
- " docs: {",
- f" description: {{ component: `{meta.description or f'{meta.name} Web Component'}` }},",
- " },",
- " },",
- " tags: ['autodocs'],",
- " argTypes: {",
- ])
+ lines.extend(
+ [
+ "export default {",
+ f" title: 'Web Components/{meta.name}',",
+ " parameters: {",
+ " layout: 'centered',",
+ " docs: {",
+ f" description: {{ component: `{meta.description or f'{meta.name} Web Component'}` }},",
+ " },",
+ " },",
+ " tags: ['autodocs'],",
+ " argTypes: {",
+ ]
+ )
lines.append(",\n".join(arg_types_entries))
- lines.extend([
- " },",
- "};",
- "",
- ])
+ lines.extend(
+ [
+ " },",
+ "};",
+ "",
+ ]
+ )
# Generate render function
- lines.extend([
- "/**",
- " * Render function that creates the Web Component with attributes",
- " */",
- "const render = (args) => {",
- f" const el = document.createElement('{tag_name}');",
- "",
- " // Set attributes from args",
- " Object.entries(args).forEach(([key, value]) => {",
- " if (key === 'children' || key === 'slot') {",
- " el.innerHTML = value;",
- " } else if (typeof value === 'boolean') {",
- " if (value) el.setAttribute(key, '');",
- " else el.removeAttribute(key);",
- " } else if (value !== undefined && value !== null) {",
- " el.setAttribute(key, String(value));",
- " }",
- " });",
- "",
- " return el;",
- "};",
- "",
- ])
+ lines.extend(
+ [
+ "/**",
+ " * Render function that creates the Web Component with attributes",
+ " */",
+ "const render = (args) => {",
+ f" const el = document.createElement('{tag_name}');",
+ "",
+ " // Set attributes from args",
+ " Object.entries(args).forEach(([key, value]) => {",
+ " if (key === 'children' || key === 'slot') {",
+ " el.innerHTML = value;",
+ " } else if (typeof value === 'boolean') {",
+ " if (value) el.setAttribute(key, '');",
+ " else el.removeAttribute(key);",
+ " } else if (value !== undefined && value !== null) {",
+ " el.setAttribute(key, String(value));",
+ " }",
+ " });",
+ "",
+ " return el;",
+ "};",
+ "",
+ ]
+ )
# Get default args
default_args = self._get_default_args_web_component(meta)
- default_args_str = json.dumps(default_args, indent=2).replace('\n', '\n ')
+ default_args_str = json.dumps(default_args, indent=2).replace("\n", "\n ")
# Generate default story
- lines.extend([
- "/**",
- " * Default story showing the component in its default state",
- " */",
- "export const Default = {",
- " render,",
- f" args: {default_args_str},",
- "};",
- ])
+ lines.extend(
+ [
+ "/**",
+ " * Default story showing the component in its default state",
+ " */",
+ "export const Default = {",
+ " render,",
+ f" args: {default_args_str},",
+ "};",
+ ]
+ )
# Generate variant stories
if include_variants:
- variant_prop = next(
- (p for p in meta.props if p.name == 'variant' and p.options),
- None
- )
+ variant_prop = next((p for p in meta.props if p.name == "variant" and p.options), None)
if variant_prop:
for variant in variant_prop.options:
- story_name = variant.title().replace('-', '').replace('_', '')
- lines.extend([
- "",
- f"export const {story_name} = {{",
- " render,",
- " args: {",
- " ...Default.args,",
- f" variant: '{variant}',",
- " },",
- "};",
- ])
+ story_name = variant.title().replace("-", "").replace("_", "")
+ lines.extend(
+ [
+ "",
+ f"export const {story_name} = {{",
+ " render,",
+ " args: {",
+ " ...Default.args,",
+ f" variant: '{variant}',",
+ " },",
+ "};",
+ ]
+ )
# Size variants
- size_prop = next(
- (p for p in meta.props if p.name == 'size' and p.options),
- None
- )
+ size_prop = next((p for p in meta.props if p.name == "size" and p.options), None)
if size_prop:
for size in size_prop.options:
story_name = f"Size{size.title().replace('-', '')}"
- lines.extend([
+ lines.extend(
+ [
+ "",
+ f"export const {story_name} = {{",
+ " render,",
+ " args: {",
+ " ...Default.args,",
+ f" size: '{size}',",
+ " },",
+ "};",
+ ]
+ )
+
+ # Disabled state
+ disabled_prop = next((p for p in meta.props if p.name == "disabled"), None)
+ if disabled_prop:
+ lines.extend(
+ [
"",
- f"export const {story_name} = {{",
+ "export const Disabled = {",
" render,",
" args: {",
" ...Default.args,",
- f" size: '{size}',",
+ " disabled: true,",
" },",
"};",
- ])
-
- # Disabled state
- disabled_prop = next(
- (p for p in meta.props if p.name == 'disabled'),
- None
- )
- if disabled_prop:
- lines.extend([
- "",
- "export const Disabled = {",
- " render,",
- " args: {",
- " ...Default.args,",
- " disabled: true,",
- " },",
- "};",
- ])
+ ]
+ )
# Loading state (common for buttons)
- loading_prop = next(
- (p for p in meta.props if p.name == 'loading'),
- None
- )
+ loading_prop = next((p for p in meta.props if p.name == "loading"), None)
if loading_prop:
- lines.extend([
- "",
- "export const Loading = {",
- " render,",
- " args: {",
- " ...Default.args,",
- " loading: true,",
- " },",
- "};",
- ])
+ lines.extend(
+ [
+ "",
+ "export const Loading = {",
+ " render,",
+ " args: {",
+ " ...Default.args,",
+ " loading: true,",
+ " },",
+ "};",
+ ]
+ )
return "\n".join(lines)
@@ -696,27 +700,27 @@ class StoryGenerator:
args = {}
for prop in meta.props:
- if prop.name == 'variant' and prop.options:
- args['variant'] = prop.options[0]
- elif prop.name == 'size' and prop.options:
- args['size'] = prop.options[0]
- elif prop.name == 'type' and prop.options:
- args['type'] = prop.options[0]
- elif prop.name == 'disabled':
- args['disabled'] = False
- elif prop.name == 'loading':
- args['loading'] = False
+ if prop.name == "variant" and prop.options:
+ args["variant"] = prop.options[0]
+ elif prop.name == "size" and prop.options:
+ args["size"] = prop.options[0]
+ elif prop.name == "type" and prop.options:
+ args["type"] = prop.options[0]
+ elif prop.name == "disabled":
+ args["disabled"] = False
+ elif prop.name == "loading":
+ args["loading"] = False
# Add children/slot content for components with slots
if meta.has_children:
- args['children'] = f'Click me'
+ args["children"] = "Click me"
return args
def _generate_csf2(self, meta: ComponentMeta, include_variants: bool) -> str:
"""Generate CSF2 format story."""
lines = [
- f"import React from 'react';",
+ "import React from 'react';",
f"import {{ {meta.name} }} from './{meta.name}';",
"",
"export default {",
@@ -738,31 +742,30 @@ class StoryGenerator:
# Generate variant stories
if include_variants:
- variant_prop = next(
- (p for p in meta.props if p.name == 'variant' and p.options),
- None
- )
+ variant_prop = next((p for p in meta.props if p.name == "variant" and p.options), None)
if variant_prop:
for variant in variant_prop.options:
- story_name = variant.title().replace('-', '').replace('_', '')
- lines.extend([
- "",
- f"export const {story_name} = Template.bind({{}});",
- f"{story_name}.args = {{",
- f" ...Default.args,",
- f" variant: '{variant}',",
- "};",
- ])
+ story_name = variant.title().replace("-", "").replace("_", "")
+ lines.extend(
+ [
+ "",
+ f"export const {story_name} = Template.bind({{}});",
+ f"{story_name}.args = {{",
+ " ...Default.args,",
+ f" variant: '{variant}',",
+ "};",
+ ]
+ )
return "\n".join(lines)
def _generate_mdx(self, meta: ComponentMeta, include_variants: bool) -> str:
"""Generate MDX format story."""
lines = [
- f"import {{ Meta, Story, Canvas, ArgsTable }} from '@storybook/blocks';",
+ "import { Meta, Story, Canvas, ArgsTable } from '@storybook/blocks';",
f"import {{ {meta.name} }} from './{meta.name}';",
"",
- f" ",
+ f' ',
"",
f"# {meta.name}",
"",
@@ -771,27 +774,31 @@ class StoryGenerator:
if meta.description:
lines.extend([meta.description, ""])
- lines.extend([
- "## Default",
- "",
- "",
- f" ",
- f" <{meta.name}",
- ])
+ lines.extend(
+ [
+ "## Default",
+ "",
+ "",
+ ' ',
+ f" <{meta.name}",
+ ]
+ )
default_args = self._get_default_args(meta)
for key, value in default_args.items():
lines.append(f" {key}={value}")
- lines.extend([
- f" />",
- " ",
- " ",
- "",
- "## Props",
- "",
- f" ",
- ])
+ lines.extend(
+ [
+ " />",
+ " ",
+ " ",
+ "",
+ "## Props",
+ "",
+ f" ",
+ ]
+ )
return "\n".join(lines)
@@ -800,22 +807,22 @@ class StoryGenerator:
args = {}
for prop in meta.props:
- if prop.name == 'children' and meta.has_children:
- args['children'] = f"'{meta.name}'"
- elif prop.name == 'variant' and prop.options:
- args['variant'] = f"'{prop.options[0]}'"
- elif prop.name == 'size' and prop.options:
- args['size'] = f"'{prop.options[0]}'"
- elif prop.name == 'disabled':
- args['disabled'] = 'false'
- elif prop.name == 'onClick':
- args['onClick'] = '() => console.log("clicked")'
+ if prop.name == "children" and meta.has_children:
+ args["children"] = f"'{meta.name}'"
+ elif prop.name == "variant" and prop.options:
+ args["variant"] = f"'{prop.options[0]}'"
+ elif prop.name == "size" and prop.options:
+ args["size"] = f"'{prop.options[0]}'"
+ elif prop.name == "disabled":
+ args["disabled"] = "false"
+ elif prop.name == "onClick":
+ args["onClick"] = '() => console.log("clicked")'
elif prop.required and prop.default_value:
args[prop.name] = prop.default_value
# Ensure children for button-like components
- if meta.has_children and 'children' not in args:
- args['children'] = f"'{meta.name}'"
+ if meta.has_children and "children" not in args:
+ args["children"] = f"'{meta.name}'"
return args
@@ -845,24 +852,26 @@ class StoryGenerator:
return results
# Find component files (React + Web Components)
- for pattern in ['*.tsx', '*.jsx', '*.js']:
+ for pattern in ["*.tsx", "*.jsx", "*.js"]:
for comp_path in dir_path.glob(pattern):
# Skip story files, test files, index files
- if any(x in comp_path.name.lower() for x in ['.stories.', '.test.', '.spec.', 'index.']):
+ if any(
+ x in comp_path.name.lower() for x in [".stories.", ".test.", ".spec.", "index."]
+ ):
continue
# Check if it's a valid component file
is_valid = False
# React components: PascalCase naming (e.g., Button.tsx)
- if comp_path.suffix in ['.tsx', '.jsx'] and comp_path.stem[0].isupper():
+ if comp_path.suffix in [".tsx", ".jsx"] and comp_path.stem[0].isupper():
is_valid = True
# Web Components: check file content for class extends HTMLElement
- elif comp_path.suffix == '.js':
+ elif comp_path.suffix == ".js":
try:
- content = comp_path.read_text(encoding='utf-8', errors='ignore')
- if 'extends HTMLElement' in content or 'customElements.define' in content:
+ content = comp_path.read_text(encoding="utf-8", errors="ignore")
+ if "extends HTMLElement" in content or "customElements.define" in content:
is_valid = True
log.debug(f"Found Web Component: {comp_path}")
except Exception:
@@ -876,28 +885,30 @@ class StoryGenerator:
story = await self.generate_story(rel_path, template)
# Determine story output path (use .stories.js for Web Components)
- if comp_path.suffix == '.js':
- story_path = comp_path.with_name(comp_path.stem + '.stories.js')
+ if comp_path.suffix == ".js":
+ story_path = comp_path.with_name(comp_path.stem + ".stories.js")
else:
- story_path = comp_path.with_suffix('.stories.tsx')
+ story_path = comp_path.with_suffix(".stories.tsx")
result = {
- 'component': rel_path,
- 'story_path': str(story_path.relative_to(self.root)),
- 'story': story,
+ "component": rel_path,
+ "story_path": str(story_path.relative_to(self.root)),
+ "story": story,
}
if not dry_run:
story_path.write_text(story)
- result['written'] = True
+ result["written"] = True
results.append(result)
except Exception as e:
log.error(f"Error generating story for {comp_path}: {e}")
- results.append({
- 'component': str(comp_path),
- 'error': str(e),
- })
+ results.append(
+ {
+ "component": str(comp_path),
+ "error": str(e),
+ }
+ )
return results
diff --git a/dss/storybook/scanner.py b/dss/storybook/scanner.py
index 98a95d5..1fbd323 100644
--- a/dss/storybook/scanner.py
+++ b/dss/storybook/scanner.py
@@ -1,23 +1,24 @@
"""
-Storybook Scanner
+Storybook Scanner.
Discovers and analyzes existing Storybook stories in a project.
"""
-import re
import json
-from pathlib import Path
-from typing import List, Dict, Any, Optional, Set
+import re
from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
@dataclass
class StoryInfo:
"""Information about a Storybook story."""
- name: str # Story name (e.g., "Primary")
- title: str # Story title (e.g., "Components/Button")
- component: str # Component name
- file_path: str # Path to story file
+
+ name: str # Story name (e.g., "Primary")
+ title: str # Story title (e.g., "Components/Button")
+ component: str # Component name
+ file_path: str # Path to story file
args: Dict[str, Any] = field(default_factory=dict) # Default args
parameters: Dict[str, Any] = field(default_factory=dict)
decorators: List[str] = field(default_factory=list)
@@ -39,9 +40,10 @@ class StoryInfo:
@dataclass
class StorybookConfig:
"""Storybook configuration details."""
+
version: str = ""
- framework: str = "" # react, vue, angular, etc.
- builder: str = "" # vite, webpack5, etc.
+ framework: str = "" # react, vue, angular, etc.
+ builder: str = "" # vite, webpack5, etc.
addons: List[str] = field(default_factory=list)
stories_patterns: List[str] = field(default_factory=list)
static_dirs: List[str] = field(default_factory=list)
@@ -60,17 +62,15 @@ class StorybookConfig:
class StorybookScanner:
- """
- Scans a project for Storybook configuration and stories.
- """
+ """Scans a project for Storybook configuration and stories."""
# Common story file patterns
STORY_PATTERNS = [
- '*.stories.tsx',
- '*.stories.ts',
- '*.stories.jsx',
- '*.stories.js',
- '*.stories.mdx',
+ "*.stories.tsx",
+ "*.stories.ts",
+ "*.stories.jsx",
+ "*.stories.js",
+ "*.stories.mdx",
]
def __init__(self, root_path: str):
@@ -148,10 +148,7 @@ class StorybookScanner:
config.builder = "webpack5"
# Get addons
- config.addons = [
- pkg for pkg in deps.keys()
- if pkg.startswith("@storybook/addon-")
- ]
+ config.addons = [pkg for pkg in deps.keys() if pkg.startswith("@storybook/addon-")]
except (json.JSONDecodeError, KeyError):
pass
@@ -164,32 +161,21 @@ class StorybookScanner:
content = main_path.read_text(encoding="utf-8")
# Extract stories patterns
- stories_match = re.search(
- r'stories\s*:\s*\[([^\]]+)\]',
- content,
- re.DOTALL
- )
+ stories_match = re.search(r"stories\s*:\s*\[([^\]]+)\]", content, re.DOTALL)
if stories_match:
patterns_str = stories_match.group(1)
patterns = re.findall(r'["\']([^"\']+)["\']', patterns_str)
config.stories_patterns = patterns
# Extract static dirs
- static_match = re.search(
- r'staticDirs\s*:\s*\[([^\]]+)\]',
- content,
- re.DOTALL
- )
+ static_match = re.search(r"staticDirs\s*:\s*\[([^\]]+)\]", content, re.DOTALL)
if static_match:
dirs_str = static_match.group(1)
dirs = re.findall(r'["\']([^"\']+)["\']', dirs_str)
config.static_dirs = dirs
# Extract framework
- framework_match = re.search(
- r'framework\s*:\s*["\'](@storybook/[^"\']+)["\']',
- content
- )
+ framework_match = re.search(r'framework\s*:\s*["\'](@storybook/[^"\']+)["\']', content)
if framework_match:
config.framework = framework_match.group(1)
@@ -199,7 +185,7 @@ class StorybookScanner:
async def _find_stories(self) -> List[StoryInfo]:
"""Find all story files in the project."""
stories = []
- skip_dirs = {'node_modules', '.git', 'dist', 'build'}
+ skip_dirs = {"node_modules", ".git", "dist", "build"}
for pattern in self.STORY_PATTERNS:
for story_path in self.root.rglob(pattern):
@@ -226,9 +212,7 @@ class StorybookScanner:
# CSF3 format: const meta = { title: '...', component: ... }
meta_match = re.search(
- r'(?:const\s+meta|export\s+default)\s*[=:]\s*\{([^}]+)\}',
- content,
- re.DOTALL
+ r"(?:const\s+meta|export\s+default)\s*[=:]\s*\{([^}]+)\}", content, re.DOTALL
)
if meta_match:
meta_content = meta_match.group(1)
@@ -237,24 +221,23 @@ class StorybookScanner:
if title_match:
title = title_match.group(1)
- comp_match = re.search(r'component\s*:\s*(\w+)', meta_content)
+ comp_match = re.search(r"component\s*:\s*(\w+)", meta_content)
if comp_match:
component = comp_match.group(1)
# If no title, derive from file path
if not title:
# Convert path to title (e.g., src/components/Button.stories.tsx -> Components/Button)
- parts = story_path.stem.replace('.stories', '').split('/')
- title = '/'.join(p.title() for p in parts[-2:] if p)
+ parts = story_path.stem.replace(".stories", "").split("/")
+ title = "/".join(p.title() for p in parts[-2:] if p)
if not component:
- component = story_path.stem.replace('.stories', '')
+ component = story_path.stem.replace(".stories", "")
# Find exported stories (CSF3 format)
# export const Primary: Story = { ... }
story_pattern = re.compile(
- r'export\s+const\s+(\w+)\s*(?::\s*\w+)?\s*=\s*\{([^}]*)\}',
- re.DOTALL
+ r"export\s+const\s+(\w+)\s*(?::\s*\w+)?\s*=\s*\{([^}]*)\}", re.DOTALL
)
for match in story_pattern.finditer(content):
@@ -262,47 +245,46 @@ class StorybookScanner:
story_content = match.group(2)
# Skip meta export
- if story_name.lower() in ['meta', 'default']:
+ if story_name.lower() in ["meta", "default"]:
continue
# Parse args
args = {}
- args_match = re.search(r'args\s*:\s*\{([^}]*)\}', story_content)
+ args_match = re.search(r"args\s*:\s*\{([^}]*)\}", story_content)
if args_match:
args_str = args_match.group(1)
# Simple key-value extraction
for kv_match in re.finditer(r'(\w+)\s*:\s*["\']?([^,\n"\']+)["\']?', args_str):
args[kv_match.group(1)] = kv_match.group(2).strip()
- stories.append(StoryInfo(
- name=story_name,
- title=title,
- component=component,
- file_path=rel_path,
- args=args,
- ))
-
- # Also check for older CSF2 format
- # export const Primary = Template.bind({})
- csf2_pattern = re.compile(
- r'export\s+const\s+(\w+)\s*=\s*Template\.bind\(\{\}\)'
- )
- for match in csf2_pattern.finditer(content):
- story_name = match.group(1)
- if not any(s.name == story_name for s in stories):
- stories.append(StoryInfo(
+ stories.append(
+ StoryInfo(
name=story_name,
title=title,
component=component,
file_path=rel_path,
- ))
+ args=args,
+ )
+ )
+
+ # Also check for older CSF2 format
+ # export const Primary = Template.bind({})
+ csf2_pattern = re.compile(r"export\s+const\s+(\w+)\s*=\s*Template\.bind\(\{\}\)")
+ for match in csf2_pattern.finditer(content):
+ story_name = match.group(1)
+ if not any(s.name == story_name for s in stories):
+ stories.append(
+ StoryInfo(
+ name=story_name,
+ title=title,
+ component=component,
+ file_path=rel_path,
+ )
+ )
return stories
- async def get_components_without_stories(
- self,
- component_files: List[str]
- ) -> List[str]:
+ async def get_components_without_stories(self, component_files: List[str]) -> List[str]:
"""
Find components that don't have Storybook stories.
@@ -340,13 +322,12 @@ class StorybookScanner:
# Count stories per component
by_component = result.get("by_component", {})
- stories_per_component = {
- comp: len(stories) for comp, stories in by_component.items()
- }
+ stories_per_component = {comp: len(stories) for comp, stories in by_component.items()}
avg_stories = (
sum(stories_per_component.values()) / len(stories_per_component)
- if stories_per_component else 0
+ if stories_per_component
+ else 0
)
return {
diff --git a/dss/storybook/theme.py b/dss/storybook/theme.py
index 607ea50..31f8bc2 100644
--- a/dss/storybook/theme.py
+++ b/dss/storybook/theme.py
@@ -1,18 +1,19 @@
"""
-Storybook Theme Generator
+Storybook Theme Generator.
Generates Storybook theme configurations from design tokens.
"""
import json
+from dataclasses import dataclass
from pathlib import Path
-from typing import List, Dict, Any, Optional
-from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional
@dataclass
class StorybookTheme:
"""Storybook theme configuration."""
+
name: str = "dss-theme"
base: str = "light" # 'light' or 'dark'
@@ -79,9 +80,7 @@ class StorybookTheme:
class ThemeGenerator:
- """
- Generates Storybook theme configurations from design tokens.
- """
+ """Generates Storybook theme configurations from design tokens."""
def __init__(self, root_path: Optional[str] = None):
"""
@@ -108,12 +107,12 @@ class ThemeGenerator:
"""
# Look for tokens in common locations
token_paths = [
- self.root / 'tokens' / 'tokens.json',
- self.root / 'design-tokens' / 'tokens.json',
- self.root / 'src' / 'tokens' / 'tokens.json',
- self.root / '.dss' / 'tokens.json',
- self.root / 'dss_output' / 'tokens.json',
- self.root / 'dss' / 'core_tokens' / 'tokens.json', # DSS core tokens
+ self.root / "tokens" / "tokens.json",
+ self.root / "design-tokens" / "tokens.json",
+ self.root / "src" / "tokens" / "tokens.json",
+ self.root / ".dss" / "tokens.json",
+ self.root / "dss_output" / "tokens.json",
+ self.root / "dss" / "core_tokens" / "tokens.json", # DSS core tokens
]
tokens = []
@@ -137,7 +136,7 @@ class ThemeGenerator:
theme = self.generate_from_tokens(tokens, brand_title, base)
# Determine output directory for Storybook config
- storybook_dir = self.root / '.storybook'
+ storybook_dir = self.root / ".storybook"
output_dir = str(storybook_dir) if storybook_dir.exists() else None
# Generate configuration files
@@ -175,23 +174,19 @@ class ThemeGenerator:
"color.primary.600": "color_primary",
"color.secondary.500": "color_secondary",
"color.accent.500": "color_secondary",
-
# Backgrounds
"color.neutral.50": "app_bg",
"color.background": "app_bg",
"color.surface": "app_content_bg",
-
# Borders
"color.neutral.200": "app_border_color",
"color.border": "app_border_color",
-
# Text
"color.neutral.900": "text_color",
"color.neutral.800": "text_color",
"color.foreground": "text_color",
"color.neutral.500": "text_muted_color",
"color.muted": "text_muted_color",
-
# Input
"color.neutral.300": "input_border",
"radius.md": "input_border_radius",
@@ -333,10 +328,12 @@ class ThemeGenerator:
else:
lines.append(f" {key}: {value},")
- lines.extend([
- "});",
- "",
- ])
+ lines.extend(
+ [
+ "});",
+ "",
+ ]
+ )
return "\n".join(lines)
@@ -386,44 +383,50 @@ addons.setConfig({{
value = token.get("value", "")
css_vars.append(f" --{name}: {value};")
- lines.extend([
- "// Inject design tokens as CSS variables",
- "const tokenStyles = `",
- ":root {",
- ])
+ lines.extend(
+ [
+ "// Inject design tokens as CSS variables",
+ "const tokenStyles = `",
+ ":root {",
+ ]
+ )
lines.extend(css_vars)
- lines.extend([
- "}",
- "`;",
- "",
- "// Add styles to document",
- "const styleSheet = document.createElement('style');",
- "styleSheet.textContent = tokenStyles;",
- "document.head.appendChild(styleSheet);",
- "",
- ])
+ lines.extend(
+ [
+ "}",
+ "`;",
+ "",
+ "// Add styles to document",
+ "const styleSheet = document.createElement('style');",
+ "styleSheet.textContent = tokenStyles;",
+ "document.head.appendChild(styleSheet);",
+ "",
+ ]
+ )
- lines.extend([
- "const preview: Preview = {",
- " parameters: {",
- " controls: {",
- " matchers: {",
- " color: /(background|color)$/i,",
- " date: /Date$/i,",
- " },",
- " },",
- " backgrounds: {",
- " default: 'light',",
- " values: [",
- " { name: 'light', value: '#FFFFFF' },",
- " { name: 'dark', value: '#1F2937' },",
- " ],",
- " },",
- " },",
- "};",
- "",
- "export default preview;",
- ])
+ lines.extend(
+ [
+ "const preview: Preview = {",
+ " parameters: {",
+ " controls: {",
+ " matchers: {",
+ " color: /(background|color)$/i,",
+ " date: /Date$/i,",
+ " },",
+ " },",
+ " backgrounds: {",
+ " default: 'light',",
+ " values: [",
+ " { name: 'light', value: '#FFFFFF' },",
+ " { name: 'dark', value: '#1F2937' },",
+ " ],",
+ " },",
+ " },",
+ "};",
+ "",
+ "export default preview;",
+ ]
+ )
return "\n".join(lines)
diff --git a/dss/themes/__init__.py b/dss/themes/__init__.py
index 7f48f45..3553687 100644
--- a/dss/themes/__init__.py
+++ b/dss/themes/__init__.py
@@ -1,5 +1,5 @@
-"""Default DSS themes (light & dark)"""
+"""Default DSS themes (light & dark)."""
-from .default_themes import get_default_light_theme, get_default_dark_theme
+from .default_themes import get_default_dark_theme, get_default_light_theme
__all__ = ["get_default_light_theme", "get_default_dark_theme"]
diff --git a/dss/themes/default_themes.py b/dss/themes/default_themes.py
index 599da20..88f847d 100644
--- a/dss/themes/default_themes.py
+++ b/dss/themes/default_themes.py
@@ -1,14 +1,16 @@
"""
-Default DSS Light & Dark Themes
+Default DSS Light & Dark Themes.
+
Perfect implementation showcasing the design system
"""
-from dss.models.theme import Theme, DesignToken, TokenCategory
+from dss.models.theme import DesignToken, Theme, TokenCategory
def get_default_light_theme() -> Theme:
"""
- DSS Default Light Theme
+ DSS Default Light Theme.
+
Clean, modern light theme optimized for readability
"""
return Theme(
@@ -21,175 +23,173 @@ def get_default_light_theme() -> Theme:
value="oklch(0.99 0.005 285)",
type="color",
category=TokenCategory.COLOR,
- description="Main background color"
+ description="Main background color",
),
"foreground": DesignToken(
name="foreground",
value="oklch(0.15 0.015 285)",
type="color",
category=TokenCategory.COLOR,
- description="Main text color"
+ description="Main text color",
),
"primary": DesignToken(
name="primary",
value="oklch(0.65 0.18 250)",
type="color",
category=TokenCategory.COLOR,
- description="Primary brand color - vibrant blue"
+ description="Primary brand color - vibrant blue",
),
"secondary": DesignToken(
name="secondary",
value="oklch(0.55 0.05 285)",
type="color",
category=TokenCategory.COLOR,
- description="Secondary color - subtle purple-gray"
+ description="Secondary color - subtle purple-gray",
),
"accent": DesignToken(
name="accent",
value="oklch(0.70 0.15 180)",
type="color",
category=TokenCategory.COLOR,
- description="Accent color - cyan"
+ description="Accent color - cyan",
),
"destructive": DesignToken(
name="destructive",
value="oklch(0.55 0.22 25)",
type="color",
category=TokenCategory.COLOR,
- description="Destructive actions - red"
+ description="Destructive actions - red",
),
"success": DesignToken(
name="success",
value="oklch(0.60 0.18 145)",
type="color",
category=TokenCategory.COLOR,
- description="Success states - green"
+ description="Success states - green",
),
"warning": DesignToken(
name="warning",
value="oklch(0.75 0.15 85)",
type="color",
category=TokenCategory.COLOR,
- description="Warning states - yellow"
+ description="Warning states - yellow",
),
"muted": DesignToken(
name="muted",
value="oklch(0.95 0.01 285)",
type="color",
category=TokenCategory.COLOR,
- description="Muted background"
+ description="Muted background",
),
"border": DesignToken(
name="border",
value="oklch(0.90 0.01 285)",
type="color",
category=TokenCategory.COLOR,
- description="Border color"
+ description="Border color",
),
-
# Spacing
"space-xs": DesignToken(
name="space-xs",
value="4px",
type="dimension",
category=TokenCategory.SPACING,
- description="Extra small spacing"
+ description="Extra small spacing",
),
"space-sm": DesignToken(
name="space-sm",
value="8px",
type="dimension",
category=TokenCategory.SPACING,
- description="Small spacing"
+ description="Small spacing",
),
"space-md": DesignToken(
name="space-md",
value="16px",
type="dimension",
category=TokenCategory.SPACING,
- description="Medium spacing"
+ description="Medium spacing",
),
"space-lg": DesignToken(
name="space-lg",
value="24px",
type="dimension",
category=TokenCategory.SPACING,
- description="Large spacing"
+ description="Large spacing",
),
"space-xl": DesignToken(
name="space-xl",
value="32px",
type="dimension",
category=TokenCategory.SPACING,
- description="Extra large spacing"
+ description="Extra large spacing",
),
-
# Border Radius
"radius-sm": DesignToken(
name="radius-sm",
value="4px",
type="dimension",
category=TokenCategory.RADIUS,
- description="Small border radius"
+ description="Small border radius",
),
"radius-md": DesignToken(
name="radius-md",
value="8px",
type="dimension",
category=TokenCategory.RADIUS,
- description="Medium border radius"
+ description="Medium border radius",
),
"radius-lg": DesignToken(
name="radius-lg",
value="12px",
type="dimension",
category=TokenCategory.RADIUS,
- description="Large border radius"
+ description="Large border radius",
),
-
# Typography
"text-xs": DesignToken(
name="text-xs",
value="0.75rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Extra small text"
+ description="Extra small text",
),
"text-sm": DesignToken(
name="text-sm",
value="0.875rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Small text"
+ description="Small text",
),
"text-base": DesignToken(
name="text-base",
value="1rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Base text size"
+ description="Base text size",
),
"text-lg": DesignToken(
name="text-lg",
value="1.125rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Large text"
+ description="Large text",
),
"text-xl": DesignToken(
name="text-xl",
value="1.25rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Extra large text"
+ description="Extra large text",
),
- }
+ },
)
def get_default_dark_theme() -> Theme:
"""
- DSS Default Dark Theme
+ DSS Default Dark Theme.
+
Sleek dark theme optimized for low-light environments
"""
return Theme(
@@ -202,167 +202,164 @@ def get_default_dark_theme() -> Theme:
value="oklch(0.15 0.015 285)",
type="color",
category=TokenCategory.COLOR,
- description="Main background color"
+ description="Main background color",
),
"foreground": DesignToken(
name="foreground",
value="oklch(0.95 0.01 285)",
type="color",
category=TokenCategory.COLOR,
- description="Main text color"
+ description="Main text color",
),
"primary": DesignToken(
name="primary",
value="oklch(0.70 0.20 250)",
type="color",
category=TokenCategory.COLOR,
- description="Primary brand color - brighter blue for dark mode"
+ description="Primary brand color - brighter blue for dark mode",
),
"secondary": DesignToken(
name="secondary",
value="oklch(0.60 0.08 285)",
type="color",
category=TokenCategory.COLOR,
- description="Secondary color - subtle purple-gray"
+ description="Secondary color - subtle purple-gray",
),
"accent": DesignToken(
name="accent",
value="oklch(0.75 0.18 180)",
type="color",
category=TokenCategory.COLOR,
- description="Accent color - brighter cyan"
+ description="Accent color - brighter cyan",
),
"destructive": DesignToken(
name="destructive",
value="oklch(0.60 0.24 25)",
type="color",
category=TokenCategory.COLOR,
- description="Destructive actions - brighter red"
+ description="Destructive actions - brighter red",
),
"success": DesignToken(
name="success",
value="oklch(0.65 0.20 145)",
type="color",
category=TokenCategory.COLOR,
- description="Success states - brighter green"
+ description="Success states - brighter green",
),
"warning": DesignToken(
name="warning",
value="oklch(0.80 0.17 85)",
type="color",
category=TokenCategory.COLOR,
- description="Warning states - brighter yellow"
+ description="Warning states - brighter yellow",
),
"muted": DesignToken(
name="muted",
value="oklch(0.22 0.02 285)",
type="color",
category=TokenCategory.COLOR,
- description="Muted background"
+ description="Muted background",
),
"border": DesignToken(
name="border",
value="oklch(0.30 0.02 285)",
type="color",
category=TokenCategory.COLOR,
- description="Border color"
+ description="Border color",
),
-
# Spacing - Same as light theme
"space-xs": DesignToken(
name="space-xs",
value="4px",
type="dimension",
category=TokenCategory.SPACING,
- description="Extra small spacing"
+ description="Extra small spacing",
),
"space-sm": DesignToken(
name="space-sm",
value="8px",
type="dimension",
category=TokenCategory.SPACING,
- description="Small spacing"
+ description="Small spacing",
),
"space-md": DesignToken(
name="space-md",
value="16px",
type="dimension",
category=TokenCategory.SPACING,
- description="Medium spacing"
+ description="Medium spacing",
),
"space-lg": DesignToken(
name="space-lg",
value="24px",
type="dimension",
category=TokenCategory.SPACING,
- description="Large spacing"
+ description="Large spacing",
),
"space-xl": DesignToken(
name="space-xl",
value="32px",
type="dimension",
category=TokenCategory.SPACING,
- description="Extra large spacing"
+ description="Extra large spacing",
),
-
# Border Radius - Same as light theme
"radius-sm": DesignToken(
name="radius-sm",
value="4px",
type="dimension",
category=TokenCategory.RADIUS,
- description="Small border radius"
+ description="Small border radius",
),
"radius-md": DesignToken(
name="radius-md",
value="8px",
type="dimension",
category=TokenCategory.RADIUS,
- description="Medium border radius"
+ description="Medium border radius",
),
"radius-lg": DesignToken(
name="radius-lg",
value="12px",
type="dimension",
category=TokenCategory.RADIUS,
- description="Large border radius"
+ description="Large border radius",
),
-
# Typography - Same as light theme
"text-xs": DesignToken(
name="text-xs",
value="0.75rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Extra small text"
+ description="Extra small text",
),
"text-sm": DesignToken(
name="text-sm",
value="0.875rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Small text"
+ description="Small text",
),
"text-base": DesignToken(
name="text-base",
value="1rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Base text size"
+ description="Base text size",
),
"text-lg": DesignToken(
name="text-lg",
value="1.125rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Large text"
+ description="Large text",
),
"text-xl": DesignToken(
name="text-xl",
value="1.25rem",
type="dimension",
category=TokenCategory.TYPOGRAPHY,
- description="Extra large text"
+ description="Extra large text",
),
- }
+ },
)
diff --git a/dss/themes/translator.py b/dss/themes/translator.py
index ad90d76..e9f2dd7 100644
--- a/dss/themes/translator.py
+++ b/dss/themes/translator.py
@@ -1,20 +1,20 @@
"""
-DSS Theme Translator
+DSS Theme Translator.
Translates a DSS project's tokens and components into a specific
theme or "skin" for a target framework (e.g., shadcn, material-ui).
"""
from pathlib import Path
-from typing import Dict, Any
-from dss.models.project import Project
from dss.ingest.base import TokenCollection
+from dss.models.project import Project
class ThemeTranslator:
"""
Translates a DSS project's tokens and components into a specific
theme or "skin" for a target framework (e.g., shadcn, material-ui).
"""
@@ -53,7 +53,9 @@ class ThemeTranslator:
file, a globals.css file, etc.).
"""
# Load the token collection
- token_collection_path = self.project.path / ".dss" / "cache" / "raw_figma_tokencollection.json"
+ token_collection_path = (
+ self.project.path / ".dss" / "cache" / "raw_figma_tokencollection.json"
+ )
if not token_collection_path.exists():
raise FileNotFoundError("Token collection not found. Run sync first.")
diff --git a/dss/translations/__init__.py b/dss/translations/__init__.py
index 8e026fc..e842729 100644
--- a/dss/translations/__init__.py
+++ b/dss/translations/__init__.py
@@ -1,5 +1,5 @@
"""
-DSS Translation Dictionary Module
+DSS Translation Dictionary Module.
Provides translation between external design token formats and DSS canonical structure.
"""
diff --git a/dss/translations/canonical.py b/dss/translations/canonical.py
index 922c01e..1b7106e 100644
--- a/dss/translations/canonical.py
+++ b/dss/translations/canonical.py
@@ -1,5 +1,5 @@
"""
-DSS Canonical Structure Definitions
+DSS Canonical Structure Definitions.
Defines the immutable DSS canonical token and component structure.
These definitions are used for validation and auto-completion.
diff --git a/dss/translations/loader.py b/dss/translations/loader.py
index 9f22d14..9be996f 100644
--- a/dss/translations/loader.py
+++ b/dss/translations/loader.py
@@ -1,12 +1,12 @@
"""
-Translation Dictionary Loader
+Translation Dictionary Loader.
Loads and parses translation dictionaries from project .dss directory.
"""
import json
from pathlib import Path
-from typing import Dict, List, Optional, Union
+from typing import List, Optional, Union
from .models import TranslationDictionary, TranslationRegistry, TranslationSource
from .validator import TranslationValidator
@@ -27,7 +27,10 @@ class TranslationDictionaryLoader:
DEFAULT_DIR = ".dss/translations"
def __init__(
- self, project_path: Union[str, Path], translations_dir: Optional[str] = None, validate: bool = True
+ self,
+ project_path: Union[str, Path],
+ translations_dir: Optional[str] = None,
+ validate: bool = True,
):
"""
Initialize loader.
@@ -140,7 +143,9 @@ class TranslationDictionaryLoader:
return TranslationDictionary(**data)
- def _merge_to_registry(self, registry: TranslationRegistry, dictionary: TranslationDictionary) -> None:
+ def _merge_to_registry(
+ self, registry: TranslationRegistry, dictionary: TranslationDictionary
+ ) -> None:
"""Merge dictionary mappings into registry."""
# Merge token mappings
for source_token, dss_token in dictionary.mappings.tokens.items():
diff --git a/dss/translations/merger.py b/dss/translations/merger.py
index 4e8e57a..2607a54 100644
--- a/dss/translations/merger.py
+++ b/dss/translations/merger.py
@@ -1,11 +1,11 @@
"""
-Theme Merger
+Theme Merger.
Merges base DSS theme with translation mappings and custom props.
"""
from datetime import datetime, timezone
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Optional
from dss.models.theme import DesignToken, Theme, TokenCategory
from dss.themes.default_themes import get_default_dark_theme, get_default_light_theme
diff --git a/dss/translations/models.py b/dss/translations/models.py
index 7d16300..5a9cf33 100644
--- a/dss/translations/models.py
+++ b/dss/translations/models.py
@@ -1,5 +1,5 @@
"""
-Translation Dictionary Data Models
+Translation Dictionary Data Models.
Pydantic models for translation dictionary system.
"""
@@ -9,7 +9,7 @@ from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import uuid4
-from pydantic import BaseModel, Field, ConfigDict, field_validator
+from pydantic import BaseModel, ConfigDict, Field, field_validator
class TranslationSource(str, Enum):
@@ -41,15 +41,17 @@ class TokenMapping(BaseModel):
source_token: str = Field(
..., description="Source token name (e.g., '--brand-blue', '$primary-color')"
)
- dss_token: str = Field(
- ..., description="DSS canonical token path (e.g., 'color.primary.500')"
+ dss_token: str = Field(..., description="DSS canonical token path (e.g., 'color.primary.500')")
+ source_value: Optional[str] = Field(
+ None, description="Original value from source (for reference)"
)
- source_value: Optional[str] = Field(None, description="Original value from source (for reference)")
notes: Optional[str] = Field(None, description="Human-readable notes about this mapping")
confidence: float = Field(
default=1.0, ge=0.0, le=1.0, description="Confidence score for auto-generated mappings"
)
- auto_generated: bool = Field(default=False, description="Whether this mapping was auto-generated")
+ auto_generated: bool = Field(
+ default=False, description="Whether this mapping was auto-generated"
+ )
class ComponentMapping(BaseModel):
@@ -63,7 +65,9 @@ class ComponentMapping(BaseModel):
dss_component: str = Field(
..., description="DSS canonical component (e.g., 'Button[variant=primary]')"
)
- prop_mappings: Dict[str, str] = Field(default_factory=dict, description="Prop name mappings (source -> DSS)")
+ prop_mappings: Dict[str, str] = Field(
+ default_factory=dict, description="Prop name mappings (source -> DSS)"
+ )
notes: Optional[str] = Field(None)
@@ -82,7 +86,9 @@ class CustomProp(BaseModel):
model_config = ConfigDict(extra="forbid")
- name: str = Field(..., description="Token name in DSS namespace (e.g., 'color.brand.acme.primary')")
+ name: str = Field(
+ ..., description="Token name in DSS namespace (e.g., 'color.brand.acme.primary')"
+ )
value: Any = Field(..., description="Token value")
type: str = Field(default="string", description="Value type (color, dimension, string, etc.)")
description: Optional[str] = Field(None)
@@ -101,7 +107,9 @@ class TranslationMappings(BaseModel):
components: Dict[str, str] = Field(
default_factory=dict, description="Component mappings: source_component -> dss_component"
)
- patterns: Dict[str, str] = Field(default_factory=dict, description="Pattern mappings: source_pattern -> dss_pattern")
+ patterns: Dict[str, str] = Field(
+ default_factory=dict, description="Pattern mappings: source_pattern -> dss_pattern"
+ )
class TranslationDictionary(BaseModel):
@@ -113,7 +121,9 @@ class TranslationDictionary(BaseModel):
schema_version: str = Field(
default="dss-translation-v1", alias="$schema", description="Schema version identifier"
)
- uuid: str = Field(default_factory=lambda: str(uuid4()), description="Unique identifier for this dictionary")
+ uuid: str = Field(
+ default_factory=lambda: str(uuid4()), description="Unique identifier for this dictionary"
+ )
project: str = Field(..., description="Project identifier")
source: TranslationSource = Field(..., description="Source type for this dictionary")
version: str = Field(default="1.0.0", description="Dictionary version")
@@ -126,10 +136,14 @@ class TranslationDictionary(BaseModel):
)
# Custom extensions
- custom_props: Dict[str, Any] = Field(default_factory=dict, description="Custom props not in DSS core (namespaced)")
+ custom_props: Dict[str, Any] = Field(
+ default_factory=dict, description="Custom props not in DSS core (namespaced)"
+ )
# Tracking
- unmapped: List[str] = Field(default_factory=list, description="Source tokens that couldn't be mapped")
+ unmapped: List[str] = Field(
+ default_factory=list, description="Source tokens that couldn't be mapped"
+ )
notes: List[str] = Field(default_factory=list, description="Human-readable notes")
@field_validator("custom_props")
@@ -154,12 +168,18 @@ class TranslationRegistry(BaseModel):
dictionaries: Dict[str, TranslationDictionary] = Field(
default_factory=dict, description="Loaded dictionaries by source type"
)
- combined_token_map: Dict[str, str] = Field(default_factory=dict, description="Combined source->DSS token mappings")
+ combined_token_map: Dict[str, str] = Field(
+ default_factory=dict, description="Combined source->DSS token mappings"
+ )
combined_component_map: Dict[str, str] = Field(
default_factory=dict, description="Combined source->DSS component mappings"
)
- all_custom_props: Dict[str, Any] = Field(default_factory=dict, description="Merged custom props from all dictionaries")
- conflicts: List[Dict[str, Any]] = Field(default_factory=list, description="Detected mapping conflicts")
+ all_custom_props: Dict[str, Any] = Field(
+ default_factory=dict, description="Merged custom props from all dictionaries"
+ )
+ conflicts: List[Dict[str, Any]] = Field(
+ default_factory=list, description="Detected mapping conflicts"
+ )
class ResolvedToken(BaseModel):
@@ -172,7 +192,9 @@ class ResolvedToken(BaseModel):
source_token: Optional[str] = Field(None, description="Original source token if translated")
source_type: Optional[TranslationSource] = Field(None, description="Source type if translated")
is_custom: bool = Field(default=False, description="Whether this is a custom prop")
- provenance: List[str] = Field(default_factory=list, description="Resolution chain for debugging")
+ provenance: List[str] = Field(
+ default_factory=list, description="Resolution chain for debugging"
+ )
class ResolvedTheme(BaseModel):
@@ -185,5 +207,7 @@ class ResolvedTheme(BaseModel):
base_theme: str = Field(..., description="Base theme name (light/dark)")
tokens: Dict[str, ResolvedToken] = Field(default_factory=dict)
custom_props: Dict[str, ResolvedToken] = Field(default_factory=dict)
- translations_applied: List[str] = Field(default_factory=list, description="List of translation dictionaries applied")
+ translations_applied: List[str] = Field(
+ default_factory=list, description="List of translation dictionaries applied"
+ )
resolved_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
diff --git a/dss/translations/resolver.py b/dss/translations/resolver.py
index a2146b9..c18e223 100644
--- a/dss/translations/resolver.py
+++ b/dss/translations/resolver.py
@@ -1,5 +1,5 @@
"""
-Token Resolver
+Token Resolver.
Resolves tokens between source formats and DSS canonical structure.
Supports bidirectional translation.
@@ -80,7 +80,9 @@ class TokenResolver:
normalized = self._normalize_token_name(source_token)
return self.registry.combined_token_map.get(normalized)
- def resolve_to_source(self, dss_token: str, source_type: Union[str, TranslationSource]) -> Optional[str]:
+ def resolve_to_source(
+ self, dss_token: str, source_type: Union[str, TranslationSource]
+ ) -> Optional[str]:
"""
Resolve DSS token to source format (reverse translation).
diff --git a/dss/translations/validator.py b/dss/translations/validator.py
index 7f724df..a4a7dfb 100644
--- a/dss/translations/validator.py
+++ b/dss/translations/validator.py
@@ -1,18 +1,17 @@
"""
-Translation Dictionary Validator
+Translation Dictionary Validator.
Validates translation dictionary schema and semantic correctness.
"""
import json
import re
-from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import ValidationError as PydanticValidationError
from .canonical import DSS_CANONICAL_COMPONENTS, DSS_CANONICAL_TOKENS
-from .models import TranslationDictionary, TranslationSource
+from .models import TranslationDictionary
class ValidationError:
diff --git a/dss/translations/writer.py b/dss/translations/writer.py
index 346e855..c9ce08b 100644
--- a/dss/translations/writer.py
+++ b/dss/translations/writer.py
@@ -1,5 +1,5 @@
"""
-Translation Dictionary Writer
+Translation Dictionary Writer.
Writes and updates translation dictionary files.
"""
@@ -200,7 +200,9 @@ class TranslationDictionaryWriter:
await self.update(source=source, custom_props={prop_name: prop_value})
- async def remove_mapping(self, source: Union[str, TranslationSource], source_token: str) -> None:
+ async def remove_mapping(
+ self, source: Union[str, TranslationSource], source_token: str
+ ) -> None:
"""
Remove a token mapping from a dictionary.
diff --git a/dss/validators/__init__.py b/dss/validators/__init__.py
index c8aced2..0c26c27 100644
--- a/dss/validators/__init__.py
+++ b/dss/validators/__init__.py
@@ -1,5 +1,5 @@
-"""Validation logic for projects, components, and themes"""
+"""Validation logic for projects, components, and themes."""
-from .schema import ProjectValidator, ValidationResult, ValidationError, ValidationStage
+from .schema import ProjectValidator, ValidationError, ValidationResult, ValidationStage
__all__ = ["ProjectValidator", "ValidationResult", "ValidationError", "ValidationStage"]
diff --git a/dss/validators/schema.py b/dss/validators/schema.py
index ab04d6f..262d0c7 100644
--- a/dss/validators/schema.py
+++ b/dss/validators/schema.py
@@ -23,15 +23,17 @@ Stages:
from enum import Enum
from typing import Any, Dict, List, Optional
-from pydantic import BaseModel, Field, ConfigDict, ValidationError as PydanticValidationError
+
+from pydantic import BaseModel, ConfigDict, Field
+from pydantic import ValidationError as PydanticValidationError
from dss.models.project import Project
-from dss.models.theme import Theme, DesignToken, TokenCategory
-from dss.models.component import Component
+from dss.models.theme import TokenCategory
class ValidationStage(str, Enum):
- """Validation pipeline stages"""
+ """Validation pipeline stages."""
+
SCHEMA = "schema"
STRUCTURE = "structure"
TOKEN_VALIDATION = "token_validation"
@@ -41,6 +43,7 @@ class ValidationStage(str, Enum):
class ValidationError(BaseModel):
"""Single validation error from the pipeline."""
+
model_config = ConfigDict(arbitrary_types_allowed=True)
stage: ValidationStage = Field(..., description="Validation stage where error occurred")
@@ -55,21 +58,24 @@ class ValidationError(BaseModel):
class ValidationResult(BaseModel):
"""Complete result from validation pipeline."""
+
model_config = ConfigDict(arbitrary_types_allowed=True)
is_valid: bool = Field(..., description="Whether validation passed without errors")
stage: ValidationStage = Field(..., description="Which validation stage completed")
- errors: List[ValidationError] = Field(default_factory=list, description="All validation errors detected")
+ errors: List[ValidationError] = Field(
+ default_factory=list, description="All validation errors detected"
+ )
def add_error(self, stage: ValidationStage, message: str, field: Optional[str] = None):
- """Add validation error"""
+ """Add validation error."""
self.errors.append(ValidationError(stage=stage, message=message, field=field))
self.is_valid = False
class ProjectValidator:
"""
- 4-stage validation pipeline for DSS projects
+ 4-stage validation pipeline for DSS projects.
Stage 1: Schema validation (JSON structure)
Stage 2: Structure validation (required fields)
@@ -79,7 +85,7 @@ class ProjectValidator:
def validate(self, data: Dict[str, Any]) -> ValidationResult:
"""
- Run full immune system validation pipeline
+ Run full immune system validation pipeline.
The DSS immune system antibodies check the data through 4 stages,
detecting pathogens (invalid data) before they infect the organism.
@@ -153,24 +159,16 @@ class ProjectValidator:
result.add_error(
ValidationStage.STRUCTURE,
f"Required field '{field}' is missing or empty",
- field
+ field,
)
# Check theme structure
if "theme" in data:
theme_data = data["theme"]
if not isinstance(theme_data, dict):
- result.add_error(
- ValidationStage.STRUCTURE,
- "Theme must be an object",
- "theme"
- )
+ result.add_error(ValidationStage.STRUCTURE, "Theme must be an object", "theme")
elif "name" not in theme_data:
- result.add_error(
- ValidationStage.STRUCTURE,
- "Theme must have a name",
- "theme.name"
- )
+ result.add_error(ValidationStage.STRUCTURE, "Theme must have a name", "theme.name")
return result.is_valid
@@ -189,7 +187,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.TOKEN_VALIDATION,
"Tokens must be defined as an object",
- "theme.tokens"
+ "theme.tokens",
)
return False
@@ -199,8 +197,8 @@ class ProjectValidator:
if not isinstance(token_data, dict):
result.add_error(
ValidationStage.TOKEN_VALIDATION,
- f"Token must be defined as an object",
- f"theme.tokens.{token_name}"
+ "Token must be defined as an object",
+ f"theme.tokens.{token_name}",
)
continue
@@ -209,7 +207,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.TOKEN_VALIDATION,
"Token value cannot be empty",
- f"theme.tokens.{token_name}.value"
+ f"theme.tokens.{token_name}.value",
)
# Check token references (format: {token-name})
@@ -219,7 +217,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.TOKEN_VALIDATION,
f"Referenced token '{referenced_token}' does not exist",
- f"theme.tokens.{token_name}.value"
+ f"theme.tokens.{token_name}.value",
)
# Validate category is a valid enum value
@@ -232,7 +230,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.TOKEN_VALIDATION,
f"Category '{category}' is invalid. Valid options: {', '.join(valid_categories)}",
- f"theme.tokens.{token_name}.category"
+ f"theme.tokens.{token_name}.category",
)
return result.is_valid
@@ -252,7 +250,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.COMPONENT_VALIDATION,
"Components must be defined as an array",
- "components"
+ "components",
)
return False
@@ -263,7 +261,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.COMPONENT_VALIDATION,
"Component must be defined as an object",
- f"components[{i}]"
+ f"components[{i}]",
)
continue
@@ -283,7 +281,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.COMPONENT_VALIDATION,
"Dependencies must be defined as an array",
- f"{comp_name}.dependencies"
+ f"{comp_name}.dependencies",
)
continue
@@ -293,7 +291,7 @@ class ProjectValidator:
result.add_error(
ValidationStage.COMPONENT_VALIDATION,
f"Dependency '{dep}' does not exist",
- f"{comp_name}.dependencies"
+ f"{comp_name}.dependencies",
)
return result.is_valid
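For orientation, here is a minimal usage sketch of the validator changed above. Only validate(), add_error(), and the errors list appear in this diff, so the import path and the sample payload shape are assumptions:

# Hedged sketch: driving the 4-stage pipeline end to end.
# Assumption: ProjectValidator is importable from a `validators` module;
# adjust to wherever the class actually lives in the repo.
from validators import ProjectValidator

validator = ProjectValidator()
result = validator.validate(
    {
        "name": "demo-project",
        "theme": {
            "name": "default",
            "tokens": {
                "color-primary": {"value": "#3B82F6", "category": "color"},
                # Uses the {token-name} reference format checked in stage 3;
                # this one points at a token that does not exist.
                "color-accent": {"value": "{color-missing}", "category": "color"},
            },
        },
        "components": [],
    }
)
if not result.is_valid:
    for err in result.errors:
        print(f"[{err.stage.value}] {err.field}: {err.message}")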
diff --git a/examples/01_basic_ingestion.py b/examples/01_basic_ingestion.py
index 0789430..9bed345 100755
--- a/examples/01_basic_ingestion.py
+++ b/examples/01_basic_ingestion.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
-Example 1: Basic Token Ingestion
+Example 1: Basic Token Ingestion.
Shows how to ingest design tokens from different sources.
"""
@@ -62,19 +62,18 @@ async def main():
# 3. JSON Tokens (W3C Format)
print("\n3. JSON Design Tokens (W3C)")
print("-" * 40)
- from tools.ingest.json_tokens import JSONTokenSource
import json
+ from tools.ingest.json_tokens import JSONTokenSource
+
json_content = {
"color": {
"primary": {
"500": {"value": "#3B82F6", "type": "color"},
- "600": {"value": "#2563EB", "type": "color"}
+ "600": {"value": "#2563EB", "type": "color"},
}
},
- "spacing": {
- "md": {"value": "16px", "type": "dimension"}
- }
+ "spacing": {"md": {"value": "16px", "type": "dimension"}},
}
json_parser = JSONTokenSource()
@@ -85,7 +84,9 @@ async def main():
print(f" {token.name} = {token.value} ({token.type.value})")
print("\n" + "=" * 60)
- print(f"Total tokens extracted: {len(css_result.tokens) + len(scss_result.tokens) + len(json_result.tokens)}")
+ print(
+ f"Total tokens extracted: {len(css_result.tokens) + len(scss_result.tokens) + len(json_result.tokens)}"
+ )
print("=" * 60)
diff --git a/examples/02_token_merge.py b/examples/02_token_merge.py
index 4d7057a..be4e35d 100755
--- a/examples/02_token_merge.py
+++ b/examples/02_token_merge.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
-Example 2: Token Merging with Conflict Resolution
+Example 2: Token Merging with Conflict Resolution.
Shows how to merge tokens from multiple sources using different strategies.
"""
@@ -17,28 +17,40 @@ async def main():
print("EXAMPLE 2: Token Merging & Conflict Resolution")
print("=" * 60)
- from tools.ingest.merge import TokenMerger, MergeStrategy
- from tools.ingest.base import TokenCollection, DesignToken, TokenType
+ from tools.ingest.base import DesignToken, TokenCollection, TokenType
+ from tools.ingest.merge import MergeStrategy, TokenMerger
# Create tokens from different sources
print("\n1. Creating token collections from different sources...")
print("-" * 60)
- css_tokens = TokenCollection([
- DesignToken(name="color.primary", value="#FF0000", type=TokenType.COLOR, source="css"),
- DesignToken(name="color.secondary", value="#00FF00", type=TokenType.COLOR, source="css"),
- DesignToken(name="spacing.md", value="16px", type=TokenType.SPACING, source="css"),
- ])
+ css_tokens = TokenCollection(
+ [
+ DesignToken(name="color.primary", value="#FF0000", type=TokenType.COLOR, source="css"),
+ DesignToken(
+ name="color.secondary", value="#00FF00", type=TokenType.COLOR, source="css"
+ ),
+ DesignToken(name="spacing.md", value="16px", type=TokenType.SPACING, source="css"),
+ ]
+ )
- figma_tokens = TokenCollection([
- DesignToken(name="color.primary", value="#3B82F6", type=TokenType.COLOR, source="figma"),
- DesignToken(name="color.accent", value="#F59E0B", type=TokenType.COLOR, source="figma"),
- ])
+ figma_tokens = TokenCollection(
+ [
+ DesignToken(
+ name="color.primary", value="#3B82F6", type=TokenType.COLOR, source="figma"
+ ),
+ DesignToken(name="color.accent", value="#F59E0B", type=TokenType.COLOR, source="figma"),
+ ]
+ )
- tailwind_tokens = TokenCollection([
- DesignToken(name="color.primary", value="#2563EB", type=TokenType.COLOR, source="tailwind"),
- DesignToken(name="spacing.lg", value="24px", type=TokenType.SPACING, source="tailwind"),
- ])
+ tailwind_tokens = TokenCollection(
+ [
+ DesignToken(
+ name="color.primary", value="#2563EB", type=TokenType.COLOR, source="tailwind"
+ ),
+ DesignToken(name="spacing.lg", value="24px", type=TokenType.SPACING, source="tailwind"),
+ ]
+ )
print(f"CSS: {len(css_tokens.tokens)} tokens")
print(f"Figma: {len(figma_tokens.tokens)} tokens")
@@ -67,7 +79,9 @@ async def main():
print(f" • {conflict.token_name}:")
print(f" Existing: {conflict.existing.value} (from {conflict.existing.source})")
print(f" Incoming: {conflict.incoming.value} (from {conflict.incoming.source})")
- print(f" ✓ Chose: {conflict.resolved_token.value} (from {conflict.resolved_token.source})")
+ print(
+ f" ✓ Chose: {conflict.resolved_token.value} (from {conflict.resolved_token.source})"
+ )
# Show final token values
print("\nFinal Tokens:")
diff --git a/examples/03_project_analysis.py b/examples/03_project_analysis.py
index cc1afcc..02208be 100755
--- a/examples/03_project_analysis.py
+++ b/examples/03_project_analysis.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
-Example 3: Project Analysis & Quick Wins
+Example 3: Project Analysis & Quick Wins.
Shows how to analyze a React project and identify improvement opportunities.
"""
@@ -28,8 +28,8 @@ async def main():
scanner = ProjectScanner(project_path)
analysis = await scanner.scan()
- print(f"✅ Project scanned successfully")
- print(f"\nProject Details:")
+ print("✅ Project scanned successfully")
+ print("\nProject Details:")
print(f" Framework: {analysis.framework}")
print(f" Styling: {analysis.styling_approach}")
print(f" Package Manager: {analysis.package_manager}")
@@ -63,7 +63,8 @@ async def main():
print("-" * 60)
high_roi = [
- w for w in wins.opportunities
+ w
+ for w in wins.opportunities
if w.impact.value == "high" and w.effort.value in ["low", "medium"]
]
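The high-ROI filter above is an impact/effort screen. The same idea as a small reusable helper; the opportunity shape is assumed from the attributes the example reads (impact.value, effort.value), so plain strings stand in for the enums:

from dataclasses import dataclass
from typing import List

@dataclass
class Opportunity:
    name: str
    impact: str  # "low" | "medium" | "high"
    effort: str  # "low" | "medium" | "high"

_SCORE = {"low": 1, "medium": 2, "high": 3}

def rank_quick_wins(opportunities: List[Opportunity]) -> List[Opportunity]:
    """High impact only, cheapest effort first."""
    wins = [
        o for o in opportunities
        if o.impact == "high" and o.effort in ("low", "medium")
    ]
    return sorted(wins, key=lambda o: _SCORE[o.effort])

print(rank_quick_wins([
    Opportunity("extract color tokens", "high", "low"),
    Opportunity("rewrite styling layer", "high", "high"),  # filtered out
    Opportunity("normalize spacing", "high", "medium"),
]))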
diff --git a/examples/merge_cases.py b/examples/merge_cases.py
index c5820e6..6fc08f0 100755
--- a/examples/merge_cases.py
+++ b/examples/merge_cases.py
@@ -1,5 +1,5 @@
"""
-DSS Merge Case Examples
+DSS Merge Case Examples.
Demonstrates how DSS handles multi-source token ingestion and translation
to the canonical DSS structure. Uses HeroUI and shadcn as examples.
@@ -8,18 +8,14 @@ Key Principle: DSS is MONOLITHIC. External systems translate TO us.
"""
import asyncio
-import json
-from pathlib import Path
# Add parent to path for imports
import sys
-sys.path.insert(0, str(Path(__file__).parent.parent / "tools"))
+from pathlib import Path
+
+# Path setup must run before the local "ingest" package is imported below.
+sys.path.insert(0, str(Path(__file__).parent.parent / "tools"))
-from ingest import (
- DesignToken, TokenCollection, TokenMerger, MergeStrategy,
- CSSTokenSource, JSONTokenSource
-)
-from ingest.base import TokenType, TokenCategory
+from ingest import CSSTokenSource, MergeStrategy, TokenMerger
# =============================================================================
@@ -162,27 +158,39 @@ LEGACY_CORPORATE_TOKENS = """
DSS_CANONICAL = {
"colors": {
- "primary": {"50": None, "100": None, "200": None, "300": None, "400": None,
- "500": None, "600": None, "700": None, "800": None, "900": None},
+ "primary": {
+ "50": None,
+ "100": None,
+ "200": None,
+ "300": None,
+ "400": None,
+ "500": None,
+ "600": None,
+ "700": None,
+ "800": None,
+ "900": None,
+ },
"secondary": {"500": None},
"success": {"500": None},
"warning": {"500": None},
"danger": {"500": None},
- "neutral": {"50": None, "100": None, "200": None, "300": None, "400": None,
- "500": None, "600": None, "700": None, "800": None, "900": None},
+ "neutral": {
+ "50": None,
+ "100": None,
+ "200": None,
+ "300": None,
+ "400": None,
+ "500": None,
+ "600": None,
+ "700": None,
+ "800": None,
+ "900": None,
+ },
},
- "spacing": {
- "xs": None, "sm": None, "md": None, "lg": None, "xl": None, "2xl": None
- },
- "radius": {
- "sm": None, "md": None, "lg": None, "xl": None, "full": None
- },
- "shadows": {
- "sm": None, "md": None, "lg": None, "xl": None
- },
- "typography": {
- "fontSize": {"xs": None, "sm": None, "base": None, "lg": None, "xl": None}
- }
+ "spacing": {"xs": None, "sm": None, "md": None, "lg": None, "xl": None, "2xl": None},
+ "radius": {"sm": None, "md": None, "lg": None, "xl": None, "full": None},
+ "shadows": {"sm": None, "md": None, "lg": None, "xl": None},
+ "typography": {"fontSize": {"xs": None, "sm": None, "base": None, "lg": None, "xl": None}},
}
@@ -190,9 +198,10 @@ DSS_CANONICAL = {
# TRANSLATION DICTIONARIES
# =============================================================================
+
def create_heroui_translation():
"""
- Translation dictionary: HeroUI → DSS Canonical
+ Translation dictionary: HeroUI → DSS Canonical.
HeroUI uses numeric color scales (like DSS) but different naming.
Mapping is mostly 1:1 with prefix removal.
@@ -214,33 +223,27 @@ def create_heroui_translation():
"--heroui-primary-700": "color.primary.700",
"--heroui-primary-800": "color.primary.800",
"--heroui-primary-900": "color.primary.900",
-
"--heroui-secondary-500": "color.secondary.500",
"--heroui-success-500": "color.success.500",
"--heroui-warning-500": "color.warning.500",
"--heroui-danger-500": "color.danger.500",
-
# Content layers → Neutral scale
"--heroui-content1": "color.neutral.50",
"--heroui-content2": "color.neutral.100",
"--heroui-content3": "color.neutral.200",
"--heroui-content4": "color.neutral.300",
-
# Layout tokens
"--heroui-radius-small": "radius.sm",
"--heroui-radius-medium": "radius.md",
"--heroui-radius-large": "radius.lg",
-
"--heroui-shadow-small": "shadow.sm",
"--heroui-shadow-medium": "shadow.md",
"--heroui-shadow-large": "shadow.lg",
-
# Typography
"--heroui-font-size-tiny": "typography.fontSize.xs",
"--heroui-font-size-small": "typography.fontSize.sm",
"--heroui-font-size-medium": "typography.fontSize.base",
"--heroui-font-size-large": "typography.fontSize.lg",
-
# Spacing
"--heroui-spacing-unit": "spacing.unit",
},
@@ -248,7 +251,7 @@ def create_heroui_translation():
"Button": "Button", # HeroUI Button → DSS Button
"Card": "Card",
"Input": "Input",
- }
+ },
},
"custom_props": {
# HeroUI-specific that don't map to DSS core
@@ -257,14 +260,14 @@ def create_heroui_translation():
"notes": [
"HeroUI uses numeric scales similar to DSS - easy mapping",
"Content layers (1-4) map to neutral scale",
- "Component mapping is mostly 1:1"
- ]
+ "Component mapping is mostly 1:1",
+ ],
}
def create_shadcn_translation():
"""
- Translation dictionary: shadcn → DSS Canonical
+ Translation dictionary: shadcn → DSS Canonical.
shadcn uses semantic naming (primary, secondary, muted) without scales.
We map to the 500 (default) value in DSS scales.
@@ -280,31 +283,22 @@ def create_shadcn_translation():
# Semantic colors → DSS scale defaults
"--background": "color.neutral.50",
"--foreground": "color.neutral.900",
-
"--primary": "color.primary.500",
"--primary-foreground": "color.primary.50",
-
"--secondary": "color.secondary.500",
"--secondary-foreground": "color.secondary.50",
-
"--muted": "color.neutral.200",
"--muted-foreground": "color.neutral.600",
-
"--accent": "color.accent.500",
"--accent-foreground": "color.accent.50",
-
"--destructive": "color.danger.500",
-
"--card": "color.neutral.50",
"--card-foreground": "color.neutral.900",
-
"--popover": "color.neutral.50",
"--popover-foreground": "color.neutral.900",
-
"--border": "color.neutral.200",
"--input": "color.neutral.200",
"--ring": "color.primary.500",
-
# Layout
"--radius": "radius.md",
},
@@ -315,7 +309,7 @@ def create_shadcn_translation():
"Input": "Input",
"Dialog": "Modal",
"Popover": "Popover",
- }
+ },
},
"custom_props": {
# shadcn-specific that don't exist in DSS
@@ -330,14 +324,14 @@ def create_shadcn_translation():
"shadcn is HEADLESS - no numeric color scales",
"Semantic names map to 500 (default) DSS values",
"foreground variants map to contrast colors (50)",
- "Chart colors are shadcn-specific custom props"
- ]
+ "Chart colors are shadcn-specific custom props",
+ ],
}
def create_legacy_translation():
"""
- Translation dictionary: Legacy Corporate → DSS Canonical
+ Translation dictionary: Legacy Corporate → DSS Canonical.
Messy legacy code with inconsistent naming needs careful mapping.
"""
@@ -352,23 +346,19 @@ def create_legacy_translation():
"--brand-dark-blue": "color.primary.700",
"--brand-light": "color.primary.100",
"--brandAccent": "color.warning.500", # camelCase → DSS
-
# Button colors → Component tokens
"--btn-primary-bg": "color.primary.500",
"--btn-primary-text": "color.neutral.50",
"--btn-secondary-bg": "color.neutral.100",
-
# Spacing chaos → DSS order
"--space-xs": "spacing.xs",
"--space-sm": "spacing.sm",
"--spacing-md": "spacing.md",
"--SPACING_LG": "spacing.lg",
-
# Typography normalization
"--font-base": "typography.fontSize.base",
"--fontSize-lg": "typography.fontSize.lg",
"--text-xl": "typography.fontSize.xl",
-
# Radius normalization
"--rounded": "radius.sm",
"--border-radius-md": "radius.md",
@@ -379,7 +369,7 @@ def create_legacy_translation():
".btn-secondary": "Button[variant=secondary]",
".card-wrapper": "Card",
".input-field": "Input",
- }
+ },
},
"custom_props": {
# ACME-specific branding that extends DSS
@@ -389,13 +379,13 @@ def create_legacy_translation():
"validation_warnings": [
"Inconsistent spacing prefixes detected",
"Mixed case conventions found",
- "Some values may need manual review"
+ "Some values may need manual review",
],
"notes": [
"Legacy system had 4 different naming conventions",
"All mapped to DSS canonical structure",
- "Brand-specific colors isolated in custom_props"
- ]
+ "Brand-specific colors isolated in custom_props",
+ ],
}
@@ -403,15 +393,16 @@ def create_legacy_translation():
# MERGE DEMONSTRATIONS
# =============================================================================
+
async def demonstrate_merge_heroui_shadcn():
"""
CASE: Merge HeroUI and shadcn tokens into DSS canonical structure.
This demonstrates how two different atomic structures become one.
"""
- print("\n" + "="*70)
+ print("\n" + "=" * 70)
print("MERGE CASE: HeroUI + shadcn → DSS Canonical")
- print("="*70)
+ print("=" * 70)
css_source = CSSTokenSource()
@@ -441,9 +432,9 @@ async def demonstrate_merge_heroui_shadcn():
print(f" Resolution: {conflict.resolution}")
# Demonstrate atomic structure difference
- print("\n" + "-"*70)
+ print("\n" + "-" * 70)
print("ATOMIC STRUCTURE COMPARISON:")
- print("-"*70)
+ print("-" * 70)
print("\n🎨 HeroUI (Numeric Scale System):")
print(" Uses: --heroui-primary-{50-900}")
@@ -468,9 +459,9 @@ async def demonstrate_merge_with_legacy():
Shows how messy legacy code gets normalized.
"""
- print("\n" + "="*70)
+ print("\n" + "=" * 70)
print("MERGE CASE: Legacy Corporate → DSS Canonical")
- print("="*70)
+ print("=" * 70)
css_source = CSSTokenSource()
@@ -499,12 +490,10 @@ async def demonstrate_merge_with_legacy():
async def demonstrate_conflict_strategies():
- """
- CASE: Show different merge strategies and their outcomes.
- """
- print("\n" + "="*70)
+ """CASE: Show different merge strategies and their outcomes."""
+ print("\n" + "=" * 70)
print("MERGE STRATEGIES COMPARISON")
- print("="*70)
+ print("=" * 70)
css_source = CSSTokenSource()
@@ -538,8 +527,7 @@ async def demonstrate_conflict_strategies():
result = merger.merge([collection_a, collection_b])
primary_token = next(
- (t for t in result.collection.tokens if 'primary' in t.name.lower()),
- None
+ (t for t in result.collection.tokens if "primary" in t.name.lower()), None
)
print(f"\n📋 {strategy.value}: {description}")
@@ -551,10 +539,10 @@ async def demonstrate_conflict_strategies():
async def main():
"""Run all merge demonstrations."""
- print("\n" + "="*70)
+ print("\n" + "=" * 70)
print("DSS MERGE CASE EXAMPLES")
print("Demonstrating Multi-Source Token Ingestion")
- print("="*70)
+ print("=" * 70)
# Run demonstrations
await demonstrate_merge_heroui_shadcn()
@@ -562,11 +550,12 @@ async def main():
await demonstrate_conflict_strategies()
# Summary
- print("\n" + "="*70)
+ print("\n" + "=" * 70)
print("SUMMARY: ATOMIC STRUCTURE DIFFERENCES")
- print("="*70)
+ print("=" * 70)
- print("""
+ print(
+ """
┌─────────────────────────────────────────────────────────────────────┐
│ ATOMIC STRUCTURE COMPARISON │
├─────────────────┬──────────────────────┬────────────────────────────┤
@@ -588,9 +577,11 @@ async def main():
│ DSS Mapping │ Direct 1:1 │ Expand to scales │
│ │ (strip prefix) │ (500 = default) │
└─────────────────┴──────────────────────┴────────────────────────────┘
- """)
+ """
+ )
- print("""
+ print(
+ """
┌─────────────────────────────────────────────────────────────────────┐
│ DSS TRANSLATION APPROACH │
├─────────────────────────────────────────────────────────────────────┤
@@ -606,7 +597,8 @@ async def main():
│ │ (IMMUTABLE) │ │
│ │ │ │
└─────────────────────────────────────────────────────────────────────┘
- """)
+ """
+ )
if __name__ == "__main__":
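All three dictionaries above reduce to the same operation: map vendor custom properties onto DSS canonical paths, and quarantine anything unmapped as custom props rather than forcing it into DSS core. A hedged sketch of that step (apply_translation is illustrative, not the ingest module's API; the divider prop and hex values are hypothetical):

from typing import Dict, Tuple

def apply_translation(raw_props: Dict, token_map: Dict) -> Tuple[Dict, Dict]:
    """Split raw custom properties into canonical tokens and custom props."""
    canonical, custom = {}, {}
    for prop, value in raw_props.items():
        if prop in token_map:
            canonical[token_map[prop]] = value
        else:
            custom[prop] = value  # preserved, never forced into DSS core
    return canonical, custom

heroui_map = {
    "--heroui-primary-500": "color.primary.500",
    "--heroui-radius-medium": "radius.md",
}
canonical, custom = apply_translation(
    {"--heroui-primary-500": "#006FEE", "--heroui-divider": "#11111126"},
    heroui_map,
)
# canonical == {"color.primary.500": "#006FEE"}
# custom == {"--heroui-divider": "#11111126"}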
diff --git a/scripts/figma-sync.py b/scripts/figma-sync.py
index 077d8b4..75e9799 100755
--- a/scripts/figma-sync.py
+++ b/scripts/figma-sync.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
-DSS Figma Sync CLI
+DSS Figma Sync CLI.
This script is a lightweight CLI wrapper around the FigmaTokenSource from the
dss.ingest module. It fetches tokens and components from Figma and saves them
@@ -10,22 +10,21 @@ The core extraction and processing logic resides in:
dss.ingest.sources.figma.FigmaTokenSource
"""
-import sys
-import os
-import json
-import asyncio
-from pathlib import Path
-from datetime import datetime
-from dataclasses import asdict
import argparse
+import asyncio
+import json
+import os
+import sys
+from pathlib import Path
+from typing import Dict  # load_config() below is annotated as -> Dict
# Ensure the project root is in the Python path
DSS_ROOT = Path(__file__).parent.parent
if str(DSS_ROOT) not in sys.path:
sys.path.insert(0, str(DSS_ROOT))
-from dss.ingest.sources.figma import FigmaTokenSource
-from dss.ingest.base import TokenCollection
+
+# These imports require the sys.path entry added above, so they stay below it.
+from dss.ingest.base import TokenCollection
+from dss.ingest.sources.figma import FigmaTokenSource
# =============================================================================
# CONFIGURATION
@@ -39,6 +38,7 @@ COMPONENTS_DIR = DSS_ROOT / ".dss/components"
# OUTPUT WRITER
# =============================================================================
+
class OutputWriter:
"""Writes extraction results to the DSS file structure."""
@@ -49,10 +49,10 @@ class OutputWriter:
"""Write TokenCollection to a structured JSON file."""
output_dir.mkdir(parents=True, exist_ok=True)
tokens_file = output_dir / "figma-tokens.json"
-
+
if self.verbose:
print(f" [OUT] Writing {len(collection)} tokens to {tokens_file}")
-
+
with open(tokens_file, "w") as f:
json.dump(json.loads(collection.to_json()), f, indent=2)
print(f" [OUT] Tokens: {tokens_file}")
@@ -61,18 +61,22 @@ class OutputWriter:
"""Write component registry."""
output_dir.mkdir(parents=True, exist_ok=True)
comp_file = output_dir / "figma-registry.json"
-
+
if self.verbose:
- print(f" [OUT] Writing {components.get('component_count', 0)} components to {comp_file}")
+ print(
+ f" [OUT] Writing {components.get('component_count', 0)} components to {comp_file}"
+ )
with open(comp_file, "w") as f:
json.dump(components, f, indent=2)
print(f" [OUT] Components: {comp_file}")
+
# =============================================================================
# MAIN ORCHESTRATOR
# =============================================================================
+
async def main():
"""Main CLI orchestration function."""
parser = argparse.ArgumentParser(description="DSS Intelligent Figma Sync")
@@ -95,7 +99,7 @@ async def main():
print("[ERROR] No Figma token found.", file=sys.stderr)
print(" Set FIGMA_TOKEN env var or add 'token' to .dss/config/figma.json", file=sys.stderr)
sys.exit(1)
-
+
print_header(file_key, token, args.force)
# --- Extraction ---
@@ -107,6 +111,7 @@ async def main():
# In verbose mode, print more details
if args.verbose:
import traceback
+
traceback.print_exc()
sys.exit(1)
@@ -120,13 +125,14 @@ async def main():
print_summary(
file_name=component_registry.get("file_name", "Unknown"),
token_count=len(token_collection),
- component_count=component_registry.get("component_count", 0)
+ component_count=component_registry.get("component_count", 0),
)
-
+
print("\n[OK] Sync successful!")
print(" Next: Run the translation and theming pipeline.")
sys.exit(0)
+
def load_config() -> Dict:
"""Load Figma config from .dss/config/figma.json."""
config_path = DSS_ROOT / ".dss/config/figma.json"
@@ -135,9 +141,12 @@ def load_config() -> Dict:
with open(config_path) as f:
return json.load(f)
except (json.JSONDecodeError, IOError) as e:
- print(f"[WARN] Could not read or parse config file: {config_path}\n{e}", file=sys.stderr)
+ print(
+ f"[WARN] Could not read or parse config file: {config_path}\n{e}", file=sys.stderr
+ )
return {}
+
def print_header(file_key: str, token: str, force: bool):
"""Prints the CLI header."""
print("╔══════════════════════════════════════════════════════════════╗")
@@ -148,6 +157,7 @@ def print_header(file_key: str, token: str, force: bool):
print(f" Force: {force}")
print("\n[1/3] Initializing Figma Ingestion Source...")
+
def print_summary(file_name: str, token_count: int, component_count: int):
"""Prints the final summary."""
print("\n" + "=" * 60)
diff --git a/scripts/generate-storybook.py b/scripts/generate-storybook.py
index 6bc872a..7a204dd 100755
--- a/scripts/generate-storybook.py
+++ b/scripts/generate-storybook.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
"""
-DSS Storybook Generator
+DSS Storybook Generator.
+
Generates Storybook stories from DSS tokens and component registry.
Hierarchy:
@@ -13,20 +14,17 @@ Usage: python3 scripts/generate-storybook.py [--output PATH] [--skin SKIN]
Default output: admin-ui/src/stories/
"""
-import sys
-import os
-import json
import argparse
-from pathlib import Path
+import json
from datetime import datetime
-from typing import Dict, Any, List
+from pathlib import Path
DSS_ROOT = Path(__file__).parent.parent
DSS_DATA = DSS_ROOT / ".dss"
def load_json(path: Path) -> dict:
- """Load JSON file, return empty dict if not found"""
+ """Load JSON file, return empty dict if not found."""
if not path.exists():
return {}
with open(path) as f:
@@ -34,12 +32,12 @@ def load_json(path: Path) -> dict:
def ensure_dir(path: Path):
- """Ensure directory exists"""
+ """Ensure directory exists."""
path.mkdir(parents=True, exist_ok=True)
def generate_color_primitives_story(primitives: dict, output_dir: Path):
- """Generate story for color primitives (full Tailwind palette)"""
+ """Generate story for color primitives (full Tailwind palette)."""
colors = primitives.get("color", {})
if not colors:
return
@@ -56,19 +54,23 @@ def generate_color_primitives_story(primitives: dict, output_dir: Path):
if name.startswith("_"):
continue
if isinstance(data, dict) and "value" in data:
- border = "border: 1px solid #e5e7eb;" if data["value"] in ["#ffffff", "transparent"] else ""
- base_swatches.append(f'''
+ border = (
+ "border: 1px solid #e5e7eb;" if data["value"] in ["#ffffff", "transparent"] else ""
+ )
+ base_swatches.append(
+ f"""
- ''')
+ """
+ )
if base_swatches:
- base_section = f'''
+ base_section = f"""
Base
{''.join(base_swatches)}
-
- '''
+ """
# Neutral scales
neutrals = colors.get("neutral", {})
@@ -78,25 +80,31 @@ def generate_color_primitives_story(primitives: dict, output_dir: Path):
continue
if isinstance(scale, dict):
shades = []
- for shade, data in sorted(scale.items(), key=lambda x: int(x[0]) if x[0].isdigit() else 0):
+ for shade, data in sorted(
+ scale.items(), key=lambda x: int(x[0]) if x[0].isdigit() else 0
+ ):
if isinstance(data, dict) and "value" in data:
text_color = "#000" if int(shade) < 500 else "#fff"
- shades.append(f'''
+ shades.append(
+ f"""
{shade}
-
- ''')
+ """
+ )
if shades:
- neutral_palettes.append(f'''
+ neutral_palettes.append(
+ f"""
{scale_name}
{''.join(shades)}
-
- ''')
+ """
+ )
if neutral_palettes:
- neutral_section = f'''
+ neutral_section = f"""
Neutral Scales
{''.join(neutral_palettes)}
-
- '''
+ """
# Semantic scales
semantics = colors.get("semantic", {})
@@ -106,27 +114,33 @@ def generate_color_primitives_story(primitives: dict, output_dir: Path):
continue
if isinstance(scale, dict):
shades = []
- for shade, data in sorted(scale.items(), key=lambda x: int(x[0]) if x[0].isdigit() else 0):
+ for shade, data in sorted(
+ scale.items(), key=lambda x: int(x[0]) if x[0].isdigit() else 0
+ ):
if isinstance(data, dict) and "value" in data:
text_color = "#000" if int(shade) < 500 else "#fff"
- shades.append(f'''
+ shades.append(
+ f"""