Initial commit: Clean DSS implementation

Migrated from design-system-swarm with fresh git history.
Old project history preserved in /home/overbits/apps/design-system-swarm

Core components:
- MCP Server (Python FastAPI with mcp 1.23.1)
- Claude Plugin (agents, commands, skills, strategies, hooks, core)
- DSS Backend (dss-mvp1 - token translation, Figma sync)
- Admin UI (Node.js/React)
- Server (Node.js/Express)
- Storybook integration (dss-mvp1/.storybook)

Self-contained configuration:
- All paths relative or use DSS_BASE_PATH=/home/overbits/dss
- PYTHONPATH configured for dss-mvp1 and dss-claude-plugin
- .env file with all configuration
- Claude plugin uses ${CLAUDE_PLUGIN_ROOT} for portability
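
A minimal sketch of how this resolves at runtime, assuming only the DSS_BASE_PATH default and the two PYTHONPATH entries named above (everything else is illustrative):

import os
import sys
from pathlib import Path

# DSS_BASE_PATH falls back to the documented install location
base = Path(os.environ.get("DSS_BASE_PATH", "/home/overbits/dss"))

# the two trees this commit adds to PYTHONPATH
sys.path.extend([str(base / "dss-mvp1"), str(base / "dss-claude-plugin")])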

Migration completed: $(date)
🤖 Clean migration with full functionality preserved
Commit 276ed71f31 (Digital Production Factory, 2025-12-09 18:45:48 -03:00)
884 changed files with 373737 additions and 0 deletions

__init__.py

@@ -0,0 +1,11 @@
"""
Documentation Generators
Extract structured data from source code and generate documentation.
"""
from .base_generator import DocGenerator
from .api_extractor import APIExtractor
from .mcp_extractor import MCPExtractor
__all__ = ['DocGenerator', 'APIExtractor', 'MCPExtractor']

api_extractor.py

@@ -0,0 +1,234 @@
#!/usr/bin/env python3
"""
API Extractor
Extract FastAPI route definitions from server.py files.
"""
import ast
import re
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging
from .base_generator import DocGenerator
logger = logging.getLogger(__name__)
class APIExtractor(DocGenerator):
    """
    Extract FastAPI endpoints from server.py files.

    Extracts:
    - Route paths and HTTP methods
    - Function names and docstrings
    - Route parameters
    - Response models
    """

    def extract(self, source_path: Path) -> Dict[str, Any]:
        """
        Extract API endpoints from FastAPI server file.

        Args:
            source_path: Path to server.py file

        Returns:
            Dictionary with extracted endpoint data
        """
        logger.info(f"Extracting API endpoints from {source_path}")

        with open(source_path, 'r') as f:
            source_code = f.read()

        tree = ast.parse(source_code)
        endpoints = []
        app_mounts = []

        # Find @app.get, @app.post, etc. decorators
        # (FastAPI route handlers may be declared sync or async)
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                endpoint = self._extract_endpoint(node, source_code)
                if endpoint:
                    endpoints.append(endpoint)

            # Find app.mount() calls for static files
            if isinstance(node, ast.Expr):
                mount = self._extract_mount(node)
                if mount:
                    app_mounts.append(mount)

        return {
            "source_file": str(source_path),
            "endpoints": endpoints,
            "mounts": app_mounts,
            "total_endpoints": len(endpoints),
            "total_mounts": len(app_mounts)
        }
    def _extract_endpoint(
        self,
        func_node: ast.FunctionDef,
        source_code: str
    ) -> Optional[Dict[str, Any]]:
        """
        Extract endpoint information from function with decorator.

        Args:
            func_node: AST function definition node
            source_code: Full source code (for extracting decorator args)

        Returns:
            Endpoint data or None
        """
        for decorator in func_node.decorator_list:
            # Check if decorator is app.get, app.post, etc.
            if isinstance(decorator, ast.Call):
                if isinstance(decorator.func, ast.Attribute):
                    # app.get("/path")
                    if decorator.func.attr in ['get', 'post', 'put', 'delete', 'patch']:
                        method = decorator.func.attr.upper()
                        path = self._extract_route_path(decorator)
                        return {
                            "path": path,
                            "method": method,
                            "function": func_node.name,
                            "docstring": ast.get_docstring(func_node),
                            "parameters": self._extract_parameters(func_node),
                            "line_number": func_node.lineno
                        }
        return None
    def _extract_route_path(self, decorator: ast.Call) -> str:
        """
        Extract route path from decorator arguments.

        Args:
            decorator: AST Call node for decorator

        Returns:
            Route path string
        """
        if decorator.args:
            first_arg = decorator.args[0]
            if isinstance(first_arg, ast.Constant):
                return first_arg.value
            elif isinstance(first_arg, ast.Str):  # Python 3.7 compatibility
                return first_arg.s
        return "/"

    def _extract_parameters(self, func_node: ast.FunctionDef) -> List[Dict[str, str]]:
        """
        Extract function parameters.

        Args:
            func_node: AST function definition node

        Returns:
            List of parameter dictionaries
        """
        params = []
        for arg in func_node.args.args:
            param = {"name": arg.arg}
            # Extract type annotation if present
            if arg.annotation:
                param["type"] = ast.unparse(arg.annotation) if hasattr(ast, 'unparse') else str(arg.annotation)
            params.append(param)
        return params
    def _extract_mount(self, expr_node: ast.Expr) -> Optional[Dict[str, Any]]:
        """
        Extract app.mount() call for static files.

        Args:
            expr_node: AST expression node

        Returns:
            Mount data or None
        """
        if isinstance(expr_node.value, ast.Call):
            call = expr_node.value
            # Check if it's app.mount()
            if isinstance(call.func, ast.Attribute):
                if call.func.attr == 'mount' and len(call.args) >= 2:
                    path_arg = call.args[0]
                    mount_path = None
                    if isinstance(path_arg, ast.Constant):
                        mount_path = path_arg.value
                    elif isinstance(path_arg, ast.Str):
                        mount_path = path_arg.s
                    if mount_path:
                        return {
                            "path": mount_path,
                            "type": "StaticFiles",
                            "line_number": expr_node.lineno
                        }
        return None
    def transform(self, extracted_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Transform extracted API data to .knowledge/dss-architecture.json schema.

        Args:
            extracted_data: Raw extracted endpoint data

        Returns:
            Transformed data for knowledge base
        """
        # Read existing architecture.json
        target_path = self.project_root / ".knowledge" / "dss-architecture.json"
        existing = self.read_existing_target(target_path)

        if not existing:
            # Create new structure
            existing = {
                "$schema": "dss-knowledge-v1",
                "type": "architecture",
                "version": "1.0.0",
                "last_updated": None,
                "modules": []
            }

        # Ensure modules list exists
        if "modules" not in existing:
            existing["modules"] = []

        # Create REST API module data
        rest_api_module = {
            "name": "rest_api",
            "path": extracted_data["source_file"],
            "purpose": "FastAPI server providing REST API and static file serving",
            "port": 3456,
            "endpoints": extracted_data["endpoints"],
            "mounts": extracted_data["mounts"],
            "total_endpoints": extracted_data["total_endpoints"]
        }

        # Update or append REST API module
        rest_api_index = next(
            (i for i, m in enumerate(existing["modules"]) if m.get("name") == "rest_api"),
            None
        )
        if rest_api_index is not None:
            existing["modules"][rest_api_index] = rest_api_module
        else:
            existing["modules"].append(rest_api_module)

        existing["last_updated"] = self.metadata["generated_at"]
        return existing
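
A minimal usage sketch for the extractor above; the import path and the server.py location are assumptions, everything else follows the signatures defined in this file and in the DocGenerator base class:

from pathlib import Path

from generators.api_extractor import APIExtractor  # import path assumed, not shown in this commit view

project_root = Path("/home/overbits/dss")  # DSS_BASE_PATH default from the commit message
architecture = APIExtractor(project_root).run(
    source_path=project_root / "server.py",  # illustrative location of the FastAPI app
    target_path=project_root / ".knowledge" / "dss-architecture.json",
)
print(f"Documented {len(architecture['modules'])} module(s)")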

base_generator.py

@@ -0,0 +1,191 @@
#!/usr/bin/env python3
"""
Base Documentation Generator
Abstract base class for all documentation generators.
"""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Any, Optional
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class DocGenerator(ABC):
    """
    Abstract base class for documentation generators.

    Subclasses must implement:
    - extract(): Extract data from source file
    - transform(): Transform extracted data to target schema

    The base class provides load() (write transformed data to the target file)
    and run() (the full Extract → Transform → Load pipeline).
    """

    def __init__(self, project_root: Path):
        """
        Initialize generator.

        Args:
            project_root: Project root directory
        """
        self.project_root = Path(project_root)
        self.metadata = {
            "generator": self.__class__.__name__,
            "generated_at": None,
            "source_files": [],
            "version": "1.0.0"
        }
    @abstractmethod
    def extract(self, source_path: Path) -> Dict[str, Any]:
        """
        Extract data from source file.

        Args:
            source_path: Path to source file

        Returns:
            Extracted data dictionary
        """
        pass

    @abstractmethod
    def transform(self, extracted_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Transform extracted data to target schema.

        Args:
            extracted_data: Raw extracted data

        Returns:
            Transformed data matching target schema
        """
        pass
    def load(self, transformed_data: Dict[str, Any], target_path: Path) -> None:
        """
        Write transformed data to target file.

        Args:
            transformed_data: Data to write
            target_path: Target file path
        """
        target_path.parent.mkdir(parents=True, exist_ok=True)

        # Backup existing file if it exists
        if target_path.exists():
            backup_dir = self.project_root / ".dss" / "backups" / "knowledge"
            backup_dir.mkdir(parents=True, exist_ok=True)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_path = backup_dir / f"{target_path.stem}_{timestamp}.json"
            with open(target_path, 'r') as f:
                backup_data = f.read()
            with open(backup_path, 'w') as f:
                f.write(backup_data)
            logger.info(f"Backed up {target_path} to {backup_path}")

        # Write new data
        with open(target_path, 'w') as f:
            json.dump(transformed_data, f, indent=2)
        logger.info(f"Generated documentation: {target_path}")
    def run(self, source_path: Path, target_path: Path) -> Dict[str, Any]:
        """
        Execute full ETL pipeline: Extract → Transform → Load

        Args:
            source_path: Source file to extract from
            target_path: Target file to write to

        Returns:
            Generated documentation data
        """
        logger.info(f"Running {self.__class__.__name__}: {source_path}{target_path}")

        # Stamp the run up front so transform() can reference metadata["generated_at"]
        self.metadata["generated_at"] = datetime.now().isoformat()

        # Extract
        extracted_data = self.extract(source_path)
        self.metadata["source_files"].append(str(source_path))

        # Transform
        transformed_data = self.transform(extracted_data)

        # Add metadata
        transformed_data["_metadata"] = self.metadata

        # Load
        self.load(transformed_data, target_path)

        return transformed_data
    def validate_json_schema(self, data: Dict[str, Any], schema: Dict[str, Any]) -> bool:
        """
        Validate data against JSON schema.

        Args:
            data: Data to validate
            schema: JSON schema

        Returns:
            True if valid, False otherwise
        """
        try:
            import jsonschema
            jsonschema.validate(instance=data, schema=schema)
            return True
        except ImportError:
            logger.warning("jsonschema not installed, skipping validation")
            return True
        except jsonschema.ValidationError as e:
            logger.error(f"Schema validation failed: {e}")
            return False
    def read_existing_target(self, target_path: Path) -> Optional[Dict[str, Any]]:
        """
        Read existing target file if it exists.

        Args:
            target_path: Target file path

        Returns:
            Existing data or None
        """
        if not target_path.exists():
            return None
        try:
            with open(target_path, 'r') as f:
                return json.load(f)
        except Exception as e:
            logger.error(f"Failed to read existing target {target_path}: {e}")
            return None

    def merge_with_existing(
        self,
        new_data: Dict[str, Any],
        existing_data: Optional[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Merge new data with existing data (incremental update).

        Args:
            new_data: New extracted data
            existing_data: Existing data from target file

        Returns:
            Merged data
        """
        if not existing_data:
            return new_data

        # Default: Replace completely
        # Subclasses can override for smarter merging
        return new_data
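
Because extract() and transform() are the only abstract methods, a new generator needs just those two pieces; load() and run() are inherited. A hypothetical subclass sketch (ChangelogExtractor, its import path, and its file paths are illustrative, not part of this commit):

from pathlib import Path
from typing import Any, Dict

from generators.base_generator import DocGenerator  # import path assumed


class ChangelogExtractor(DocGenerator):
    """Illustrative generator: collects second-level headings from a CHANGELOG file."""

    def extract(self, source_path: Path) -> Dict[str, Any]:
        text = source_path.read_text()
        entries = [line[3:] for line in text.splitlines() if line.startswith("## ")]
        return {"source_file": str(source_path), "entries": entries}

    def transform(self, extracted_data: Dict[str, Any]) -> Dict[str, Any]:
        return {
            "$schema": "dss-knowledge-v1",
            "type": "changelog",
            "total_entries": len(extracted_data["entries"]),
            "entries": extracted_data["entries"],
        }


# run() drives Extract → Transform → Load and writes the JSON target with a backup:
# ChangelogExtractor(Path(".")).run(Path("CHANGELOG.md"), Path(".knowledge/changelog.json"))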

mcp_extractor.py

@@ -0,0 +1,273 @@
#!/usr/bin/env python3
"""
MCP Extractor
Extract MCP tool definitions from dss-mcp-server.py.
"""
import ast
import re
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging
from .base_generator import DocGenerator
logger = logging.getLogger(__name__)
class MCPExtractor(DocGenerator):
    """
    Extract MCP tool definitions from dss-mcp-server.py.

    Extracts:
    - Tool names and descriptions
    - Input parameters and schemas
    - Tool handlers
    - Tool categories
    """

    def extract(self, source_path: Path) -> Dict[str, Any]:
        """
        Extract MCP tools from dss-mcp-server.py.

        Args:
            source_path: Path to dss-mcp-server.py

        Returns:
            Dictionary with extracted tool data
        """
        logger.info(f"Extracting MCP tools from {source_path}")

        with open(source_path, 'r') as f:
            source_code = f.read()

        # Extract tool definitions (Tool objects)
        tools = self._extract_tool_definitions(source_code)

        # Extract tool handlers (elif name == "tool_name" blocks)
        handlers = self._extract_tool_handlers(source_code)

        # Match tools with handlers
        for tool in tools:
            if tool["name"] in handlers:
                tool["handler"] = handlers[tool["name"]]

        return {
            "source_file": str(source_path),
            "tools": tools,
            "total_tools": len(tools),
            "categories": self._categorize_tools(tools)
        }
    def _extract_tool_definitions(self, source_code: str) -> List[Dict[str, Any]]:
        """
        Extract Tool() object definitions from source code.

        Args:
            source_code: Full source code

        Returns:
            List of tool dictionaries
        """
        tools = []

        # Pattern: Tool(name="...", description="...", inputSchema={...})
        tool_pattern = re.compile(
            r'Tool\s*\(\s*name\s*=\s*["\']([^"\']+)["\']\s*,\s*description\s*=\s*["\']([^"\']+)["\']',
            re.MULTILINE | re.DOTALL
        )

        for match in tool_pattern.finditer(source_code):
            tool_name = match.group(1)
            tool_description = match.group(2)

            # Extract input schema (complex, best effort)
            tool_start = match.start()
            tool_block = source_code[tool_start:tool_start + 2000]

            # Find inputSchema
            input_schema = self._extract_input_schema(tool_block)

            tools.append({
                "name": tool_name,
                "description": tool_description,
                "input_schema": input_schema,
                "category": self._infer_category(tool_name)
            })

        return tools
    def _extract_input_schema(self, tool_block: str) -> Dict[str, Any]:
        """
        Extract inputSchema from Tool() definition.

        Args:
            tool_block: Code block containing Tool() definition

        Returns:
            Input schema dictionary (best effort)
        """
        # Look for inputSchema={...}
        schema_match = re.search(r'inputSchema\s*=\s*\{', tool_block)
        if not schema_match:
            return {}

        # This is complex - just extract parameter names for now
        properties_match = re.search(
            r'"properties"\s*:\s*\{([^}]+)\}',
            tool_block,
            re.DOTALL
        )
        if properties_match:
            properties_block = properties_match.group(1)
            # Extract parameter names (keys in properties)
            param_names = re.findall(r'"([^"]+)"\s*:', properties_block)
            return {
                "type": "object",
                "properties": {name: {"type": "string"} for name in param_names}
            }

        return {}
    def _extract_tool_handlers(self, source_code: str) -> Dict[str, Dict[str, Any]]:
        """
        Extract tool handler code from call_tool() function.

        Args:
            source_code: Full source code

        Returns:
            Dictionary mapping tool name to handler info
        """
        handlers = {}

        # Pattern: elif name == "tool_name":
        handler_pattern = re.compile(
            r'elif\s+name\s*==\s*["\']([^"\']+)["\']:',
            re.MULTILINE
        )

        for match in handler_pattern.finditer(source_code):
            tool_name = match.group(1)
            line_number = source_code[:match.start()].count('\n') + 1
            handlers[tool_name] = {
                "line_number": line_number,
                "implemented": True
            }

        return handlers
    def _infer_category(self, tool_name: str) -> str:
        """
        Infer tool category from name.

        Args:
            tool_name: Tool name

        Returns:
            Category string
        """
        if "project" in tool_name or "create" in tool_name:
            return "project_management"
        elif "figma" in tool_name:
            return "figma_integration"
        elif "token" in tool_name or "extract" in tool_name:
            return "token_ingestion"
        elif "analyze" in tool_name or "audit" in tool_name:
            return "analysis"
        elif "storybook" in tool_name:
            return "storybook"
        elif "devtools" in tool_name or "browser" in tool_name:
            return "browser_tools"
        elif "context" in tool_name or "resolve" in tool_name or "compiler" in tool_name:
            return "context_compiler"
        else:
            return "utilities"

    def _categorize_tools(self, tools: List[Dict[str, Any]]) -> Dict[str, List[str]]:
        """
        Group tools by category.

        Args:
            tools: List of tool dictionaries

        Returns:
            Dictionary mapping category to tool names
        """
        categories = {}
        for tool in tools:
            category = tool["category"]
            if category not in categories:
                categories[category] = []
            categories[category].append(tool["name"])
        return categories
    def transform(self, extracted_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Transform extracted MCP data to .knowledge/mcp-tools.json schema.

        Args:
            extracted_data: Raw extracted tool data

        Returns:
            Transformed data for knowledge base
        """
        # Read existing mcp-tools.json
        target_path = self.project_root / ".knowledge" / "mcp-tools.json"
        existing = self.read_existing_target(target_path)

        if existing:
            # Merge: preserve manual sections, update extracted tools
            result = existing.copy()
            result["tools"] = self._format_tools(extracted_data["tools"])
            result["total_tools"] = extracted_data["total_tools"]
            result["categories"] = extracted_data["categories"]
            result["last_updated"] = self.metadata["generated_at"]
        else:
            # Create new structure
            result = {
                "$schema": "dss-knowledge-v1",
                "type": "mcp_tools",
                "version": "1.0.0",
                "last_updated": self.metadata["generated_at"],
                "architecture": "MCP-first - All work via MCP tools, no REST endpoints",
                "tools": self._format_tools(extracted_data["tools"]),
                "total_tools": extracted_data["total_tools"],
                "categories": extracted_data["categories"]
            }

        return result
    def _format_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Format tools for knowledge base schema.

        Args:
            tools: Raw tool data

        Returns:
            Formatted tool list
        """
        formatted = []
        for tool in tools:
            formatted_tool = {
                "name": tool["name"],
                "description": tool["description"],
                "category": tool["category"],
                "parameters": list(tool["input_schema"].get("properties", {}).keys())
            }
            if "handler" in tool:
                formatted_tool["handler_line"] = tool["handler"]["line_number"]
            formatted.append(formatted_tool)
        return formatted
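
A usage sketch mirroring the APIExtractor example above; the import path is assumed, and dss-mcp-server.py is the source file named in the extractor's own docstring:

from pathlib import Path

from generators.mcp_extractor import MCPExtractor  # import path assumed

project_root = Path("/home/overbits/dss")
tools_doc = MCPExtractor(project_root).run(
    source_path=project_root / "dss-mcp-server.py",
    target_path=project_root / ".knowledge" / "mcp-tools.json",
)
for category, names in tools_doc["categories"].items():
    print(f"{category}: {len(names)} tool(s)")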