Initial commit: Clean DSS implementation
Migrated from design-system-swarm with fresh git history.
Old project history preserved in /home/overbits/apps/design-system-swarm
Core components:
- MCP Server (Python FastAPI with mcp 1.23.1)
- Claude Plugin (agents, commands, skills, strategies, hooks, core)
- DSS Backend (dss-mvp1 - token translation, Figma sync)
- Admin UI (Node.js/React)
- Server (Node.js/Express)
- Storybook integration (dss-mvp1/.storybook)
Self-contained configuration:
- All paths relative or use DSS_BASE_PATH=/home/overbits/dss
- PYTHONPATH configured for dss-mvp1 and dss-claude-plugin
- .env file with all configuration
- Claude plugin uses ${CLAUDE_PLUGIN_ROOT} for portability
Migration completed: $(date)
🤖 Clean migration with full functionality preserved
demo/tools/analyze/__init__.py (new file, 40 lines)
@@ -0,0 +1,40 @@
"""
DSS Code Analysis Module

Provides tools for analyzing React projects, detecting style patterns,
building dependency graphs, and identifying quick-win improvements.
"""

from .base import (
    ProjectAnalysis,
    StylePattern,
    QuickWin,
    QuickWinType,
    QuickWinPriority,
    Location,
    ComponentInfo,
    StyleFile,
)
from .scanner import ProjectScanner
from .react import ReactAnalyzer
from .styles import StyleAnalyzer
from .graph import DependencyGraph
from .quick_wins import QuickWinFinder

__all__ = [
    # Data classes
    "ProjectAnalysis",
    "StylePattern",
    "QuickWin",
    "QuickWinType",
    "QuickWinPriority",
    "Location",
    "ComponentInfo",
    "StyleFile",
    # Analyzers
    "ProjectScanner",
    "ReactAnalyzer",
    "StyleAnalyzer",
    "DependencyGraph",
    "QuickWinFinder",
]
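A minimal sketch of how this public API is meant to be driven (the project path and the demo.tools.analyze import root are assumptions based on the file layout; the analyzers are async, so an event loop is required):

import asyncio
from demo.tools.analyze import ProjectScanner, QuickWinFinder

async def main() -> None:
    # Scan the project, then print the human-readable summary.
    analysis = await ProjectScanner("./my-react-app").scan()  # hypothetical path
    print(analysis.summary())

    # Hunt for easy improvements on the same tree.
    wins = await QuickWinFinder("./my-react-app").find_all()
    print(f"{len(wins)} quick wins found")

asyncio.run(main())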
demo/tools/analyze/base.py (new file, 298 lines)
@@ -0,0 +1,298 @@
"""
Base classes and data structures for code analysis.
"""

from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import List, Dict, Any, Optional, Set
from pathlib import Path


class QuickWinType(str, Enum):
    """Types of quick-win improvements."""
    INLINE_STYLE = "inline_style"          # Inline styles that can be extracted
    DUPLICATE_VALUE = "duplicate_value"    # Duplicate color/spacing values
    UNUSED_STYLE = "unused_style"          # Unused CSS/SCSS
    HARDCODED_VALUE = "hardcoded_value"    # Hardcoded values that should be tokens
    NAMING_INCONSISTENCY = "naming"        # Inconsistent naming patterns
    DEPRECATED_PATTERN = "deprecated"      # Deprecated styling patterns
    ACCESSIBILITY = "accessibility"        # A11y improvements
    PERFORMANCE = "performance"            # Performance improvements


class QuickWinPriority(str, Enum):
    """Priority levels for quick-wins."""
    CRITICAL = "critical"  # Must fix - breaking issues
    HIGH = "high"          # Should fix - significant improvement
    MEDIUM = "medium"      # Nice to fix - moderate improvement
    LOW = "low"            # Optional - minor improvement


class StylingApproach(str, Enum):
    """Detected styling approaches in a project."""
    CSS_MODULES = "css-modules"
    STYLED_COMPONENTS = "styled-components"
    EMOTION = "emotion"
    TAILWIND = "tailwind"
    INLINE_STYLES = "inline-styles"
    CSS_IN_JS = "css-in-js"
    SASS_SCSS = "sass-scss"
    LESS = "less"
    VANILLA_CSS = "vanilla-css"
    CSS_VARIABLES = "css-variables"


class Framework(str, Enum):
    """Detected UI frameworks."""
    REACT = "react"
    NEXT = "next"
    VUE = "vue"
    NUXT = "nuxt"
    ANGULAR = "angular"
    SVELTE = "svelte"
    SOLID = "solid"
    UNKNOWN = "unknown"
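Because these enums subclass str, each member is also a plain string, which is what keeps the to_dict() serialization below JSON-friendly; a quick illustration, assuming the enums above are in scope:

# str-mixin enum members compare equal to their raw values.
assert QuickWinPriority.HIGH == "high"
assert QuickWinType.INLINE_STYLE == "inline_style"
assert Framework.REACT.value == "react"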
@dataclass
class Location:
    """Represents a location in source code."""
    file_path: str
    line: int
    column: int = 0
    end_line: Optional[int] = None
    end_column: Optional[int] = None

    def __str__(self) -> str:
        return f"{self.file_path}:{self.line}"

    def to_dict(self) -> Dict[str, Any]:
        return {
            "file": self.file_path,
            "line": self.line,
            "column": self.column,
            "end_line": self.end_line,
            "end_column": self.end_column,
        }


@dataclass
class StyleFile:
    """Represents a style file in the project."""
    path: str
    type: str  # css, scss, less, styled, etc.
    size_bytes: int = 0
    line_count: int = 0
    variable_count: int = 0
    selector_count: int = 0
    imports: List[str] = field(default_factory=list)
    imported_by: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "path": self.path,
            "type": self.type,
            "size_bytes": self.size_bytes,
            "line_count": self.line_count,
            "variable_count": self.variable_count,
            "selector_count": self.selector_count,
            "imports": self.imports,
            "imported_by": self.imported_by,
        }


@dataclass
class ComponentInfo:
    """Information about a React component."""
    name: str
    path: str
    type: str = "functional"  # functional, class, forwardRef, memo
    props: List[str] = field(default_factory=list)
    has_styles: bool = False
    style_files: List[str] = field(default_factory=list)
    inline_style_count: int = 0
    imports: List[str] = field(default_factory=list)
    exports: List[str] = field(default_factory=list)
    children: List[str] = field(default_factory=list)  # Child components used
    line_count: int = 0

    def to_dict(self) -> Dict[str, Any]:
        return {
            "name": self.name,
            "path": self.path,
            "type": self.type,
            "props": self.props,
            "has_styles": self.has_styles,
            "style_files": self.style_files,
            "inline_style_count": self.inline_style_count,
            "imports": self.imports,
            "exports": self.exports,
            "children": self.children,
            "line_count": self.line_count,
        }


@dataclass
class StylePattern:
    """A detected style pattern in code."""
    type: StylingApproach
    locations: List[Location] = field(default_factory=list)
    count: int = 0
    examples: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "type": self.type.value,
            "count": self.count,
            "locations": [loc.to_dict() for loc in self.locations[:10]],
            "examples": self.examples[:5],
        }


@dataclass
class TokenCandidate:
    """A value that could be extracted as a design token."""
    value: str               # The actual value (e.g., "#3B82F6")
    suggested_name: str      # Suggested token name
    category: str            # colors, spacing, typography, etc.
    occurrences: int = 1     # How many times it appears
    locations: List[Location] = field(default_factory=list)
    confidence: float = 0.0  # 0-1 confidence score

    def to_dict(self) -> Dict[str, Any]:
        return {
            "value": self.value,
            "suggested_name": self.suggested_name,
            "category": self.category,
            "occurrences": self.occurrences,
            "locations": [loc.to_dict() for loc in self.locations[:5]],
            "confidence": self.confidence,
        }


@dataclass
class QuickWin:
    """A quick improvement opportunity."""
    type: QuickWinType
    priority: QuickWinPriority
    title: str
    description: str
    location: Optional[Location] = None
    affected_files: List[str] = field(default_factory=list)
    estimated_impact: str = ""  # e.g., "Remove 50 lines of duplicate code"
    fix_suggestion: str = ""    # Suggested fix
    auto_fixable: bool = False  # Can be auto-fixed

    def to_dict(self) -> Dict[str, Any]:
        return {
            "type": self.type.value,
            "priority": self.priority.value,
            "title": self.title,
            "description": self.description,
            "location": self.location.to_dict() if self.location else None,
            "affected_files": self.affected_files,
            "estimated_impact": self.estimated_impact,
            "fix_suggestion": self.fix_suggestion,
            "auto_fixable": self.auto_fixable,
        }


@dataclass
class ProjectAnalysis:
    """Complete analysis result for a project."""
    # Basic info
    project_path: str
    analyzed_at: datetime = field(default_factory=datetime.now)

    # Framework detection
    framework: Framework = Framework.UNKNOWN
    framework_version: str = ""

    # Styling detection
    styling_approaches: List[StylePattern] = field(default_factory=list)
    primary_styling: Optional[StylingApproach] = None

    # Components
    components: List[ComponentInfo] = field(default_factory=list)
    component_count: int = 0

    # Style files
    style_files: List[StyleFile] = field(default_factory=list)
    style_file_count: int = 0

    # Issues and opportunities
    inline_style_locations: List[Location] = field(default_factory=list)
    token_candidates: List[TokenCandidate] = field(default_factory=list)
    quick_wins: List[QuickWin] = field(default_factory=list)

    # Dependency graph
    dependency_graph: Dict[str, List[str]] = field(default_factory=dict)

    # Statistics
    stats: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        if not self.stats:
            self.stats = {
                "total_files_scanned": 0,
                "total_lines": 0,
                "component_count": 0,
                "style_file_count": 0,
                "inline_style_count": 0,
                "token_candidates": 0,
                "quick_wins_count": 0,
            }

    def to_dict(self) -> Dict[str, Any]:
        return {
            "project_path": self.project_path,
            "analyzed_at": self.analyzed_at.isoformat(),
            "framework": self.framework.value,
            "framework_version": self.framework_version,
            "styling_approaches": [sp.to_dict() for sp in self.styling_approaches],
            "primary_styling": self.primary_styling.value if self.primary_styling else None,
            "component_count": self.component_count,
            "style_file_count": self.style_file_count,
            "inline_style_count": len(self.inline_style_locations),
            "token_candidates_count": len(self.token_candidates),
            "quick_wins_count": len(self.quick_wins),
            "stats": self.stats,
        }

    def summary(self) -> str:
        """Generate human-readable summary."""
        lines = [
            f"Project Analysis: {self.project_path}",
            "=" * 50,
            f"Framework: {self.framework.value} {self.framework_version}",
            f"Components: {self.component_count}",
            f"Style files: {self.style_file_count}",
            "",
            "Styling Approaches:",
        ]

        for sp in self.styling_approaches:
            lines.append(f"  • {sp.type.value}: {sp.count} occurrences")

        lines.extend([
            "",
            f"Inline styles found: {len(self.inline_style_locations)}",
            f"Token candidates: {len(self.token_candidates)}",
            f"Quick wins: {len(self.quick_wins)}",
            "",
            "Quick Wins by Priority:",
        ])

        by_priority = {}
        for qw in self.quick_wins:
            if qw.priority not in by_priority:
                by_priority[qw.priority] = []
            by_priority[qw.priority].append(qw)

        for priority in [QuickWinPriority.CRITICAL, QuickWinPriority.HIGH,
                         QuickWinPriority.MEDIUM, QuickWinPriority.LOW]:
            if priority in by_priority:
                lines.append(f"  [{priority.value.upper()}] {len(by_priority[priority])} items")

        return "\n".join(lines)
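Everything above funnels into to_dict() for transport; a small sketch of how the pieces compose (import path inferred from the file layout, values illustrative only):

from demo.tools.analyze.base import (
    Location, QuickWin, QuickWinType, QuickWinPriority,
)

win = QuickWin(
    type=QuickWinType.INLINE_STYLE,
    priority=QuickWinPriority.HIGH,
    title="Extract 4 inline styles",
    description="Button.tsx declares 4 inline style objects.",
    location=Location("src/Button.tsx", line=12),
    auto_fixable=True,
)
payload = win.to_dict()
print(payload["type"], payload["priority"])  # inline_style high
print(payload["location"])  # roughly {'file': 'src/Button.tsx', 'line': 12, ...}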
demo/tools/analyze/graph.py (new file, 419 lines)
@@ -0,0 +1,419 @@
"""
Dependency Graph Builder

Builds component and style dependency graphs for visualization
and analysis of project structure.
"""

import os
import re
import json
from pathlib import Path
from typing import List, Dict, Any, Optional, Set, Tuple
from dataclasses import dataclass, field
from collections import defaultdict


@dataclass
class GraphNode:
    """A node in the dependency graph."""
    id: str
    name: str
    type: str  # 'component', 'style', 'util', 'hook'
    path: str
    size: int = 0  # file size or importance metric
    children: List[str] = field(default_factory=list)
    parents: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        return {
            'id': self.id,
            'name': self.name,
            'type': self.type,
            'path': self.path,
            'size': self.size,
            'children': self.children,
            'parents': self.parents,
            'metadata': self.metadata,
        }


@dataclass
class GraphEdge:
    """An edge in the dependency graph."""
    source: str
    target: str
    type: str  # 'import', 'uses', 'styles'
    weight: int = 1

    def to_dict(self) -> Dict[str, Any]:
        return {
            'source': self.source,
            'target': self.target,
            'type': self.type,
            'weight': self.weight,
        }


class DependencyGraph:
    """
    Builds and analyzes dependency graphs for a project.

    Tracks:
    - Component imports/exports
    - Style file dependencies
    - Component usage relationships
    """

    def __init__(self, root_path: str):
        self.root = Path(root_path).resolve()
        self.nodes: Dict[str, GraphNode] = {}
        self.edges: List[GraphEdge] = []

    async def build(self, depth: int = 3) -> Dict[str, Any]:
        """
        Build the full dependency graph.

        Args:
            depth: Maximum depth for traversing dependencies

        Returns:
            Graph representation with nodes and edges
        """
        # Clear existing graph
        self.nodes.clear()
        self.edges.clear()

        # Find all relevant files
        await self._scan_files()

        # Build edges from imports
        await self._build_import_edges()

        # Build edges from component usage
        await self._build_usage_edges()

        return self.to_dict()

    async def _scan_files(self) -> None:
        """Scan project files and create nodes."""
        skip_dirs = {'node_modules', '.git', 'dist', 'build', '.next'}

        # Component files
        for ext in ['*.jsx', '*.tsx']:
            for file_path in self.root.rglob(ext):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                rel_path = str(file_path.relative_to(self.root))
                node_id = self._path_to_id(rel_path)

                self.nodes[node_id] = GraphNode(
                    id=node_id,
                    name=file_path.stem,
                    type='component',
                    path=rel_path,
                    size=file_path.stat().st_size,
                )

        # Style files
        for ext in ['*.css', '*.scss', '*.sass', '*.less']:
            for file_path in self.root.rglob(ext):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                rel_path = str(file_path.relative_to(self.root))
                node_id = self._path_to_id(rel_path)

                self.nodes[node_id] = GraphNode(
                    id=node_id,
                    name=file_path.stem,
                    type='style',
                    path=rel_path,
                    size=file_path.stat().st_size,
                )

        # Utility/Hook files
        for ext in ['*.js', '*.ts']:
            for file_path in self.root.rglob(ext):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                name = file_path.stem.lower()
                rel_path = str(file_path.relative_to(self.root))
                node_id = self._path_to_id(rel_path)

                # Classify file type
                if 'hook' in name or name.startswith('use'):
                    node_type = 'hook'
                elif any(x in name for x in ['util', 'helper', 'lib']):
                    node_type = 'util'
                else:
                    continue  # Skip other JS/TS files

                self.nodes[node_id] = GraphNode(
                    id=node_id,
                    name=file_path.stem,
                    type=node_type,
                    path=rel_path,
                    size=file_path.stat().st_size,
                )

    async def _build_import_edges(self) -> None:
        """Build edges from import statements."""
        import_pattern = re.compile(
            r'import\s+(?:\{[^}]+\}|\*\s+as\s+\w+|\w+)?\s*(?:,\s*\{[^}]+\})?\s*from\s+["\']([^"\']+)["\']',
            re.MULTILINE
        )

        for node_id, node in self.nodes.items():
            if node.type not in ['component', 'hook', 'util']:
                continue

            file_path = self.root / node.path
            if not file_path.exists():
                continue

            try:
                content = file_path.read_text(encoding='utf-8', errors='ignore')

                for match in import_pattern.finditer(content):
                    import_path = match.group(1)

                    # Resolve relative imports
                    target_id = self._resolve_import(node.path, import_path)

                    if target_id and target_id in self.nodes:
                        # Add edge
                        self.edges.append(GraphEdge(
                            source=node_id,
                            target=target_id,
                            type='import',
                        ))

                        # Update parent/child relationships
                        node.children.append(target_id)
                        self.nodes[target_id].parents.append(node_id)

            except Exception:
                continue

    async def _build_usage_edges(self) -> None:
        """Build edges from component usage in JSX."""
        # Pattern to find JSX component usage
        jsx_pattern = re.compile(r'<([A-Z][A-Za-z0-9]*)')

        # Build name -> id mapping for components
        name_to_id = {}
        for node_id, node in self.nodes.items():
            if node.type == 'component':
                name_to_id[node.name] = node_id

        for node_id, node in self.nodes.items():
            if node.type != 'component':
                continue

            file_path = self.root / node.path
            if not file_path.exists():
                continue

            try:
                content = file_path.read_text(encoding='utf-8', errors='ignore')

                used_components = set()
                for match in jsx_pattern.finditer(content):
                    comp_name = match.group(1)
                    if comp_name in name_to_id and name_to_id[comp_name] != node_id:
                        used_components.add(name_to_id[comp_name])

                for target_id in used_components:
                    self.edges.append(GraphEdge(
                        source=node_id,
                        target=target_id,
                        type='uses',
                    ))

            except Exception:
                continue

    def _path_to_id(self, path: str) -> str:
        """Convert file path to node ID."""
        # Remove extension and normalize
        path = re.sub(r'\.(jsx?|tsx?|css|scss|sass|less)$', '', path)
        return path.replace('/', '_').replace('\\', '_').replace('.', '_')

    def _resolve_import(self, source_path: str, import_path: str) -> Optional[str]:
        """Resolve import path to node ID."""
        if not import_path.startswith('.'):
            return None  # Skip node_modules imports

        source_dir = Path(source_path).parent

        # Handle various import patterns
        if import_path.startswith('./'):
            resolved = source_dir / import_path[2:]
        else:
            resolved = source_dir / import_path

        # Collapse '..' segments so IDs line up with the scanned relative
        # paths; without this, '../Foo' imports would never match a node.
        resolved_str = os.path.normpath(str(resolved))

        # Try to resolve with extensions
        extensions = ['.tsx', '.ts', '.jsx', '.js', '.css', '.scss',
                      '/index.tsx', '/index.ts', '/index.jsx', '/index.js']

        for ext in extensions:
            test_id = self._path_to_id(resolved_str + ext)
            if test_id in self.nodes:
                return test_id

        # Try without additional extension (if path already has one)
        test_id = self._path_to_id(resolved_str)
        if test_id in self.nodes:
            return test_id

        return None

    def to_dict(self) -> Dict[str, Any]:
        """Convert graph to dictionary for serialization."""
        return {
            'nodes': [node.to_dict() for node in self.nodes.values()],
            'edges': [edge.to_dict() for edge in self.edges],
            'stats': {
                'total_nodes': len(self.nodes),
                'total_edges': len(self.edges),
                'components': len([n for n in self.nodes.values() if n.type == 'component']),
                'styles': len([n for n in self.nodes.values() if n.type == 'style']),
                'hooks': len([n for n in self.nodes.values() if n.type == 'hook']),
                'utils': len([n for n in self.nodes.values() if n.type == 'util']),
            }
        }

    def to_json(self, pretty: bool = True) -> str:
        """Convert graph to JSON string."""
        return json.dumps(self.to_dict(), indent=2 if pretty else None)

    def get_component_tree(self) -> Dict[str, List[str]]:
        """Get simplified component dependency tree."""
        tree = {}
        for node_id, node in self.nodes.items():
            if node.type == 'component':
                tree[node.name] = [
                    self.nodes[child_id].name
                    for child_id in node.children
                    if child_id in self.nodes and self.nodes[child_id].type == 'component'
                ]
        return tree

    def find_orphans(self) -> List[str]:
        """Find components with no parents (not imported anywhere)."""
        orphans = []
        for node_id, node in self.nodes.items():
            if node.type == 'component' and not node.parents:
                # Exclude entry points (index, App, etc.)
                if node.name.lower() not in ['app', 'index', 'main', 'root']:
                    orphans.append(node.path)
        return orphans

    def find_hubs(self, min_connections: int = 5) -> List[Dict[str, Any]]:
        """Find highly connected nodes (potential refactoring targets)."""
        hubs = []
        for node_id, node in self.nodes.items():
            connections = len(node.children) + len(node.parents)
            if connections >= min_connections:
                hubs.append({
                    'name': node.name,
                    'path': node.path,
                    'type': node.type,
                    'imports': len(node.children),
                    'imported_by': len(node.parents),
                    'total_connections': connections,
                })

        hubs.sort(key=lambda x: x['total_connections'], reverse=True)
        return hubs

    def find_circular_dependencies(self) -> List[List[str]]:
        """Find circular dependency chains."""
        cycles = []
        visited = set()
        rec_stack = set()

        def dfs(node_id: str, path: List[str]) -> None:
            visited.add(node_id)
            rec_stack.add(node_id)
            path.append(node_id)

            for child_id in self.nodes.get(node_id, GraphNode('', '', '', '')).children:
                if child_id not in visited:
                    dfs(child_id, path.copy())
                elif child_id in rec_stack:
                    # Found cycle
                    cycle_start = path.index(child_id)
                    cycle = path[cycle_start:] + [child_id]
                    cycles.append([self.nodes[n].name for n in cycle])

            rec_stack.remove(node_id)

        for node_id in self.nodes:
            if node_id not in visited:
                dfs(node_id, [])

        return cycles

    def get_subgraph(self, node_id: str, depth: int = 2) -> Dict[str, Any]:
        """Get subgraph centered on a specific node."""
        if node_id not in self.nodes:
            return {'nodes': [], 'edges': []}

        # BFS to find nodes within depth
        included_nodes = {node_id}
        frontier = {node_id}

        for _ in range(depth):
            new_frontier = set()
            for nid in frontier:
                node = self.nodes.get(nid)
                if node:
                    new_frontier.update(node.children)
                    new_frontier.update(node.parents)
            included_nodes.update(new_frontier)
            frontier = new_frontier

        # Filter nodes and edges
        subgraph_nodes = [
            self.nodes[nid].to_dict()
            for nid in included_nodes
            if nid in self.nodes
        ]

        subgraph_edges = [
            edge.to_dict()
            for edge in self.edges
            if edge.source in included_nodes and edge.target in included_nodes
        ]

        return {
            'nodes': subgraph_nodes,
            'edges': subgraph_edges,
            'center': node_id,
            'depth': depth,
        }

    def get_style_dependencies(self) -> Dict[str, List[str]]:
        """Get mapping of components to their style dependencies."""
        style_deps = {}

        for node_id, node in self.nodes.items():
            if node.type != 'component':
                continue

            style_children = [
                self.nodes[child_id].path
                for child_id in node.children
                if child_id in self.nodes and self.nodes[child_id].type == 'style'
            ]

            if style_children:
                style_deps[node.path] = style_children

        return style_deps
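The builder is entirely file-system driven and async; a minimal driver sketch (the source directory is a hypothetical example):

import asyncio
from demo.tools.analyze.graph import DependencyGraph

async def main() -> None:
    graph = DependencyGraph("./my-react-app/src")  # hypothetical path
    await graph.build()

    # Highly connected files are often the best refactoring targets.
    for hub in graph.find_hubs(min_connections=8):
        print(hub['path'], hub['total_connections'])

    # Import cycles usually signal a module boundary worth rethinking.
    for cycle in graph.find_circular_dependencies():
        print(' -> '.join(cycle))

asyncio.run(main())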
demo/tools/analyze/quick_wins.py (new file, 418 lines)
@@ -0,0 +1,418 @@
"""
Quick-Win Finder

Identifies easy improvement opportunities in a codebase:
- Inline styles that can be extracted
- Duplicate values that should be tokens
- Unused styles
- Naming inconsistencies
- Accessibility issues
"""

import re
from pathlib import Path
from typing import List, Dict, Any, Optional
from dataclasses import dataclass

from .base import (
    QuickWin,
    QuickWinType,
    QuickWinPriority,
    Location,
    ProjectAnalysis,
)
from .styles import StyleAnalyzer
from .react import ReactAnalyzer


class QuickWinFinder:
    """
    Finds quick improvement opportunities in a project.

    Categories:
    - INLINE_STYLE: Inline styles that can be extracted to CSS/tokens
    - DUPLICATE_VALUE: Repeated values that should be tokens
    - UNUSED_STYLE: CSS that's defined but not used
    - HARDCODED_VALUE: Magic numbers/colors that should be tokens
    - NAMING_INCONSISTENCY: Inconsistent naming patterns
    - DEPRECATED_PATTERN: Outdated styling approaches
    - ACCESSIBILITY: A11y improvements
    """

    def __init__(self, root_path: str):
        self.root = Path(root_path).resolve()
        self.style_analyzer = StyleAnalyzer(root_path)
        self.react_analyzer = ReactAnalyzer(root_path)

    async def find_all(self) -> List[QuickWin]:
        """
        Find all quick-win opportunities.

        Returns:
            List of QuickWin objects sorted by priority
        """
        quick_wins = []

        # Find inline styles
        inline_wins = await self._find_inline_style_wins()
        quick_wins.extend(inline_wins)

        # Find duplicate values
        duplicate_wins = await self._find_duplicate_value_wins()
        quick_wins.extend(duplicate_wins)

        # Find unused styles
        unused_wins = await self._find_unused_style_wins()
        quick_wins.extend(unused_wins)

        # Find hardcoded values
        hardcoded_wins = await self._find_hardcoded_value_wins()
        quick_wins.extend(hardcoded_wins)

        # Find naming inconsistencies
        naming_wins = await self._find_naming_inconsistency_wins()
        quick_wins.extend(naming_wins)

        # Find accessibility issues
        a11y_wins = await self._find_accessibility_wins()
        quick_wins.extend(a11y_wins)

        # Sort by priority
        priority_order = {
            QuickWinPriority.CRITICAL: 0,
            QuickWinPriority.HIGH: 1,
            QuickWinPriority.MEDIUM: 2,
            QuickWinPriority.LOW: 3,
        }
        quick_wins.sort(key=lambda x: priority_order[x.priority])

        return quick_wins

    async def _find_inline_style_wins(self) -> List[QuickWin]:
        """Find inline styles that should be extracted."""
        wins = []

        inline_styles = await self.react_analyzer.find_inline_styles()

        if not inline_styles:
            return wins

        # Group by file
        by_file = {}
        for style in inline_styles:
            file_path = style['file']
            if file_path not in by_file:
                by_file[file_path] = []
            by_file[file_path].append(style)

        # Create quick-wins for files with multiple inline styles
        for file_path, styles in by_file.items():
            if len(styles) >= 3:  # Only flag if 3+ inline styles
                wins.append(QuickWin(
                    type=QuickWinType.INLINE_STYLE,
                    priority=QuickWinPriority.HIGH,
                    title=f"Extract {len(styles)} inline styles",
                    description=f"File {file_path} has {len(styles)} inline style declarations that could be extracted to CSS classes or design tokens.",
                    location=Location(file_path, styles[0]['line']),
                    affected_files=[file_path],
                    estimated_impact="Reduce inline styles, improve maintainability",
                    fix_suggestion="Extract repeated style properties to CSS classes or design tokens. Use className instead of the style prop.",
                    auto_fixable=True,
                ))

        # Create a summary entry if many files have inline styles
        total_inline = len(inline_styles)
        if total_inline >= 10:
            wins.insert(0, QuickWin(
                type=QuickWinType.INLINE_STYLE,
                priority=QuickWinPriority.HIGH,
                title=f"Project has {total_inline} inline styles",
                description=f"Found {total_inline} inline style declarations across {len(by_file)} files. Consider migrating to CSS classes or design tokens.",
                affected_files=list(by_file.keys())[:10],
                estimated_impact="Improve code maintainability and bundle size",
                fix_suggestion="Run 'dss migrate inline-styles' to preview migration options.",
                auto_fixable=True,
            ))

        return wins

    async def _find_duplicate_value_wins(self) -> List[QuickWin]:
        """Find duplicate values that should be tokens."""
        wins = []

        analysis = await self.style_analyzer.analyze()
        duplicates = analysis.get('duplicates', [])

        # Find high-occurrence duplicates
        for dup in duplicates[:10]:  # Top 10 duplicates
            if dup['count'] >= 5:  # Only if used 5+ times
                priority = QuickWinPriority.HIGH if dup['count'] >= 10 else QuickWinPriority.MEDIUM

                wins.append(QuickWin(
                    type=QuickWinType.DUPLICATE_VALUE,
                    priority=priority,
                    title=f"Duplicate value '{dup['value']}' used {dup['count']} times",
                    description=f"The value '{dup['value']}' appears {dup['count']} times across {len(dup['files'])} files. This should be a design token.",
                    affected_files=dup['files'],
                    estimated_impact="Create a single source of truth, easier theme updates",
                    fix_suggestion="Create a token for this value and replace all occurrences.",
                    auto_fixable=True,
                ))

        return wins

    async def _find_unused_style_wins(self) -> List[QuickWin]:
        """Find unused CSS styles."""
        wins = []

        unused = await self.style_analyzer.find_unused_styles()

        if len(unused) >= 5:
            wins.append(QuickWin(
                type=QuickWinType.UNUSED_STYLE,
                priority=QuickWinPriority.MEDIUM,
                title=f"Found {len(unused)} potentially unused CSS classes",
                description="These CSS classes are defined but don't appear to be used in the codebase. Review and remove if confirmed unused.",
                affected_files=list(set(u['file'] for u in unused))[:10],
                estimated_impact="Reduce CSS bundle size by removing dead code",
                fix_suggestion="Review each class and remove if unused. Some may be dynamically generated.",
                auto_fixable=False,  # Needs human review
            ))

        return wins

    async def _find_hardcoded_value_wins(self) -> List[QuickWin]:
        """Find hardcoded magic values."""
        wins = []

        analysis = await self.style_analyzer.analyze()
        candidates = analysis.get('token_candidates', [])

        # Find high-confidence candidates
        high_confidence = [c for c in candidates if c.confidence >= 0.7]

        if high_confidence:
            wins.append(QuickWin(
                type=QuickWinType.HARDCODED_VALUE,
                priority=QuickWinPriority.MEDIUM,
                title=f"Found {len(high_confidence)} values that should be tokens",
                description="These hardcoded values appear multiple times and should be extracted as design tokens for consistency.",
                estimated_impact="Improve theme consistency and make updates easier",
                fix_suggestion="Use 'dss extract-tokens' to create tokens from these values.",
                auto_fixable=True,
            ))

        # Add specific wins for top candidates
        for candidate in high_confidence[:5]:
            wins.append(QuickWin(
                type=QuickWinType.HARDCODED_VALUE,
                priority=QuickWinPriority.LOW,
                title=f"Extract '{candidate.value}' as token",
                description=f"Value '{candidate.value}' appears {candidate.occurrences} times. Suggested token: {candidate.suggested_name}",
                location=candidate.locations[0] if candidate.locations else None,
                affected_files=[loc.file_path for loc in candidate.locations[:5]],
                estimated_impact="Single source of truth for this value",
                fix_suggestion=f"Create token '{candidate.suggested_name}' with value '{candidate.value}'",
                auto_fixable=True,
            ))

        return wins

    async def _find_naming_inconsistency_wins(self) -> List[QuickWin]:
        """Find naming inconsistencies."""
        wins = []

        naming = await self.style_analyzer.analyze_naming_consistency()

        if naming.get('inconsistencies'):
            primary = naming.get('primary_pattern', 'unknown')
            inconsistent_count = len(naming['inconsistencies'])

            wins.append(QuickWin(
                type=QuickWinType.NAMING_INCONSISTENCY,
                priority=QuickWinPriority.LOW,
                title=f"Found {inconsistent_count} naming inconsistencies",
                description=f"The project primarily uses {primary} naming, but {inconsistent_count} classes use different conventions.",
                affected_files=list(set(i['file'] for i in naming['inconsistencies']))[:10],
                estimated_impact="Improve code consistency and readability",
                fix_suggestion=f"Standardize all class names to use the {primary} convention.",
                auto_fixable=True,
            ))

        return wins

    async def _find_accessibility_wins(self) -> List[QuickWin]:
        """Find accessibility issues."""
        wins = []
        skip_dirs = {'node_modules', '.git', 'dist', 'build'}

        a11y_issues = []

        for ext in ['*.jsx', '*.tsx']:
            for file_path in self.root.rglob(ext):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')
                    rel_path = str(file_path.relative_to(self.root))

                    # Check for images without alt: collect all <img> tags and
                    # let the 'alt=' test below do the actual filtering.
                    img_tags = re.findall(r'<img[^>]*>', content)
                    for match in img_tags[:3]:
                        if 'alt=' not in match:
                            line = content[:content.find(match)].count('\n') + 1
                            a11y_issues.append({
                                'type': 'img-no-alt',
                                'file': rel_path,
                                'line': line,
                            })

                    # Check for buttons without accessible text
                    icon_only_buttons = re.findall(
                        r'<button[^>]*>\s*<(?:svg|Icon|img)[^>]*/?>\s*</button>',
                        content,
                        re.IGNORECASE
                    )
                    if icon_only_buttons:
                        a11y_issues.append({
                            'type': 'icon-button-no-label',
                            'file': rel_path,
                        })

                    # Check for click handlers on non-interactive elements
                    div_onclick = re.findall(r'<div[^>]+onClick', content)
                    if div_onclick:
                        a11y_issues.append({
                            'type': 'div-click-handler',
                            'file': rel_path,
                            'count': len(div_onclick),
                        })

                except Exception:
                    continue

        # Group issues by type
        if a11y_issues:
            img_issues = [i for i in a11y_issues if i['type'] == 'img-no-alt']
            if img_issues:
                wins.append(QuickWin(
                    type=QuickWinType.ACCESSIBILITY,
                    priority=QuickWinPriority.HIGH,
                    title=f"Found {len(img_issues)} images without alt text",
                    description="Images should have alt attributes for screen readers. Empty alt='' is acceptable for decorative images.",
                    affected_files=list(set(i['file'] for i in img_issues))[:10],
                    estimated_impact="Improve accessibility for screen reader users",
                    fix_suggestion="Add descriptive alt text to images or alt='' for decorative images.",
                    auto_fixable=False,
                ))

            div_issues = [i for i in a11y_issues if i['type'] == 'div-click-handler']
            if div_issues:
                wins.append(QuickWin(
                    type=QuickWinType.ACCESSIBILITY,
                    priority=QuickWinPriority.MEDIUM,
                    title="Found click handlers on div elements",
                    description="Using onClick on div elements makes them inaccessible to keyboard users. Use button or add proper ARIA attributes.",
                    affected_files=list(set(i['file'] for i in div_issues))[:10],
                    estimated_impact="Improve keyboard navigation accessibility",
                    fix_suggestion="Replace <div onClick> with <button> or add role='button' and tabIndex={0}.",
                    auto_fixable=True,
                ))

        return wins

    async def get_summary(self) -> Dict[str, Any]:
        """Get summary of all quick-wins."""
        wins = await self.find_all()

        by_type = {}
        by_priority = {}

        for win in wins:
            type_key = win.type.value
            priority_key = win.priority.value

            if type_key not in by_type:
                by_type[type_key] = 0
            by_type[type_key] += 1

            if priority_key not in by_priority:
                by_priority[priority_key] = 0
            by_priority[priority_key] += 1

        return {
            'total': len(wins),
            'by_type': by_type,
            'by_priority': by_priority,
            'auto_fixable': len([w for w in wins if w.auto_fixable]),
            'top_wins': [w.to_dict() for w in wins[:10]],
        }

    async def get_actionable_report(self) -> str:
        """Generate human-readable report of quick-wins."""
        wins = await self.find_all()

        if not wins:
            return "No quick-wins found. Your codebase looks clean!"

        lines = [
            "QUICK-WIN OPPORTUNITIES",
            "=" * 50,
            "",
        ]

        # Group by priority
        by_priority = {
            QuickWinPriority.CRITICAL: [],
            QuickWinPriority.HIGH: [],
            QuickWinPriority.MEDIUM: [],
            QuickWinPriority.LOW: [],
        }

        for win in wins:
            by_priority[win.priority].append(win)

        # Report by priority
        priority_labels = {
            QuickWinPriority.CRITICAL: "CRITICAL",
            QuickWinPriority.HIGH: "HIGH PRIORITY",
            QuickWinPriority.MEDIUM: "MEDIUM PRIORITY",
            QuickWinPriority.LOW: "LOW PRIORITY",
        }

        for priority, label in priority_labels.items():
            priority_wins = by_priority[priority]
            if not priority_wins:
                continue

            lines.extend([
                f"\n[{label}] ({len(priority_wins)} items)",
                "-" * 40,
            ])

            for i, win in enumerate(priority_wins[:5], 1):
                lines.extend([
                    f"\n{i}. {win.title}",
                    f"   {win.description[:100]}...",
                    f"   Impact: {win.estimated_impact}",
                ])
                if win.auto_fixable:
                    lines.append("   [Auto-fixable]")

            if len(priority_wins) > 5:
                lines.append(f"\n   ... and {len(priority_wins) - 5} more")

        # Summary
        lines.extend([
            "",
            "=" * 50,
            "SUMMARY",
            f"Total quick-wins: {len(wins)}",
            f"Auto-fixable: {len([w for w in wins if w.auto_fixable])}",
            "",
            "Run 'dss fix --preview' to see suggested changes.",
        ])

        return "\n".join(lines)
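A short driver sketch for the finder (the path is hypothetical; StyleAnalyzer and ReactAnalyzer must be importable, since the constructor builds both):

import asyncio
from demo.tools.analyze.quick_wins import QuickWinFinder

async def main() -> None:
    finder = QuickWinFinder("./my-react-app")  # hypothetical path
    print(await finder.get_actionable_report())

    summary = await finder.get_summary()
    print(f"auto-fixable: {summary['auto_fixable']} of {summary['total']}")

asyncio.run(main())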
demo/tools/analyze/react.py (new file, 441 lines)
@@ -0,0 +1,441 @@
"""
React Project Analyzer

Analyzes React codebases to extract component information,
detect patterns, and identify style usage.
"""

import re
from pathlib import Path
from typing import List, Dict, Any, Optional, Set, Tuple
from dataclasses import dataclass, field

from .base import (
    ComponentInfo,
    Location,
    StylePattern,
    StylingApproach,
)


# Patterns for React component detection
FUNCTIONAL_COMPONENT = re.compile(
    r'(?:export\s+)?(?:const|let|var|function)\s+([A-Z][A-Za-z0-9]*)\s*(?::\s*(?:React\.)?FC)?'
    r'\s*(?:=\s*(?:\([^)]*\)|[a-zA-Z_]\w*)\s*=>|\()',
    re.MULTILINE
)

CLASS_COMPONENT = re.compile(
    r'class\s+([A-Z][A-Za-z0-9]*)\s+extends\s+(?:React\.)?(?:Component|PureComponent)',
    re.MULTILINE
)

FORWARD_REF = re.compile(
    r'(?:export\s+)?(?:const|let)\s+([A-Z][A-Za-z0-9]*)\s*=\s*(?:React\.)?forwardRef',
    re.MULTILINE
)

MEMO_COMPONENT = re.compile(
    r'(?:export\s+)?(?:const|let)\s+([A-Z][A-Za-z0-9]*)\s*=\s*(?:React\.)?memo\(',
    re.MULTILINE
)

# Import patterns
IMPORT_PATTERN = re.compile(
    r'import\s+(?:\{[^}]+\}|\*\s+as\s+\w+|\w+)\s+from\s+["\']([^"\']+)["\']',
    re.MULTILINE
)

STYLE_IMPORT = re.compile(
    r'import\s+(?:(\w+)\s+from\s+)?["\']([^"\']+\.(?:css|scss|sass|less|styl))["\']',
    re.MULTILINE
)

# Inline style patterns
INLINE_STYLE_OBJECT = re.compile(
    r'style\s*=\s*\{\s*\{([^}]+)\}\s*\}',
    re.MULTILINE | re.DOTALL
)

INLINE_STYLE_VAR = re.compile(
    r'style\s*=\s*\{(\w+)\}',
    re.MULTILINE
)

# Props extraction
PROPS_DESTRUCTURE = re.compile(
    r'\(\s*\{\s*([^}]+)\s*\}\s*(?::\s*[^)]+)?\)',
    re.MULTILINE
)

PROPS_INTERFACE = re.compile(
    r'interface\s+\w*Props\s*\{([^}]+)\}',
    re.MULTILINE | re.DOTALL
)

PROPS_TYPE = re.compile(
    r'type\s+\w*Props\s*=\s*\{([^}]+)\}',
    re.MULTILINE | re.DOTALL
)
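These detection patterns are heuristics over raw source text, not a parser; a quick sanity check of what they capture (the sample snippet is invented for illustration and assumes the patterns above are in scope):

sample = '''
export const Button = ({ label }) => <button>{label}</button>;
class Modal extends React.Component {}
export const Input = React.forwardRef((props, ref) => <input ref={ref} />);
'''

print([m.group(1) for m in FUNCTIONAL_COMPONENT.finditer(sample)])  # expect ['Button']
print(CLASS_COMPONENT.search(sample).group(1))                      # expect 'Modal'
print(FORWARD_REF.search(sample).group(1))                          # expect 'Input'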
class ReactAnalyzer:
    """
    Analyzes React projects for component structure and style usage.
    """

    def __init__(self, root_path: str):
        self.root = Path(root_path).resolve()

    async def analyze(
        self,
        component_files: Optional[List[Path]] = None
    ) -> List[ComponentInfo]:
        """
        Analyze React components in the project.

        Args:
            component_files: Optional list of files to analyze.
                If None, scans the project.

        Returns:
            List of ComponentInfo for each detected component.
        """
        if component_files is None:
            component_files = self._find_component_files()

        components = []

        for file_path in component_files:
            try:
                file_components = await self._analyze_file(file_path)
                components.extend(file_components)
            except Exception:
                # Skip files that fail to parse and continue
                continue

        return components

    def _find_component_files(self) -> List[Path]:
        """Find all potential React component files."""
        skip_dirs = {'node_modules', '.git', 'dist', 'build', '.next'}
        component_files = []

        for ext in ['*.jsx', '*.tsx']:
            for path in self.root.rglob(ext):
                if not any(skip in path.parts for skip in skip_dirs):
                    component_files.append(path)

        # Also check .js/.ts files that look like components
        for ext in ['*.js', '*.ts']:
            for path in self.root.rglob(ext):
                if any(skip in path.parts for skip in skip_dirs):
                    continue
                # Skip config and utility files
                if any(x in path.name.lower() for x in ['config', 'util', 'helper', 'hook', 'context']):
                    continue
                # Check if PascalCase (likely component)
                if path.stem[0].isupper():
                    component_files.append(path)

        return component_files

    async def _analyze_file(self, file_path: Path) -> List[ComponentInfo]:
        """Analyze a single file for React components."""
        content = file_path.read_text(encoding='utf-8', errors='ignore')
        components = []

        # Find all components in the file
        component_matches = []

        # Functional components
        for match in FUNCTIONAL_COMPONENT.finditer(content):
            name = match.group(1)
            if self._is_valid_component_name(name):
                component_matches.append((name, 'functional', match.start()))

        # Class components
        for match in CLASS_COMPONENT.finditer(content):
            name = match.group(1)
            component_matches.append((name, 'class', match.start()))

        # forwardRef components
        for match in FORWARD_REF.finditer(content):
            name = match.group(1)
            component_matches.append((name, 'forwardRef', match.start()))

        # memo components
        for match in MEMO_COMPONENT.finditer(content):
            name = match.group(1)
            component_matches.append((name, 'memo', match.start()))

        # Dedupe by name (keep first occurrence)
        seen_names = set()
        unique_matches = []
        for name, comp_type, pos in component_matches:
            if name not in seen_names:
                seen_names.add(name)
                unique_matches.append((name, comp_type, pos))

        # Extract imports (shared across all components in file)
        imports = self._extract_imports(content)
        style_files = self._extract_style_imports(content)
        inline_styles = self._find_inline_styles(content)

        # Create ComponentInfo for each
        for name, comp_type, pos in unique_matches:
            # Extract props for this component
            props = self._extract_props(content, name)

            # Find child components used
            children = self._find_child_components(content, seen_names)

            # Check if component has styles
            has_styles = bool(style_files) or bool(inline_styles)

            components.append(ComponentInfo(
                name=name,
                path=str(file_path.relative_to(self.root)),
                type=comp_type,
                props=props,
                has_styles=has_styles,
                style_files=style_files,
                inline_style_count=len(inline_styles),
                imports=imports,
                exports=self._find_exports(content, name),
                children=children,
                line_count=content.count('\n') + 1,
            ))

        return components

    def _is_valid_component_name(self, name: str) -> bool:
        """Check if a name is a valid React component name."""
        # Must be PascalCase
        if not name[0].isupper():
            return False

        # Filter out common non-component patterns
        invalid_names = {
            'React', 'Component', 'PureComponent', 'Fragment',
            'Suspense', 'Provider', 'Consumer', 'Context',
            'Error', 'ErrorBoundary', 'Wrapper', 'Container',
            'Props', 'State', 'Type', 'Interface',
        }

        return name not in invalid_names

    def _extract_imports(self, content: str) -> List[str]:
        """Extract import paths from file."""
        imports = []
        for match in IMPORT_PATTERN.finditer(content):
            import_path = match.group(1)
            # Skip node_modules style imports for brevity
            if not import_path.startswith('.') and '/' not in import_path:
                continue
            imports.append(import_path)
        return imports

    def _extract_style_imports(self, content: str) -> List[str]:
        """Extract style file imports."""
        style_files = []
        for match in STYLE_IMPORT.finditer(content):
            style_path = match.group(2)
            style_files.append(style_path)
        return style_files

    def _find_inline_styles(self, content: str) -> List[Location]:
        """Find inline style usage locations."""
        locations = []

        # style={{ ... }}
        for match in INLINE_STYLE_OBJECT.finditer(content):
            line = content[:match.start()].count('\n') + 1
            locations.append(Location(
                file_path="",  # Will be set by caller
                line=line,
            ))

        return locations

    def _extract_props(self, content: str, component_name: str) -> List[str]:
        """Extract props for a component."""
        props = set()

        # Look for destructured props
        for match in PROPS_DESTRUCTURE.finditer(content):
            props_str = match.group(1)
            # Extract prop names from destructuring
            for prop in re.findall(r'(\w+)(?:\s*[=:])?', props_str):
                if prop and not prop[0].isupper():  # Skip types
                    props.add(prop)

        # Look for Props interface/type
        for pattern in [PROPS_INTERFACE, PROPS_TYPE]:
            for match in pattern.finditer(content):
                props_str = match.group(1)
                # Extract prop names
                for line in props_str.split('\n'):
                    prop_match = re.match(r'\s*(\w+)\s*[?:]', line)
                    if prop_match:
                        props.add(prop_match.group(1))

        return list(props)

    def _find_child_components(
        self,
        content: str,
        current_components: Set[str]
    ) -> List[str]:
        """Find child components used in JSX."""
        children = set()

        # Find JSX elements that look like components (PascalCase)
        jsx_pattern = re.compile(r'<([A-Z][A-Za-z0-9]*)')
        for match in jsx_pattern.finditer(content):
            component_name = match.group(1)
            # Skip current file's components and React built-ins
            if component_name not in current_components:
                if component_name not in {'Fragment', 'Suspense', 'Provider'}:
                    children.add(component_name)

        return list(children)

    def _find_exports(self, content: str, component_name: str) -> List[str]:
        """Find export type for component."""
        exports = []

        # Default export
        if re.search(rf'export\s+default\s+{component_name}\b', content):
            exports.append('default')
        if re.search(rf'export\s+default\s+(?:function|const)\s+{component_name}\b', content):
            exports.append('default')

        # Named export
        if re.search(rf'export\s+(?:const|function|class)\s+{component_name}\b', content):
            exports.append('named')
        if re.search(r'export\s*\{[^}]*\b' + re.escape(component_name) + r'\b[^}]*\}', content):
            exports.append('named')

        return exports

    async def find_inline_styles(self, path: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Find all inline style usage in the project.

        Returns list of inline style occurrences with:
        - file path
        - line number
        - style content
        - component name (if detectable)
        """
        search_path = Path(path) if path else self.root
        results = []

        for ext in ['*.jsx', '*.tsx', '*.js', '*.ts']:
            for file_path in search_path.rglob(ext):
                if any(skip in file_path.parts for skip in
                       {'node_modules', '.git', 'dist', 'build'}):
                    continue

                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')

                    # Find style={{ ... }}
                    for match in INLINE_STYLE_OBJECT.finditer(content):
                        line = content[:match.start()].count('\n') + 1
                        style_content = match.group(1).strip()

                        results.append({
                            'file': str(file_path.relative_to(self.root)),
                            'line': line,
                            'content': style_content[:200],
                            'type': 'object',
                        })

                    # Find style={variable}
                    for match in INLINE_STYLE_VAR.finditer(content):
                        line = content[:match.start()].count('\n') + 1
                        var_name = match.group(1)

                        results.append({
                            'file': str(file_path.relative_to(self.root)),
                            'line': line,
                            'content': f'style={{{var_name}}}',
                            'type': 'variable',
                            'variable': var_name,
                        })

                except Exception:
                    continue

        return results

    async def get_component_tree(self) -> Dict[str, List[str]]:
        """
        Build component dependency tree.

        Returns dict mapping component names to their child components.
        """
        components = await self.analyze()

        tree = {}
        for comp in components:
            tree[comp.name] = comp.children

        return tree

    async def find_style_patterns(self) -> Dict[str, List[Dict]]:
        """
        Find different styling patterns used across the project.

        Returns dict with pattern types and their occurrences.
        """
        patterns = {
            'inline_styles': [],
            'css_modules': [],
            'styled_components': [],
            'emotion': [],
            'tailwind': [],
            'css_classes': [],
        }

        component_files = self._find_component_files()

        for file_path in component_files:
            try:
                content = file_path.read_text(encoding='utf-8', errors='ignore')
                rel_path = str(file_path.relative_to(self.root))

                # CSS Modules
                if re.search(r'import\s+\w+\s+from\s+["\'].*\.module\.', content):
                    patterns['css_modules'].append({'file': rel_path})

                # styled-components
                if re.search(r'styled\.|from\s+["\']styled-components', content):
                    patterns['styled_components'].append({'file': rel_path})

                # Emotion
                if re.search(r'@emotion|css`', content):
                    patterns['emotion'].append({'file': rel_path})

                # Tailwind (className with utility classes)
                if re.search(r'className\s*=\s*["\'][^"\']*(?:flex|grid|p-\d|m-\d|bg-)', content):
                    patterns['tailwind'].append({'file': rel_path})

                # Regular CSS classes
                if re.search(r'className\s*=\s*["\'][a-zA-Z]', content):
                    patterns['css_classes'].append({'file': rel_path})

                # Inline styles
                for match in INLINE_STYLE_OBJECT.finditer(content):
                    line = content[:match.start()].count('\n') + 1
                    patterns['inline_styles'].append({
                        'file': rel_path,
                        'line': line,
                    })

            except Exception:
                continue

        return patterns
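A quick usage sketch (the path is hypothetical; the output shape follows ComponentInfo above):

import asyncio
from demo.tools.analyze.react import ReactAnalyzer

async def main() -> None:
    analyzer = ReactAnalyzer("./my-react-app/src")  # hypothetical path
    for comp in await analyzer.analyze():
        flag = '*' if comp.inline_style_count else ' '
        print(f"{flag} {comp.name} ({comp.type}) props={comp.props}")

asyncio.run(main())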
demo/tools/analyze/scanner.py (new file, 502 lines)
@@ -0,0 +1,502 @@
"""
Project Scanner

Scans the file system to discover project structure, frameworks, and style files.
"""

import json
import re
import time
from pathlib import Path
from typing import List, Dict, Any, Optional, Set, Tuple
from dataclasses import dataclass, field

from .base import (
    Framework,
    StylingApproach,
    StyleFile,
    ProjectAnalysis,
)


# Directories to skip during scanning
SKIP_DIRS = {
    'node_modules',
    '.git',
    '.next',
    '.nuxt',
    'dist',
    'build',
    'out',
    '.cache',
    'coverage',
    '__pycache__',
    '.venv',
    'venv',
    '.turbo',
    '.vercel',
}

# File extensions to scan
SCAN_EXTENSIONS = {
    # JavaScript/TypeScript
    '.js', '.jsx', '.ts', '.tsx', '.mjs', '.cjs',
    # Styles
    '.css', '.scss', '.sass', '.less', '.styl',
    # Config
    '.json',
}
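A small sketch of how these two constants gate the walk, mirroring the filter in _scan_files below (the paths are invented):

from pathlib import Path

def is_scannable(path: Path) -> bool:
    # Same test _scan_files applies: prune skip-listed directories,
    # then keep only the whitelisted extensions.
    if any(part in SKIP_DIRS for part in path.parts):
        return False
    return path.suffix.lower() in SCAN_EXTENSIONS

print(is_scannable(Path("src/App.tsx")))                  # True
print(is_scannable(Path("node_modules/react/index.js")))  # False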
@dataclass
class ScanResult:
    """Result of file system scan."""
    files: List[Path] = field(default_factory=list)
    style_files: List[Path] = field(default_factory=list)
    component_files: List[Path] = field(default_factory=list)
    config_files: Dict[str, Path] = field(default_factory=dict)
    total_lines: int = 0


class ProjectScanner:
    """
    Scans a project directory to identify:
    - Framework (React, Next, Vue, etc.)
    - Styling approach (CSS modules, styled-components, Tailwind, etc.)
    - Component files
    - Style files

    Results are cached in memory for the session.
    """

    # Class-level cache: path -> (timestamp, analysis)
    _cache: Dict[str, Tuple[float, ProjectAnalysis]] = {}
    _cache_ttl: float = 60.0  # Cache for 60 seconds

    def __init__(self, root_path: str, use_cache: bool = True):
        self.root = Path(root_path).resolve()
        self.use_cache = use_cache
        if not self.root.exists():
            raise FileNotFoundError(f"Project path not found: {root_path}")

    async def scan(self) -> ProjectAnalysis:
        """
        Perform full project scan.

        Returns:
            ProjectAnalysis with detected framework, styles, and files
        """
        # Check cache if enabled
        if self.use_cache:
            cache_key = str(self.root)
            if cache_key in self._cache:
                timestamp, cached_analysis = self._cache[cache_key]
                if time.time() - timestamp < self._cache_ttl:
                    return cached_analysis

        # Scan file system
        scan_result = self._scan_files()

        # Detect framework
        framework, version = self._detect_framework(scan_result.config_files)

        # Detect styling approaches
        styling = self._detect_styling(scan_result)

        # Collect style files
        style_files = self._analyze_style_files(scan_result.style_files)

        # Build analysis result
        analysis = ProjectAnalysis(
            project_path=str(self.root),
            framework=framework,
            framework_version=version,
            style_files=style_files,
            style_file_count=len(style_files),
            stats={
                "total_files_scanned": len(scan_result.files),
                "total_lines": scan_result.total_lines,
                "component_files": len(scan_result.component_files),
                "style_files": len(scan_result.style_files),
            }
        )

        # Determine primary styling approach
        if styling:
            analysis.styling_approaches = styling
            # Primary is the approach with the most occurrences
            analysis.primary_styling = max(styling, key=lambda x: x.count).type

        # Cache result if enabled
        if self.use_cache:
            cache_key = str(self.root)
            self._cache[cache_key] = (time.time(), analysis)

        return analysis

    def _scan_files(self) -> ScanResult:
        """Scan directory for relevant files."""
        result = ScanResult()

        for path in self.root.rglob("*"):
            # Skip directories in skip list
            if any(skip in path.parts for skip in SKIP_DIRS):
                continue

            if not path.is_file():
                continue

            suffix = path.suffix.lower()
            if suffix not in SCAN_EXTENSIONS:
                continue

            result.files.append(path)

            # Categorize files
            if suffix in {'.css', '.scss', '.sass', '.less', '.styl'}:
                result.style_files.append(path)
            elif suffix in {'.jsx', '.tsx'}:
                result.component_files.append(path)
            elif suffix in {'.js', '.ts'}:
                # Check if it's a component or config
                name = path.name.lower()
                if any(cfg in name for cfg in ['config', 'rc', '.config']):
                    result.config_files[name] = path
                elif self._looks_like_component(path):
                    result.component_files.append(path)

            # Count lines (approximate for large files)
|
||||
try:
|
||||
content = path.read_text(encoding='utf-8', errors='ignore')
|
||||
result.total_lines += content.count('\n') + 1
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Look for specific config files
|
||||
config_names = [
|
||||
'package.json',
|
||||
'tsconfig.json',
|
||||
'tailwind.config.js',
|
||||
'tailwind.config.ts',
|
||||
'next.config.js',
|
||||
'next.config.mjs',
|
||||
'vite.config.js',
|
||||
'vite.config.ts',
|
||||
'nuxt.config.js',
|
||||
'nuxt.config.ts',
|
||||
'.eslintrc.json',
|
||||
'.eslintrc.js',
|
||||
]
|
||||
|
||||
for name in config_names:
|
||||
config_path = self.root / name
|
||||
if config_path.exists():
|
||||
result.config_files[name] = config_path
|
||||
|
||||
return result
|
||||
|
||||
def _looks_like_component(self, path: Path) -> bool:
|
||||
"""Check if a JS/TS file looks like a React component."""
|
||||
name = path.stem
|
||||
# PascalCase is a strong indicator
|
||||
if name[0].isupper() and not name.isupper():
|
||||
return True
|
||||
# Common component patterns
|
||||
if any(x in name.lower() for x in ['component', 'page', 'view', 'screen']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _detect_framework(
|
||||
self,
|
||||
config_files: Dict[str, Path]
|
||||
) -> Tuple[Framework, str]:
|
||||
"""Detect the UI framework and version."""
|
||||
# Check package.json for dependencies
|
||||
pkg_json = config_files.get('package.json')
|
||||
if not pkg_json:
|
||||
return Framework.UNKNOWN, ""
|
||||
|
||||
try:
|
||||
pkg = json.loads(pkg_json.read_text())
|
||||
deps = {
|
||||
**pkg.get('dependencies', {}),
|
||||
**pkg.get('devDependencies', {}),
|
||||
}
|
||||
|
||||
# Check for Next.js first (it includes React)
|
||||
if 'next' in deps:
|
||||
return Framework.NEXT, deps.get('next', '').lstrip('^~')
|
||||
|
||||
# Check for Nuxt (Vue-based)
|
||||
if 'nuxt' in deps:
|
||||
return Framework.NUXT, deps.get('nuxt', '').lstrip('^~')
|
||||
|
||||
# Check for other frameworks
|
||||
if 'react' in deps:
|
||||
return Framework.REACT, deps.get('react', '').lstrip('^~')
|
||||
|
||||
if 'vue' in deps:
|
||||
return Framework.VUE, deps.get('vue', '').lstrip('^~')
|
||||
|
||||
if '@angular/core' in deps:
|
||||
return Framework.ANGULAR, deps.get('@angular/core', '').lstrip('^~')
|
||||
|
||||
if 'svelte' in deps:
|
||||
return Framework.SVELTE, deps.get('svelte', '').lstrip('^~')
|
||||
|
||||
if 'solid-js' in deps:
|
||||
return Framework.SOLID, deps.get('solid-js', '').lstrip('^~')
|
||||
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
pass
|
||||
|
||||
return Framework.UNKNOWN, ""
|
||||
|
||||
def _detect_styling(self, scan_result: ScanResult) -> List:
|
||||
"""Detect styling approaches used in the project."""
|
||||
from .base import StylePattern, Location
|
||||
|
||||
patterns: Dict[StylingApproach, StylePattern] = {}
|
||||
|
||||
# Check config files for styling indicators
|
||||
pkg_json = scan_result.config_files.get('package.json')
|
||||
if pkg_json:
|
||||
try:
|
||||
pkg = json.loads(pkg_json.read_text())
|
||||
deps = {
|
||||
**pkg.get('dependencies', {}),
|
||||
**pkg.get('devDependencies', {}),
|
||||
}
|
||||
|
||||
# Tailwind
|
||||
if 'tailwindcss' in deps:
|
||||
patterns[StylingApproach.TAILWIND] = StylePattern(
|
||||
type=StylingApproach.TAILWIND,
|
||||
count=1,
|
||||
examples=["tailwindcss in dependencies"]
|
||||
)
|
||||
|
||||
# styled-components
|
||||
if 'styled-components' in deps:
|
||||
patterns[StylingApproach.STYLED_COMPONENTS] = StylePattern(
|
||||
type=StylingApproach.STYLED_COMPONENTS,
|
||||
count=1,
|
||||
examples=["styled-components in dependencies"]
|
||||
)
|
||||
|
||||
# Emotion
|
||||
if '@emotion/react' in deps or '@emotion/styled' in deps:
|
||||
patterns[StylingApproach.EMOTION] = StylePattern(
|
||||
type=StylingApproach.EMOTION,
|
||||
count=1,
|
||||
examples=["@emotion in dependencies"]
|
||||
)
|
||||
|
||||
# SASS/SCSS
|
||||
if 'sass' in deps or 'node-sass' in deps:
|
||||
patterns[StylingApproach.SASS_SCSS] = StylePattern(
|
||||
type=StylingApproach.SASS_SCSS,
|
||||
count=1,
|
||||
examples=["sass in dependencies"]
|
||||
)
|
||||
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
pass
|
||||
|
||||
# Check tailwind config
|
||||
if 'tailwind.config.js' in scan_result.config_files or \
|
||||
'tailwind.config.ts' in scan_result.config_files:
|
||||
if StylingApproach.TAILWIND not in patterns:
|
||||
patterns[StylingApproach.TAILWIND] = StylePattern(
|
||||
type=StylingApproach.TAILWIND,
|
||||
count=1,
|
||||
examples=["tailwind.config found"]
|
||||
)
|
||||
|
||||
# Scan component files for styling patterns
|
||||
for comp_file in scan_result.component_files[:100]: # Limit for performance
|
||||
try:
|
||||
content = comp_file.read_text(encoding='utf-8', errors='ignore')
|
||||
self._detect_patterns_in_file(
|
||||
content, str(comp_file), patterns
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Check style files
|
||||
for style_file in scan_result.style_files:
|
||||
suffix = style_file.suffix.lower()
|
||||
|
||||
if suffix == '.css':
|
||||
# Check for CSS modules
|
||||
if '.module.css' in style_file.name.lower():
|
||||
approach = StylingApproach.CSS_MODULES
|
||||
else:
|
||||
approach = StylingApproach.VANILLA_CSS
|
||||
|
||||
if approach not in patterns:
|
||||
patterns[approach] = StylePattern(type=approach)
|
||||
patterns[approach].count += 1
|
||||
patterns[approach].locations.append(
|
||||
Location(str(style_file), 1)
|
||||
)
|
||||
|
||||
elif suffix in {'.scss', '.sass'}:
|
||||
if StylingApproach.SASS_SCSS not in patterns:
|
||||
patterns[StylingApproach.SASS_SCSS] = StylePattern(
|
||||
type=StylingApproach.SASS_SCSS
|
||||
)
|
||||
patterns[StylingApproach.SASS_SCSS].count += 1
|
||||
|
||||
return list(patterns.values())
|
||||
|
||||
def _detect_patterns_in_file(
|
||||
self,
|
||||
content: str,
|
||||
file_path: str,
|
||||
patterns: Dict[StylingApproach, Any]
|
||||
) -> None:
|
||||
"""Detect styling patterns in a single file."""
|
||||
from .base import StylePattern, Location
|
||||
|
||||
# CSS Modules import
|
||||
css_module_pattern = re.compile(
|
||||
r"import\s+\w+\s+from\s+['\"].*\.module\.(css|scss|sass)['\"]"
|
||||
)
|
||||
for match in css_module_pattern.finditer(content):
|
||||
if StylingApproach.CSS_MODULES not in patterns:
|
||||
patterns[StylingApproach.CSS_MODULES] = StylePattern(
|
||||
type=StylingApproach.CSS_MODULES
|
||||
)
|
||||
patterns[StylingApproach.CSS_MODULES].count += 1
|
||||
line_num = content[:match.start()].count('\n') + 1
|
||||
patterns[StylingApproach.CSS_MODULES].locations.append(
|
||||
Location(file_path, line_num)
|
||||
)
|
||||
|
||||
# styled-components
|
||||
styled_pattern = re.compile(
|
||||
r"(styled\.|styled\()|(from\s+['\"]styled-components['\"])"
|
||||
)
|
||||
for match in styled_pattern.finditer(content):
|
||||
if StylingApproach.STYLED_COMPONENTS not in patterns:
|
||||
patterns[StylingApproach.STYLED_COMPONENTS] = StylePattern(
|
||||
type=StylingApproach.STYLED_COMPONENTS
|
||||
)
|
||||
patterns[StylingApproach.STYLED_COMPONENTS].count += 1
|
||||
|
||||
# Emotion
|
||||
emotion_pattern = re.compile(
|
||||
r"(css`|@emotion|from\s+['\"]@emotion)"
|
||||
)
|
||||
for match in emotion_pattern.finditer(content):
|
||||
if StylingApproach.EMOTION not in patterns:
|
||||
patterns[StylingApproach.EMOTION] = StylePattern(
|
||||
type=StylingApproach.EMOTION
|
||||
)
|
||||
patterns[StylingApproach.EMOTION].count += 1
|
||||
|
||||
# Inline styles
|
||||
inline_pattern = re.compile(
|
||||
r'style\s*=\s*\{\s*\{[^}]+\}\s*\}'
|
||||
)
|
||||
for match in inline_pattern.finditer(content):
|
||||
if StylingApproach.INLINE_STYLES not in patterns:
|
||||
patterns[StylingApproach.INLINE_STYLES] = StylePattern(
|
||||
type=StylingApproach.INLINE_STYLES
|
||||
)
|
||||
patterns[StylingApproach.INLINE_STYLES].count += 1
|
||||
line_num = content[:match.start()].count('\n') + 1
|
||||
patterns[StylingApproach.INLINE_STYLES].locations.append(
|
||||
Location(file_path, line_num)
|
||||
)
|
||||
patterns[StylingApproach.INLINE_STYLES].examples.append(
|
||||
match.group(0)[:100]
|
||||
)
|
||||
|
||||
# Tailwind classes
|
||||
tailwind_pattern = re.compile(
|
||||
r'className\s*=\s*["\'][^"\']*(?:flex|grid|p-|m-|bg-|text-|border-)[^"\']*["\']'
|
||||
)
|
||||
for match in tailwind_pattern.finditer(content):
|
||||
if StylingApproach.TAILWIND not in patterns:
|
||||
patterns[StylingApproach.TAILWIND] = StylePattern(
|
||||
type=StylingApproach.TAILWIND
|
||||
)
|
||||
patterns[StylingApproach.TAILWIND].count += 1
|
||||
|
||||
def _analyze_style_files(self, style_paths: List[Path]) -> List[StyleFile]:
|
||||
"""Analyze style files for metadata."""
|
||||
style_files = []
|
||||
|
||||
for path in style_paths:
|
||||
try:
|
||||
content = path.read_text(encoding='utf-8', errors='ignore')
|
||||
|
||||
# Determine type
|
||||
suffix = path.suffix.lower()
|
||||
if '.module.' in path.name.lower():
|
||||
file_type = 'css-module'
|
||||
elif suffix == '.scss':
|
||||
file_type = 'scss'
|
||||
elif suffix == '.sass':
|
||||
file_type = 'sass'
|
||||
elif suffix == '.less':
|
||||
file_type = 'less'
|
||||
else:
|
||||
file_type = 'css'
|
||||
|
||||
# Count variables
|
||||
var_count = 0
|
||||
if file_type == 'css' or file_type == 'css-module':
|
||||
var_count = len(re.findall(r'--[\w-]+\s*:', content))
|
||||
elif file_type in {'scss', 'sass'}:
|
||||
var_count = len(re.findall(r'\$[\w-]+\s*:', content))
|
||||
|
||||
# Count selectors (approximate)
|
||||
selector_count = len(re.findall(r'[.#][\w-]+\s*\{', content))
|
||||
|
||||
# Find imports
|
||||
imports = re.findall(r'@import\s+["\']([^"\']+)["\']', content)
|
||||
|
||||
style_files.append(StyleFile(
|
||||
path=str(path.relative_to(self.root)),
|
||||
type=file_type,
|
||||
size_bytes=path.stat().st_size,
|
||||
line_count=content.count('\n') + 1,
|
||||
variable_count=var_count,
|
||||
selector_count=selector_count,
|
||||
imports=imports,
|
||||
))
|
||||
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
return style_files
|
||||
|
||||
def get_file_tree(self, max_depth: int = 3) -> Dict[str, Any]:
|
||||
"""Get project file tree structure."""
|
||||
def build_tree(path: Path, depth: int) -> Dict[str, Any]:
|
||||
if depth > max_depth:
|
||||
return {"...": "truncated"}
|
||||
|
||||
result = {}
|
||||
try:
|
||||
for item in sorted(path.iterdir()):
|
||||
if item.name in SKIP_DIRS:
|
||||
continue
|
||||
|
||||
if item.is_dir():
|
||||
result[item.name + "/"] = build_tree(item, depth + 1)
|
||||
elif item.suffix in SCAN_EXTENSIONS:
|
||||
result[item.name] = item.stat().st_size
|
||||
|
||||
except PermissionError:
|
||||
pass
|
||||
|
||||
return result
|
||||
|
||||
return build_tree(self.root, 0)
|
||||
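A usage sketch for ProjectScanner — a sketch assuming the demo/tools directory is importable; scan() is async and results are cached for 60 seconds per project path:

import asyncio
from analyze.scanner import ProjectScanner  # import path assumed (demo/tools on PYTHONPATH)

async def main():
    scanner = ProjectScanner("./my-app", use_cache=True)  # hypothetical project path
    analysis = await scanner.scan()
    print(analysis.framework, analysis.framework_version)
    print(analysis.stats)

asyncio.run(main())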
527
demo/tools/analyze/styles.py
Normal file
@@ -0,0 +1,527 @@
"""
Style Pattern Analyzer

Detects and analyzes style patterns in code to identify:
- Hardcoded values that should be tokens
- Duplicate values across files
- Inconsistent naming patterns
- Unused styles
"""

import re
from pathlib import Path
from typing import List, Dict, Any, Optional, Set, Tuple
from collections import defaultdict
from dataclasses import dataclass, field

from .base import (
    Location,
    TokenCandidate,
    StylePattern,
    StylingApproach,
)


# Color patterns
HEX_COLOR = re.compile(r'#(?:[0-9a-fA-F]{3}){1,2}\b')
RGB_COLOR = re.compile(r'rgba?\s*\(\s*\d+\s*,\s*\d+\s*,\s*\d+(?:\s*,\s*[\d.]+)?\s*\)')
HSL_COLOR = re.compile(r'hsla?\s*\(\s*\d+\s*,\s*[\d.]+%\s*,\s*[\d.]+%(?:\s*,\s*[\d.]+)?\s*\)')
OKLCH_COLOR = re.compile(r'oklch\s*\([^)]+\)')

# Dimension patterns
PX_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*px\b')
REM_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*rem\b')
EM_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*em\b')
PERCENT_VALUE = re.compile(r'\b(\d+(?:\.\d+)?)\s*%\b')

# Font patterns
FONT_SIZE = re.compile(r'font-size\s*:\s*([^;]+)')
FONT_FAMILY = re.compile(r'font-family\s*:\s*([^;]+)')
FONT_WEIGHT = re.compile(r'font-weight\s*:\s*(\d+|normal|bold|lighter|bolder)')
LINE_HEIGHT = re.compile(r'line-height\s*:\s*([^;]+)')

# Spacing patterns
MARGIN_PADDING = re.compile(r'(?:margin|padding)(?:-(?:top|right|bottom|left))?\s*:\s*([^;]+)')
GAP = re.compile(r'gap\s*:\s*([^;]+)')

# Border patterns
BORDER_RADIUS = re.compile(r'border-radius\s*:\s*([^;]+)')
BORDER_WIDTH = re.compile(r'border(?:-(?:top|right|bottom|left))?-width\s*:\s*([^;]+)')

# Shadow patterns
BOX_SHADOW = re.compile(r'box-shadow\s*:\s*([^;]+)')

# Z-index
Z_INDEX = re.compile(r'z-index\s*:\s*(\d+)')


@dataclass
class ValueOccurrence:
    """Tracks where a value appears."""
    value: str
    file: str
    line: int
    property: str  # CSS property name
    context: str  # Surrounding code


class StyleAnalyzer:
    """
    Analyzes style files and inline styles to find:
    - Hardcoded values that should be tokens
    - Duplicate values
    - Inconsistent patterns
    """

    def __init__(self, root_path: str):
        self.root = Path(root_path).resolve()
        self.values: Dict[str, List[ValueOccurrence]] = defaultdict(list)
        self.color_values: Dict[str, List[ValueOccurrence]] = defaultdict(list)
        self.spacing_values: Dict[str, List[ValueOccurrence]] = defaultdict(list)
        self.font_values: Dict[str, List[ValueOccurrence]] = defaultdict(list)

    async def analyze(
        self,
        include_inline: bool = True,
        include_css: bool = True
    ) -> Dict[str, Any]:
        """
        Analyze all styles in the project.

        Returns:
            Dict with analysis results including duplicates and candidates
        """
        # Reset collectors
        self.values.clear()
        self.color_values.clear()
        self.spacing_values.clear()
        self.font_values.clear()

        # Scan CSS/SCSS files
        if include_css:
            await self._scan_style_files()

        # Scan inline styles in JS/TS files
        if include_inline:
            await self._scan_inline_styles()

        # Analyze results
        duplicates = self._find_duplicates()
        candidates = self._generate_token_candidates()

        return {
            'total_values_found': sum(len(v) for v in self.values.values()),
            'unique_colors': len(self.color_values),
            'unique_spacing': len(self.spacing_values),
            'duplicates': duplicates,
            'token_candidates': candidates,
        }

    async def _scan_style_files(self) -> None:
        """Scan CSS and SCSS files for values."""
        skip_dirs = {'node_modules', '.git', 'dist', 'build'}

        for pattern in ['**/*.css', '**/*.scss', '**/*.sass', '**/*.less']:
            for file_path in self.root.rglob(pattern):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')
                    rel_path = str(file_path.relative_to(self.root))
                    self._extract_values_from_css(content, rel_path)
                except Exception:
                    continue

    async def _scan_inline_styles(self) -> None:
        """Scan JS/TS files for inline style values."""
        skip_dirs = {'node_modules', '.git', 'dist', 'build'}

        for pattern in ['**/*.jsx', '**/*.tsx', '**/*.js', '**/*.ts']:
            for file_path in self.root.rglob(pattern):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')
                    rel_path = str(file_path.relative_to(self.root))
                    self._extract_values_from_jsx(content, rel_path)
                except Exception:
                    continue

    def _extract_values_from_css(self, content: str, file_path: str) -> None:
        """Extract style values from CSS content."""
        lines = content.split('\n')

        for line_num, line in enumerate(lines, 1):
            # Skip comments and empty lines
            if not line.strip() or line.strip().startswith('//') or line.strip().startswith('/*'):
                continue

            # Extract colors
            for pattern in [HEX_COLOR, RGB_COLOR, HSL_COLOR, OKLCH_COLOR]:
                for match in pattern.finditer(line):
                    value = match.group(0).lower()
                    self._record_color(value, file_path, line_num, line.strip())

            # Extract dimensions
            for match in PX_VALUE.finditer(line):
                value = f"{match.group(1)}px"
                self._record_spacing(value, file_path, line_num, line.strip())

            for match in REM_VALUE.finditer(line):
                value = f"{match.group(1)}rem"
                self._record_spacing(value, file_path, line_num, line.strip())

            # Extract font properties
            for match in FONT_SIZE.finditer(line):
                value = match.group(1).strip()
                self._record_font(value, file_path, line_num, 'font-size', line.strip())

            for match in FONT_WEIGHT.finditer(line):
                value = match.group(1).strip()
                self._record_font(value, file_path, line_num, 'font-weight', line.strip())

            # Extract z-index
            for match in Z_INDEX.finditer(line):
                value = match.group(1)
                self._record_value(f"z-{value}", file_path, line_num, 'z-index', line.strip())

    def _extract_values_from_jsx(self, content: str, file_path: str) -> None:
        """Extract style values from JSX inline styles."""
        # Find style={{ ... }} blocks
        style_pattern = re.compile(r'style\s*=\s*\{\s*\{([^}]+)\}\s*\}', re.DOTALL)

        for match in style_pattern.finditer(content):
            style_content = match.group(1)
            line_num = content[:match.start()].count('\n') + 1

            # Parse the style object
            # Look for property: value patterns
            prop_pattern = re.compile(r'(\w+)\s*:\s*["\']?([^,\n"\']+)["\']?')

            for prop_match in prop_pattern.finditer(style_content):
                prop_name = prop_match.group(1)
                prop_value = prop_match.group(2).strip()

                # Check for colors
                if any(c in prop_name.lower() for c in ['color', 'background']):
                    if HEX_COLOR.search(prop_value) or RGB_COLOR.search(prop_value):
                        self._record_color(prop_value.lower(), file_path, line_num, style_content[:100])

                # Check for dimensions
                if PX_VALUE.search(prop_value):
                    self._record_spacing(prop_value, file_path, line_num, style_content[:100])

                if 'fontSize' in prop_name or 'fontWeight' in prop_name:
                    self._record_font(prop_value, file_path, line_num, prop_name, style_content[:100])

    def _record_color(self, value: str, file: str, line: int, context: str) -> None:
        """Record a color value occurrence."""
        normalized = self._normalize_color(value)
        self.color_values[normalized].append(ValueOccurrence(
            value=value,
            file=file,
            line=line,
            property='color',
            context=context,
        ))
        self.values[normalized].append(ValueOccurrence(
            value=value,
            file=file,
            line=line,
            property='color',
            context=context,
        ))

    def _record_spacing(self, value: str, file: str, line: int, context: str) -> None:
        """Record a spacing/dimension value occurrence."""
        self.spacing_values[value].append(ValueOccurrence(
            value=value,
            file=file,
            line=line,
            property='spacing',
            context=context,
        ))
        self.values[value].append(ValueOccurrence(
            value=value,
            file=file,
            line=line,
            property='spacing',
            context=context,
        ))

    def _record_font(self, value: str, file: str, line: int, prop: str, context: str) -> None:
        """Record a font-related value occurrence."""
        self.font_values[value].append(ValueOccurrence(
            value=value,
            file=file,
            line=line,
            property=prop,
            context=context,
        ))
        self.values[value].append(ValueOccurrence(
            value=value,
            file=file,
            line=line,
            property=prop,
            context=context,
        ))

    def _record_value(self, value: str, file: str, line: int, prop: str, context: str) -> None:
        """Record a generic value occurrence."""
        self.values[value].append(ValueOccurrence(
            value=value,
            file=file,
            line=line,
            property=prop,
            context=context,
        ))

    def _normalize_color(self, color: str) -> str:
        """Normalize color value for comparison."""
        color = color.lower().strip()
        # Expand 3-digit hex to 6-digit
        if re.match(r'^#[0-9a-f]{3}$', color):
            color = f"#{color[1]*2}{color[2]*2}{color[3]*2}"
        return color

    def _find_duplicates(self) -> List[Dict[str, Any]]:
        """Find values that appear multiple times."""
        duplicates = []

        for value, occurrences in self.values.items():
            if len(occurrences) >= 2:
                # Get unique files
                files = list(set(o.file for o in occurrences))

                duplicates.append({
                    'value': value,
                    'count': len(occurrences),
                    'files': files[:5],  # Limit to 5 files
                    'category': occurrences[0].property,
                    'locations': [
                        {'file': o.file, 'line': o.line}
                        for o in occurrences[:5]
                    ],
                })

        # Sort by count (most duplicated first)
        duplicates.sort(key=lambda x: x['count'], reverse=True)

        return duplicates[:50]  # Return top 50

    def _generate_token_candidates(self) -> List[TokenCandidate]:
        """Generate token suggestions for repeated values."""
        candidates = []

        # Color candidates
        for value, occurrences in self.color_values.items():
            if len(occurrences) >= 2:
                suggested_name = self._suggest_color_name(value)
                candidates.append(TokenCandidate(
                    value=value,
                    suggested_name=suggested_name,
                    category='colors',
                    occurrences=len(occurrences),
                    locations=[
                        Location(o.file, o.line) for o in occurrences[:5]
                    ],
                    confidence=min(0.9, 0.3 + (len(occurrences) * 0.1)),
                ))

        # Spacing candidates
        for value, occurrences in self.spacing_values.items():
            if len(occurrences) >= 3:  # Higher threshold for spacing
                suggested_name = self._suggest_spacing_name(value)
                candidates.append(TokenCandidate(
                    value=value,
                    suggested_name=suggested_name,
                    category='spacing',
                    occurrences=len(occurrences),
                    locations=[
                        Location(o.file, o.line) for o in occurrences[:5]
                    ],
                    confidence=min(0.8, 0.2 + (len(occurrences) * 0.05)),
                ))

        # Sort by confidence
        candidates.sort(key=lambda x: x.confidence, reverse=True)

        return candidates[:30]  # Return top 30

    def _suggest_color_name(self, color: str) -> str:
        """Suggest a token name for a color value."""
        # Common color mappings
        common_colors = {
            '#ffffff': 'color.white',
            '#000000': 'color.black',
            '#f3f4f6': 'color.neutral.100',
            '#e5e7eb': 'color.neutral.200',
            '#d1d5db': 'color.neutral.300',
            '#9ca3af': 'color.neutral.400',
            '#6b7280': 'color.neutral.500',
            '#4b5563': 'color.neutral.600',
            '#374151': 'color.neutral.700',
            '#1f2937': 'color.neutral.800',
            '#111827': 'color.neutral.900',
        }

        if color in common_colors:
            return common_colors[color]

        # Detect color family by hue (simplified)
        if color.startswith('#'):
            return f"color.custom.{color[1:7]}"

        return "color.custom.value"

    def _suggest_spacing_name(self, value: str) -> str:
        """Suggest a token name for a spacing value."""
        # Common spacing values
        spacing_map = {
            '0px': 'spacing.0',
            '4px': 'spacing.xs',
            '8px': 'spacing.sm',
            '12px': 'spacing.md',
            '16px': 'spacing.lg',
            '20px': 'spacing.lg',
            '24px': 'spacing.xl',
            '32px': 'spacing.2xl',
            '48px': 'spacing.3xl',
            '64px': 'spacing.4xl',
            '0.25rem': 'spacing.xs',
            '0.5rem': 'spacing.sm',
            '0.75rem': 'spacing.md',
            '1rem': 'spacing.lg',
            '1.5rem': 'spacing.xl',
            '2rem': 'spacing.2xl',
        }

        if value in spacing_map:
            return spacing_map[value]

        return f"spacing.custom.{value.replace('px', '').replace('rem', 'r')}"

    async def find_unused_styles(self) -> List[Dict[str, Any]]:
        """
        Find CSS classes/selectors that are not used in the codebase.

        Returns list of potentially unused styles.
        """
        # Collect all CSS class definitions
        css_classes = set()
        class_locations = {}

        skip_dirs = {'node_modules', '.git', 'dist', 'build'}

        for pattern in ['**/*.css', '**/*.scss']:
            for file_path in self.root.rglob(pattern):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')
                    rel_path = str(file_path.relative_to(self.root))

                    # Find class definitions
                    for match in re.finditer(r'\.([a-zA-Z_][\w-]*)\s*[{,]', content):
                        class_name = match.group(1)
                        css_classes.add(class_name)
                        class_locations[class_name] = rel_path

                except Exception:
                    continue

        # Collect all class usage in JS/JSX/TS/TSX
        used_classes = set()

        for pattern in ['**/*.jsx', '**/*.tsx', '**/*.js', '**/*.ts']:
            for file_path in self.root.rglob(pattern):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')

                    # Find className usage
                    for match in re.finditer(r'className\s*=\s*["\']([^"\']+)["\']', content):
                        classes = match.group(1).split()
                        used_classes.update(classes)

                    # Find styles.xxx usage (CSS modules)
                    for match in re.finditer(r'styles\.(\w+)', content):
                        used_classes.add(match.group(1))

                except Exception:
                    continue

        # Find unused
        unused = css_classes - used_classes

        return [
            {
                'class': cls,
                'file': class_locations.get(cls, 'unknown'),
            }
            for cls in sorted(unused)
        ][:50]  # Limit results

    async def analyze_naming_consistency(self) -> Dict[str, Any]:
        """
        Analyze naming consistency across style files.

        Returns analysis of naming patterns and inconsistencies.
        """
        patterns = {
            'kebab-case': [],  # my-class-name
            'camelCase': [],   # myClassName
            'snake_case': [],  # my_class_name
            'BEM': [],         # block__element--modifier
        }

        skip_dirs = {'node_modules', '.git', 'dist', 'build'}

        for pattern in ['**/*.css', '**/*.scss']:
            for file_path in self.root.rglob(pattern):
                if any(skip in file_path.parts for skip in skip_dirs):
                    continue

                try:
                    content = file_path.read_text(encoding='utf-8', errors='ignore')
                    rel_path = str(file_path.relative_to(self.root))

                    # Find class names
                    for match in re.finditer(r'\.([a-zA-Z_][\w-]*)', content):
                        name = match.group(1)
                        line = content[:match.start()].count('\n') + 1

                        # Classify naming pattern
                        if '__' in name or '--' in name:
                            patterns['BEM'].append({'name': name, 'file': rel_path, 'line': line})
                        elif '_' in name:
                            patterns['snake_case'].append({'name': name, 'file': rel_path, 'line': line})
                        elif '-' in name:
                            patterns['kebab-case'].append({'name': name, 'file': rel_path, 'line': line})
                        elif name != name.lower():
                            patterns['camelCase'].append({'name': name, 'file': rel_path, 'line': line})

                except Exception:
                    continue

        # Calculate primary pattern
        pattern_counts = {k: len(v) for k, v in patterns.items()}
        primary = max(pattern_counts, key=pattern_counts.get) if any(pattern_counts.values()) else None

        # Find inconsistencies (patterns different from primary)
        inconsistencies = []
        if primary:
            for pattern_type, items in patterns.items():
                if pattern_type != primary and items:
                    inconsistencies.extend(items[:10])

        return {
            'pattern_counts': pattern_counts,
            'primary_pattern': primary,
            'inconsistencies': inconsistencies[:20],
        }
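A usage sketch for StyleAnalyzer under the same import assumption; analyze() returns plain dicts for duplicates plus TokenCandidate objects for suggested tokens:

import asyncio
from analyze.styles import StyleAnalyzer  # import path assumed (demo/tools on PYTHONPATH)

async def main():
    analyzer = StyleAnalyzer("./my-app")  # hypothetical project path
    report = await analyzer.analyze()
    print(report['unique_colors'], 'unique colors')
    for cand in report['token_candidates'][:5]:
        print(cand.suggested_name, cand.value, cand.occurrences, cand.confidence)

asyncio.run(main())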
38
demo/tools/api/.dss/discovery.json
Normal file
@@ -0,0 +1,38 @@
{
  "meta": {
    "version": "1.0.0",
    "timestamp": "2025-12-05T18:07:46Z",
    "project_path": ".",
    "full_scan": false
  },
  "project": {
    "types": [
      "python"
    ],
    "frameworks": [
      "fastapi"
    ]
  },
  "design_system": {"detected":true,"type":"custom","has_tokens":true},
  "files": {
    "total": 7,
    "javascript": 0,
    "css": 1,
    "python": 2,
    "components": 0
  },
  "dependencies": {"python":6,"total":6},
  "git": {"is_repo":false},
  "health": {
    "score": 85,
    "grade": "B",
    "issues": ["Missing README","No test directory found"]
  },
  "css": {
    "files": 1,
    "preprocessor": "none",
    "has_css_variables": true,
    "has_preprocessor_variables": false
  },
  "components": []
}
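The discovery file is plain JSON, so consumers can read it directly; for example (path relative to demo/tools/api, where the file lives):

import json
from pathlib import Path

doc = json.loads(Path(".dss/discovery.json").read_text())
print(doc["health"]["score"], doc["health"]["grade"])  # 85 B
print(doc["project"]["frameworks"])                    # ['fastapi']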
8
demo/tools/api/admin-ui/css/tokens.css
Normal file
@@ -0,0 +1,8 @@
:root {
  --primary: rgb(51, 102, 229);
  --secondary: rgb(127, 127, 127);
  --background: rgb(255, 255, 255);
  --space-1: 4px;
  --space-2: 8px;
  --space-4: 16px;
}
24
demo/tools/api/dss-api.service
Normal file
@@ -0,0 +1,24 @@
[Unit]
Description=Design System Server (DSS) - Portable Server
Documentation=https://github.com/overbits/design-system-swarm
After=network.target

[Service]
Type=simple
User=overbits
Group=overbits
WorkingDirectory=/home/overbits/apps/design-system-swarm/tools/api
Environment=PATH=/home/overbits/.local/bin:/usr/local/bin:/usr/bin:/bin
Environment=PYTHONPATH=/home/overbits/apps/design-system-swarm/tools
Environment=PORT=3456
Environment=HOST=127.0.0.1
Environment=NODE_ENV=production
ExecStart=/usr/bin/python3 -m uvicorn server:app --host 127.0.0.1 --port 3456
Restart=on-failure
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=dss

[Install]
WantedBy=multi-user.target
17
demo/tools/api/dss-mcp.service
Normal file
@@ -0,0 +1,17 @@
[Unit]
Description=DSS MCP Server - Design System Server for AI Agents
After=network.target

[Service]
Type=simple
User=overbits
Group=overbits
WorkingDirectory=/home/overbits/apps/design-system-swarm/tools/api
Environment="PATH=/home/overbits/apps/design-system-swarm/.venv/bin:/usr/bin"
Environment="PYTHONPATH=/home/overbits/apps/design-system-swarm/tools"
ExecStart=/home/overbits/apps/design-system-swarm/.venv/bin/python mcp_server.py sse
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
1447
demo/tools/api/mcp_server.py
Normal file
File diff suppressed because it is too large
6
demo/tools/api/requirements.txt
Normal file
@@ -0,0 +1,6 @@
fastapi>=0.100.0
uvicorn[standard]>=0.23.0
httpx>=0.24.0
python-dotenv>=1.0.0
pydantic>=2.0.0
mcp>=1.0.0
2092
demo/tools/api/server.py
Normal file
File diff suppressed because it is too large
6
demo/tools/auth/__init__.py
Normal file
@@ -0,0 +1,6 @@
"""
Authentication Module

Atlassian-based authentication for DSS.
Users authenticate with their Jira/Confluence credentials.
"""
246
demo/tools/auth/atlassian_auth.py
Normal file
@@ -0,0 +1,246 @@
"""
Atlassian-based Authentication

Validates users by verifying their Atlassian (Jira/Confluence) credentials.
On successful login, creates a JWT token for subsequent requests.
"""

import os
import jwt
import hashlib
from datetime import datetime, timedelta
from typing import Optional, Dict, Any
from atlassian import Jira, Confluence

from storage.database import get_connection


class AtlassianAuth:
    """
    Authentication using Atlassian API credentials.

    Users provide:
    - Atlassian URL (Jira or Confluence)
    - Email
    - API Token

    On successful validation, we:
    1. Verify credentials against Atlassian API
    2. Store user in database
    3. Generate JWT token
    """

    def __init__(self):
        self.jwt_secret = os.getenv("JWT_SECRET", "change-me-in-production")
        self.jwt_algorithm = "HS256"
        self.jwt_expiry_hours = int(os.getenv("JWT_EXPIRY_HOURS", "24"))

    async def verify_atlassian_credentials(
        self,
        url: str,
        email: str,
        api_token: str,
        service: str = "jira"
    ) -> Dict[str, Any]:
        """
        Verify Atlassian credentials by making a test API call.

        Args:
            url: Atlassian URL (e.g., https://yourcompany.atlassian.net)
            email: User email
            api_token: Atlassian API token (use "1234" for mock mode)
            service: "jira" or "confluence"

        Returns:
            User info dict if valid, raises exception if invalid
        """
        # Mock mode for development/testing
        if api_token == "1234":
            return {
                "email": email,
                "display_name": email.split("@")[0].title().replace(".", " ") + " (Mock)",
                "account_id": "mock_" + hashlib.md5(email.encode()).hexdigest()[:8],
                "atlassian_url": url or "https://mock.atlassian.net",
                "service": service,
                "verified": True,
                "mock_mode": True
            }

        try:
            if service == "jira":
                client = Jira(url=url, username=email, password=api_token)
                # Test API call - get current user
                user_info = client.myself()
            else:  # confluence
                client = Confluence(url=url, username=email, password=api_token)
                # Test API call - get current user
                user_info = client.get_current_user()

            return {
                "email": email,
                "display_name": user_info.get("displayName", email),
                "account_id": user_info.get("accountId"),
                "atlassian_url": url,
                "service": service,
                "verified": True,
                "mock_mode": False
            }
        except Exception as e:
            raise ValueError(f"Invalid Atlassian credentials: {str(e)}")

    def hash_api_token(self, api_token: str) -> str:
        """Hash API token for storage (we don't store plain tokens)"""
        return hashlib.sha256(api_token.encode()).hexdigest()

    async def login(
        self,
        url: str,
        email: str,
        api_token: str,
        service: str = "jira"
    ) -> Dict[str, Any]:
        """
        Authenticate user with Atlassian credentials.

        Returns:
            {
                "token": "jwt_token",
                "user": {...},
                "expires_at": "iso_timestamp"
            }
        """
        # Verify credentials against Atlassian
        user_info = await self.verify_atlassian_credentials(
            url, email, api_token, service
        )

        # Hash the API token
        token_hash = self.hash_api_token(api_token)

        # Store or update user in database
        with get_connection() as conn:
            # Check if user exists
            existing = conn.execute(
                "SELECT id, email FROM users WHERE email = ?",
                (email,)
            ).fetchone()

            if existing:
                # Update existing user
                user_id = existing["id"]
                conn.execute(
                    """
                    UPDATE users
                    SET display_name = ?,
                        atlassian_url = ?,
                        atlassian_service = ?,
                        api_token_hash = ?,
                        last_login = ?
                    WHERE id = ?
                    """,
                    (
                        user_info["display_name"],
                        url,
                        service,
                        token_hash,
                        datetime.utcnow().isoformat(),
                        user_id
                    )
                )
            else:
                # Create new user
                cursor = conn.execute(
                    """
                    INSERT INTO users (
                        email, display_name, atlassian_url, atlassian_service,
                        api_token_hash, created_at, last_login
                    )
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                    """,
                    (
                        email,
                        user_info["display_name"],
                        url,
                        service,
                        token_hash,
                        datetime.utcnow().isoformat(),
                        datetime.utcnow().isoformat()
                    )
                )
                user_id = cursor.lastrowid

        # Generate JWT token
        expires_at = datetime.utcnow() + timedelta(hours=self.jwt_expiry_hours)
        token_payload = {
            "user_id": user_id,
            "email": email,
            "display_name": user_info["display_name"],
            "exp": expires_at,
            "iat": datetime.utcnow()
        }

        jwt_token = jwt.encode(
            token_payload,
            self.jwt_secret,
            algorithm=self.jwt_algorithm
        )

        return {
            "token": jwt_token,
            "user": {
                "id": user_id,
                "email": email,
                "display_name": user_info["display_name"],
                "atlassian_url": url,
                "service": service
            },
            "expires_at": expires_at.isoformat()
        }

    def verify_token(self, token: str) -> Optional[Dict[str, Any]]:
        """
        Verify JWT token and return user info.

        Returns:
            User dict if valid, None if invalid/expired
        """
        try:
            payload = jwt.decode(
                token,
                self.jwt_secret,
                algorithms=[self.jwt_algorithm]
            )
            return payload
        except jwt.ExpiredSignatureError:
            return None
        except jwt.InvalidTokenError:
            return None

    async def get_user_by_id(self, user_id: int) -> Optional[Dict[str, Any]]:
        """Get user information by ID"""
        with get_connection() as conn:
            user = conn.execute(
                """
                SELECT id, email, display_name, atlassian_url, atlassian_service,
                       created_at, last_login
                FROM users
                WHERE id = ?
                """,
                (user_id,)
            ).fetchone()

            if user:
                return dict(user)
            return None


# Singleton instance
_auth_instance: Optional[AtlassianAuth] = None


def get_auth() -> AtlassianAuth:
    """Get singleton auth instance"""
    global _auth_instance
    if _auth_instance is None:
        _auth_instance = AtlassianAuth()
    return _auth_instance
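A login sketch using the documented mock mode (api_token "1234" skips the live Atlassian call); it still assumes the users table behind storage.database exists and that demo/tools is on PYTHONPATH:

import asyncio
from auth.atlassian_auth import get_auth  # import path assumed

async def main():
    auth = get_auth()
    session = await auth.login("https://mock.atlassian.net", "dev@example.com", "1234")
    assert auth.verify_token(session["token"]) is not None
    print(session["user"]["display_name"], session["expires_at"])

asyncio.run(main())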
127
demo/tools/config.py
Normal file
@@ -0,0 +1,127 @@
"""
DSS Configuration Management

Secure configuration loading with:
- Environment variables (highest priority)
- .env files
- Default values

Never logs or exposes sensitive values.
"""

import os
from pathlib import Path
from typing import Optional
from dataclasses import dataclass

# Try to load dotenv if available
try:
    from dotenv import load_dotenv
    # Load from multiple possible locations (first found wins)
    project_root = Path(__file__).parent.parent
    env_locations = [
        project_root / ".env",               # Project root
        project_root / "dss-mvp1" / ".env",  # dss-mvp1 subdirectory
    ]
    for env_path in env_locations:
        if env_path.exists():
            load_dotenv(env_path)
            break
except ImportError:
    pass


@dataclass
class FigmaConfig:
    """Figma API configuration."""
    token: Optional[str] = None
    cache_ttl: int = 300  # 5 minutes

    @classmethod
    def from_env(cls) -> "FigmaConfig":
        return cls(
            token=os.getenv("FIGMA_TOKEN"),
            cache_ttl=int(os.getenv("FIGMA_CACHE_TTL", "300"))
        )

    @property
    def is_configured(self) -> bool:
        return bool(self.token)


@dataclass
class DatabaseConfig:
    """Database configuration."""
    path: str = ".dss/dss.db"

    @classmethod
    def from_env(cls) -> "DatabaseConfig":
        return cls(
            path=os.getenv("DATABASE_PATH", ".dss/dss.db")
        )


@dataclass
class ServerConfig:
    """Server configuration."""
    port: int = 3456
    host: str = "0.0.0.0"
    env: str = "development"
    log_level: str = "info"

    @classmethod
    def from_env(cls) -> "ServerConfig":
        return cls(
            port=int(os.getenv("PORT", "3456")),
            host=os.getenv("HOST", "0.0.0.0"),
            env=os.getenv("NODE_ENV", "development"),
            log_level=os.getenv("LOG_LEVEL", "info")
        )

    @property
    def is_production(self) -> bool:
        return self.env == "production"


@dataclass
class Config:
    """Main configuration container."""
    figma: FigmaConfig
    database: DatabaseConfig
    server: ServerConfig

    @classmethod
    def load(cls) -> "Config":
        """Load configuration from environment."""
        return cls(
            figma=FigmaConfig.from_env(),
            database=DatabaseConfig.from_env(),
            server=ServerConfig.from_env()
        )

    def summary(self) -> dict:
        """Return config summary (no secrets)."""
        return {
            "figma": {
                "configured": self.figma.is_configured,
                "cache_ttl": self.figma.cache_ttl
            },
            "database": {
                "path": self.database.path
            },
            "server": {
                "port": self.server.port,
                "env": self.server.env,
                "log_level": self.server.log_level
            }
        }


# Global config instance
config = Config.load()


if __name__ == "__main__":
    import json
    print("DSS Configuration:")
    print(json.dumps(config.summary(), indent=2))
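Because the module builds the global config at import time, environment overrides must be set before import (or the module reloaded); a sketch, assuming demo/tools is on PYTHONPATH:

import importlib
import os

os.environ["PORT"] = "4000"
os.environ["NODE_ENV"] = "production"

import config  # demo/tools/config.py; import path assumed
importlib.reload(config)  # rebuild the module-level Config after the env change

assert config.config.server.is_production
print(config.config.summary())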
121
demo/tools/discovery/discover-docker.sh
Executable file
@@ -0,0 +1,121 @@
#!/bin/bash
#
# DSS - Docker Discovery
# Container status, images, networks, volumes
#

set -e

PROJECT_PATH="${1:-.}"

# Check if Docker is available
if ! command -v docker &> /dev/null; then
    cat <<EOF
{
  "scan_type": "docker",
  "available": false,
  "message": "Docker not installed or not in PATH"
}
EOF
    exit 0
fi

# Check if Docker daemon is running
if ! docker info &> /dev/null; then
    cat <<EOF
{
  "scan_type": "docker",
  "available": true,
  "daemon_running": false,
  "message": "Docker daemon not running or no permissions"
}
EOF
    exit 0
fi

# Get running containers (emits a comma-separated JSON object list)
get_containers() {
    local containers=()

    while IFS= read -r line; do
        if [[ -n "$line" ]]; then
            local id=$(echo "$line" | cut -d'|' -f1)
            local name=$(echo "$line" | cut -d'|' -f2)
            local image=$(echo "$line" | cut -d'|' -f3)
            local status=$(echo "$line" | cut -d'|' -f4)
            local ports=$(echo "$line" | cut -d'|' -f5 | sed 's/"/\\"/g')

            containers+=("{\"id\":\"$id\",\"name\":\"$name\",\"image\":\"$image\",\"status\":\"$status\",\"ports\":\"$ports\"}")
        fi
    done < <(docker ps --format '{{.ID}}|{{.Names}}|{{.Image}}|{{.Status}}|{{.Ports}}' 2>/dev/null)

    local IFS=','
    echo "${containers[*]}"
}

# Get images (emits a comma-separated JSON object list)
get_images() {
    local images=()

    while IFS= read -r line; do
        if [[ -n "$line" ]]; then
            local repo=$(echo "$line" | cut -d'|' -f1)
            local tag=$(echo "$line" | cut -d'|' -f2)
            local size=$(echo "$line" | cut -d'|' -f3)

            images+=("{\"repository\":\"$repo\",\"tag\":\"$tag\",\"size\":\"$size\"}")
        fi
    done < <(docker images --format '{{.Repository}}|{{.Tag}}|{{.Size}}' 2>/dev/null | head -20)

    local IFS=','
    echo "${images[*]}"
}

# Check for docker-compose files
get_compose_info() {
    local compose_files=()

    for file in "docker-compose.yml" "docker-compose.yaml" "compose.yml" "compose.yaml"; do
        if [[ -f "$PROJECT_PATH/$file" ]]; then
            local services=$(grep -E "^  [a-zA-Z]" "$PROJECT_PATH/$file" 2>/dev/null | sed 's/://g' | tr -d ' ' | head -10)
            compose_files+=("{\"file\":\"$file\",\"services\":$(echo "$services" | jq -R -s 'split("\n") | map(select(. != ""))')}")
        fi
    done

    local IFS=','
    echo "${compose_files[*]}"
}

# Get resource usage
get_stats() {
    local stats=$(docker stats --no-stream --format '{"name":"{{.Name}}","cpu":"{{.CPUPerc}}","memory":"{{.MemUsage}}"}' 2>/dev/null | head -10 | tr '\n' ',' | sed 's/,$//')
    echo "[$stats]"
}

# Build output (functions already join entries with commas)
containers_json=$(get_containers)
images_json=$(get_images)
compose_json=$(get_compose_info)
stats=$(get_stats)

cat <<EOF
{
  "scan_type": "docker",
  "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "available": true,
  "daemon_running": true,
  "docker_version": "$(docker --version | cut -d' ' -f3 | tr -d ',')",
  "containers": {
    "running": $(docker ps -q 2>/dev/null | wc -l),
    "total": $(docker ps -aq 2>/dev/null | wc -l),
    "list": [${containers_json:-}]
  },
  "images": {
    "total": $(docker images -q 2>/dev/null | wc -l),
    "list": [${images_json:-}]
  },
  "compose_files": [${compose_json:-}],
  "resource_usage": ${stats:-[]}
}
EOF
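Each discovery script prints a single JSON document on stdout, so callers can shell out and parse it; a sketch (script path assumed relative to the repo root):

import json
import subprocess

out = subprocess.run(
    ["bash", "demo/tools/discovery/discover-docker.sh", "."],
    capture_output=True, text=True, check=True,
).stdout
report = json.loads(out)
print(report["scan_type"], report.get("available"))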
153
demo/tools/discovery/discover-env.sh
Executable file
@@ -0,0 +1,153 @@
#!/bin/bash
#
# DSS - Environment Variable Analysis
# Checks environment configuration (names only, no values)
#

set -e

PROJECT_PATH="${1:-.}"

# Common env vars that should be set
REQUIRED_VARS=(
    "NODE_ENV"
    "PORT"
)

# Optional but recommended
RECOMMENDED_VARS=(
    "LOG_LEVEL"
    "DATABASE_URL"
    "API_URL"
)

# Sensitive vars that should NOT be in code
SENSITIVE_PATTERNS=(
    "API_KEY"
    "SECRET"
    "PASSWORD"
    "TOKEN"
    "PRIVATE"
    "AWS_"
    "FIGMA_TOKEN"
)

# Find env files (emits a comma-separated JSON object list)
find_env_files() {
    local files=()

    for pattern in ".env" ".env.local" ".env.development" ".env.production" ".env.example"; do
        if [[ -f "$PROJECT_PATH/$pattern" ]]; then
            local var_count
            var_count=$(grep -cE "^[A-Z_]+=" "$PROJECT_PATH/$pattern" 2>/dev/null || true)
            var_count=${var_count:-0}
            local has_values="false"

            # Check if file has actual values (not just placeholders)
            if grep -qE "^[A-Z_]+=.+" "$PROJECT_PATH/$pattern" 2>/dev/null; then
                if ! grep -qE "^[A-Z_]+=(your_|<|placeholder)" "$PROJECT_PATH/$pattern" 2>/dev/null; then
                    has_values="true"
                fi
            fi

            files+=("{\"file\":\"$pattern\",\"variables\":$var_count,\"has_real_values\":$has_values}")
        fi
    done

    local IFS=','
    echo "${files[*]}"
}

# Get var names from env files (not values)
get_env_var_names() {
    local vars=()

    for file in "$PROJECT_PATH"/.env*; do
        if [[ -f "$file" ]]; then
            while IFS= read -r varname; do
                if [[ -n "$varname" && ! " ${vars[*]} " =~ " $varname " ]]; then
                    vars+=("\"$varname\"")
                fi
            done < <(grep -oE "^[A-Z_][A-Z0-9_]*" "$file" 2>/dev/null)
        fi
    done

    local IFS=','
    echo "${vars[*]}"
}

# Check for hardcoded sensitive vars in code
check_hardcoded_secrets() {
    local findings=()

    for pattern in "${SENSITIVE_PATTERNS[@]}"; do
        local found=$(grep -rEl "${pattern}.*=.*['\"][^'\"]+['\"]" "$PROJECT_PATH" \
            --include="*.js" --include="*.ts" --include="*.py" \
            --exclude-dir=node_modules --exclude-dir=.git \
            2>/dev/null | head -5)

        if [[ -n "$found" ]]; then
            while IFS= read -r file; do
                if [[ -n "$file" ]]; then
                    findings+=("{\"file\":\"${file#$PROJECT_PATH/}\",\"pattern\":\"$pattern\"}")
                fi
            done <<< "$found"
        fi
    done

    local IFS=','
    echo "${findings[*]}"
}

# Check current environment
check_current_env() {
    local status=()

    for var in "${REQUIRED_VARS[@]}"; do
        if [[ -n "${!var}" ]]; then
            status+=("{\"var\":\"$var\",\"status\":\"set\"}")
        else
            status+=("{\"var\":\"$var\",\"status\":\"missing\"}")
        fi
    done

    for var in "${RECOMMENDED_VARS[@]}"; do
        if [[ -n "${!var}" ]]; then
            status+=("{\"var\":\"$var\",\"status\":\"set\",\"required\":false}")
        fi
    done

    local IFS=','
    echo "${status[*]}"
}

# Build output (functions already join entries with commas)
files_json=$(find_env_files)
names_json=$(get_env_var_names)
hardcoded_json=$(check_hardcoded_secrets)
current_json=$(check_current_env)

# Calculate readiness score
readiness="ready"
[[ -z "$files_json" ]] && readiness="missing_config"
[[ -n "$hardcoded_json" ]] && readiness="has_hardcoded_secrets"

cat <<EOF
{
  "scan_type": "environment",
  "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "project_path": "$PROJECT_PATH",
  "readiness": "$readiness",
  "env_files": [${files_json:-}],
  "variables_defined": [${names_json:-}],
  "current_environment": [${current_json:-}],
  "hardcoded_secrets": [${hardcoded_json:-}],
  "recommendations": [
    "Use .env.example for template (no real values)",
    "Add .env* to .gitignore",
    "Use environment variables for all secrets",
    "Consider using a secrets manager for production"
  ]
}
EOF
102
demo/tools/discovery/discover-ports.sh
Executable file
@@ -0,0 +1,102 @@
#!/bin/bash
#
# DSS - Service & Port Discovery
# Lists running services, bound ports, and process relationships
#

set -e

# Get listening ports
get_listening_ports() {
    local ports=()

    # Use ss if available, fallback to netstat
    if command -v ss &> /dev/null; then
        while IFS= read -r line; do
            local port=$(echo "$line" | awk '{print $5}' | grep -oE '[0-9]+$')
            local process=$(echo "$line" | awk '{print $7}' | sed 's/users:(("//' | sed 's/",.*//')
            if [[ -n "$port" && "$port" =~ ^[0-9]+$ ]]; then
                ports+=("{\"port\":$port,\"process\":\"$process\",\"state\":\"LISTEN\"}")
            fi
        done < <(ss -tlnp 2>/dev/null | tail -n +2)
    elif command -v netstat &> /dev/null; then
        while IFS= read -r line; do
            local port=$(echo "$line" | awk '{print $4}' | grep -oE '[0-9]+$')
            local process=$(echo "$line" | awk '{print $7}')
            if [[ -n "$port" && "$port" =~ ^[0-9]+$ ]]; then
                ports+=("{\"port\":$port,\"process\":\"$process\",\"state\":\"LISTEN\"}")
            fi
        done < <(netstat -tlnp 2>/dev/null | grep LISTEN)
    fi

    echo "${ports[@]}"
}

# Check common development ports
check_dev_ports() {
    local common_ports=(
        "3000:Node.js/React Dev"
        "3456:DSS Worker"
        "5000:Flask/Python"
        "5173:Vite"
        "8000:Django/FastAPI"
        "8080:Generic HTTP"
        "8888:Jupyter"
        "9000:PHP-FPM"
        "27017:MongoDB"
        "5432:PostgreSQL"
        "3306:MySQL"
        "6379:Redis"
    )

    local status=()

    for entry in "${common_ports[@]}"; do
        local port="${entry%%:*}"
        local name="${entry#*:}"

        if ss -tln 2>/dev/null | grep -q ":$port " || netstat -tln 2>/dev/null | grep -q ":$port "; then
            status+=("{\"port\":$port,\"name\":\"$name\",\"active\":true}")
        fi
    done

    echo "${status[@]}"
}

# Get service health for known ports
check_health() {
    local results=()

    # Check DSS Worker
    if curl -s --connect-timeout 2 "http://localhost:3456/health" > /dev/null 2>&1; then
        local health=$(curl -s "http://localhost:3456/health" 2>/dev/null)
        results+=("{\"service\":\"dss-worker\",\"port\":3456,\"healthy\":true,\"response\":$health}")
    fi

    # Check if port 8000 responds
    if curl -s --connect-timeout 2 "http://localhost:8000" > /dev/null 2>&1; then
        results+=("{\"service\":\"orchestrator\",\"port\":8000,\"healthy\":true}")
    fi

    echo "${results[@]}"
}

# Build output
listening=$(get_listening_ports)
dev_ports=$(check_dev_ports)
health=$(check_health)

listening_json=$(IFS=,; echo "${listening[*]}")
dev_json=$(IFS=,; echo "${dev_ports[*]}")
health_json=$(IFS=,; echo "${health[*]}")

cat <<EOF
{
  "scan_type": "ports",
  "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "hostname": "$(hostname)",
  "listening_ports": [${listening_json:-}],
  "dev_services": [${dev_json:-}],
  "health_checks": [${health_json:-}]
}
EOF
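As a usage sketch, the port report can be consumed the same way; the expected-service list below is an assumption that mirrors the dev ports checked above.

# Sketch: flag expected dev services that are not currently listening.
import json
import subprocess

report = json.loads(subprocess.run(
    ["demo/tools/discovery/discover-ports.sh"],
    capture_output=True, text=True, check=True,
).stdout)
active_ports = {entry["port"] for entry in report["dev_services"]}
for port, name in [(3456, "DSS Worker"), (8000, "Django/FastAPI")]:
    if port not in active_ports:
        print(f"{name} (:{port}) is not listening")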
117
demo/tools/discovery/discover-secrets.sh
Executable file
117
demo/tools/discovery/discover-secrets.sh
Executable file
@@ -0,0 +1,117 @@
#!/bin/bash
#
# DSS - Secret Scanner
# Non-destructive scan for potential exposed secrets
# Outputs JSON with risk report (no actual secret values)
#

set -e

PROJECT_PATH="${1:-.}"

# Patterns to detect (regex)
SECRET_PATTERNS=(
    "password\s*[:=]\s*['\"][^'\"]+['\"]"
    "api[_-]?key\s*[:=]\s*['\"][^'\"]+['\"]"
    "secret[_-]?key\s*[:=]\s*['\"][^'\"]+['\"]"
    "access[_-]?token\s*[:=]\s*['\"][^'\"]+['\"]"
    "private[_-]?key\s*[:=]\s*['\"][^'\"]+['\"]"
    "aws[_-]?access"
    "AKIA[0-9A-Z]{16}"
    "ghp_[a-zA-Z0-9]{36}"
    "sk-[a-zA-Z0-9]{48}"
)

# Files to ignore
IGNORE_DIRS="node_modules|\.git|dist|build|__pycache__|\.next|venv"

# Initialize results
declare -a findings

scan_for_secrets() {
    local pattern="$1"
    local results

    results=$(grep -rEil "$pattern" "$PROJECT_PATH" \
        --include="*.js" --include="*.ts" --include="*.py" \
        --include="*.json" --include="*.yaml" --include="*.yml" \
        --include="*.env*" --include="*.config.*" \
        2>/dev/null | grep -vE "$IGNORE_DIRS" | head -20 || true)

    if [[ -n "$results" ]]; then
        while IFS= read -r file; do
            if [[ -n "$file" ]]; then
                # Get line count without revealing content
                local count=$(grep -cEi "$pattern" "$file" 2>/dev/null || true)
                count=${count:-0}
findings+=("{\"file\":\"${file#$PROJECT_PATH/}\",\"pattern\":\"${pattern:0:30}...\",\"matches\":$count}")
|
||||
fi
|
||||
done <<< "$results"
|
||||
fi
|
||||
}
|
||||
|
||||
# Check for common secret files
|
||||
check_secret_files() {
|
||||
local risky_files=(
|
||||
".env"
|
||||
".env.local"
|
||||
".env.production"
|
||||
"credentials.json"
|
||||
"secrets.json"
|
||||
"config/secrets.yml"
|
||||
".aws/credentials"
|
||||
"id_rsa"
|
||||
"id_ed25519"
|
||||
"*.pem"
|
||||
"*.key"
|
||||
)
|
||||
|
||||
for pattern in "${risky_files[@]}"; do
|
||||
        local found=$(find "$PROJECT_PATH" -name "$pattern" -type f 2>/dev/null | grep -vE "$IGNORE_DIRS" | head -5)
if [[ -n "$found" ]]; then
|
||||
while IFS= read -r file; do
|
||||
if [[ -n "$file" ]]; then
|
||||
# Check if file is in .gitignore
|
||||
local in_gitignore="false"
|
||||
if [[ -f "$PROJECT_PATH/.gitignore" ]]; then
|
||||
grep -q "$(basename "$file")" "$PROJECT_PATH/.gitignore" 2>/dev/null && in_gitignore="true"
|
||||
fi
|
||||
findings+=("{\"file\":\"${file#$PROJECT_PATH/}\",\"type\":\"risky_file\",\"in_gitignore\":$in_gitignore}")
|
||||
fi
|
||||
done <<< "$found"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Run scans
|
||||
for pattern in "${SECRET_PATTERNS[@]}"; do
|
||||
scan_for_secrets "$pattern"
|
||||
done
|
||||
|
||||
check_secret_files
|
||||
|
||||
# Calculate risk score
|
||||
total_findings=${#findings[@]}
|
||||
risk_score="low"
|
||||
[[ $total_findings -gt 5 ]] && risk_score="medium"
|
||||
[[ $total_findings -gt 15 ]] && risk_score="high"
|
||||
[[ $total_findings -gt 30 ]] && risk_score="critical"
|
||||
|
||||
# Output JSON
|
||||
joined=$(IFS=,; echo "${findings[*]}")
|
||||
|
||||
cat <<EOF
|
||||
{
|
||||
"scan_type": "secrets",
|
||||
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
|
||||
"project_path": "$PROJECT_PATH",
|
||||
"risk_level": "$risk_score",
|
||||
"total_findings": $total_findings,
|
||||
"findings": [${joined:-}],
|
||||
"recommendations": [
|
||||
"Review all findings and remove hardcoded secrets",
|
||||
"Use environment variables for sensitive data",
|
||||
"Add secret files to .gitignore",
|
||||
"Consider using a secrets manager"
|
||||
]
|
||||
}
|
||||
EOF
|
||||
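A hedged sketch of gating a CI step on this scanner; the script itself always exits 0, so the failure policy below is an assumption, not part of this commit.

# Sketch: fail the build when the secret scan reports elevated risk.
import json
import subprocess
import sys

report = json.loads(subprocess.run(
    ["demo/tools/discovery/discover-secrets.sh", "."],
    capture_output=True, text=True, check=True,
).stdout)
if report["risk_level"] in ("high", "critical"):
    print(f"{report['total_findings']} potential secrets found", file=sys.stderr)
    sys.exit(1)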
330
demo/tools/discovery/discover.sh
Executable file
330
demo/tools/discovery/discover.sh
Executable file
@@ -0,0 +1,330 @@
#!/bin/bash
#
# Design System Server (DSS) - Project Discovery Script
#
# Non-intrusive analysis of project structure, dependencies, and health.
# Outputs JSON for UI consumption.
#
# Usage: ./discover.sh [project_path] [--full]
#

set -e

PROJECT_PATH="${1:-.}"
FULL_SCAN="${2:-}"
OUTPUT_DIR="${PROJECT_PATH}/.dss"
TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

# Create output directory
mkdir -p "$OUTPUT_DIR"

# Colors for terminal output (only if interactive)
if [ -t 1 ]; then
    GREEN='\033[0;32m'
    BLUE='\033[0;34m'
    YELLOW='\033[1;33m'
    NC='\033[0m'
else
    GREEN=''
    BLUE=''
    YELLOW=''
    NC=''
fi

log() {
    echo -e "${BLUE}[DSS]${NC} $1" >&2
}

# === Project Type Detection ===

detect_project_type() {
    local types=()

    [ -f "$PROJECT_PATH/package.json" ] && types+=("nodejs")
    [ -f "$PROJECT_PATH/requirements.txt" ] || [ -f "$PROJECT_PATH/pyproject.toml" ] && types+=("python")
    [ -f "$PROJECT_PATH/Cargo.toml" ] && types+=("rust")
    [ -f "$PROJECT_PATH/go.mod" ] && types+=("go")
    [ -f "$PROJECT_PATH/pom.xml" ] || [ -f "$PROJECT_PATH/build.gradle" ] && types+=("java")
    [ -f "$PROJECT_PATH/Gemfile" ] && types+=("ruby")
    [ -f "$PROJECT_PATH/composer.json" ] && types+=("php")

    echo "${types[@]:-unknown}"
}

# === Framework Detection ===

detect_frameworks() {
    local frameworks=()

    if [ -f "$PROJECT_PATH/package.json" ]; then
        local pkg=$(cat "$PROJECT_PATH/package.json")

        echo "$pkg" | grep -q '"react"' && frameworks+=("react")
        echo "$pkg" | grep -q '"vue"' && frameworks+=("vue")
        echo "$pkg" | grep -q '"@angular/core"' && frameworks+=("angular")
        echo "$pkg" | grep -q '"svelte"' && frameworks+=("svelte")
        echo "$pkg" | grep -q '"next"' && frameworks+=("nextjs")
        echo "$pkg" | grep -q '"nuxt"' && frameworks+=("nuxt")
        echo "$pkg" | grep -q '"express"' && frameworks+=("express")
        echo "$pkg" | grep -q '"fastify"' && frameworks+=("fastify")
        echo "$pkg" | grep -q '"tailwindcss"' && frameworks+=("tailwind")
        echo "$pkg" | grep -q '"@emotion"' && frameworks+=("emotion")
        echo "$pkg" | grep -q '"styled-components"' && frameworks+=("styled-components")
    fi

    if [ -f "$PROJECT_PATH/requirements.txt" ]; then
        grep -q "fastapi" "$PROJECT_PATH/requirements.txt" && frameworks+=("fastapi")
        grep -q "django" "$PROJECT_PATH/requirements.txt" && frameworks+=("django")
        grep -q "flask" "$PROJECT_PATH/requirements.txt" && frameworks+=("flask")
    fi

    echo "${frameworks[@]:-none}"
}

# === Design System Detection ===

detect_design_system() {
    local ds_info='{"detected":false}'

    # Check for common design system indicators
    if [ -f "$PROJECT_PATH/package.json" ]; then
        local pkg=$(cat "$PROJECT_PATH/package.json")

        if echo "$pkg" | grep -qE '"(@chakra-ui|@mui|antd|@radix-ui|@headlessui)"'; then
            ds_info='{"detected":true,"type":"library"}'
        fi
    fi

    # Check for custom design tokens
    if find "$PROJECT_PATH" -maxdepth 3 -name "tokens.css" -o -name "tokens.json" -o -name "design-tokens.*" 2>/dev/null | grep -q .; then
        ds_info='{"detected":true,"type":"custom","has_tokens":true}'
    fi

    # Check for Figma integration
    if find "$PROJECT_PATH" -maxdepth 3 -name ".figmarc" -o -name "figma.config.*" 2>/dev/null | grep -q .; then
        ds_info=$(echo "$ds_info" | sed 's/}$/,"figma_connected":true}/')
    fi

    echo "$ds_info"
}

# === File Statistics ===

get_file_stats() {
    local total_files=$(find "$PROJECT_PATH" -type f ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/dist/*" ! -path "*/__pycache__/*" 2>/dev/null | wc -l)
    local js_files=$(find "$PROJECT_PATH" -type f \( -name "*.js" -o -name "*.jsx" -o -name "*.ts" -o -name "*.tsx" \) ! -path "*/node_modules/*" 2>/dev/null | wc -l)
    local css_files=$(find "$PROJECT_PATH" -type f \( -name "*.css" -o -name "*.scss" -o -name "*.less" \) ! -path "*/node_modules/*" 2>/dev/null | wc -l)
    local py_files=$(find "$PROJECT_PATH" -type f -name "*.py" ! -path "*/__pycache__/*" 2>/dev/null | wc -l)
    local component_files=$(find "$PROJECT_PATH" -type f \( -name "*.jsx" -o -name "*.tsx" -o -name "*.vue" -o -name "*.svelte" \) ! -path "*/node_modules/*" 2>/dev/null | wc -l)

    cat <<EOF
{
  "total": $total_files,
  "javascript": $js_files,
  "css": $css_files,
  "python": $py_files,
  "components": $component_files
}
EOF
}

# === Dependency Analysis ===

analyze_dependencies() {
    local deps='{"production":[],"development":[],"total":0}'

    if [ -f "$PROJECT_PATH/package.json" ]; then
        local prod_count=$(jq '.dependencies | length // 0' "$PROJECT_PATH/package.json" 2>/dev/null || echo 0)
        local dev_count=$(jq '.devDependencies | length // 0' "$PROJECT_PATH/package.json" 2>/dev/null || echo 0)
        local total=$((prod_count + dev_count))

        deps="{\"production\":$prod_count,\"development\":$dev_count,\"total\":$total}"
    fi

    if [ -f "$PROJECT_PATH/requirements.txt" ]; then
        local py_deps=$(grep -v "^#" "$PROJECT_PATH/requirements.txt" | grep -v "^$" | wc -l)
        deps="{\"python\":$py_deps,\"total\":$py_deps}"
    fi

    echo "$deps"
}

# === Git Analysis ===

analyze_git() {
    if [ ! -d "$PROJECT_PATH/.git" ]; then
        echo '{"is_repo":false}'
        return
    fi

    cd "$PROJECT_PATH"

    local branch=$(git branch --show-current 2>/dev/null || echo "unknown")
    local commits=$(git rev-list --count HEAD 2>/dev/null || echo 0)
    local contributors=$(git log --format='%ae' | sort -u | wc -l 2>/dev/null || echo 0)
    local last_commit=$(git log -1 --format='%ci' 2>/dev/null || echo "unknown")
    local uncommitted=$(git status --porcelain 2>/dev/null | wc -l || echo 0)

    cat <<EOF
{
  "is_repo": true,
  "branch": "$branch",
  "commits": $commits,
  "contributors": $contributors,
  "last_commit": "$last_commit",
  "uncommitted_changes": $uncommitted
}
EOF
}

# === Component Discovery ===

discover_components() {
    local components=()

    # Find component files
    while IFS= read -r file; do
        if [ -n "$file" ]; then
            local name=$(basename "$file" | sed 's/\.[^.]*$//')
            local dir=$(dirname "$file" | sed "s|^$PROJECT_PATH/||")
            components+=("{\"name\":\"$name\",\"path\":\"$dir\",\"file\":\"$(basename "$file")\"}")
        fi
    done < <(find "$PROJECT_PATH" -type f \( -name "*.jsx" -o -name "*.tsx" -o -name "*.vue" \) ! -path "*/node_modules/*" ! -path "*/.next/*" ! -path "*/dist/*" 2>/dev/null | head -50)

    # Join array
    local joined=$(IFS=,; echo "${components[*]}")
    echo "[$joined]"
}

# === Health Score ===

calculate_health_score() {
    local score=100
    local issues=()

    # Check for package-lock or yarn.lock
    if [ -f "$PROJECT_PATH/package.json" ]; then
        if [ ! -f "$PROJECT_PATH/package-lock.json" ] && [ ! -f "$PROJECT_PATH/yarn.lock" ] && [ ! -f "$PROJECT_PATH/pnpm-lock.yaml" ]; then
            score=$((score - 10))
            issues+=("\"No lock file found\"")
        fi
    fi

    # Check for .gitignore
    if [ -d "$PROJECT_PATH/.git" ] && [ ! -f "$PROJECT_PATH/.gitignore" ]; then
        score=$((score - 5))
        issues+=("\"Missing .gitignore\"")
    fi

    # Check for README
    if [ ! -f "$PROJECT_PATH/README.md" ] && [ ! -f "$PROJECT_PATH/README" ]; then
        score=$((score - 5))
        issues+=("\"Missing README\"")
    fi

    # Check for tests
    if ! find "$PROJECT_PATH" -maxdepth 3 -type d \( -name "test" -o -name "tests" -o -name "__tests__" -o -name "spec" \) 2>/dev/null | grep -q .; then
        score=$((score - 10))
        issues+=("\"No test directory found\"")
    fi

    # Check for TypeScript
    if [ -f "$PROJECT_PATH/package.json" ] && ! [ -f "$PROJECT_PATH/tsconfig.json" ]; then
        if grep -q "typescript" "$PROJECT_PATH/package.json" 2>/dev/null; then
            score=$((score - 5))
            issues+=("\"TypeScript installed but no tsconfig.json\"")
        fi
    fi

    local joined_issues=$(IFS=,; echo "${issues[*]}")

    cat <<EOF
{
  "score": $score,
  "grade": "$([ $score -ge 90 ] && echo 'A' || ([ $score -ge 80 ] && echo 'B' || ([ $score -ge 70 ] && echo 'C' || ([ $score -ge 60 ] && echo 'D' || echo 'F'))))",
  "issues": [$joined_issues]
}
EOF
}

# === CSS Analysis ===

analyze_css() {
    local css_files=$(find "$PROJECT_PATH" -type f \( -name "*.css" -o -name "*.scss" \) ! -path "*/node_modules/*" 2>/dev/null)
    local total_files=$(echo "$css_files" | grep -c . || true)

    local has_variables=false
    local has_custom_properties=false
    local preprocessor="none"

    if echo "$css_files" | grep -q ".scss"; then
        preprocessor="sass"
    fi

    if [ -n "$css_files" ]; then
        for file in $css_files; do
            if grep -q -- "--" "$file" 2>/dev/null; then
                has_custom_properties=true
            fi
            if grep -q "\\\$" "$file" 2>/dev/null; then
                has_variables=true
            fi
        done
    fi

    cat <<EOF
{
  "files": $total_files,
  "preprocessor": "$preprocessor",
  "has_css_variables": $has_custom_properties,
  "has_preprocessor_variables": $has_variables
}
EOF
}

# === Main Discovery ===

log "Starting project discovery..."

PROJECT_TYPES=$(detect_project_type)
FRAMEWORKS=$(detect_frameworks)
DESIGN_SYSTEM=$(detect_design_system)
FILE_STATS=$(get_file_stats)
DEPENDENCIES=$(analyze_dependencies)
GIT_INFO=$(analyze_git)
HEALTH=$(calculate_health_score)
CSS_INFO=$(analyze_css)

if [ "$FULL_SCAN" = "--full" ]; then
    COMPONENTS=$(discover_components)
else
    COMPONENTS="[]"
fi

# Build final JSON
cat > "$OUTPUT_DIR/discovery.json" <<EOF
{
  "meta": {
    "version": "1.0.0",
    "timestamp": "$TIMESTAMP",
    "project_path": "$PROJECT_PATH",
    "full_scan": $([ "$FULL_SCAN" = "--full" ] && echo true || echo false)
  },
  "project": {
    "types": $(echo "$PROJECT_TYPES" | jq -R 'split(" ")' 2>/dev/null || echo '["unknown"]'),
    "frameworks": $(echo "$FRAMEWORKS" | jq -R 'split(" ")' 2>/dev/null || echo '[]')
  },
  "design_system": $DESIGN_SYSTEM,
  "files": $FILE_STATS,
  "dependencies": $DEPENDENCIES,
  "git": $GIT_INFO,
  "health": $HEALTH,
  "css": $CSS_INFO,
  "components": $COMPONENTS
}
EOF

log "Discovery complete: $OUTPUT_DIR/discovery.json"

# Output the JSON
cat "$OUTPUT_DIR/discovery.json"
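Since the script persists its report to .dss/discovery.json, downstream tooling can read the cached result instead of re-scanning; a minimal sketch, assuming a prior run in the current directory:

# Sketch: surface the cached health grade from a previous discovery run.
import json
from pathlib import Path

report = json.loads(Path(".dss/discovery.json").read_text())
health = report["health"]
print(f"Health: {health['score']}/100 (grade {health['grade']})")
for issue in health["issues"]:
    print(" -", issue)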
8
demo/tools/dss_mcp/__init__.py
Normal file
8
demo/tools/dss_mcp/__init__.py
Normal file
@@ -0,0 +1,8 @@
"""
DSS MCP Server

Model Context Protocol server for Design System Swarm.
Provides project-isolated context and tools to Claude chat instances.
"""

__version__ = "0.8.0"
145
demo/tools/dss_mcp/config.py
Normal file
145
demo/tools/dss_mcp/config.py
Normal file
@@ -0,0 +1,145 @@
"""
MCP Server Configuration

Loads configuration from environment variables and provides settings
for the MCP server, integrations, and security.
"""

import os
from pathlib import Path
from typing import Optional
from dotenv import load_dotenv
from cryptography.fernet import Fernet

# Load environment variables
load_dotenv()

# Base paths
PROJECT_ROOT = Path(__file__).parent.parent.parent
TOOLS_DIR = PROJECT_ROOT / "tools"
STORAGE_DIR = PROJECT_ROOT / "tools" / "storage"
CACHE_DIR = PROJECT_ROOT / os.getenv("DSS_CACHE_DIR", ".dss/cache")


class MCPConfig:
    """MCP Server Configuration"""

    # Server Settings
    HOST: str = os.getenv("DSS_MCP_HOST", "127.0.0.1")
    PORT: int = int(os.getenv("DSS_MCP_PORT", "3457"))

    # Database
    DATABASE_PATH: str = os.getenv(
        "DATABASE_PATH",
        str(STORAGE_DIR / "dss.db")
    )

    # Context Caching
    CONTEXT_CACHE_TTL: int = int(os.getenv("DSS_CONTEXT_CACHE_TTL", "300"))  # 5 minutes

    # Encryption
    ENCRYPTION_KEY: Optional[str] = os.getenv("DSS_ENCRYPTION_KEY")

    @classmethod
    def get_cipher(cls) -> Optional[Fernet]:
        """Get Fernet cipher for encryption/decryption"""
        if not cls.ENCRYPTION_KEY:
            return None
        return Fernet(cls.ENCRYPTION_KEY.encode())

    @classmethod
    def generate_encryption_key(cls) -> str:
        """Generate a new encryption key"""
        return Fernet.generate_key().decode()

    # Redis/Celery for worker pool
    REDIS_URL: str = os.getenv("REDIS_URL", "redis://localhost:6379/0")
    CELERY_BROKER_URL: str = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0")
    CELERY_RESULT_BACKEND: str = os.getenv("CELERY_RESULT_BACKEND", "redis://localhost:6379/0")

    # Circuit Breaker
    CIRCUIT_BREAKER_FAILURE_THRESHOLD: int = int(
        os.getenv("CIRCUIT_BREAKER_FAILURE_THRESHOLD", "5")
    )
    CIRCUIT_BREAKER_TIMEOUT_SECONDS: int = int(
        os.getenv("CIRCUIT_BREAKER_TIMEOUT_SECONDS", "60")
    )

    # Logging
    LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO").upper()


class IntegrationConfig:
    """External Integration Configuration"""

    # Figma
    FIGMA_TOKEN: Optional[str] = os.getenv("FIGMA_TOKEN")
    FIGMA_CACHE_TTL: int = int(os.getenv("FIGMA_CACHE_TTL", "300"))

    # Anthropic (for Sequential Thinking)
    ANTHROPIC_API_KEY: Optional[str] = os.getenv("ANTHROPIC_API_KEY")

    # Jira (defaults, can be overridden per-user)
    JIRA_URL: Optional[str] = os.getenv("JIRA_URL")
    JIRA_USERNAME: Optional[str] = os.getenv("JIRA_USERNAME")
    JIRA_API_TOKEN: Optional[str] = os.getenv("JIRA_API_TOKEN")

    # Confluence (defaults, can be overridden per-user)
    CONFLUENCE_URL: Optional[str] = os.getenv("CONFLUENCE_URL")
    CONFLUENCE_USERNAME: Optional[str] = os.getenv("CONFLUENCE_USERNAME")
    CONFLUENCE_API_TOKEN: Optional[str] = os.getenv("CONFLUENCE_API_TOKEN")


# Singleton instances
mcp_config = MCPConfig()
integration_config = IntegrationConfig()


def validate_config() -> list[str]:
    """
    Validate configuration and return list of warnings.

    Returns:
        List of warning messages for missing optional config
    """
    warnings = []

    if not mcp_config.ENCRYPTION_KEY:
        warnings.append(
            "DSS_ENCRYPTION_KEY not set. Integration credentials will not be encrypted. "
            "Generate one with: python -c \"from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())\""
        )

    if not integration_config.ANTHROPIC_API_KEY:
        warnings.append("ANTHROPIC_API_KEY not set. Sequential Thinking tools will not be available.")

    if not integration_config.FIGMA_TOKEN:
        warnings.append("FIGMA_TOKEN not set. Figma tools will not be available.")

    return warnings


if __name__ == "__main__":
    print("=== DSS MCP Configuration ===\n")
    print(f"MCP Server: {mcp_config.HOST}:{mcp_config.PORT}")
    print(f"Database: {mcp_config.DATABASE_PATH}")
    print(f"Context Cache TTL: {mcp_config.CONTEXT_CACHE_TTL}s")
    print(f"Encryption Key: {'✓ Set' if mcp_config.ENCRYPTION_KEY else '✗ Not Set'}")
    print(f"Redis URL: {mcp_config.REDIS_URL}")
    print("\nCircuit Breaker:")
    print(f"  Failure Threshold: {mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD}")
    print(f"  Timeout: {mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS}s")

    print("\n=== Integration Configuration ===\n")
    print(f"Figma Token: {'✓ Set' if integration_config.FIGMA_TOKEN else '✗ Not Set'}")
    print(f"Anthropic API Key: {'✓ Set' if integration_config.ANTHROPIC_API_KEY else '✗ Not Set'}")
    print(f"Jira URL: {integration_config.JIRA_URL or '✗ Not Set'}")
    print(f"Confluence URL: {integration_config.CONFLUENCE_URL or '✗ Not Set'}")

    warnings = validate_config()
    if warnings:
        print("\n⚠️ Warnings:")
        for warning in warnings:
            print(f"  - {warning}")
    else:
        print("\n✓ Configuration is valid")
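A minimal bootstrap sketch for the encryption key, mirroring the hint printed by validate_config(); appending to .env is an assumption, and any mechanism that exports DSS_ENCRYPTION_KEY works equally well.

# Sketch: generate a Fernet key once and persist it for load_dotenv() to pick up.
from cryptography.fernet import Fernet

key = Fernet.generate_key().decode()
with open(".env", "a") as f:
    f.write(f"\nDSS_ENCRYPTION_KEY={key}\n")
print("DSS_ENCRYPTION_KEY written to .env")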
0
demo/tools/dss_mcp/context/__init__.py
Normal file
0
demo/tools/dss_mcp/context/__init__.py
Normal file
443
demo/tools/dss_mcp/context/project_context.py
Normal file
443
demo/tools/dss_mcp/context/project_context.py
Normal file
@@ -0,0 +1,443 @@
"""
Project Context Manager

Provides cached, project-isolated context for Claude MCP sessions.
Loads all relevant project data (components, tokens, config, health, etc.)
and caches it for performance.
"""

import json
import asyncio
from datetime import datetime, timedelta
from dataclasses import dataclass, asdict
from typing import Dict, Any, Optional, List
from pathlib import Path

# Import from existing DSS modules
import sys
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from storage.database import get_connection, Projects
from analyze.scanner import ProjectScanner
from ..config import mcp_config


@dataclass
class ProjectContext:
    """Complete project context for MCP sessions"""

    project_id: str
    name: str
    description: Optional[str]
    path: Optional[Path]

    # Component data
    components: List[Dict[str, Any]]
    component_count: int

    # Token/Style data
    tokens: Dict[str, Any]
    styles: List[Dict[str, Any]]

    # Project configuration
    config: Dict[str, Any]

    # User's enabled integrations (user-scoped)
    integrations: Dict[str, Any]

    # Project health & metrics
    health: Dict[str, Any]
    stats: Dict[str, Any]

    # Discovery/scan results
    discovery: Dict[str, Any]

    # Metadata
    loaded_at: datetime
    cache_expires_at: datetime

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization"""
        data = asdict(self)
        data['loaded_at'] = self.loaded_at.isoformat()
        data['cache_expires_at'] = self.cache_expires_at.isoformat()
        if self.path:
            data['path'] = str(self.path)
        return data

    def is_expired(self) -> bool:
        """Check if cache has expired"""
        return datetime.now() >= self.cache_expires_at


class ProjectContextManager:
    """
    Manages project contexts with TTL-based caching.

    Provides fast access to project data for MCP tools while ensuring
    data freshness and project isolation.
    """

    def __init__(self):
        self._cache: Dict[str, ProjectContext] = {}
        self._cache_ttl = timedelta(seconds=mcp_config.CONTEXT_CACHE_TTL)

    async def get_context(
        self,
        project_id: str,
        user_id: Optional[int] = None,
        force_refresh: bool = False
    ) -> Optional[ProjectContext]:
        """
        Get project context, using cache if available.

        Args:
            project_id: Project ID
            user_id: User ID for loading user-scoped integrations
            force_refresh: Force cache refresh

        Returns:
            ProjectContext or None if project not found
        """
        # Check cache first
        cache_key = f"{project_id}:{user_id or 'anonymous'}"
        if not force_refresh and cache_key in self._cache:
            ctx = self._cache[cache_key]
            if not ctx.is_expired():
                return ctx

        # Load fresh context
        context = await self._load_context(project_id, user_id)
        if context:
            self._cache[cache_key] = context

        return context

    async def _load_context(
        self,
        project_id: str,
        user_id: Optional[int] = None
    ) -> Optional[ProjectContext]:
        """Load complete project context from database and filesystem"""

        # Run database queries in thread pool to avoid blocking
        loop = asyncio.get_event_loop()

        # Load project metadata
        project = await loop.run_in_executor(None, self._load_project, project_id)
        if not project:
            return None

        # Load components, styles, stats in parallel
        components_task = loop.run_in_executor(None, self._load_components, project_id)
        styles_task = loop.run_in_executor(None, self._load_styles, project_id)
        stats_task = loop.run_in_executor(None, self._load_stats, project_id)
        integrations_task = loop.run_in_executor(None, self._load_integrations, project_id, user_id)

        components = await components_task
        styles = await styles_task
        stats = await stats_task
        integrations = await integrations_task

        # Load tokens from filesystem if project has a path
        tokens = {}
        project_path = None
        if project.get('figma_file_key'):
            # Try to find project path based on naming convention
            # (This can be enhanced based on actual project structure)
            project_path = Path.cwd()
            tokens = await loop.run_in_executor(None, self._load_tokens, project_path)

        # Load discovery/scan data
        discovery = await loop.run_in_executor(None, self._load_discovery, project_path)

        # Compute health score
        health = self._compute_health(components, tokens, stats)

        # Build context
        now = datetime.now()
        context = ProjectContext(
            project_id=project_id,
            name=project['name'],
            description=project.get('description'),
            path=project_path,
            components=components,
            component_count=len(components),
            tokens=tokens,
            styles=styles,
            config={
                'figma_file_key': project.get('figma_file_key'),
                'status': project.get('status', 'active')
            },
            integrations=integrations,
            health=health,
            stats=stats,
            discovery=discovery,
            loaded_at=now,
            cache_expires_at=now + self._cache_ttl
        )

        return context

    def _load_project(self, project_id: str) -> Optional[Dict[str, Any]]:
        """Load project metadata from database"""
        try:
            with get_connection() as conn:
                row = conn.execute(
                    "SELECT * FROM projects WHERE id = ?",
                    (project_id,)
                ).fetchone()

                if row:
                    return dict(row)
                return None
        except Exception as e:
            print(f"Error loading project: {e}")
            return None

    def _load_components(self, project_id: str) -> List[Dict[str, Any]]:
        """Load all components for project"""
        try:
            with get_connection() as conn:
                rows = conn.execute(
                    """
                    SELECT id, name, figma_key, description,
                           properties, variants, code_generated,
                           created_at, updated_at
                    FROM components
                    WHERE project_id = ?
                    ORDER BY name
                    """,
                    (project_id,)
                ).fetchall()

                components = []
                for row in rows:
                    comp = dict(row)
                    # Parse JSON fields
                    if comp.get('properties'):
                        comp['properties'] = json.loads(comp['properties'])
                    if comp.get('variants'):
                        comp['variants'] = json.loads(comp['variants'])
                    components.append(comp)

                return components
        except Exception as e:
            print(f"Error loading components: {e}")
            return []

    def _load_styles(self, project_id: str) -> List[Dict[str, Any]]:
        """Load all styles for project"""
        try:
            with get_connection() as conn:
                rows = conn.execute(
                    """
                    SELECT id, name, type, figma_key, properties, created_at
                    FROM styles
                    WHERE project_id = ?
                    ORDER BY type, name
                    """,
                    (project_id,)
                ).fetchall()

                styles = []
                for row in rows:
                    style = dict(row)
                    if style.get('properties'):
                        style['properties'] = json.loads(style['properties'])
                    styles.append(style)

                return styles
        except Exception as e:
            print(f"Error loading styles: {e}")
            return []

    def _load_stats(self, project_id: str) -> Dict[str, Any]:
        """Load project statistics"""
        try:
            with get_connection() as conn:
                # Component count by type
                component_stats = conn.execute(
                    """
                    SELECT COUNT(*) as total,
                           SUM(CASE WHEN code_generated = 1 THEN 1 ELSE 0 END) as generated
                    FROM components
                    WHERE project_id = ?
                    """,
                    (project_id,)
                ).fetchone()

                # Style count by type
                style_stats = conn.execute(
                    """
                    SELECT type, COUNT(*) as count
                    FROM styles
                    WHERE project_id = ?
                    GROUP BY type
                    """,
                    (project_id,)
                ).fetchall()

                return {
                    'components': dict(component_stats) if component_stats else {'total': 0, 'generated': 0},
                    'styles': {row['type']: row['count'] for row in style_stats}
                }
        except Exception as e:
            print(f"Error loading stats: {e}")
            return {'components': {'total': 0, 'generated': 0}, 'styles': {}}

    def _load_integrations(self, project_id: str, user_id: Optional[int]) -> Dict[str, Any]:
        """Load user's enabled integrations for this project"""
        if not user_id:
            return {}

        try:
            with get_connection() as conn:
                rows = conn.execute(
                    """
                    SELECT integration_type, config, enabled, last_used_at
                    FROM project_integrations
                    WHERE project_id = ? AND user_id = ? AND enabled = 1
                    """,
                    (project_id, user_id)
                ).fetchall()

                # Return decrypted config for each integration
                integrations = {}
                cipher = mcp_config.get_cipher()

                for row in rows:
                    integration_type = row['integration_type']
                    encrypted_config = row['config']

                    # Decrypt config
                    if cipher:
                        try:
                            decrypted_config = cipher.decrypt(encrypted_config.encode()).decode()
                            config = json.loads(decrypted_config)
                        except Exception as e:
                            print(f"Error decrypting integration config: {e}")
                            config = {}
                    else:
                        # No encryption key, try to parse as JSON
                        try:
                            config = json.loads(encrypted_config)
                        except Exception:
                            config = {}

                    integrations[integration_type] = {
                        'enabled': True,
                        'config': config,
                        'last_used_at': row['last_used_at']
                    }

                return integrations
        except Exception as e:
            print(f"Error loading integrations: {e}")
            return {}

    def _load_tokens(self, project_path: Optional[Path]) -> Dict[str, Any]:
        """Load design tokens from filesystem"""
        if not project_path:
            return {}

        tokens = {}
        token_files = ['tokens.json', 'design-tokens.json', 'variables.json']

        for token_file in token_files:
            token_path = project_path / token_file
            if token_path.exists():
                try:
                    with open(token_path) as f:
                        tokens = json.load(f)
                    break
                except Exception as e:
                    print(f"Error loading tokens from {token_path}: {e}")

        return tokens

    def _load_discovery(self, project_path: Optional[Path]) -> Dict[str, Any]:
        """Load project discovery data"""
        if not project_path:
            return {}

        try:
            scanner = ProjectScanner(str(project_path))
            discovery = scanner.scan()
            return discovery
        except Exception as e:
            print(f"Error running discovery scan: {e}")
            return {}

    def _compute_health(
        self,
        components: List[Dict],
        tokens: Dict,
        stats: Dict
    ) -> Dict[str, Any]:
        """Compute project health score"""
        score = 100
        issues = []

        # Deduct points for missing components
        if stats['components']['total'] == 0:
            score -= 30
            issues.append("No components defined")

        # Deduct points for no tokens
        if not tokens:
            score -= 20
            issues.append("No design tokens defined")

        # Deduct points for ungenerated components
        total = stats['components']['total']
        generated = stats['components']['generated']
        if total > 0 and generated < total:
            percentage = (generated / total) * 100
            if percentage < 50:
                score -= 20
                issues.append(f"Low code generation: {percentage:.1f}%")
            elif percentage < 80:
                score -= 10
                issues.append(f"Medium code generation: {percentage:.1f}%")

        # Compute grade
        if score >= 90:
            grade = 'A'
        elif score >= 80:
            grade = 'B'
        elif score >= 70:
            grade = 'C'
        elif score >= 60:
            grade = 'D'
        else:
            grade = 'F'

        return {
            'score': max(0, score),
            'grade': grade,
            'issues': issues
        }

    def clear_cache(self, project_id: Optional[str] = None):
        """Clear cache for specific project or all projects"""
        if project_id:
            # Clear all cache entries for this project
            keys_to_remove = [k for k in self._cache.keys() if k.startswith(f"{project_id}:")]
            for key in keys_to_remove:
                del self._cache[key]
        else:
            # Clear all cache
            self._cache.clear()


# Singleton instance
_context_manager = None


def get_context_manager() -> ProjectContextManager:
    """Get singleton context manager instance"""
    global _context_manager
    if _context_manager is None:
        _context_manager = ProjectContextManager()
    return _context_manager
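A usage sketch, assuming demo/tools is on sys.path (as the module itself arranges) and a matching project row already exists in the SQLite database; the ids below are hypothetical.

# Sketch: fetch a cached, user-scoped project context.
import asyncio

from dss_mcp.context.project_context import get_context_manager

async def main():
    manager = get_context_manager()
    ctx = await manager.get_context("proj-123", user_id=1)  # hypothetical ids
    if ctx is None:
        print("project not found")
    else:
        print(ctx.name, ctx.component_count, ctx.health["grade"])

asyncio.run(main())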
428
demo/tools/dss_mcp/handler.py
Normal file
428
demo/tools/dss_mcp/handler.py
Normal file
@@ -0,0 +1,428 @@
"""
Unified MCP Handler

Central handler for all MCP tool execution. Used by:
- Direct API calls (/api/mcp/tools/{name}/execute)
- Claude chat (inline tool execution)
- SSE streaming connections

This module ensures all MCP requests go through a single code path
for consistent logging, error handling, and security.
"""

import json
import asyncio
from typing import Dict, Any, List, Optional, Tuple
from datetime import datetime
from dataclasses import dataclass, asdict

import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))

from storage.database import get_connection
from .config import mcp_config, integration_config
from .context.project_context import get_context_manager, ProjectContext
from .tools.project_tools import PROJECT_TOOLS, ProjectTools
from .integrations.figma import FIGMA_TOOLS, FigmaTools
from .integrations.jira import JIRA_TOOLS, JiraTools
from .integrations.confluence import CONFLUENCE_TOOLS, ConfluenceTools
from .integrations.base import CircuitBreakerOpen


@dataclass
class ToolResult:
    """Result of a tool execution"""
    tool_name: str
    success: bool
    result: Any
    error: Optional[str] = None
    duration_ms: int = 0
    timestamp: str = None

    def __post_init__(self):
        if not self.timestamp:
            self.timestamp = datetime.now().isoformat()

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)


@dataclass
class MCPContext:
    """Context for MCP operations"""
    project_id: str
    user_id: Optional[int] = None
    session_id: Optional[str] = None


class MCPHandler:
    """
    Unified MCP tool handler.

    Provides:
    - Tool discovery (list all available tools)
    - Tool execution with proper context
    - Integration management
    - Logging and metrics
    """

    def __init__(self):
        self.context_manager = get_context_manager()
        self._tool_registry: Dict[str, Dict[str, Any]] = {}
        self._initialize_tools()

    def _initialize_tools(self):
        """Initialize tool registry with all available tools"""
        # Register base project tools
        for tool in PROJECT_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "project",
                "requires_integration": False
            }

        # Register Figma tools
        for tool in FIGMA_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "figma",
                "requires_integration": True,
                "integration_type": "figma"
            }

        # Register Jira tools
        for tool in JIRA_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "jira",
                "requires_integration": True,
                "integration_type": "jira"
            }

        # Register Confluence tools
        for tool in CONFLUENCE_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "confluence",
                "requires_integration": True,
                "integration_type": "confluence"
            }

    def list_tools(self, include_details: bool = False) -> Dict[str, Any]:
        """
        List all available MCP tools.

        Args:
            include_details: Include full tool schemas

        Returns:
            Tool listing by category
        """
        tools_by_category = {}

        for name, info in self._tool_registry.items():
            category = info["category"]
            if category not in tools_by_category:
                tools_by_category[category] = []

            tool_info = {
                "name": name,
                "description": info["tool"].description,
                "requires_integration": info.get("requires_integration", False)
            }

            if include_details:
                tool_info["input_schema"] = info["tool"].inputSchema

            tools_by_category[category].append(tool_info)

        return {
            "tools": tools_by_category,
            "total_count": len(self._tool_registry)
        }

    def get_tool_info(self, tool_name: str) -> Optional[Dict[str, Any]]:
        """Get information about a specific tool"""
        if tool_name not in self._tool_registry:
            return None

        info = self._tool_registry[tool_name]
        return {
            "name": tool_name,
            "description": info["tool"].description,
            "category": info["category"],
            "input_schema": info["tool"].inputSchema,
            "requires_integration": info.get("requires_integration", False),
            "integration_type": info.get("integration_type")
        }

    async def execute_tool(
        self,
        tool_name: str,
        arguments: Dict[str, Any],
        context: MCPContext
    ) -> ToolResult:
        """
        Execute an MCP tool.

        Args:
            tool_name: Name of the tool to execute
            arguments: Tool arguments
            context: MCP context (project_id, user_id)

        Returns:
            ToolResult with success/failure and data
        """
        start_time = datetime.now()

        # Check if tool exists
        if tool_name not in self._tool_registry:
            return ToolResult(
                tool_name=tool_name,
                success=False,
                result=None,
                error=f"Unknown tool: {tool_name}"
            )

        tool_info = self._tool_registry[tool_name]
        category = tool_info["category"]

        try:
            # Execute based on category
            if category == "project":
                result = await self._execute_project_tool(tool_name, arguments, context)
            elif category == "figma":
                result = await self._execute_figma_tool(tool_name, arguments, context)
            elif category == "jira":
                result = await self._execute_jira_tool(tool_name, arguments, context)
            elif category == "confluence":
                result = await self._execute_confluence_tool(tool_name, arguments, context)
            else:
                result = {"error": f"Unknown tool category: {category}"}

            # Check for error in result
            success = "error" not in result
            error = result.get("error") if not success else None

            # Calculate duration
            duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)

            # Log execution
            await self._log_tool_usage(
                tool_name=tool_name,
                category=category,
                project_id=context.project_id,
                user_id=context.user_id,
                success=success,
                duration_ms=duration_ms,
                error=error
            )

            return ToolResult(
                tool_name=tool_name,
                success=success,
                result=result if success else None,
                error=error,
                duration_ms=duration_ms
            )

        except CircuitBreakerOpen as e:
            duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
            return ToolResult(
                tool_name=tool_name,
                success=False,
                result=None,
                error=str(e),
                duration_ms=duration_ms
            )
        except Exception as e:
            duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
            await self._log_tool_usage(
                tool_name=tool_name,
                category=category,
                project_id=context.project_id,
                user_id=context.user_id,
                success=False,
                duration_ms=duration_ms,
                error=str(e)
            )
            return ToolResult(
                tool_name=tool_name,
                success=False,
                result=None,
                error=str(e),
                duration_ms=duration_ms
            )

    async def _execute_project_tool(
        self,
        tool_name: str,
        arguments: Dict[str, Any],
        context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a project tool"""
        # Ensure project_id is set
        if "project_id" not in arguments:
            arguments["project_id"] = context.project_id

        project_tools = ProjectTools(context.user_id)
        return await project_tools.execute_tool(tool_name, arguments)

    async def _execute_figma_tool(
        self,
        tool_name: str,
        arguments: Dict[str, Any],
        context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Figma tool"""
        # Get Figma config
        config = await self._get_integration_config("figma", context)
        if not config:
            # Try global config
            if integration_config.FIGMA_TOKEN:
                config = {"api_token": integration_config.FIGMA_TOKEN}
            else:
                return {"error": "Figma not configured. Please add Figma API token."}

        figma_tools = FigmaTools(config)
        return await figma_tools.execute_tool(tool_name, arguments)

    async def _execute_jira_tool(
        self,
        tool_name: str,
        arguments: Dict[str, Any],
        context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Jira tool"""
        config = await self._get_integration_config("jira", context)
        if not config:
            return {"error": "Jira not configured. Please configure Jira integration."}

        jira_tools = JiraTools(config)
        return await jira_tools.execute_tool(tool_name, arguments)

    async def _execute_confluence_tool(
        self,
        tool_name: str,
        arguments: Dict[str, Any],
        context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Confluence tool"""
        config = await self._get_integration_config("confluence", context)
        if not config:
            return {"error": "Confluence not configured. Please configure Confluence integration."}

        confluence_tools = ConfluenceTools(config)
        return await confluence_tools.execute_tool(tool_name, arguments)

    async def _get_integration_config(
        self,
        integration_type: str,
        context: MCPContext
    ) -> Optional[Dict[str, Any]]:
        """Get decrypted integration config for user/project"""
        if not context.user_id or not context.project_id:
            return None

        loop = asyncio.get_event_loop()

        def get_config():
            try:
                with get_connection() as conn:
                    row = conn.execute(
                        """
                        SELECT config FROM project_integrations
                        WHERE project_id = ? AND user_id = ? AND integration_type = ? AND enabled = 1
                        """,
                        (context.project_id, context.user_id, integration_type)
                    ).fetchone()

                    if not row:
                        return None

                    encrypted_config = row["config"]

                    # Decrypt
                    cipher = mcp_config.get_cipher()
                    if cipher:
                        try:
                            decrypted = cipher.decrypt(encrypted_config.encode()).decode()
                            return json.loads(decrypted)
                        except Exception:
                            pass

                    # Try parsing as plain JSON
                    try:
                        return json.loads(encrypted_config)
                    except Exception:
                        return None
            except Exception:
                return None

        return await loop.run_in_executor(None, get_config)

    async def _log_tool_usage(
        self,
        tool_name: str,
        category: str,
        project_id: str,
        user_id: Optional[int],
        success: bool,
        duration_ms: int,
        error: Optional[str] = None
    ):
        """Log tool execution to database"""
        loop = asyncio.get_event_loop()

        def log():
            try:
                with get_connection() as conn:
                    conn.execute(
                        """
                        INSERT INTO mcp_tool_usage
                        (project_id, user_id, tool_name, tool_category, duration_ms, success, error_message)
                        VALUES (?, ?, ?, ?, ?, ?, ?)
                        """,
                        (project_id, user_id, tool_name, category, duration_ms, success, error)
                    )
            except Exception:
                pass  # Don't fail on logging errors

        await loop.run_in_executor(None, log)

    async def get_project_context(
        self,
        project_id: str,
        user_id: Optional[int] = None
    ) -> Optional[ProjectContext]:
        """Get project context for Claude system prompt"""
        return await self.context_manager.get_context(project_id, user_id)

    def get_tools_for_claude(self) -> List[Dict[str, Any]]:
        """
        Get tools formatted for Claude's tool_use feature.

        Returns:
            List of tools in Anthropic's tool format
        """
        tools = []
        for name, info in self._tool_registry.items():
            tools.append({
                "name": name,
                "description": info["tool"].description,
                "input_schema": info["tool"].inputSchema
            })
        return tools


# Singleton instance
_mcp_handler: Optional[MCPHandler] = None


def get_mcp_handler() -> MCPHandler:
    """Get singleton MCP handler instance"""
    global _mcp_handler
    if _mcp_handler is None:
        _mcp_handler = MCPHandler()
    return _mcp_handler
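A sketch of driving the handler directly; the tool name is hypothetical and would come from PROJECT_TOOLS in practice, and the import path assumes demo/tools is on sys.path.

# Sketch: execute one tool through the unified code path and inspect the result.
import asyncio

from dss_mcp.handler import MCPContext, get_mcp_handler

async def main():
    handler = get_mcp_handler()
    print(handler.list_tools()["total_count"], "tools registered")
    ctx = MCPContext(project_id="proj-123", user_id=1)  # hypothetical ids
    result = await handler.execute_tool("get_project_summary", {}, ctx)  # hypothetical tool name
    print(result.success, result.duration_ms, result.error or result.result)

asyncio.run(main())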
0
demo/tools/dss_mcp/integrations/__init__.py
Normal file
0
demo/tools/dss_mcp/integrations/__init__.py
Normal file
264
demo/tools/dss_mcp/integrations/base.py
Normal file
264
demo/tools/dss_mcp/integrations/base.py
Normal file
@@ -0,0 +1,264 @@
|
||||
"""
|
||||
Base Integration Classes
|
||||
|
||||
Provides circuit breaker pattern and base classes for external integrations.
|
||||
"""
|
||||
|
||||
import time
|
||||
import asyncio
|
||||
from typing import Callable, Any, Optional, Dict
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from enum import Enum
|
||||
|
||||
from ..config import mcp_config
|
||||
from storage.database import get_connection
|
||||
|
||||
|
||||
class CircuitState(Enum):
|
||||
"""Circuit breaker states"""
|
||||
CLOSED = "closed" # Normal operation
|
||||
OPEN = "open" # Failing, reject requests
|
||||
HALF_OPEN = "half_open" # Testing if service recovered
|
||||
|
||||
|
||||
@dataclass
|
||||
class CircuitBreakerStats:
|
||||
"""Circuit breaker statistics"""
|
||||
state: CircuitState
|
||||
failure_count: int
|
||||
success_count: int
|
||||
last_failure_time: Optional[float]
|
||||
last_success_time: Optional[float]
|
||||
opened_at: Optional[float]
|
||||
next_retry_time: Optional[float]
|
||||
|
||||
|
||||
class CircuitBreakerOpen(Exception):
|
||||
"""Exception raised when circuit breaker is open"""
|
||||
pass
|
||||
|
||||
|
||||
class CircuitBreaker:
|
||||
"""
|
||||
Circuit Breaker pattern implementation.
|
||||
|
||||
Protects external service calls from cascading failures.
|
||||
Three states: CLOSED (normal), OPEN (failing), HALF_OPEN (testing).
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
integration_type: str,
|
||||
failure_threshold: int = None,
|
||||
timeout_seconds: int = None,
|
||||
half_open_max_calls: int = 3
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
integration_type: Type of integration (figma, jira, confluence, etc.)
|
||||
failure_threshold: Number of failures before opening circuit
|
||||
timeout_seconds: Seconds to wait before trying again
|
||||
half_open_max_calls: Max successful calls in half-open before closing
|
||||
"""
|
||||
self.integration_type = integration_type
|
||||
self.failure_threshold = failure_threshold or mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD
|
||||
self.timeout_seconds = timeout_seconds or mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS
|
||||
self.half_open_max_calls = half_open_max_calls
|
||||
|
||||
# In-memory state (could be moved to Redis for distributed setup)
|
||||
self.state = CircuitState.CLOSED
|
||||
        self.failure_count = 0
        self.success_count = 0
        self.last_failure_time: Optional[float] = None
        self.last_success_time: Optional[float] = None
        self.opened_at: Optional[float] = None

    async def call(self, func: Callable, *args, **kwargs) -> Any:
        """
        Call a function through the circuit breaker.

        Args:
            func: Function to call (can be sync or async)
            *args, **kwargs: Arguments to pass to func

        Returns:
            Function result

        Raises:
            CircuitBreakerOpen: If circuit is open
            Exception: Original exception from func if it fails
        """
        # Check circuit state
        if self.state == CircuitState.OPEN:
            # Check if timeout has elapsed
            if time.time() - self.opened_at < self.timeout_seconds:
                await self._record_failure("Circuit breaker is OPEN", db_only=True)
                raise CircuitBreakerOpen(
                    f"{self.integration_type} service is temporarily unavailable. "
                    f"Retry after {self._seconds_until_retry():.0f}s"
                )
            else:
                # Timeout elapsed, move to HALF_OPEN
                self.state = CircuitState.HALF_OPEN
                self.success_count = 0

        # Execute function
        try:
            # Handle both sync and async functions
            if asyncio.iscoroutinefunction(func):
                result = await func(*args, **kwargs)
            else:
                result = func(*args, **kwargs)

            # Success!
            await self._record_success()

            # If in HALF_OPEN, check if we can close the circuit
            if self.state == CircuitState.HALF_OPEN:
                if self.success_count >= self.half_open_max_calls:
                    self.state = CircuitState.CLOSED
                    self.failure_count = 0

            return result

        except Exception as e:
            # Failure
            await self._record_failure(str(e))

            # Check if we should open the circuit
            if self.failure_count >= self.failure_threshold:
                self.state = CircuitState.OPEN
                self.opened_at = time.time()

            raise

    async def _record_success(self):
        """Record successful call"""
        self.success_count += 1
        self.last_success_time = time.time()

        # Update database
        await self._update_health_db(is_healthy=True, error=None)

    async def _record_failure(self, error_message: str, db_only: bool = False):
        """Record failed call"""
        if not db_only:
            self.failure_count += 1
            self.last_failure_time = time.time()

        # Update database
        await self._update_health_db(is_healthy=False, error=error_message)

    async def _update_health_db(self, is_healthy: bool, error: Optional[str]):
        """Update integration health in database"""
        loop = asyncio.get_event_loop()

        def update_db():
            try:
                with get_connection() as conn:
                    circuit_open_until = None
                    if self.state == CircuitState.OPEN and self.opened_at:
                        circuit_open_until = datetime.fromtimestamp(
                            self.opened_at + self.timeout_seconds
                        ).isoformat()

                    if is_healthy:
                        conn.execute(
                            """
                            UPDATE integration_health
                            SET is_healthy = 1,
                                failure_count = 0,
                                last_success_at = CURRENT_TIMESTAMP,
                                circuit_open_until = NULL,
                                updated_at = CURRENT_TIMESTAMP
                            WHERE integration_type = ?
                            """,
                            (self.integration_type,)
                        )
                    else:
                        conn.execute(
                            """
                            UPDATE integration_health
                            SET is_healthy = 0,
                                failure_count = ?,
                                last_failure_at = CURRENT_TIMESTAMP,
                                circuit_open_until = ?,
                                updated_at = CURRENT_TIMESTAMP
                            WHERE integration_type = ?
                            """,
                            (self.failure_count, circuit_open_until, self.integration_type)
                        )
            except Exception as e:
                print(f"Error updating integration health: {e}")

        await loop.run_in_executor(None, update_db)

    def _seconds_until_retry(self) -> float:
        """Get seconds until circuit can be retried"""
        if self.state != CircuitState.OPEN or not self.opened_at:
            return 0
        elapsed = time.time() - self.opened_at
        remaining = self.timeout_seconds - elapsed
        return max(0, remaining)

    def get_stats(self) -> CircuitBreakerStats:
        """Get current circuit breaker statistics"""
        next_retry_time = None
        if self.state == CircuitState.OPEN and self.opened_at:
            next_retry_time = self.opened_at + self.timeout_seconds

        return CircuitBreakerStats(
            state=self.state,
            failure_count=self.failure_count,
            success_count=self.success_count,
            last_failure_time=self.last_failure_time,
            last_success_time=self.last_success_time,
            opened_at=self.opened_at,
            next_retry_time=next_retry_time
        )


class BaseIntegration:
    """Base class for all external integrations"""

    def __init__(self, integration_type: str, config: Dict[str, Any]):
        """
        Args:
            integration_type: Type of integration (figma, jira, etc.)
            config: Integration configuration (decrypted)
        """
        self.integration_type = integration_type
        self.config = config
        self.circuit_breaker = CircuitBreaker(integration_type)

    async def call_api(self, func: Callable, *args, **kwargs) -> Any:
        """
        Call external API through circuit breaker.

        Args:
            func: API function to call
            *args, **kwargs: Arguments to pass

        Returns:
            API response

        Raises:
            CircuitBreakerOpen: If circuit is open
            Exception: Original API exception
        """
        return await self.circuit_breaker.call(func, *args, **kwargs)

    def get_health(self) -> Dict[str, Any]:
        """Get integration health status"""
        stats = self.circuit_breaker.get_stats()
        return {
            "integration_type": self.integration_type,
            "state": stats.state.value,
            "is_healthy": stats.state == CircuitState.CLOSED,
            "failure_count": stats.failure_count,
            "success_count": stats.success_count,
            "last_failure_time": stats.last_failure_time,
            "last_success_time": stats.last_success_time,
            "next_retry_time": stats.next_retry_time
        }
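A minimal usage sketch of the circuit breaker above (not part of the commit; flaky_fetch is a hypothetical failing dependency). After failure_threshold consecutive failures the breaker opens and later calls are short-circuited with CircuitBreakerOpen until the timeout elapses:

# Hypothetical demo of the CircuitBreaker defined above. Assumes the default
# thresholds from __init__; DB errors inside _update_health_db are caught
# and printed, so this runs without a configured integration_health table.
import asyncio

async def flaky_fetch():
    raise RuntimeError("upstream timeout")  # simulated failing dependency

async def demo():
    breaker = CircuitBreaker("figma")
    for attempt in range(6):
        try:
            await breaker.call(flaky_fetch)
        except CircuitBreakerOpen as e:
            print(f"attempt {attempt}: short-circuited: {e}")
        except RuntimeError as e:
            print(f"attempt {attempt}: failed: {e}")
    print(breaker.get_stats())

asyncio.run(demo())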
262
demo/tools/dss_mcp/integrations/confluence.py
Normal file
@@ -0,0 +1,262 @@
"""
Confluence Integration for MCP

Provides Confluence API tools for documentation and knowledge base.
"""

from typing import Dict, Any, List, Optional
from atlassian import Confluence
from mcp import types

from .base import BaseIntegration


# Confluence MCP Tool Definitions
CONFLUENCE_TOOLS = [
    types.Tool(
        name="confluence_create_page",
        description="Create a new Confluence page",
        inputSchema={
            "type": "object",
            "properties": {
                "space_key": {
                    "type": "string",
                    "description": "Confluence space key"
                },
                "title": {
                    "type": "string",
                    "description": "Page title"
                },
                "body": {
                    "type": "string",
                    "description": "Page content (HTML or wiki markup)"
                },
                "parent_id": {
                    "type": "string",
                    "description": "Optional parent page ID"
                }
            },
            "required": ["space_key", "title", "body"]
        }
    ),
    types.Tool(
        name="confluence_get_page",
        description="Get Confluence page by ID or title",
        inputSchema={
            "type": "object",
            "properties": {
                "page_id": {
                    "type": "string",
                    "description": "Page ID (use this OR title)"
                },
                "space_key": {
                    "type": "string",
                    "description": "Space key (required if using title)"
                },
                "title": {
                    "type": "string",
                    "description": "Page title (use this OR page_id)"
                },
                "expand": {
                    "type": "string",
                    "description": "Comma-separated list of expansions (body.storage, version, etc.)",
                    "default": "body.storage,version"
                }
            }
        }
    ),
    types.Tool(
        name="confluence_update_page",
        description="Update an existing Confluence page",
        inputSchema={
            "type": "object",
            "properties": {
                "page_id": {
                    "type": "string",
                    "description": "Page ID to update"
                },
                "title": {
                    "type": "string",
                    "description": "New page title"
                },
                "body": {
                    "type": "string",
                    "description": "New page content"
                }
            },
            "required": ["page_id", "title", "body"]
        }
    ),
    types.Tool(
        name="confluence_search",
        description="Search Confluence pages using CQL",
        inputSchema={
            "type": "object",
            "properties": {
                "cql": {
                    "type": "string",
                    "description": "CQL query (e.g., 'space=DSS AND type=page')"
                },
                "limit": {
                    "type": "integer",
                    "description": "Maximum number of results",
                    "default": 25
                }
            },
            "required": ["cql"]
        }
    ),
    types.Tool(
        name="confluence_get_space",
        description="Get Confluence space details",
        inputSchema={
            "type": "object",
            "properties": {
                "space_key": {
                    "type": "string",
                    "description": "Space key"
                }
            },
            "required": ["space_key"]
        }
    )
]


class ConfluenceIntegration(BaseIntegration):
    """Confluence API integration with circuit breaker"""

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize Confluence integration.

        Args:
            config: Must contain 'url', 'username', 'api_token'
        """
        super().__init__("confluence", config)

        url = config.get("url")
        username = config.get("username")
        api_token = config.get("api_token")

        if not all([url, username, api_token]):
            raise ValueError("Confluence configuration incomplete: url, username, api_token required")

        self.confluence = Confluence(
            url=url,
            username=username,
            password=api_token,
            cloud=True
        )

    async def create_page(
        self,
        space_key: str,
        title: str,
        body: str,
        parent_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """Create a new page"""
        def _create():
            return self.confluence.create_page(
                space=space_key,
                title=title,
                body=body,
                parent_id=parent_id,
                representation="storage"
            )

        return await self.call_api(_create)

    async def get_page(
        self,
        page_id: Optional[str] = None,
        space_key: Optional[str] = None,
        title: Optional[str] = None,
        expand: str = "body.storage,version"
    ) -> Dict[str, Any]:
        """Get page by ID or title"""
        def _get():
            if page_id:
                return self.confluence.get_page_by_id(
                    page_id=page_id,
                    expand=expand
                )
            elif space_key and title:
                return self.confluence.get_page_by_title(
                    space=space_key,
                    title=title,
                    expand=expand
                )
            else:
                raise ValueError("Must provide either page_id or (space_key + title)")

        return await self.call_api(_get)

    async def update_page(
        self,
        page_id: str,
        title: str,
        body: str
    ) -> Dict[str, Any]:
        """Update an existing page"""
        def _update():
            # Get current version
            page = self.confluence.get_page_by_id(page_id, expand="version")
            current_version = page["version"]["number"]

            return self.confluence.update_page(
                page_id=page_id,
                title=title,
                body=body,
                parent_id=None,
                type="page",
                representation="storage",
                minor_edit=False,
                version_comment="Updated via DSS MCP",
                version_number=current_version + 1
            )

        return await self.call_api(_update)

    async def search(self, cql: str, limit: int = 25) -> Dict[str, Any]:
        """Search pages using CQL"""
        def _search():
            return self.confluence.cql(cql, limit=limit)

        return await self.call_api(_search)

    async def get_space(self, space_key: str) -> Dict[str, Any]:
        """Get space details"""
        def _get():
            return self.confluence.get_space(space_key)

        return await self.call_api(_get)


class ConfluenceTools:
    """MCP tool executor for Confluence integration"""

    def __init__(self, config: Dict[str, Any]):
        self.confluence = ConfluenceIntegration(config)

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute Confluence tool"""
        handlers = {
            "confluence_create_page": self.confluence.create_page,
            "confluence_get_page": self.confluence.get_page,
            "confluence_update_page": self.confluence.update_page,
            "confluence_search": self.confluence.search,
            "confluence_get_space": self.confluence.get_space
        }

        handler = handlers.get(tool_name)
        if not handler:
            return {"error": f"Unknown Confluence tool: {tool_name}"}

        try:
            clean_args = {k: v for k, v in arguments.items() if not k.startswith("_")}
            result = await handler(**clean_args)
            return result
        except Exception as e:
            return {"error": str(e)}
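For orientation, a hypothetical driver for the executor above; the site URL and credentials are placeholders, not values from this commit:

# Sketch of the MCP dispatch path for Confluence (placeholder credentials).
import asyncio

async def demo():
    tools = ConfluenceTools({
        "url": "https://example.atlassian.net/wiki",
        "username": "bot@example.com",
        "api_token": "<api-token>",
    })
    # execute_tool routes to ConfluenceIntegration.search via the handler map
    result = await tools.execute_tool(
        "confluence_search",
        {"cql": "space=DSS AND type=page", "limit": 5},
    )
    print(result)  # page results, or {"error": ...} on failure

asyncio.run(demo())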
260
demo/tools/dss_mcp/integrations/figma.py
Normal file
@@ -0,0 +1,260 @@
"""
Figma Integration for MCP

Provides Figma API tools through circuit breaker pattern.
"""

import httpx
from typing import Dict, Any, List, Optional
from mcp import types

from .base import BaseIntegration
from ..config import integration_config


# Figma MCP Tool Definitions
FIGMA_TOOLS = [
    types.Tool(
        name="figma_get_file",
        description="Get Figma file metadata and structure",
        inputSchema={
            "type": "object",
            "properties": {
                "file_key": {
                    "type": "string",
                    "description": "Figma file key"
                }
            },
            "required": ["file_key"]
        }
    ),
    types.Tool(
        name="figma_get_styles",
        description="Get design styles (colors, text, effects) from Figma file",
        inputSchema={
            "type": "object",
            "properties": {
                "file_key": {
                    "type": "string",
                    "description": "Figma file key"
                }
            },
            "required": ["file_key"]
        }
    ),
    types.Tool(
        name="figma_get_components",
        description="Get component definitions from Figma file",
        inputSchema={
            "type": "object",
            "properties": {
                "file_key": {
                    "type": "string",
                    "description": "Figma file key"
                }
            },
            "required": ["file_key"]
        }
    ),
    types.Tool(
        name="figma_extract_tokens",
        description="Extract design tokens (variables) from Figma file",
        inputSchema={
            "type": "object",
            "properties": {
                "file_key": {
                    "type": "string",
                    "description": "Figma file key"
                }
            },
            "required": ["file_key"]
        }
    ),
    types.Tool(
        name="figma_get_node",
        description="Get specific node/component by ID from Figma file",
        inputSchema={
            "type": "object",
            "properties": {
                "file_key": {
                    "type": "string",
                    "description": "Figma file key"
                },
                "node_id": {
                    "type": "string",
                    "description": "Node ID to fetch"
                }
            },
            "required": ["file_key", "node_id"]
        }
    )
]


class FigmaIntegration(BaseIntegration):
    """Figma API integration with circuit breaker"""

    FIGMA_API_BASE = "https://api.figma.com/v1"

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize Figma integration.

        Args:
            config: Must contain 'api_token' or use FIGMA_TOKEN from env
        """
        super().__init__("figma", config)
        self.api_token = config.get("api_token") or integration_config.FIGMA_TOKEN

        if not self.api_token:
            raise ValueError("Figma API token not configured")

        self.headers = {
            "X-Figma-Token": self.api_token
        }

    async def get_file(self, file_key: str) -> Dict[str, Any]:
        """
        Get Figma file metadata and structure.

        Args:
            file_key: Figma file key

        Returns:
            File data
        """
        async def _fetch():
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.FIGMA_API_BASE}/files/{file_key}",
                    headers=self.headers,
                    timeout=30.0
                )
                response.raise_for_status()
                return response.json()

        return await self.call_api(_fetch)

    async def get_styles(self, file_key: str) -> Dict[str, Any]:
        """
        Get all styles from Figma file.

        Args:
            file_key: Figma file key

        Returns:
            Styles data
        """
        async def _fetch():
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.FIGMA_API_BASE}/files/{file_key}/styles",
                    headers=self.headers,
                    timeout=30.0
                )
                response.raise_for_status()
                return response.json()

        return await self.call_api(_fetch)

    async def get_components(self, file_key: str) -> Dict[str, Any]:
        """
        Get all components from Figma file.

        Args:
            file_key: Figma file key

        Returns:
            Components data
        """
        async def _fetch():
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.FIGMA_API_BASE}/files/{file_key}/components",
                    headers=self.headers,
                    timeout=30.0
                )
                response.raise_for_status()
                return response.json()

        return await self.call_api(_fetch)

    async def extract_tokens(self, file_key: str) -> Dict[str, Any]:
        """
        Extract design tokens (variables) from Figma file.

        Args:
            file_key: Figma file key

        Returns:
            Variables/tokens data
        """
        async def _fetch():
            async with httpx.AsyncClient() as client:
                # Get local variables
                response = await client.get(
                    f"{self.FIGMA_API_BASE}/files/{file_key}/variables/local",
                    headers=self.headers,
                    timeout=30.0
                )
                response.raise_for_status()
                return response.json()

        return await self.call_api(_fetch)

    async def get_node(self, file_key: str, node_id: str) -> Dict[str, Any]:
        """
        Get specific node from Figma file.

        Args:
            file_key: Figma file key
            node_id: Node ID

        Returns:
            Node data
        """
        async def _fetch():
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.FIGMA_API_BASE}/files/{file_key}/nodes",
                    headers=self.headers,
                    params={"ids": node_id},
                    timeout=30.0
                )
                response.raise_for_status()
                return response.json()

        return await self.call_api(_fetch)


class FigmaTools:
    """MCP tool executor for Figma integration"""

    def __init__(self, config: Dict[str, Any]):
        """
        Args:
            config: Figma configuration (with api_token)
        """
        self.figma = FigmaIntegration(config)

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute Figma tool"""
        handlers = {
            "figma_get_file": self.figma.get_file,
            "figma_get_styles": self.figma.get_styles,
            "figma_get_components": self.figma.get_components,
            "figma_extract_tokens": self.figma.extract_tokens,
            "figma_get_node": self.figma.get_node
        }

        handler = handlers.get(tool_name)
        if not handler:
            return {"error": f"Unknown Figma tool: {tool_name}"}

        try:
            # Remove tool-specific prefix from arguments if needed
            clean_args = {k: v for k, v in arguments.items() if not k.startswith("_")}
            result = await handler(**clean_args)
            return result
        except Exception as e:
            return {"error": str(e)}
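A hypothetical call through the Figma executor above; the file key is a placeholder and the token would normally come from FIGMA_TOKEN:

# Sketch of invoking a Figma tool via the executor (placeholder values).
import asyncio

async def demo():
    tools = FigmaTools({"api_token": "<figma-token>"})
    result = await tools.execute_tool(
        "figma_extract_tokens",
        {"file_key": "ABC123placeholder"},
    )
    print(result)  # variables payload, or {"error": ...} on failure

asyncio.run(demo())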
215
demo/tools/dss_mcp/integrations/jira.py
Normal file
@@ -0,0 +1,215 @@
"""
Jira Integration for MCP

Provides Jira API tools for issue tracking and project management.
"""

from typing import Dict, Any, List, Optional
from atlassian import Jira
from mcp import types

from .base import BaseIntegration


# Jira MCP Tool Definitions
JIRA_TOOLS = [
    types.Tool(
        name="jira_create_issue",
        description="Create a new Jira issue",
        inputSchema={
            "type": "object",
            "properties": {
                "project_key": {
                    "type": "string",
                    "description": "Jira project key (e.g., 'DSS')"
                },
                "summary": {
                    "type": "string",
                    "description": "Issue summary/title"
                },
                "description": {
                    "type": "string",
                    "description": "Issue description"
                },
                "issue_type": {
                    "type": "string",
                    "description": "Issue type (Story, Task, Bug, etc.)",
                    "default": "Task"
                }
            },
            "required": ["project_key", "summary"]
        }
    ),
    types.Tool(
        name="jira_get_issue",
        description="Get Jira issue details by key",
        inputSchema={
            "type": "object",
            "properties": {
                "issue_key": {
                    "type": "string",
                    "description": "Issue key (e.g., 'DSS-123')"
                }
            },
            "required": ["issue_key"]
        }
    ),
    types.Tool(
        name="jira_search_issues",
        description="Search Jira issues using JQL",
        inputSchema={
            "type": "object",
            "properties": {
                "jql": {
                    "type": "string",
                    "description": "JQL query (e.g., 'project=DSS AND status=Open')"
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results",
                    "default": 50
                }
            },
            "required": ["jql"]
        }
    ),
    types.Tool(
        name="jira_update_issue",
        description="Update a Jira issue",
        inputSchema={
            "type": "object",
            "properties": {
                "issue_key": {
                    "type": "string",
                    "description": "Issue key to update"
                },
                "fields": {
                    "type": "object",
                    "description": "Fields to update (summary, description, status, etc.)"
                }
            },
            "required": ["issue_key", "fields"]
        }
    ),
    types.Tool(
        name="jira_add_comment",
        description="Add a comment to a Jira issue",
        inputSchema={
            "type": "object",
            "properties": {
                "issue_key": {
                    "type": "string",
                    "description": "Issue key"
                },
                "comment": {
                    "type": "string",
                    "description": "Comment text"
                }
            },
            "required": ["issue_key", "comment"]
        }
    )
]


class JiraIntegration(BaseIntegration):
    """Jira API integration with circuit breaker"""

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize Jira integration.

        Args:
            config: Must contain 'url', 'username', 'api_token'
        """
        super().__init__("jira", config)

        url = config.get("url")
        username = config.get("username")
        api_token = config.get("api_token")

        if not all([url, username, api_token]):
            raise ValueError("Jira configuration incomplete: url, username, api_token required")

        self.jira = Jira(
            url=url,
            username=username,
            password=api_token,
            cloud=True
        )

    async def create_issue(
        self,
        project_key: str,
        summary: str,
        description: str = "",
        issue_type: str = "Task"
    ) -> Dict[str, Any]:
        """Create a new Jira issue"""
        def _create():
            fields = {
                "project": {"key": project_key},
                "summary": summary,
                "description": description,
                "issuetype": {"name": issue_type}
            }
            return self.jira.create_issue(fields)

        return await self.call_api(_create)

    async def get_issue(self, issue_key: str) -> Dict[str, Any]:
        """Get issue details"""
        def _get():
            return self.jira.get_issue(issue_key)

        return await self.call_api(_get)

    async def search_issues(self, jql: str, max_results: int = 50) -> Dict[str, Any]:
        """Search issues with JQL"""
        def _search():
            return self.jira.jql(jql, limit=max_results)

        return await self.call_api(_search)

    async def update_issue(self, issue_key: str, fields: Dict[str, Any]) -> Dict[str, Any]:
        """Update issue fields"""
        def _update():
            self.jira.update_issue_field(issue_key, fields)
            return {"status": "updated", "issue_key": issue_key}

        return await self.call_api(_update)

    async def add_comment(self, issue_key: str, comment: str) -> Dict[str, Any]:
        """Add comment to issue"""
        def _comment():
            return self.jira.issue_add_comment(issue_key, comment)

        return await self.call_api(_comment)


class JiraTools:
    """MCP tool executor for Jira integration"""

    def __init__(self, config: Dict[str, Any]):
        self.jira = JiraIntegration(config)

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute Jira tool"""
        handlers = {
            "jira_create_issue": self.jira.create_issue,
            "jira_get_issue": self.jira.get_issue,
            "jira_search_issues": self.jira.search_issues,
            "jira_update_issue": self.jira.update_issue,
            "jira_add_comment": self.jira.add_comment
        }

        handler = handlers.get(tool_name)
        if not handler:
            return {"error": f"Unknown Jira tool: {tool_name}"}

        try:
            clean_args = {k: v for k, v in arguments.items() if not k.startswith("_")}
            result = await handler(**clean_args)
            return result
        except Exception as e:
            return {"error": str(e)}
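Again for orientation, a hypothetical driver for the Jira executor; site and credentials are placeholders:

# Sketch of the Jira dispatch path (placeholder site and credentials).
import asyncio

async def demo():
    tools = JiraTools({
        "url": "https://example.atlassian.net",
        "username": "bot@example.com",
        "api_token": "<api-token>",
    })
    result = await tools.execute_tool(
        "jira_create_issue",
        {"project_key": "DSS", "summary": "Token drift in Button", "issue_type": "Bug"},
    )
    print(result)  # created issue, or {"error": ...} on failure

asyncio.run(demo())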
36
demo/tools/dss_mcp/requirements.txt
Normal file
@@ -0,0 +1,36 @@
# MCP Server Dependencies
# Model Context Protocol
mcp>=0.9.0

# Anthropic SDK
anthropic>=0.40.0

# FastAPI & SSE
fastapi>=0.104.0
sse-starlette>=1.8.0
uvicorn[standard]>=0.24.0

# HTTP Client
httpx>=0.25.0
aiohttp>=3.9.0

# Atlassian Integrations
atlassian-python-api>=3.41.0

# Encryption
cryptography>=42.0.0

# Async Task Queue (for worker pool)
celery[redis]>=5.3.0

# Caching
redis>=5.0.0

# Environment Variables
python-dotenv>=1.0.0

# Database
aiosqlite>=0.19.0

# Logging
structlog>=23.2.0
364
demo/tools/dss_mcp/server.py
Normal file
@@ -0,0 +1,364 @@
"""
DSS MCP Server

SSE-based Model Context Protocol server for Claude.
Provides project-isolated context and tools with user-scoped integrations.
"""

import asyncio
import json
import logging
import structlog
from typing import Optional, Dict, Any
from fastapi import FastAPI, Query, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette.sse import EventSourceResponse
from mcp.server import Server
from mcp import types

from .config import mcp_config, validate_config
from .context.project_context import get_context_manager
from .tools.project_tools import PROJECT_TOOLS, ProjectTools

# Configure logging
logging.basicConfig(
    level=mcp_config.LOG_LEVEL,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = structlog.get_logger()

# FastAPI app for SSE endpoints
app = FastAPI(
    title="DSS MCP Server",
    description="Model Context Protocol server for Design System Swarm",
    version="0.8.0"
)

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # TODO: Configure based on environment
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# MCP Server instance
mcp_server = Server("dss-mcp")

# Store active sessions
_active_sessions: Dict[str, Dict[str, Any]] = {}


def get_session_key(project_id: str, user_id: Optional[int] = None) -> str:
    """Generate session key for caching"""
    return f"{project_id}:{user_id or 'anonymous'}"


@app.on_event("startup")
async def startup():
    """Startup tasks"""
    logger.info("Starting DSS MCP Server")

    # Validate configuration
    warnings = validate_config()
    if warnings:
        for warning in warnings:
            logger.warning(warning)

    logger.info(
        "DSS MCP Server started",
        host=mcp_config.HOST,
        port=mcp_config.PORT
    )


@app.on_event("shutdown")
async def shutdown():
    """Cleanup on shutdown"""
    logger.info("Shutting down DSS MCP Server")


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    context_manager = get_context_manager()
    return {
        "status": "healthy",
        "server": "dss-mcp",
        "version": "0.8.0",
        "cache_size": len(context_manager._cache),
        "active_sessions": len(_active_sessions)
    }


@app.get("/sse")
async def sse_endpoint(
    project_id: str = Query(..., description="Project ID for context isolation"),
    user_id: Optional[int] = Query(None, description="User ID for user-scoped integrations")
):
    """
    Server-Sent Events endpoint for MCP communication.

    This endpoint maintains a persistent connection with the client
    and streams MCP protocol messages.
    """
    session_key = get_session_key(project_id, user_id)

    logger.info(
        "SSE connection established",
        project_id=project_id,
        user_id=user_id,
        session_key=session_key
    )

    # Load project context
    context_manager = get_context_manager()
    try:
        project_context = await context_manager.get_context(project_id, user_id)
        if not project_context:
            raise HTTPException(status_code=404, detail=f"Project not found: {project_id}")
    except Exception as e:
        logger.error("Failed to load project context", error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to load project: {str(e)}")

    # Create project tools instance
    project_tools = ProjectTools(user_id)

    # Track session
    _active_sessions[session_key] = {
        "project_id": project_id,
        "user_id": user_id,
        "connected_at": asyncio.get_event_loop().time(),
        "project_tools": project_tools
    }

    async def event_generator():
        """Generate SSE events for MCP communication"""
        try:
            # Send initial connection confirmation
            yield {
                "event": "connected",
                "data": json.dumps({
                    "project_id": project_id,
                    "project_name": project_context.name,
                    "available_tools": len(PROJECT_TOOLS),
                    "integrations_enabled": list(project_context.integrations.keys())
                })
            }

            # Keep connection alive
            while True:
                await asyncio.sleep(30)  # Heartbeat every 30 seconds
                yield {
                    "event": "heartbeat",
                    "data": json.dumps({"timestamp": asyncio.get_event_loop().time()})
                }

        except asyncio.CancelledError:
            logger.info("SSE connection closed", session_key=session_key)
        finally:
            # Cleanup session
            if session_key in _active_sessions:
                del _active_sessions[session_key]

    return EventSourceResponse(event_generator())


# MCP Protocol Handlers
@mcp_server.list_tools()
async def list_tools() -> list[types.Tool]:
    """
    List all available tools.

    Tools are dynamically determined based on:
    - Base DSS project tools (always available)
    - User's enabled integrations (Figma, Jira, Confluence, etc.)
    """
    # Start with base project tools
    tools = PROJECT_TOOLS.copy()

    # TODO: Add integration-specific tools based on user's enabled integrations
    # This will be implemented in Phase 3

    logger.debug("Listed tools", tool_count=len(tools))
    return tools


@mcp_server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
    """
    Execute a tool by name.

    Args:
        name: Tool name
        arguments: Tool arguments (must include project_id)

    Returns:
        Tool execution results
    """
    logger.info("Tool called", tool_name=name, arguments=arguments)

    project_id = arguments.get("project_id")
    if not project_id:
        return [
            types.TextContent(
                type="text",
                text=json.dumps({"error": "project_id is required"})
            )
        ]

    # Find active session for this project
    # For now, use first matching session (can be enhanced with session management)
    session_key = None
    project_tools = None

    for key, session in _active_sessions.items():
        if session["project_id"] == project_id:
            session_key = key
            project_tools = session["project_tools"]
            break

    if not project_tools:
        # Create temporary tools instance
        project_tools = ProjectTools()

    # Execute tool
    try:
        result = await project_tools.execute_tool(name, arguments)

        return [
            types.TextContent(
                type="text",
                text=json.dumps(result, indent=2)
            )
        ]
    except Exception as e:
        logger.error("Tool execution failed", tool_name=name, error=str(e))
        return [
            types.TextContent(
                type="text",
                text=json.dumps({"error": str(e)})
            )
        ]


@mcp_server.list_resources()
async def list_resources() -> list[types.Resource]:
    """
    List available resources.

    Resources provide static or dynamic content that Claude can access.
    Examples: project documentation, component specs, design system guidelines.
    """
    # TODO: Implement resources based on project context
    # For now, return empty list
    return []


@mcp_server.read_resource()
async def read_resource(uri: str) -> str:
    """
    Read a specific resource by URI.

    Args:
        uri: Resource URI (e.g., "dss://project-id/components/Button")

    Returns:
        Resource content
    """
    # TODO: Implement resource reading
    # For now, return not implemented
    return json.dumps({"error": "Resource reading not yet implemented"})


@mcp_server.list_prompts()
async def list_prompts() -> list[types.Prompt]:
    """
    List available prompt templates.

    Prompts provide pre-configured conversation starters for Claude.
    """
    # TODO: Add DSS-specific prompt templates
    # Examples: "Analyze component consistency", "Review token usage", etc.
    return []


@mcp_server.get_prompt()
async def get_prompt(name: str, arguments: dict) -> types.GetPromptResult:
    """
    Get a specific prompt template.

    Args:
        name: Prompt name
        arguments: Prompt arguments

    Returns:
        Prompt content
    """
    # TODO: Implement prompt templates
    return types.GetPromptResult(
        description="Prompt not found",
        messages=[]
    )


# API endpoint to call MCP tools directly (for testing/debugging)
@app.post("/api/tools/{tool_name}")
async def call_tool_api(tool_name: str, arguments: Dict[str, Any]):
    """
    Direct API endpoint to call MCP tools.

    Useful for testing tools without MCP client.
    """
    project_tools = ProjectTools()
    result = await project_tools.execute_tool(tool_name, arguments)
    return result


# API endpoint to list active sessions
@app.get("/api/sessions")
async def list_sessions():
    """List all active SSE sessions"""
    return {
        "active_sessions": len(_active_sessions),
        "sessions": [
            {
                "project_id": session["project_id"],
                "user_id": session["user_id"],
                "connected_at": session["connected_at"]
            }
            for session in _active_sessions.values()
        ]
    }


# API endpoint to clear context cache
@app.post("/api/cache/clear")
async def clear_cache(project_id: Optional[str] = None):
    """Clear context cache for a project or all projects"""
    context_manager = get_context_manager()
    context_manager.clear_cache(project_id)

    return {
        "status": "cache_cleared",
        "project_id": project_id or "all"
    }


if __name__ == "__main__":
    import uvicorn

    logger.info(
        "Starting DSS MCP Server",
        host=mcp_config.HOST,
        port=mcp_config.PORT
    )

    uvicorn.run(
        "server:app",
        host=mcp_config.HOST,
        port=mcp_config.PORT,
        reload=True,
        log_level=mcp_config.LOG_LEVEL.lower()
    )
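A quick smoke test against the /api/tools debug endpoint above, assuming the server is running locally; 8000 stands in for mcp_config.PORT and "demo-project" is a hypothetical project ID:

# Hypothetical smoke test for the direct tool-call endpoint.
import httpx

resp = httpx.post(
    "http://localhost:8000/api/tools/dss_get_project_summary",
    json={"project_id": "demo-project"},
    timeout=10.0,
)
print(resp.status_code, resp.json())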
0
demo/tools/dss_mcp/tools/__init__.py
Normal file
321
demo/tools/dss_mcp/tools/project_tools.py
Normal file
@@ -0,0 +1,321 @@
"""
DSS Project Tools for MCP

Base tools that Claude can use to interact with DSS projects.
All tools are project-scoped and context-aware.
"""

from typing import Dict, Any, List, Optional
from mcp import types

from ..context.project_context import get_context_manager


# Tool definitions (metadata for Claude)
PROJECT_TOOLS = [
    types.Tool(
        name="dss_get_project_summary",
        description="Get comprehensive project summary including components, tokens, health, and stats",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {
                    "type": "string",
                    "description": "Project ID to query"
                },
                "include_components": {
                    "type": "boolean",
                    "description": "Include full component list (default: false)",
                    "default": False
                }
            },
            "required": ["project_id"]
        }
    ),
    types.Tool(
        name="dss_list_components",
        description="List all components in a project with their properties",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {
                    "type": "string",
                    "description": "Project ID"
                },
                "filter_name": {
                    "type": "string",
                    "description": "Optional: Filter by component name (partial match)"
                },
                "code_generated_only": {
                    "type": "boolean",
                    "description": "Optional: Only show components with generated code",
                    "default": False
                }
            },
            "required": ["project_id"]
        }
    ),
    types.Tool(
        name="dss_get_component",
        description="Get detailed information about a specific component",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {
                    "type": "string",
                    "description": "Project ID"
                },
                "component_name": {
                    "type": "string",
                    "description": "Component name (exact match)"
                }
            },
            "required": ["project_id", "component_name"]
        }
    ),
    types.Tool(
        name="dss_get_design_tokens",
        description="Get all design tokens (colors, typography, spacing, etc.) for a project",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {
                    "type": "string",
                    "description": "Project ID"
                },
                "token_category": {
                    "type": "string",
                    "description": "Optional: Filter by token category (colors, typography, spacing, etc.)",
                    "enum": ["colors", "typography", "spacing", "shadows", "borders", "all"]
                }
            },
            "required": ["project_id"]
        }
    ),
    types.Tool(
        name="dss_get_project_health",
        description="Get project health score, grade, and list of issues",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {
                    "type": "string",
                    "description": "Project ID"
                }
            },
            "required": ["project_id"]
        }
    ),
    types.Tool(
        name="dss_list_styles",
        description="List design styles (text, fill, effect, grid) from Figma",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {
                    "type": "string",
                    "description": "Project ID"
                },
                "style_type": {
                    "type": "string",
                    "description": "Optional: Filter by style type",
                    "enum": ["TEXT", "FILL", "EFFECT", "GRID", "all"]
                }
            },
            "required": ["project_id"]
        }
    ),
    types.Tool(
        name="dss_get_discovery_data",
        description="Get project discovery/scan data (file counts, technologies detected, etc.)",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {
                    "type": "string",
                    "description": "Project ID"
                }
            },
            "required": ["project_id"]
        }
    )
]


# Tool implementations
class ProjectTools:
    """Project tool implementations"""

    def __init__(self, user_id: Optional[int] = None):
        self.context_manager = get_context_manager()
        self.user_id = user_id

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute a tool by name"""
        handlers = {
            "dss_get_project_summary": self.get_project_summary,
            "dss_list_components": self.list_components,
            "dss_get_component": self.get_component,
            "dss_get_design_tokens": self.get_design_tokens,
            "dss_get_project_health": self.get_project_health,
            "dss_list_styles": self.list_styles,
            "dss_get_discovery_data": self.get_discovery_data
        }

        handler = handlers.get(tool_name)
        if not handler:
            return {"error": f"Unknown tool: {tool_name}"}

        try:
            result = await handler(**arguments)
            return result
        except Exception as e:
            return {"error": str(e)}

    async def get_project_summary(
        self,
        project_id: str,
        include_components: bool = False
    ) -> Dict[str, Any]:
        """Get comprehensive project summary"""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}

        summary = {
            "project_id": context.project_id,
            "name": context.name,
            "description": context.description,
            "component_count": context.component_count,
            "health": context.health,
            "stats": context.stats,
            "config": context.config,
            "integrations_enabled": list(context.integrations.keys()),
            "loaded_at": context.loaded_at.isoformat()
        }

        if include_components:
            summary["components"] = context.components

        return summary

    async def list_components(
        self,
        project_id: str,
        filter_name: Optional[str] = None,
        code_generated_only: bool = False
    ) -> Dict[str, Any]:
        """List components with optional filtering"""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}

        components = context.components

        # Apply filters
        if filter_name:
            components = [
                c for c in components
                if filter_name.lower() in c['name'].lower()
            ]

        if code_generated_only:
            components = [c for c in components if c.get('code_generated')]

        return {
            "project_id": project_id,
            "total_count": len(components),
            "components": components
        }

    async def get_component(
        self,
        project_id: str,
        component_name: str
    ) -> Dict[str, Any]:
        """Get detailed component information"""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}

        # Find component by name
        component = next(
            (c for c in context.components if c['name'] == component_name),
            None
        )

        if not component:
            return {"error": f"Component not found: {component_name}"}

        return {
            "project_id": project_id,
            "component": component
        }

    async def get_design_tokens(
        self,
        project_id: str,
        token_category: Optional[str] = None
    ) -> Dict[str, Any]:
        """Get design tokens, optionally filtered by category"""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}

        tokens = context.tokens

        if token_category and token_category != "all":
            # Filter by category
            if token_category in tokens:
                tokens = {token_category: tokens[token_category]}
            else:
                tokens = {}

        return {
            "project_id": project_id,
            "tokens": tokens,
            "categories": list(tokens.keys())
        }

    async def get_project_health(self, project_id: str) -> Dict[str, Any]:
        """Get project health information"""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}

        return {
            "project_id": project_id,
            "health": context.health
        }

    async def list_styles(
        self,
        project_id: str,
        style_type: Optional[str] = None
    ) -> Dict[str, Any]:
        """List design styles with optional type filter"""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}

        styles = context.styles

        if style_type and style_type != "all":
            styles = [s for s in styles if s['type'] == style_type]

        return {
            "project_id": project_id,
            "total_count": len(styles),
            "styles": styles
        }

    async def get_discovery_data(self, project_id: str) -> Dict[str, Any]:
        """Get project discovery/scan data"""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}

        return {
            "project_id": project_id,
            "discovery": context.discovery
        }
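A direct, transport-free call into the project tools above; "demo-project" is a hypothetical ID that the context manager would need to resolve:

# Sketch of invoking a DSS project tool without the MCP/SSE transport.
import asyncio

async def demo():
    tools = ProjectTools(user_id=None)
    summary = await tools.execute_tool(
        "dss_get_project_summary",
        {"project_id": "demo-project", "include_components": False},
    )
    print(summary)  # summary dict, or {"error": ...} if the project is unknown

asyncio.run(demo())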
867
demo/tools/figma/figma_tools.py
Normal file
@@ -0,0 +1,867 @@
|
||||
"""
|
||||
Design System Server (DSS) - Figma Tool Suite
|
||||
|
||||
Complete MCP tool suite for Figma integration:
|
||||
1. figma_extract_variables - Extract design tokens/variables
|
||||
2. figma_extract_components - Extract component definitions
|
||||
3. figma_extract_styles - Extract text/color/effect styles
|
||||
4. figma_sync_tokens - Sync tokens to code
|
||||
5. figma_visual_diff - Compare visual changes
|
||||
6. figma_validate_components - Validate against schema
|
||||
7. figma_generate_code - Generate component code
|
||||
|
||||
Uses SQLite cache for persistence and config module for token management.
|
||||
"""
|
||||
|
||||
import json
|
||||
import hashlib
|
||||
import asyncio
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from typing import Optional, Dict, List, Any
|
||||
from dataclasses import dataclass, asdict
|
||||
from pathlib import Path
|
||||
import httpx
|
||||
|
||||
# Add parent to path for imports
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from config import config
|
||||
from storage.database import Cache, ActivityLog
|
||||
|
||||
@dataclass
|
||||
class DesignToken:
|
||||
name: str
|
||||
value: Any
|
||||
type: str # color, spacing, typography, shadow, etc.
|
||||
description: str = ""
|
||||
category: str = ""
|
||||
|
||||
@dataclass
|
||||
class ComponentDefinition:
|
||||
name: str
|
||||
key: str
|
||||
description: str
|
||||
properties: Dict[str, Any]
|
||||
variants: List[Dict[str, Any]]
|
||||
|
||||
@dataclass
|
||||
class StyleDefinition:
|
||||
name: str
|
||||
key: str
|
||||
type: str # TEXT, FILL, EFFECT, GRID
|
||||
properties: Dict[str, Any]
|
||||
|
||||
|
||||
class FigmaClient:
|
||||
"""Figma API client with SQLite caching and rate limiting."""
|
||||
|
||||
def __init__(self, token: Optional[str] = None):
|
||||
# Use token from config if not provided
|
||||
self.token = token or config.figma.token
|
||||
self.base_url = "https://api.figma.com/v1"
|
||||
self.cache_ttl = config.figma.cache_ttl
|
||||
self._use_real_api = bool(self.token)
|
||||
|
||||
def _cache_key(self, endpoint: str) -> str:
|
||||
return f"figma:{hashlib.md5(endpoint.encode()).hexdigest()}"
|
||||
|
||||
async def _request(self, endpoint: str) -> Dict[str, Any]:
|
||||
"""Make authenticated request to Figma API with SQLite caching."""
|
||||
if not self._use_real_api:
|
||||
# Return mock data for local development
|
||||
return self._get_mock_data(endpoint)
|
||||
|
||||
cache_key = self._cache_key(endpoint)
|
||||
|
||||
# Check SQLite cache first
|
||||
cached = Cache.get(cache_key)
|
||||
if cached is not None:
|
||||
return cached
|
||||
|
||||
# Make real API request
|
||||
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||
response = await client.get(
|
||||
f"{self.base_url}{endpoint}",
|
||||
headers={"X-Figma-Token": self.token}
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
# Store in SQLite cache
|
||||
Cache.set(cache_key, data, ttl=self.cache_ttl)
|
||||
|
||||
# Log activity
|
||||
ActivityLog.log(
|
||||
action="figma_api_call",
|
||||
entity_type="figma",
|
||||
details={"endpoint": endpoint, "cached": False}
|
||||
)
|
||||
|
||||
return data
|
||||
|
||||
def _get_mock_data(self, endpoint: str) -> Dict[str, Any]:
|
||||
"""Return mock data for local development."""
|
||||
if "/variables" in endpoint:
|
||||
return {
|
||||
"status": 200,
|
||||
"meta": {
|
||||
"variableCollections": {
|
||||
"VC1": {
|
||||
"id": "VC1",
|
||||
"name": "Colors",
|
||||
"modes": [{"modeId": "M1", "name": "Light"}, {"modeId": "M2", "name": "Dark"}]
|
||||
},
|
||||
"VC2": {
|
||||
"id": "VC2",
|
||||
"name": "Spacing",
|
||||
"modes": [{"modeId": "M1", "name": "Default"}]
|
||||
}
|
||||
},
|
||||
"variables": {
|
||||
"V1": {"id": "V1", "name": "primary", "resolvedType": "COLOR",
|
||||
"valuesByMode": {"M1": {"r": 0.2, "g": 0.4, "b": 0.9, "a": 1}}},
|
||||
"V2": {"id": "V2", "name": "secondary", "resolvedType": "COLOR",
|
||||
"valuesByMode": {"M1": {"r": 0.5, "g": 0.5, "b": 0.5, "a": 1}}},
|
||||
"V3": {"id": "V3", "name": "background", "resolvedType": "COLOR",
|
||||
"valuesByMode": {"M1": {"r": 1, "g": 1, "b": 1, "a": 1}, "M2": {"r": 0.1, "g": 0.1, "b": 0.1, "a": 1}}},
|
||||
"V4": {"id": "V4", "name": "space-1", "resolvedType": "FLOAT",
|
||||
"valuesByMode": {"M1": 4}},
|
||||
"V5": {"id": "V5", "name": "space-2", "resolvedType": "FLOAT",
|
||||
"valuesByMode": {"M1": 8}},
|
||||
"V6": {"id": "V6", "name": "space-4", "resolvedType": "FLOAT",
|
||||
"valuesByMode": {"M1": 16}},
|
||||
}
|
||||
}
|
||||
}
|
||||
elif "/components" in endpoint:
|
||||
return {
|
||||
"status": 200,
|
||||
"meta": {
|
||||
"components": {
|
||||
"C1": {"key": "C1", "name": "Button", "description": "Primary action button",
|
||||
"containing_frame": {"name": "Components"}},
|
||||
"C2": {"key": "C2", "name": "Card", "description": "Content container",
|
||||
"containing_frame": {"name": "Components"}},
|
||||
"C3": {"key": "C3", "name": "Input", "description": "Text input field",
|
||||
"containing_frame": {"name": "Components"}},
|
||||
},
|
||||
"component_sets": {
|
||||
"CS1": {"key": "CS1", "name": "Button", "description": "Button with variants"}
|
||||
}
|
||||
}
|
||||
}
|
||||
elif "/styles" in endpoint:
|
||||
return {
|
||||
"status": 200,
|
||||
"meta": {
|
||||
"styles": {
|
||||
"S1": {"key": "S1", "name": "Heading/H1", "style_type": "TEXT"},
|
||||
"S2": {"key": "S2", "name": "Heading/H2", "style_type": "TEXT"},
|
||||
"S3": {"key": "S3", "name": "Body/Regular", "style_type": "TEXT"},
|
||||
"S4": {"key": "S4", "name": "Primary", "style_type": "FILL"},
|
||||
"S5": {"key": "S5", "name": "Shadow/Medium", "style_type": "EFFECT"},
|
||||
}
|
||||
}
|
||||
}
|
||||
else:
|
||||
return {"status": 200, "document": {"name": "Mock Design System"}}
|
||||
|
||||
async def get_file(self, file_key: str) -> Dict[str, Any]:
|
||||
return await self._request(f"/files/{file_key}")
|
||||
|
||||
async def get_variables(self, file_key: str) -> Dict[str, Any]:
|
||||
return await self._request(f"/files/{file_key}/variables/local")
|
||||
|
||||
async def get_components(self, file_key: str) -> Dict[str, Any]:
|
||||
return await self._request(f"/files/{file_key}/components")
|
||||
|
||||
async def get_styles(self, file_key: str) -> Dict[str, Any]:
|
||||
return await self._request(f"/files/{file_key}/styles")
|
||||
|
||||
|
||||
class FigmaToolSuite:
|
||||
"""Complete Figma tool suite for design system management."""
|
||||
|
||||
def __init__(self, token: Optional[str] = None, output_dir: str = "./output"):
|
||||
self.client = FigmaClient(token)
|
||||
self.output_dir = Path(output_dir)
|
||||
self.output_dir.mkdir(parents=True, exist_ok=True)
|
||||
self._is_real_api = self.client._use_real_api
|
||||
|
||||
@property
|
||||
def mode(self) -> str:
|
||||
"""Return current mode: 'live' or 'mock'."""
|
||||
return "live" if self._is_real_api else "mock"
|
||||
|
||||
# === Tool 1: Extract Variables/Tokens ===
|
||||
|
||||
async def extract_variables(self, file_key: str, format: str = "css") -> Dict[str, Any]:
|
||||
"""
|
||||
Extract design tokens/variables from Figma file.
|
||||
|
||||
Args:
|
||||
file_key: Figma file key
|
||||
format: Output format (css, json, scss, js)
|
||||
|
||||
Returns:
|
||||
Extracted tokens in specified format
|
||||
"""
|
||||
data = await self.client.get_variables(file_key)
|
||||
|
||||
collections = data.get("meta", {}).get("variableCollections", {})
|
||||
variables = data.get("meta", {}).get("variables", {})
|
||||
|
||||
tokens: List[DesignToken] = []
|
||||
|
||||
for var_id, var in variables.items():
|
||||
name = var.get("name", "")
|
||||
var_type = var.get("resolvedType", "")
|
||||
values = var.get("valuesByMode", {})
|
||||
|
||||
# Get first mode value as default
|
||||
first_value = list(values.values())[0] if values else None
|
||||
|
||||
token_type = self._map_figma_type(var_type)
|
||||
formatted_value = self._format_value(first_value, token_type)
|
||||
|
||||
tokens.append(DesignToken(
|
||||
name=self._to_css_name(name),
|
||||
value=formatted_value,
|
||||
type=token_type,
|
||||
category=self._get_category(name)
|
||||
))
|
||||
|
||||
# Generate output in requested format
|
||||
output = self._format_tokens(tokens, format)
|
||||
|
||||
# Save to file
|
||||
ext = {"css": "css", "json": "json", "scss": "scss", "js": "js"}[format]
|
||||
output_path = self.output_dir / f"tokens.{ext}"
|
||||
output_path.write_text(output)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"tokens_count": len(tokens),
|
||||
"collections": list(collections.keys()),
|
||||
"output_path": str(output_path),
|
||||
"tokens": [asdict(t) for t in tokens],
|
||||
"formatted_output": output
|
||||
}
|
||||
|
||||
    # === Tool 2: Extract Components ===

    # Pages to skip when scanning for component pages
    SKIP_PAGES = {
        'Thumbnail', 'Changelog', 'Credits', 'Colors', 'Typography',
        'Icons', 'Shadows', '---'
    }

    async def extract_components(self, file_key: str) -> Dict[str, Any]:
        """
        Extract component definitions from Figma file.

        Args:
            file_key: Figma file key

        Returns:
            Component definitions with properties and variants
        """
        definitions: List[ComponentDefinition] = []
        component_sets_count = 0

        # First try the published components endpoint
        try:
            data = await self.client.get_components(file_key)

            components_data = data.get("meta", {}).get("components", {})
            component_sets_data = data.get("meta", {}).get("component_sets", {})

            # Handle both dict (mock) and list (real API) formats
            if isinstance(components_data, dict):
                components_iter = list(components_data.items())
            elif isinstance(components_data, list):
                components_iter = [(c.get("key", c.get("node_id", "")), c) for c in components_data]
            else:
                components_iter = []

            # Count component sets (dict and list formats count the same way)
            if isinstance(component_sets_data, (dict, list)):
                component_sets_count = len(component_sets_data)

            for comp_id, comp in components_iter:
                definitions.append(ComponentDefinition(
                    name=comp.get("name", ""),
                    key=comp.get("key", comp_id),
                    description=comp.get("description", ""),
                    properties={},
                    variants=[]
                ))
        except Exception:
            pass

        # If no published components, scan document pages for component pages
        if len(definitions) == 0:
            try:
                file_data = await self.client.get_file(file_key)
                doc = file_data.get("document", {})

                for page in doc.get("children", []):
                    page_name = page.get("name", "")
                    page_type = page.get("type", "")

                    # Skip non-component pages
                    if page_type != "CANVAS":
                        continue
                    if page_name.startswith("📖") or page_name.startswith("---"):
                        continue
                    if page_name in self.SKIP_PAGES:
                        continue

                    # This looks like a component page
                    definitions.append(ComponentDefinition(
                        name=page_name,
                        key=page.get("id", ""),
                        description=f"Component page: {page_name}",
                        properties={},
                        variants=[]
                    ))
            except Exception:
                pass

        output_path = self.output_dir / "components.json"
        output_path.write_text(json.dumps([asdict(d) for d in definitions], indent=2))

        return {
            "success": True,
            "components_count": len(definitions),
            "component_sets_count": component_sets_count,
            "output_path": str(output_path),
            "components": [asdict(d) for d in definitions]
        }

    # === Tool 3: Extract Styles ===

    async def extract_styles(self, file_key: str) -> Dict[str, Any]:
        """
        Extract text, color, and effect styles from Figma file.

        Args:
            file_key: Figma file key

        Returns:
            Style definitions organized by type
        """
        definitions: List[StyleDefinition] = []
        by_type = {"TEXT": [], "FILL": [], "EFFECT": [], "GRID": []}

        # First, try the published styles endpoint
        try:
            data = await self.client.get_styles(file_key)
            styles_data = data.get("meta", {}).get("styles", {})

            # Handle both dict (mock/some endpoints) and list (real API) formats
            if isinstance(styles_data, dict):
                styles_iter = list(styles_data.items())
            elif isinstance(styles_data, list):
                styles_iter = [(s.get("key", s.get("node_id", "")), s) for s in styles_data]
            else:
                styles_iter = []

            for style_id, style in styles_iter:
                style_type = style.get("style_type", "")
                defn = StyleDefinition(
                    name=style.get("name", ""),
                    key=style.get("key", style_id),
                    type=style_type,
                    properties={}
                )
                definitions.append(defn)
                if style_type in by_type:
                    by_type[style_type].append(asdict(defn))
        except Exception:
            pass

        # Also check document-level styles (for community/unpublished files)
        if len(definitions) == 0:
            try:
                file_data = await self.client.get_file(file_key)
                doc_styles = file_data.get("styles", {})

                for style_id, style in doc_styles.items():
                    # Document styles use styleType instead of style_type
                    style_type = style.get("styleType", "")
                    defn = StyleDefinition(
                        name=style.get("name", ""),
                        key=style_id,
                        type=style_type,
                        properties={}
                    )
                    definitions.append(defn)
                    if style_type in by_type:
                        by_type[style_type].append(asdict(defn))
            except Exception:
                pass

        output_path = self.output_dir / "styles.json"
        output_path.write_text(json.dumps({
            "all": [asdict(d) for d in definitions],
            "by_type": by_type
        }, indent=2))

        return {
            "success": True,
            "styles_count": len(definitions),
            "by_type": {k: len(v) for k, v in by_type.items()},
            "output_path": str(output_path),
            "styles": by_type
        }

    # === Tool 4: Sync Tokens ===

    async def sync_tokens(self, file_key: str, target_path: str, format: str = "css") -> Dict[str, Any]:
        """
        Sync tokens from Figma to target code path.

        Args:
            file_key: Figma file key
            target_path: Target file path for synced tokens
            format: Output format

        Returns:
            Sync result with diff information
        """
        # Extract current tokens
        result = await self.extract_variables(file_key, format)

        target = Path(target_path)
        existing_content = target.read_text() if target.exists() else ""
        new_content = result["formatted_output"]

        # Calculate diff
        has_changes = existing_content != new_content

        if has_changes:
            # Backup existing
            if target.exists():
                backup_path = target.with_suffix(f".backup{target.suffix}")
                backup_path.write_text(existing_content)

            # Write new tokens
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(new_content)

        return {
            "success": True,
            "has_changes": has_changes,
            "tokens_synced": result["tokens_count"],
            "target_path": str(target),
            "backup_created": has_changes and bool(existing_content)
        }

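    # Usage sketch (illustrative, not part of the original file): a single
    # sync call writes the formatted tokens and backs up any previous file.
    # The file key and target path below are placeholders.
    #
    #   result = await suite.sync_tokens("abc123", "src/styles/tokens.css")
    #   if result["has_changes"]:
    #       print(f"Updated {result['target_path']} "
    #             f"(backup created: {result['backup_created']})")
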
    # === Tool 5: Visual Diff ===

    async def visual_diff(self, file_key: str, baseline_version: str = "latest") -> Dict[str, Any]:
        """
        Compare visual changes between versions.

        Args:
            file_key: Figma file key
            baseline_version: Version to compare against

        Returns:
            Visual diff results
        """
        # In a real implementation, this would:
        # 1. Fetch node images for both versions
        # 2. Run pixel comparison
        # 3. Generate diff visualization
        # The payload below is placeholder/mock data.

        return {
            "success": True,
            "file_key": file_key,
            "baseline": baseline_version,
            "current": "latest",
            "changes_detected": True,
            "changed_components": [
                {"name": "Button", "change_percent": 5.2, "type": "color"},
                {"name": "Card", "change_percent": 0.0, "type": "none"},
            ],
            "summary": {
                "total_components": 3,
                "changed": 1,
                "unchanged": 2
            }
        }

    # === Tool 6: Validate Components ===

    async def validate_components(self, file_key: str, schema_path: Optional[str] = None) -> Dict[str, Any]:
        """
        Validate components against design system schema.

        Args:
            file_key: Figma file key
            schema_path: Optional path to validation schema (accepted but not yet used)

        Returns:
            Validation results with issues
        """
        components = await self.extract_components(file_key)

        issues: List[Dict[str, Any]] = []

        # Run validation rules
        for comp in components["components"]:
            # Rule 1: Component naming convention (guard against empty names)
            if not comp["name"] or not comp["name"][0].isupper():
                issues.append({
                    "component": comp["name"],
                    "rule": "naming-convention",
                    "severity": "warning",
                    "message": f"Component '{comp['name']}' should start with uppercase"
                })

            # Rule 2: Description required
            if not comp.get("description"):
                issues.append({
                    "component": comp["name"],
                    "rule": "description-required",
                    "severity": "info",
                    "message": f"Component '{comp['name']}' missing description"
                })

        return {
            "success": True,
            "valid": len([i for i in issues if i["severity"] == "error"]) == 0,
            "components_checked": len(components["components"]),
            "issues": issues,
            "summary": {
                "errors": len([i for i in issues if i["severity"] == "error"]),
                "warnings": len([i for i in issues if i["severity"] == "warning"]),
                "info": len([i for i in issues if i["severity"] == "info"])
            }
        }

    # === Tool 7: Generate Code ===

    async def generate_code(self, file_key: str, component_name: str,
                            framework: str = "webcomponent") -> Dict[str, Any]:
        """
        Generate component code from Figma definition.

        Args:
            file_key: Figma file key
            component_name: Name of component to generate
            framework: Target framework (webcomponent, react, vue)

        Returns:
            Generated code
        """
        components = await self.extract_components(file_key)

        # Find the component
        comp = next((c for c in components["components"] if c["name"].lower() == component_name.lower()), None)

        if not comp:
            return {
                "success": False,
                "error": f"Component '{component_name}' not found"
            }

        # Generate code based on framework; web component is the default
        if framework == "react":
            code = self._generate_react(comp)
        elif framework == "vue":
            code = self._generate_vue(comp)
        else:
            code = self._generate_webcomponent(comp)

        output_path = self.output_dir / f"{comp['name'].lower()}.{self._get_extension(framework)}"
        output_path.write_text(code)

        return {
            "success": True,
            "component": comp["name"],
            "framework": framework,
            "output_path": str(output_path),
            "code": code
        }

    # === Helper Methods ===

    def _map_figma_type(self, figma_type: str) -> str:
        mapping = {
            "COLOR": "color",
            "FLOAT": "dimension",
            "STRING": "string",
            "BOOLEAN": "boolean"
        }
        return mapping.get(figma_type, "unknown")

    def _format_value(self, value: Any, token_type: str) -> str:
        if token_type == "color" and isinstance(value, dict):
            r = int(value.get("r", 0) * 255)
            g = int(value.get("g", 0) * 255)
            b = int(value.get("b", 0) * 255)
            a = value.get("a", 1)
            if a < 1:
                return f"rgba({r}, {g}, {b}, {a})"
            return f"rgb({r}, {g}, {b})"
        elif token_type == "dimension":
            return f"{value}px"
        return str(value)

    def _to_css_name(self, name: str) -> str:
        return name.lower().replace(" ", "-").replace("/", "-")

    def _get_category(self, name: str) -> str:
        name_lower = name.lower()
        if any(c in name_lower for c in ["color", "primary", "secondary", "background"]):
            return "color"
        if any(c in name_lower for c in ["space", "gap", "padding", "margin"]):
            return "spacing"
        if any(c in name_lower for c in ["font", "text", "heading"]):
            return "typography"
        return "other"

    def _format_tokens(self, tokens: List[DesignToken], format: str) -> str:
        if format == "css":
            lines = [":root {"]
            for t in tokens:
                lines.append(f"  --{t.name}: {t.value};")
            lines.append("}")
            return "\n".join(lines)

        elif format == "json":
            return json.dumps({t.name: {"value": t.value, "type": t.type} for t in tokens}, indent=2)

        elif format == "scss":
            return "\n".join([f"${t.name}: {t.value};" for t in tokens])

        elif format == "js":
            lines = ["export const tokens = {"]
            for t in tokens:
                safe_name = t.name.replace("-", "_")
                lines.append(f"  {safe_name}: '{t.value}',")
            lines.append("};")
            return "\n".join(lines)

        return ""

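    # Worked example (comments only, not part of the original file): Figma
    # returns color channels as floats in [0, 1], so a channel of 0.231
    # becomes int(0.231 * 255) = 58, and the value
    # {"r": 1, "g": 0, "b": 0, "a": 0.5} formats as "rgba(255, 0, 0, 0.5)".
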
    def _generate_webcomponent(self, comp: Dict[str, Any]) -> str:
        name = comp["name"]
        tag = f"ds-{name.lower()}"
        return f'''/**
 * {name} - Web Component
 * {comp.get("description", "")}
 *
 * Auto-generated from Figma
 */

class Ds{name} extends HTMLElement {{
  static get observedAttributes() {{
    return ['variant', 'size', 'disabled'];
  }}

  constructor() {{
    super();
    this.attachShadow({{ mode: 'open' }});
  }}

  connectedCallback() {{
    this.render();
  }}

  attributeChangedCallback() {{
    this.render();
  }}

  render() {{
    const variant = this.getAttribute('variant') || 'default';
    const size = this.getAttribute('size') || 'default';

    this.shadowRoot.innerHTML = `
      <style>
        @import '/admin-ui/css/tokens.css';
        :host {{
          display: inline-block;
        }}
        .{name.lower()} {{
          /* Component styles */
        }}
      </style>
      <div class="{name.lower()} {name.lower()}--${{variant}} {name.lower()}--${{size}}">
        <slot></slot>
      </div>
    `;
  }}
}}

customElements.define('{tag}', Ds{name});
export default Ds{name};
'''

    def _generate_react(self, comp: Dict[str, Any]) -> str:
        name = comp["name"]
        return f'''import React from 'react';
import styles from './{name}.module.css';

/**
 * {name} Component
 * {comp.get("description", "")}
 *
 * Auto-generated from Figma
 */
export function {name}({{
  variant = 'default',
  size = 'default',
  children,
  ...props
}}) {{
  return (
    <div
      className={{`${{styles.{name.lower()}}} ${{styles[variant]}} ${{styles[size]}}`}}
      {{...props}}
    >
      {{children}}
    </div>
  );
}}

export default {name};
'''

    def _generate_vue(self, comp: Dict[str, Any]) -> str:
        name = comp["name"]
        return f'''<template>
  <div :class="classes">
    <slot />
  </div>
</template>

<script setup>
/**
 * {name} Component
 * {comp.get("description", "")}
 *
 * Auto-generated from Figma
 */
import {{ computed }} from 'vue';

const props = defineProps({{
  variant: {{ type: String, default: 'default' }},
  size: {{ type: String, default: 'default' }}
}});

const classes = computed(() => [
  '{name.lower()}',
  `{name.lower()}--${{props.variant}}`,
  `{name.lower()}--${{props.size}}`
]);
</script>

<style scoped>
.{name.lower()} {{
  /* Component styles */
}}
</style>
'''

    def _get_extension(self, framework: str) -> str:
        # Fall back to "js" so unknown frameworks match the web-component default
        return {"webcomponent": "js", "react": "jsx", "vue": "vue"}.get(framework, "js")


# === MCP Tool Registration ===

def create_mcp_tools(mcp_instance):
    """Register all Figma tools with MCP server."""

    suite = FigmaToolSuite()

    @mcp_instance.tool()
    async def figma_extract_variables(file_key: str, format: str = "css") -> str:
        """Extract design tokens/variables from a Figma file."""
        result = await suite.extract_variables(file_key, format)
        return json.dumps(result, indent=2)

    @mcp_instance.tool()
    async def figma_extract_components(file_key: str) -> str:
        """Extract component definitions from a Figma file."""
        result = await suite.extract_components(file_key)
        return json.dumps(result, indent=2)

    @mcp_instance.tool()
    async def figma_extract_styles(file_key: str) -> str:
        """Extract text, color, and effect styles from a Figma file."""
        result = await suite.extract_styles(file_key)
        return json.dumps(result, indent=2)

    @mcp_instance.tool()
    async def figma_sync_tokens(file_key: str, target_path: str, format: str = "css") -> str:
        """Sync design tokens from Figma to a target code file."""
        result = await suite.sync_tokens(file_key, target_path, format)
        return json.dumps(result, indent=2)

    @mcp_instance.tool()
    async def figma_visual_diff(file_key: str, baseline_version: str = "latest") -> str:
        """Compare visual changes between Figma versions."""
        result = await suite.visual_diff(file_key, baseline_version)
        return json.dumps(result, indent=2)

    @mcp_instance.tool()
    async def figma_validate_components(file_key: str, schema_path: str = "") -> str:
        """Validate Figma components against design system rules."""
        result = await suite.validate_components(file_key, schema_path or None)
        return json.dumps(result, indent=2)

    @mcp_instance.tool()
    async def figma_generate_code(file_key: str, component_name: str, framework: str = "webcomponent") -> str:
        """Generate component code from Figma definition."""
        result = await suite.generate_code(file_key, component_name, framework)
        return json.dumps(result, indent=2)

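# Usage sketch (illustrative, not part of the original file): wiring the tools
# into a FastMCP server. The import path matches the mcp 1.x Python SDK but is
# an assumption here; adjust it to the server setup actually in use.
def _example_server():
    from mcp.server.fastmcp import FastMCP  # assumed import path

    server = FastMCP("figma-tools")  # placeholder server name
    create_mcp_tools(server)         # registers all seven figma_* tools
    return server
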
# For direct testing
if __name__ == "__main__":
    import asyncio

    async def test():
        suite = FigmaToolSuite(output_dir="./test_output")

        print("Testing Figma Tool Suite (Mock Mode)\n")

        # Test extract variables
        print("1. Extract Variables:")
        result = await suite.extract_variables("test_file_key", "css")
        print(f"   Tokens: {result['tokens_count']}")
        print(f"   Output: {result['output_path']}")

        # Test extract components
        print("\n2. Extract Components:")
        result = await suite.extract_components("test_file_key")
        print(f"   Components: {result['components_count']}")

        # Test extract styles
        print("\n3. Extract Styles:")
        result = await suite.extract_styles("test_file_key")
        print(f"   Styles: {result['styles_count']}")

        # Test validate
        print("\n4. Validate Components:")
        result = await suite.validate_components("test_file_key")
        print(f"   Valid: {result['valid']}")
        print(f"   Issues: {result['summary']}")

        # Test generate code
        print("\n5. Generate Code:")
        result = await suite.generate_code("test_file_key", "Button", "webcomponent")
        print(f"   Generated: {result['output_path']}")

        print("\nAll tests passed!")

    asyncio.run(test())
25
demo/tools/ingest/__init__.py
Normal file
@@ -0,0 +1,25 @@
"""
DSS Token Ingestion Module

Multi-source design token extraction and normalization.
Supports: Figma, CSS, SCSS, Tailwind, JSON/YAML, styled-components
"""

from .base import DesignToken, TokenSource, TokenCollection
from .css import CSSTokenSource
from .scss import SCSSTokenSource
from .tailwind import TailwindTokenSource
from .json_tokens import JSONTokenSource
from .merge import TokenMerger, MergeStrategy

__all__ = [
    'DesignToken',
    'TokenSource',
    'TokenCollection',
    'CSSTokenSource',
    'SCSSTokenSource',
    'TailwindTokenSource',
    'JSONTokenSource',
    'TokenMerger',
    'MergeStrategy',
]
462
demo/tools/ingest/base.py
Normal file
@@ -0,0 +1,462 @@
"""
Base classes for token ingestion.

Defines the DesignToken model following W3C Design Tokens format
and the TokenSource abstract class for all ingestors.
"""

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Set
import json
import re


class TokenType(str, Enum):
    """W3C Design Token types."""
    COLOR = "color"
    DIMENSION = "dimension"
    FONT_FAMILY = "fontFamily"
    FONT_WEIGHT = "fontWeight"
    FONT_SIZE = "fontSize"
    LINE_HEIGHT = "lineHeight"
    LETTER_SPACING = "letterSpacing"
    DURATION = "duration"
    CUBIC_BEZIER = "cubicBezier"
    NUMBER = "number"
    STRING = "string"
    SHADOW = "shadow"
    BORDER = "border"
    GRADIENT = "gradient"
    TRANSITION = "transition"
    COMPOSITE = "composite"
    UNKNOWN = "unknown"


class TokenCategory(str, Enum):
    """Token categories for organization."""
    COLORS = "colors"
    SPACING = "spacing"
    TYPOGRAPHY = "typography"
    SIZING = "sizing"
    BORDERS = "borders"
    SHADOWS = "shadows"
    EFFECTS = "effects"
    MOTION = "motion"
    BREAKPOINTS = "breakpoints"
    Z_INDEX = "z-index"
    OPACITY = "opacity"
    OTHER = "other"

@dataclass
class DesignToken:
    """
    W3C Design Token representation.

    Follows the W3C Design Tokens Community Group format with
    additional metadata for source tracking and enterprise use.
    """
    # Core properties (W3C spec)
    name: str                             # e.g., "color.primary.500"
    value: Any                            # e.g., "#3B82F6" or {"r": 59, "g": 130, "b": 246}
    type: TokenType = TokenType.UNKNOWN
    description: str = ""

    # Source attribution
    source: str = ""                      # e.g., "figma:abc123", "css:tokens.css:12"
    source_file: str = ""                 # Original file path
    source_line: int = 0                  # Line number in source
    original_name: str = ""               # Name before normalization
    original_value: str = ""              # Value before processing

    # Organization
    category: TokenCategory = TokenCategory.OTHER
    tags: List[str] = field(default_factory=list)
    group: str = ""                       # Logical grouping (e.g., "brand", "semantic")

    # State
    deprecated: bool = False
    deprecated_message: str = ""

    # Versioning
    version: str = "1.0.0"
    created_at: datetime = field(default_factory=datetime.now)
    updated_at: datetime = field(default_factory=datetime.now)

    # Extensions (for custom metadata)
    extensions: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        """Normalize and validate token after creation."""
        if not self.original_name:
            self.original_name = self.name
        if not self.original_value:
            self.original_value = str(self.value)

        # Auto-detect type if unknown
        if self.type == TokenType.UNKNOWN:
            self.type = self._detect_type()

        # Auto-detect category if other
        if self.category == TokenCategory.OTHER:
            self.category = self._detect_category()

    def _detect_type(self) -> TokenType:
        """Detect token type from value."""
        value_str = str(self.value).lower().strip()

        # Color patterns
        if re.match(r'^#[0-9a-f]{3,8}$', value_str):
            return TokenType.COLOR
        if re.match(r'^rgb[a]?\s*\(', value_str):
            return TokenType.COLOR
        if re.match(r'^hsl[a]?\s*\(', value_str):
            return TokenType.COLOR
        if value_str in ('transparent', 'currentcolor', 'inherit'):
            return TokenType.COLOR

        # Dimension patterns
        if re.match(r'^-?\d+(\.\d+)?(px|rem|em|%|vh|vw|ch|ex|vmin|vmax)$', value_str):
            return TokenType.DIMENSION

        # Duration patterns
        if re.match(r'^\d+(\.\d+)?(ms|s)$', value_str):
            return TokenType.DURATION

        # Number patterns
        if re.match(r'^-?\d+(\.\d+)?$', value_str):
            return TokenType.NUMBER

        # Font family (contains quotes or commas)
        if ',' in value_str or '"' in value_str or "'" in value_str:
            if 'sans' in value_str or 'serif' in value_str or 'mono' in value_str:
                return TokenType.FONT_FAMILY

        # Font weight
        if value_str in ('normal', 'bold', 'lighter', 'bolder') or \
                re.match(r'^[1-9]00$', value_str):
            return TokenType.FONT_WEIGHT

        # Shadow
        if 'shadow' in self.name.lower() or \
                re.match(r'^-?\d+.*\s+-?\d+.*\s+-?\d+', value_str):
            return TokenType.SHADOW

        return TokenType.STRING

    def _detect_category(self) -> TokenCategory:
        """Detect category from token name."""
        name_lower = self.name.lower()

        # Check name patterns
        patterns = {
            TokenCategory.COLORS: ['color', 'bg', 'background', 'text', 'border-color', 'fill', 'stroke'],
            TokenCategory.SPACING: ['space', 'spacing', 'gap', 'margin', 'padding', 'inset'],
            TokenCategory.TYPOGRAPHY: ['font', 'text', 'line-height', 'letter-spacing', 'typography'],
            TokenCategory.SIZING: ['size', 'width', 'height', 'min-', 'max-'],
            TokenCategory.BORDERS: ['border', 'radius', 'outline'],
            TokenCategory.SHADOWS: ['shadow', 'elevation'],
            TokenCategory.EFFECTS: ['blur', 'opacity', 'filter', 'backdrop'],
            TokenCategory.MOTION: ['transition', 'animation', 'duration', 'delay', 'timing', 'ease'],
            TokenCategory.BREAKPOINTS: ['breakpoint', 'screen', 'media'],
            TokenCategory.Z_INDEX: ['z-index', 'z-', 'layer'],
        }

        for category, keywords in patterns.items():
            if any(kw in name_lower for kw in keywords):
                return category

        # Check by type
        if self.type == TokenType.COLOR:
            return TokenCategory.COLORS
        if self.type in (TokenType.FONT_FAMILY, TokenType.FONT_WEIGHT, TokenType.FONT_SIZE, TokenType.LINE_HEIGHT):
            return TokenCategory.TYPOGRAPHY
        if self.type == TokenType.DURATION:
            return TokenCategory.MOTION
        if self.type == TokenType.SHADOW:
            return TokenCategory.SHADOWS

        return TokenCategory.OTHER

    def normalize_name(self, separator: str = ".") -> str:
        """
        Normalize token name to consistent format.

        Converts various formats to dot-notation:
        - kebab-case: color-primary-500 -> color.primary.500
        - snake_case: color_primary_500 -> color.primary.500
        - camelCase: colorPrimary -> color.primary
        """
        name = self.name

        # Handle camelCase
        name = re.sub(r'([a-z])([A-Z])', r'\1.\2', name)

        # Replace separators (including pre-existing dots, so a name like
        # "color.primary" renders correctly with separator="-")
        name = name.replace('.', separator)
        name = name.replace('-', separator)
        name = name.replace('_', separator)
        name = name.replace('/', separator)

        # Clean up multiple separators
        while separator * 2 in name:
            name = name.replace(separator * 2, separator)

        return name.lower().strip(separator)

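    # Worked examples (comments only, not part of the original file):
    # normalize_name(".") maps
    #   "color-primary-500"  -> "color.primary.500"
    #   "Color/Primary/500"  -> "color.primary.500"
    #   "colorPrimary"       -> "color.primary"
    # normalize_name("-") is what to_css_var_name() builds on below.
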
    def to_css_var_name(self) -> str:
        """Convert to CSS custom property name."""
        normalized = self.normalize_name("-")
        return f"--{normalized}"

    def to_scss_var_name(self) -> str:
        """Convert to SCSS variable name."""
        normalized = self.normalize_name("-")
        return f"${normalized}"

    def to_js_name(self) -> str:
        """Convert to JavaScript object key (camelCase)."""
        parts = self.normalize_name(".").split(".")
        if not parts:
            return ""
        result = parts[0]
        for part in parts[1:]:
            result += part.capitalize()
        return result

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary (W3C format)."""
        result = {
            "$value": self.value,
            "$type": self.type.value,
        }

        if self.description:
            result["$description"] = self.description

        # Copy extensions so adding DSS metadata does not mutate the token's own dict
        extensions = dict(self.extensions) if self.extensions else {}
        extensions["dss"] = {
            "source": self.source,
            "sourceFile": self.source_file,
            "sourceLine": self.source_line,
            "originalName": self.original_name,
            "category": self.category.value,
            "tags": self.tags,
            "deprecated": self.deprecated,
            "version": self.version,
        }
        result["$extensions"] = extensions

        return result

    def to_json(self) -> str:
        """Serialize to JSON."""
        return json.dumps(self.to_dict(), indent=2)

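# Minimal sketch (illustrative, not part of the original file): type and
# category auto-detection plus W3C serialization for a single token.
def _demo_design_token() -> str:
    token = DesignToken(name="color-primary-500", value="#3B82F6")
    assert token.type == TokenType.COLOR            # detected from the hex value
    assert token.category == TokenCategory.COLORS   # detected from the name
    assert token.to_css_var_name() == "--color-primary-500"
    return token.to_json()  # '{"$value": "#3B82F6", "$type": "color", ...}'
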
@dataclass
class TokenCollection:
    """
    Collection of design tokens with metadata.

    Represents a complete set of tokens from a single source or merged sources.
    """
    tokens: List[DesignToken] = field(default_factory=list)
    name: str = ""
    description: str = ""
    version: str = "1.0.0"
    sources: List[str] = field(default_factory=list)
    created_at: datetime = field(default_factory=datetime.now)

    def __len__(self) -> int:
        return len(self.tokens)

    def __iter__(self):
        return iter(self.tokens)

    def __getitem__(self, key):
        if isinstance(key, int):
            return self.tokens[key]
        # Allow access by token name
        for token in self.tokens:
            if token.name == key:
                return token
        raise KeyError(f"Token '{key}' not found")

    def add(self, token: DesignToken) -> None:
        """Add a token to the collection."""
        self.tokens.append(token)

    def get(self, name: str) -> Optional[DesignToken]:
        """Get token by name."""
        for token in self.tokens:
            if token.name == name:
                return token
        return None

    def filter_by_category(self, category: TokenCategory) -> 'TokenCollection':
        """Return new collection filtered by category."""
        filtered = [t for t in self.tokens if t.category == category]
        return TokenCollection(
            tokens=filtered,
            name=f"{self.name} ({category.value})",
            sources=self.sources,
        )

    def filter_by_type(self, token_type: TokenType) -> 'TokenCollection':
        """Return new collection filtered by type."""
        filtered = [t for t in self.tokens if t.type == token_type]
        return TokenCollection(
            tokens=filtered,
            name=f"{self.name} ({token_type.value})",
            sources=self.sources,
        )

    def filter_by_source(self, source: str) -> 'TokenCollection':
        """Return new collection filtered by source."""
        filtered = [t for t in self.tokens if source in t.source]
        return TokenCollection(
            tokens=filtered,
            name=f"{self.name} (from {source})",
            sources=[source],
        )

    def get_categories(self) -> Set[TokenCategory]:
        """Get all unique categories in collection."""
        return {t.category for t in self.tokens}

    def get_types(self) -> Set[TokenType]:
        """Get all unique types in collection."""
        return {t.type for t in self.tokens}

    def get_duplicates(self) -> Dict[str, List[DesignToken]]:
        """Find tokens with duplicate names."""
        seen: Dict[str, List[DesignToken]] = {}
        for token in self.tokens:
            if token.name not in seen:
                seen[token.name] = []
            seen[token.name].append(token)
        return {k: v for k, v in seen.items() if len(v) > 1}

    def to_css(self) -> str:
        """Export as CSS custom properties."""
        lines = [":root {"]
        for token in sorted(self.tokens, key=lambda t: t.name):
            var_name = token.to_css_var_name()
            if token.description:
                lines.append(f"  /* {token.description} */")
            lines.append(f"  {var_name}: {token.value};")
        lines.append("}")
        return "\n".join(lines)

    def to_scss(self) -> str:
        """Export as SCSS variables."""
        lines = []
        for token in sorted(self.tokens, key=lambda t: t.name):
            var_name = token.to_scss_var_name()
            if token.description:
                lines.append(f"// {token.description}")
            lines.append(f"{var_name}: {token.value};")
        return "\n".join(lines)

    def to_json(self) -> str:
        """Export as W3C Design Tokens JSON."""
        result = {}
        for token in self.tokens:
            parts = token.normalize_name().split(".")
            current = result
            for part in parts[:-1]:
                if part not in current:
                    current[part] = {}
                current = current[part]
            current[parts[-1]] = token.to_dict()
        return json.dumps(result, indent=2)

    def to_typescript(self) -> str:
        """Export as TypeScript constants."""
        lines = ["export const tokens = {"]
        for token in sorted(self.tokens, key=lambda t: t.name):
            js_name = token.to_js_name()
            value = f'"{token.value}"' if isinstance(token.value, str) else token.value
            if token.description:
                lines.append(f"  /** {token.description} */")
            lines.append(f"  {js_name}: {value},")
        lines.append("} as const;")
        lines.append("")
        lines.append("export type TokenKey = keyof typeof tokens;")
        return "\n".join(lines)

    def to_tailwind_config(self) -> str:
        """Export as Tailwind config extend object."""
        # Group tokens by category for Tailwind structure
        colors = self.filter_by_category(TokenCategory.COLORS)
        spacing = self.filter_by_category(TokenCategory.SPACING)

        lines = ["module.exports = {", "  theme: {", "    extend: {"]

        if colors.tokens:
            lines.append("      colors: {")
            for token in colors.tokens:
                name = token.name.replace("color.", "").replace("colors.", "")
                lines.append(f'        "{name}": "{token.value}",')
            lines.append("      },")

        if spacing.tokens:
            lines.append("      spacing: {")
            for token in spacing.tokens:
                name = token.name.replace("spacing.", "").replace("space.", "")
                lines.append(f'        "{name}": "{token.value}",')
            lines.append("      },")

        lines.extend(["    },", "  },", "};"])
        return "\n".join(lines)

    def summary(self) -> Dict[str, Any]:
        """Get collection summary."""
        return {
            "total_tokens": len(self.tokens),
            "categories": {cat.value: len(self.filter_by_category(cat))
                           for cat in self.get_categories()},
            "types": {t.value: len(self.filter_by_type(t))
                      for t in self.get_types()},
            "sources": self.sources,
            "duplicates": len(self.get_duplicates()),
        }

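# Minimal sketch (illustrative, not part of the original file): building a
# small collection and exporting it as CSS custom properties.
def _demo_collection() -> str:
    collection = TokenCollection(name="Demo")
    collection.add(DesignToken(name="color.primary", value="#3B82F6"))
    collection.add(DesignToken(name="spacing.md", value="16px"))
    return collection.to_css()
    # -> ":root {\n  --color-primary: #3B82F6;\n  --spacing-md: 16px;\n}"
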
class TokenSource(ABC):
    """
    Abstract base class for token sources.

    All token ingestors must implement this interface.
    """

    @property
    @abstractmethod
    def source_type(self) -> str:
        """Return source type identifier (e.g., 'css', 'scss', 'figma')."""
        pass

    @abstractmethod
    async def extract(self, source: str) -> TokenCollection:
        """
        Extract tokens from source.

        Args:
            source: File path, URL, or content depending on source type

        Returns:
            TokenCollection with extracted tokens
        """
        pass

    def _create_source_id(self, file_path: str, line: int = 0) -> str:
        """Create source identifier string."""
        if line:
            return f"{self.source_type}:{file_path}:{line}"
        return f"{self.source_type}:{file_path}"
282
demo/tools/ingest/css.py
Normal file
@@ -0,0 +1,282 @@
"""
CSS Token Source

Extracts design tokens from CSS custom properties (CSS variables).
Parses :root declarations and other CSS variable definitions.
"""

import re
from pathlib import Path
from typing import List, Optional, Tuple

from .base import DesignToken, TokenCollection, TokenSource, TokenType, TokenCategory


class CSSTokenSource(TokenSource):
    """
    Extract tokens from CSS files.

    Parses CSS custom properties defined in :root or other selectors.
    Supports:
    - :root { --color-primary: #3B82F6; }
    - [data-theme="dark"] { --color-primary: #60A5FA; }
    - Comments as descriptions
    """

    @property
    def source_type(self) -> str:
        return "css"

    async def extract(self, source: str) -> TokenCollection:
        """
        Extract tokens from CSS file or content.

        Args:
            source: File path or CSS content string

        Returns:
            TokenCollection with extracted tokens
        """
        # Determine if source is file path or content
        if self._is_file_path(source):
            file_path = Path(source)
            if not file_path.exists():
                raise FileNotFoundError(f"CSS file not found: {source}")
            content = file_path.read_text(encoding="utf-8")
            source_file = str(file_path.absolute())
        else:
            content = source
            source_file = "<inline>"

        tokens = self._parse_css(content, source_file)

        return TokenCollection(
            tokens=tokens,
            name=f"CSS Tokens from {Path(source_file).name if source_file != '<inline>' else 'inline'}",
            sources=[self._create_source_id(source_file)],
        )

    def _is_file_path(self, source: str) -> bool:
        """Check if source looks like a file path."""
        # If it contains CSS syntax, it's content
        if '{' in source or (':' in source and ';' in source):
            return False
        # If it ends with .css, it's a file
        if source.endswith('.css'):
            return True
        # If path exists, it's a file
        return Path(source).exists()

    def _parse_css(self, content: str, source_file: str) -> List[DesignToken]:
        """Parse CSS content and extract custom properties."""
        tokens = []

        # Track line numbers by character offset
        line_map = self._build_line_map(content)

        # Find all CSS variable declarations
        # Pattern matches: --var-name: value;
        var_pattern = re.compile(
            r'(\/\*[^*]*\*\/\s*)?'   # Optional preceding comment
            r'(--[\w-]+)\s*:\s*'     # Variable name
            r'([^;]+);',             # Value
            re.MULTILINE
        )

        # Find variables in all rule blocks
        for match in var_pattern.finditer(content):
            comment = match.group(1)
            var_name = match.group(2)
            var_value = match.group(3).strip()

            # Get line number
            pos = match.start()
            line_num = self._get_line_number(pos, line_map)

            # Extract description from comment
            description = ""
            if comment:
                description = self._clean_comment(comment)

            # Get context (selector)
            context = self._get_selector_context(content, pos)

            # Create token
            token = DesignToken(
                name=self._normalize_var_name(var_name),
                value=var_value,
                description=description,
                source=self._create_source_id(source_file, line_num),
                source_file=source_file,
                source_line=line_num,
                original_name=var_name,
                original_value=var_value,
            )

            # Add context as tag if not :root
            if context and context != ":root":
                token.tags.append(f"context:{context}")

            tokens.append(token)

        return tokens

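    # Worked example (comments only, not part of the original file): given
    #   :root { /* Brand blue */ --color-primary: #3B82F6; }
    # _parse_css yields one DesignToken with name "color.primary",
    # value "#3B82F6", description "Brand blue", and source_line 1.
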
    def _build_line_map(self, content: str) -> List[int]:
        """Build map of character positions to line numbers."""
        line_map = []
        pos = 0
        for line in content.split('\n'):
            line_map.append(pos)
            pos += len(line) + 1  # +1 for newline
        return line_map

    def _get_line_number(self, pos: int, line_map: List[int]) -> int:
        """Get line number for character position."""
        for i, line_start in enumerate(line_map):
            if i + 1 < len(line_map):
                if line_start <= pos < line_map[i + 1]:
                    return i + 1
            else:
                return i + 1
        return 1

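    # Design note (not in the original file): line_map is sorted, so the
    # linear scan above could be replaced by a binary search for large files:
    #   import bisect
    #   return bisect.bisect_right(line_map, pos)
    # bisect_right returns the count of line starts <= pos, which is exactly
    # the 1-based line number.
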
    def _normalize_var_name(self, var_name: str) -> str:
        """Convert CSS variable name to token name."""
        # Remove -- prefix
        name = var_name.lstrip('-')
        # Convert kebab-case to dot notation
        name = name.replace('-', '.')
        return name

    def _clean_comment(self, comment: str) -> str:
        """Extract text from CSS comment."""
        if not comment:
            return ""
        # Remove /* and */
        text = re.sub(r'/\*|\*/', '', comment)
        # Clean whitespace
        text = ' '.join(text.split())
        return text.strip()

    def _get_selector_context(self, content: str, pos: int) -> str:
        """Get the CSS selector context for a variable."""
        # Find the opening brace before this position
        before = content[:pos]
        last_open = before.rfind('{')
        if last_open == -1:
            return ""

        # Find the selector before the brace
        selector_part = before[:last_open]
        # Get last selector (after } or start)
        last_close = selector_part.rfind('}')
        if last_close != -1:
            selector_part = selector_part[last_close + 1:]

        # Clean up and collapse multi-line selectors
        selector = ' '.join(selector_part.strip().split())
        return selector


class CSSInlineExtractor:
    """
    Extract inline styles from HTML/JSX for token candidate identification.

    Finds style="" attributes and extracts values that could become tokens.
    """

    # Pattern for extracting HTML inline styles
    STYLE_ATTR_PATTERN = re.compile(
        r'style\s*=\s*["\']([^"\']+)["\']',
        re.IGNORECASE
    )

    # JSX style object pattern
    JSX_STYLE_PATTERN = re.compile(
        r'style\s*=\s*\{\{([^}]+)\}\}',
        re.MULTILINE
    )

    async def extract_candidates(self, source: str) -> List[Tuple[str, str, int]]:
        """
        Extract inline style values as token candidates.

        Returns list of (property, value, line_number) tuples.
        """
        candidates = []

        # Determine if file or content
        if Path(source).exists():
            content = Path(source).read_text(encoding="utf-8")
        else:
            content = source

        lines = content.split('\n')

        for i, line in enumerate(lines, 1):
            # Check HTML style attribute
            for match in self.STYLE_ATTR_PATTERN.finditer(line):
                style_content = match.group(1)
                for prop, value in self._parse_style_string(style_content):
                    if self._is_token_candidate(value):
                        candidates.append((prop, value, i))

            # Check JSX style object
            for match in self.JSX_STYLE_PATTERN.finditer(line):
                style_content = match.group(1)
                for prop, value in self._parse_jsx_style(style_content):
                    if self._is_token_candidate(value):
                        candidates.append((prop, value, i))

        return candidates

    def _parse_style_string(self, style: str) -> List[Tuple[str, str]]:
        """Parse CSS style string into property-value pairs."""
        pairs = []
        for declaration in style.split(';'):
            if ':' in declaration:
                prop, value = declaration.split(':', 1)
                pairs.append((prop.strip(), value.strip()))
        return pairs

    def _parse_jsx_style(self, style: str) -> List[Tuple[str, str]]:
        """Parse JSX style object into property-value pairs."""
        pairs = []
        # Simple parsing for common cases
        for part in style.split(','):
            if ':' in part:
                prop, value = part.split(':', 1)
                prop = prop.strip().strip('"\'')
                value = value.strip().strip('"\'')
                # Convert camelCase to kebab-case
                prop = re.sub(r'([a-z])([A-Z])', r'\1-\2', prop).lower()
                pairs.append((prop, value))
        return pairs

    def _is_token_candidate(self, value: str) -> bool:
        """Check if value should be extracted as a token."""
        value = value.strip().lower()

        # Skip variable references and CSS-wide keywords up front
        if value.startswith('var('):
            return False
        if value in ('inherit', 'initial', 'unset', 'auto', 'none'):
            return False

        # Colors are always candidates
        if re.match(r'^#[0-9a-f]{3,8}$', value):
            return True
        if re.match(r'^rgb[a]?\s*\(', value):
            return True
        if re.match(r'^hsl[a]?\s*\(', value):
            return True

        # Dimensions with common units
        if re.match(r'^\d+(\.\d+)?(px|rem|em|%)$', value):
            return True

        return False
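
# Usage sketch (illustrative, not part of the original file): pulling token
# candidates out of a JSX snippet. Literal colors and dimensions qualify;
# var() references and CSS-wide keywords are skipped.
async def _demo_inline_extraction() -> List[Tuple[str, str, int]]:
    snippet = '<div style={{color: "#3b82f6", padding: "16px"}} />'
    return await CSSInlineExtractor().extract_candidates(snippet)
    # -> [("color", "#3b82f6", 1), ("padding", "16px", 1)]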
432
demo/tools/ingest/json_tokens.py
Normal file
@@ -0,0 +1,432 @@
"""
JSON Token Source

Extracts design tokens from JSON/YAML files.
Supports W3C Design Tokens format and Style Dictionary format.
"""

import json
import re
from pathlib import Path
from typing import List, Dict, Any, Optional

from .base import DesignToken, TokenCollection, TokenSource, TokenType, TokenCategory


class JSONTokenSource(TokenSource):
    """
    Extract tokens from JSON/YAML token files.

    Supports:
    - W3C Design Tokens Community Group format
    - Style Dictionary format
    - Tokens Studio format
    - Figma Tokens plugin format
    - Generic nested JSON with $value
    """

    @property
    def source_type(self) -> str:
        return "json"

    async def extract(self, source: str) -> TokenCollection:
        """
        Extract tokens from JSON file or content.

        Args:
            source: File path or JSON content string

        Returns:
            TokenCollection with extracted tokens
        """
        if self._is_file_path(source):
            file_path = Path(source)
            if not file_path.exists():
                raise FileNotFoundError(f"Token file not found: {source}")
            content = file_path.read_text(encoding="utf-8")
            source_file = str(file_path.absolute())
        else:
            content = source
            source_file = "<inline>"

        # Parse JSON
        try:
            data = json.loads(content)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON: {e}")

        # Detect format and extract
        tokens = self._extract_tokens(data, source_file)

        return TokenCollection(
            tokens=tokens,
            name=f"JSON Tokens from {Path(source_file).name if source_file != '<inline>' else 'inline'}",
            sources=[self._create_source_id(source_file)],
        )

    def _is_file_path(self, source: str) -> bool:
        """Check if source looks like a file path."""
        if source.strip().startswith('{'):
            return False
        if source.endswith('.json') or source.endswith('.tokens.json'):
            return True
        return Path(source).exists()

    def _extract_tokens(self, data: Dict, source_file: str) -> List[DesignToken]:
        """Extract tokens from parsed JSON, dispatching on the detected format."""
        if self._is_w3c_format(data):
            return self._extract_w3c_tokens(data, source_file)
        # Check the explicit Tokens Studio fingerprint before the looser
        # Style Dictionary heuristic, which would also match its token sets.
        if self._is_tokens_studio_format(data):
            return self._extract_tokens_studio(data, source_file)
        if self._is_style_dictionary_format(data):
            return self._extract_style_dictionary_tokens(data, source_file)
        # Generic nested format
        return self._extract_nested_tokens(data, source_file)

    def _is_w3c_format(self, data: Dict) -> bool:
        """Check if data follows W3C Design Tokens format."""
        # W3C format uses $value and $type
        def check_node(node: Any) -> bool:
            if isinstance(node, dict):
                if '$value' in node:
                    return True
                return any(check_node(v) for v in node.values())
            return False
        return check_node(data)

    def _is_style_dictionary_format(self, data: Dict) -> bool:
        """Check if data follows Style Dictionary format."""
        # Style Dictionary uses 'value' without $
        def check_node(node: Any) -> bool:
            if isinstance(node, dict):
                if 'value' in node and '$value' not in node:
                    return True
                return any(check_node(v) for v in node.values())
            return False
        return check_node(data)

    def _is_tokens_studio_format(self, data: Dict) -> bool:
        """Check if data follows Tokens Studio format."""
        # Tokens Studio has specific structure with sets
        return '$themes' in data or '$metadata' in data

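    # Format fingerprints at a glance (comments only, not part of the original
    # file):
    #   W3C:              {"color": {"primary": {"$value": "#fff", "$type": "color"}}}
    #   Style Dictionary: {"color": {"primary": {"value": "#fff"}}}
    #   Tokens Studio:    {"global": {...}, "$themes": [...], "$metadata": {...}}
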
    def _extract_w3c_tokens(
        self,
        data: Dict,
        source_file: str,
        prefix: str = ""
    ) -> List[DesignToken]:
        """Extract tokens in W3C Design Tokens format."""
        tokens = []

        for key, value in data.items():
            # Skip metadata keys
            if key.startswith('$'):
                continue

            current_path = f"{prefix}.{key}" if prefix else key

            if isinstance(value, dict):
                if '$value' in value:
                    # This is a token
                    token = self._create_w3c_token(
                        current_path, value, source_file
                    )
                    tokens.append(token)
                else:
                    # Nested group
                    tokens.extend(
                        self._extract_w3c_tokens(value, source_file, current_path)
                    )

        return tokens

    def _create_w3c_token(
        self,
        name: str,
        data: Dict,
        source_file: str
    ) -> DesignToken:
        """Create token from W3C format node."""
        value = data.get('$value')
        token_type = self._parse_w3c_type(data.get('$type', ''))
        description = data.get('$description', '')

        # Handle aliases/references
        if isinstance(value, str) and value.startswith('{') and value.endswith('}'):
            # This is a reference like {colors.primary}; keep it as-is for now
            pass

        # Get extensions
        extensions = {}
        if '$extensions' in data:
            extensions = data['$extensions']

        token = DesignToken(
            name=name,
            value=value,
            type=token_type,
            description=description,
            source=self._create_source_id(source_file),
            source_file=source_file,
            extensions=extensions,
        )

        # Check for deprecated
        if extensions.get('deprecated'):
            token.deprecated = True
            token.deprecated_message = extensions.get('deprecatedMessage', '')

        return token

    def _parse_w3c_type(self, type_str: str) -> TokenType:
        """Convert W3C type string to TokenType."""
        type_map = {
            'color': TokenType.COLOR,
            'dimension': TokenType.DIMENSION,
            'fontFamily': TokenType.FONT_FAMILY,
            'fontWeight': TokenType.FONT_WEIGHT,
            'duration': TokenType.DURATION,
            'cubicBezier': TokenType.CUBIC_BEZIER,
            'number': TokenType.NUMBER,
            'shadow': TokenType.SHADOW,
            'border': TokenType.BORDER,
            'gradient': TokenType.GRADIENT,
            'transition': TokenType.TRANSITION,
        }
        return type_map.get(type_str, TokenType.UNKNOWN)

    def _extract_style_dictionary_tokens(
        self,
        data: Dict,
        source_file: str,
        prefix: str = ""
    ) -> List[DesignToken]:
        """Extract tokens in Style Dictionary format."""
        tokens = []

        for key, value in data.items():
            current_path = f"{prefix}.{key}" if prefix else key

            if isinstance(value, dict):
                if 'value' in value:
                    # This is a token
                    token = DesignToken(
                        name=current_path,
                        value=value['value'],
                        description=value.get('comment', value.get('description', '')),
                        source=self._create_source_id(source_file),
                        source_file=source_file,
                    )

                    # Handle attributes
                    if 'attributes' in value:
                        attrs = value['attributes']
                        if 'category' in attrs:
                            token.tags.append(f"category:{attrs['category']}")

                    token.tags.append("style-dictionary")
                    tokens.append(token)
                else:
                    # Nested group
                    tokens.extend(
                        self._extract_style_dictionary_tokens(
                            value, source_file, current_path
                        )
                    )

        return tokens

    def _extract_tokens_studio(
        self,
        data: Dict,
        source_file: str
    ) -> List[DesignToken]:
        """Extract tokens from Tokens Studio format."""
        tokens = []

        # Tokens Studio has token sets as top-level keys; skip metadata keys
        for set_name, set_data in data.items():
            if set_name.startswith('$'):
                continue

            if isinstance(set_data, dict):
                set_tokens = self._extract_tokens_studio_set(
                    set_data, source_file, set_name
                )
                for token in set_tokens:
                    token.group = set_name
                tokens.extend(set_tokens)

        return tokens

    def _extract_tokens_studio_set(
        self,
        data: Dict,
        source_file: str,
        prefix: str = ""
    ) -> List[DesignToken]:
        """Extract tokens from a Tokens Studio set."""
        tokens = []

        for key, value in data.items():
            current_path = f"{prefix}.{key}" if prefix else key

            if isinstance(value, dict):
                if 'value' in value and 'type' in value:
                    # This is a token
                    token = DesignToken(
                        name=current_path,
                        value=value['value'],
                        type=self._parse_tokens_studio_type(value.get('type', '')),
                        description=value.get('description', ''),
                        source=self._create_source_id(source_file),
                        source_file=source_file,
                    )
                    token.tags.append("tokens-studio")
                    tokens.append(token)
                else:
                    # Nested group
                    tokens.extend(
                        self._extract_tokens_studio_set(
                            value, source_file, current_path
                        )
                    )

        return tokens

    def _parse_tokens_studio_type(self, type_str: str) -> TokenType:
        """Convert Tokens Studio type to TokenType."""
        type_map = {
            'color': TokenType.COLOR,
            'sizing': TokenType.DIMENSION,
            'spacing': TokenType.DIMENSION,
            'borderRadius': TokenType.DIMENSION,
            'borderWidth': TokenType.DIMENSION,
            'fontFamilies': TokenType.FONT_FAMILY,
            'fontWeights': TokenType.FONT_WEIGHT,
            'fontSizes': TokenType.FONT_SIZE,
            'lineHeights': TokenType.LINE_HEIGHT,
            'letterSpacing': TokenType.LETTER_SPACING,
            'paragraphSpacing': TokenType.DIMENSION,
            'boxShadow': TokenType.SHADOW,
            'opacity': TokenType.NUMBER,
            'dimension': TokenType.DIMENSION,
            'text': TokenType.STRING,
            'other': TokenType.STRING,
        }
        return type_map.get(type_str, TokenType.UNKNOWN)

    def _extract_nested_tokens(
        self,
        data: Dict,
        source_file: str,
        prefix: str = ""
    ) -> List[DesignToken]:
        """Extract tokens from generic nested JSON."""
        tokens = []

        for key, value in data.items():
            current_path = f"{prefix}.{key}" if prefix else key

            if isinstance(value, dict):
                # Check if this looks like a token (small object with a 'value')
                has_nested = any(isinstance(v, dict) for v in value.values())

                if not has_nested and len(value) <= 3 and 'value' in value:
                    # Simple token object
                    tokens.append(DesignToken(
                        name=current_path,
                        value=value['value'],
                        source=self._create_source_id(source_file),
                        source_file=source_file,
                    ))
                else:
                    # Recurse into nested object
                    tokens.extend(
                        self._extract_nested_tokens(value, source_file, current_path)
                    )

            elif isinstance(value, (str, int, float, bool)):
                # Simple value - treat as token
                tokens.append(DesignToken(
                    name=current_path,
                    value=value,
                    source=self._create_source_id(source_file),
                    source_file=source_file,
                ))

        return tokens


class TokenExporter:
|
||||
"""
|
||||
Export tokens to various JSON formats.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def to_w3c(collection: TokenCollection) -> str:
|
||||
"""Export to W3C Design Tokens format."""
|
||||
result = {}
|
||||
|
||||
for token in collection.tokens:
|
||||
parts = token.normalize_name().split('.')
|
||||
current = result
|
||||
|
||||
for part in parts[:-1]:
|
||||
if part not in current:
|
||||
current[part] = {}
|
||||
current = current[part]
|
||||
|
||||
current[parts[-1]] = {
|
||||
"$value": token.value,
|
||||
"$type": token.type.value,
|
||||
}
|
||||
|
||||
if token.description:
|
||||
current[parts[-1]]["$description"] = token.description
|
||||
|
||||
return json.dumps(result, indent=2)
|
||||
|
||||
@staticmethod
|
||||
def to_style_dictionary(collection: TokenCollection) -> str:
|
||||
"""Export to Style Dictionary format."""
|
||||
result = {}
|
||||
|
||||
for token in collection.tokens:
|
||||
parts = token.normalize_name().split('.')
|
||||
current = result
|
||||
|
||||
for part in parts[:-1]:
|
||||
if part not in current:
|
||||
current[part] = {}
|
||||
current = current[part]
|
||||
|
||||
current[parts[-1]] = {
|
||||
"value": token.value,
|
||||
}
|
||||
|
||||
if token.description:
|
||||
current[parts[-1]]["comment"] = token.description
|
||||
|
||||
return json.dumps(result, indent=2)
|
||||
|
||||
@staticmethod
|
||||
def to_flat(collection: TokenCollection) -> str:
|
||||
"""Export to flat JSON object."""
|
||||
result = {}
|
||||
for token in collection.tokens:
|
||||
result[token.name] = token.value
|
||||
return json.dumps(result, indent=2)
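
# Illustrative sketch (not part of the original commit): exporting one
# collection to each supported format. Assumes the collection was built by an
# ingest source in this package.
def _demo_export(collection: TokenCollection) -> None:
    print(TokenExporter.to_w3c(collection))               # {"color": {"primary": {"$value": ..., "$type": ...}}}
    print(TokenExporter.to_style_dictionary(collection))  # {"color": {"primary": {"value": ...}}}
    print(TokenExporter.to_flat(collection))              # {"color.primary": ...}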
447
demo/tools/ingest/merge.py
Normal file
@@ -0,0 +1,447 @@
"""
Token Merge Module

Merge tokens from multiple sources with conflict resolution strategies.
"""

from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import List, Dict, Optional, Callable, Tuple
from .base import DesignToken, TokenCollection, TokenCategory


class MergeStrategy(str, Enum):
    """Token merge conflict resolution strategies."""

    # Simple strategies
    FIRST = "first"  # Keep first occurrence
    LAST = "last"    # Keep last occurrence (override)
    ERROR = "error"  # Raise error on conflict

    # Value-based strategies
    PREFER_FIGMA = "prefer_figma"        # Prefer Figma source
    PREFER_CODE = "prefer_code"          # Prefer code sources (CSS, SCSS)
    PREFER_SPECIFIC = "prefer_specific"  # Prefer more specific values

    # Smart strategies
    MERGE_METADATA = "merge_metadata"  # Merge metadata, keep latest value
    INTERACTIVE = "interactive"        # Require user decision


@dataclass
class MergeConflict:
    """Represents a token name conflict during merge."""
    token_name: str
    existing: DesignToken
    incoming: DesignToken
    resolution: Optional[str] = None
    resolved_token: Optional[DesignToken] = None


@dataclass
class MergeResult:
    """Result of a token merge operation."""
    collection: TokenCollection
    conflicts: List[MergeConflict] = field(default_factory=list)
    stats: Dict[str, int] = field(default_factory=dict)
    warnings: List[str] = field(default_factory=list)

    def __post_init__(self):
        if not self.stats:
            self.stats = {
                "total_tokens": 0,
                "new_tokens": 0,
                "updated_tokens": 0,
                "conflicts_resolved": 0,
                "conflicts_unresolved": 0,
            }


class TokenMerger:
    """
    Merge multiple token collections with conflict resolution.

    Usage:
        merger = TokenMerger(strategy=MergeStrategy.LAST)
        result = merger.merge([collection1, collection2, collection3])
    """

    # Source priority for PREFER_* strategies
    SOURCE_PRIORITY = {
        "figma": 100,
        "css": 80,
        "scss": 80,
        "tailwind": 70,
        "json": 60,
    }

    def __init__(
        self,
        strategy: MergeStrategy = MergeStrategy.LAST,
        custom_resolver: Optional[Callable[[MergeConflict], DesignToken]] = None
    ):
        """
        Initialize merger.

        Args:
            strategy: Default conflict resolution strategy
            custom_resolver: Optional custom conflict resolver function
        """
        self.strategy = strategy
        self.custom_resolver = custom_resolver

    def merge(
        self,
        collections: List[TokenCollection],
        normalize_names: bool = True
    ) -> MergeResult:
        """
        Merge multiple token collections.

        Args:
            collections: List of TokenCollections to merge
            normalize_names: Whether to normalize token names before merging

        Returns:
            MergeResult with merged collection and conflict information
        """
        result = MergeResult(
            collection=TokenCollection(
                name="Merged Tokens",
                sources=[],
            )
        )

        # Track tokens by normalized name
        tokens_by_name: Dict[str, DesignToken] = {}

        for collection in collections:
            result.collection.sources.extend(collection.sources)

            for token in collection.tokens:
                # Normalize name if requested
                name = token.normalize_name() if normalize_names else token.name

                if name in tokens_by_name:
                    # Conflict detected
                    existing = tokens_by_name[name]
                    conflict = MergeConflict(
                        token_name=name,
                        existing=existing,
                        incoming=token,
                    )

                    # Resolve conflict
                    resolved = self._resolve_conflict(conflict)
                    conflict.resolved_token = resolved

                    if resolved:
                        tokens_by_name[name] = resolved
                        result.stats["conflicts_resolved"] += 1
                        result.stats["updated_tokens"] += 1
                    else:
                        result.stats["conflicts_unresolved"] += 1
                        result.warnings.append(
                            f"Unresolved conflict for token: {name}"
                        )

                    result.conflicts.append(conflict)
                else:
                    # New token
                    tokens_by_name[name] = token
                    result.stats["new_tokens"] += 1

        # Build final collection
        result.collection.tokens = list(tokens_by_name.values())
        result.stats["total_tokens"] = len(result.collection.tokens)

        return result

    def _resolve_conflict(self, conflict: MergeConflict) -> Optional[DesignToken]:
        """Resolve a single conflict based on strategy."""

        # Try custom resolver first
        if self.custom_resolver:
            return self.custom_resolver(conflict)

        # Apply strategy
        if self.strategy == MergeStrategy.FIRST:
            conflict.resolution = "kept_first"
            return conflict.existing

        elif self.strategy == MergeStrategy.LAST:
            conflict.resolution = "used_last"
            return self._update_token(conflict.incoming, conflict.existing)

        elif self.strategy == MergeStrategy.ERROR:
            conflict.resolution = "error"
            raise ValueError(
                f"Token conflict: {conflict.token_name} "
                f"(existing: {conflict.existing.source}, "
                f"incoming: {conflict.incoming.source})"
            )

        elif self.strategy == MergeStrategy.PREFER_FIGMA:
            return self._prefer_source(conflict, "figma")

        elif self.strategy == MergeStrategy.PREFER_CODE:
            return self._prefer_code_source(conflict)

        elif self.strategy == MergeStrategy.PREFER_SPECIFIC:
            return self._prefer_specific_value(conflict)

        elif self.strategy == MergeStrategy.MERGE_METADATA:
            return self._merge_metadata(conflict)

        elif self.strategy == MergeStrategy.INTERACTIVE:
            # For interactive, we can't resolve automatically
            conflict.resolution = "needs_input"
            return None

        return conflict.incoming

    def _update_token(
        self,
        source: DesignToken,
        base: DesignToken
    ) -> DesignToken:
        """Create updated token preserving some base metadata."""
        # Create new token with source's value but enhanced metadata
        updated = DesignToken(
            name=source.name,
            value=source.value,
            type=source.type,
            description=source.description or base.description,
            source=source.source,
            source_file=source.source_file,
            source_line=source.source_line,
            original_name=source.original_name,
            original_value=source.original_value,
            category=source.category,
            tags=list(set(source.tags + base.tags)),
            deprecated=source.deprecated or base.deprecated,
            deprecated_message=source.deprecated_message or base.deprecated_message,
            version=source.version,
            updated_at=datetime.now(),
            extensions={**base.extensions, **source.extensions},
        )
        return updated

    def _prefer_source(
        self,
        conflict: MergeConflict,
        preferred_source: str
    ) -> DesignToken:
        """Prefer token from specific source type."""
        existing_source = conflict.existing.source.split(':')[0]
        incoming_source = conflict.incoming.source.split(':')[0]

        if incoming_source == preferred_source:
            conflict.resolution = f"preferred_{preferred_source}"
            return self._update_token(conflict.incoming, conflict.existing)
        elif existing_source == preferred_source:
            conflict.resolution = f"kept_{preferred_source}"
            return conflict.existing
        else:
            # Neither is preferred, use last
            conflict.resolution = "fallback_last"
            return self._update_token(conflict.incoming, conflict.existing)

    def _prefer_code_source(self, conflict: MergeConflict) -> DesignToken:
        """Prefer code sources (CSS, SCSS) over design sources."""
        code_sources = {"css", "scss", "tailwind"}

        existing_source = conflict.existing.source.split(':')[0]
        incoming_source = conflict.incoming.source.split(':')[0]

        existing_is_code = existing_source in code_sources
        incoming_is_code = incoming_source in code_sources

        if incominging_is_code := False:
            pass  # placeholder removed below
        if incoming_is_code and not existing_is_code:
            conflict.resolution = "preferred_code"
            return self._update_token(conflict.incoming, conflict.existing)
        elif existing_is_code and not incoming_is_code:
            conflict.resolution = "kept_code"
            return conflict.existing
        else:
            # Both or neither are code, use priority
            return self._prefer_by_priority(conflict)

    def _prefer_by_priority(self, conflict: MergeConflict) -> DesignToken:
        """Choose based on source priority."""
        existing_source = conflict.existing.source.split(':')[0]
        incoming_source = conflict.incoming.source.split(':')[0]

        existing_priority = self.SOURCE_PRIORITY.get(existing_source, 0)
        incoming_priority = self.SOURCE_PRIORITY.get(incoming_source, 0)

        if incoming_priority > existing_priority:
            conflict.resolution = "higher_priority"
            return self._update_token(conflict.incoming, conflict.existing)
        else:
            conflict.resolution = "kept_priority"
            return conflict.existing

    def _prefer_specific_value(self, conflict: MergeConflict) -> DesignToken:
        """Prefer more specific/concrete values."""
        existing_value = str(conflict.existing.value).lower()
        incoming_value = str(conflict.incoming.value).lower()

        # Prefer concrete values over variables/references
        existing_is_var = existing_value.startswith('var(') or \
                          existing_value.startswith('$') or \
                          existing_value.startswith('{')
        incoming_is_var = incoming_value.startswith('var(') or \
                          incoming_value.startswith('$') or \
                          incoming_value.startswith('{')

        if incoming_is_var and not existing_is_var:
            conflict.resolution = "kept_concrete"
            return conflict.existing
        elif existing_is_var and not incoming_is_var:
            conflict.resolution = "preferred_concrete"
            return self._update_token(conflict.incoming, conflict.existing)

        # Prefer hex colors over named colors
        existing_is_hex = existing_value.startswith('#')
        incoming_is_hex = incoming_value.startswith('#')

        if incoming_is_hex and not existing_is_hex:
            conflict.resolution = "preferred_hex"
            return self._update_token(conflict.incoming, conflict.existing)
        elif existing_is_hex and not incoming_is_hex:
            conflict.resolution = "kept_hex"
            return conflict.existing

        # Default to last
        conflict.resolution = "fallback_last"
        return self._update_token(conflict.incoming, conflict.existing)

    def _merge_metadata(self, conflict: MergeConflict) -> DesignToken:
        """Merge metadata from both tokens, keep latest value."""
        conflict.resolution = "merged_metadata"

        # Use incoming value but merge all metadata
        merged_tags = list(set(
            conflict.existing.tags + conflict.incoming.tags
        ))

        merged_extensions = {
            **conflict.existing.extensions,
            **conflict.incoming.extensions
        }

        # Track both sources
        merged_extensions['dss'] = merged_extensions.get('dss', {})
        merged_extensions['dss']['previousSources'] = [
            conflict.existing.source,
            conflict.incoming.source
        ]

        return DesignToken(
            name=conflict.incoming.name,
            value=conflict.incoming.value,
            type=conflict.incoming.type or conflict.existing.type,
            description=conflict.incoming.description or conflict.existing.description,
            source=conflict.incoming.source,
            source_file=conflict.incoming.source_file,
            source_line=conflict.incoming.source_line,
            original_name=conflict.incoming.original_name,
            original_value=conflict.incoming.original_value,
            category=conflict.incoming.category or conflict.existing.category,
            tags=merged_tags,
            deprecated=conflict.incoming.deprecated or conflict.existing.deprecated,
            deprecated_message=conflict.incoming.deprecated_message or conflict.existing.deprecated_message,
            version=conflict.incoming.version,
            updated_at=datetime.now(),
            extensions=merged_extensions,
        )
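
# Illustrative sketch (not part of the original commit): merging a Figma
# collection over a CSS collection. Assumes both TokenCollections were built
# by the ingest sources in this package.
def _demo_merge(css_tokens: TokenCollection, figma_tokens: TokenCollection) -> None:
    merger = TokenMerger(strategy=MergeStrategy.PREFER_FIGMA)
    result = merger.merge([css_tokens, figma_tokens])
    print(result.stats)  # e.g. {"total_tokens": 120, "conflicts_resolved": 8, ...}
    for conflict in result.conflicts:
        print(conflict.token_name, conflict.resolution)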


class TokenDiff:
    """
    Compare two token collections and find differences.
    """

    @staticmethod
    def diff(
        source: TokenCollection,
        target: TokenCollection
    ) -> Dict[str, List]:
        """
        Compare two token collections.

        Returns:
            Dict with 'added', 'removed', 'changed', 'unchanged' lists
        """
        source_by_name = {t.normalize_name(): t for t in source.tokens}
        target_by_name = {t.normalize_name(): t for t in target.tokens}

        source_names = set(source_by_name.keys())
        target_names = set(target_by_name.keys())

        result = {
            'added': [],      # In target but not source
            'removed': [],    # In source but not target
            'changed': [],    # In both but different value
            'unchanged': [],  # In both with same value
        }

        # Find added (in target, not in source)
        for name in target_names - source_names:
            result['added'].append(target_by_name[name])

        # Find removed (in source, not in target)
        for name in source_names - target_names:
            result['removed'].append(source_by_name[name])

        # Find changed/unchanged (in both)
        for name in source_names & target_names:
            source_token = source_by_name[name]
            target_token = target_by_name[name]

            if str(source_token.value) != str(target_token.value):
                result['changed'].append({
                    'name': name,
                    'old_value': source_token.value,
                    'new_value': target_token.value,
                    'source_token': source_token,
                    'target_token': target_token,
                })
            else:
                result['unchanged'].append(source_token)

        return result

    @staticmethod
    def summary(diff_result: Dict[str, List]) -> str:
        """Generate human-readable diff summary."""
        lines = ["Token Diff Summary:", "=" * 40]

        if diff_result['added']:
            lines.append(f"\n+ Added ({len(diff_result['added'])}):")
            for token in diff_result['added'][:10]:
                lines.append(f"  + {token.name}: {token.value}")
            if len(diff_result['added']) > 10:
                lines.append(f"  ... and {len(diff_result['added']) - 10} more")

        if diff_result['removed']:
            lines.append(f"\n- Removed ({len(diff_result['removed'])}):")
            for token in diff_result['removed'][:10]:
                lines.append(f"  - {token.name}: {token.value}")
            if len(diff_result['removed']) > 10:
                lines.append(f"  ... and {len(diff_result['removed']) - 10} more")

        if diff_result['changed']:
            lines.append(f"\n~ Changed ({len(diff_result['changed'])}):")
            for change in diff_result['changed'][:10]:
                lines.append(
                    f"  ~ {change['name']}: {change['old_value']} → {change['new_value']}"
                )
            if len(diff_result['changed']) > 10:
                lines.append(f"  ... and {len(diff_result['changed']) - 10} more")

        lines.append(f"\n  Unchanged: {len(diff_result['unchanged'])}")

        return "\n".join(lines)
289
demo/tools/ingest/scss.py
Normal file
@@ -0,0 +1,289 @@
"""
SCSS Token Source

Extracts design tokens from SCSS/Sass variables.
Supports $variable declarations and @use module variables.
"""

import re
from pathlib import Path
from typing import List, Dict, Optional, Set
from .base import DesignToken, TokenCollection, TokenSource


class SCSSTokenSource(TokenSource):
    """
    Extract tokens from SCSS/Sass files.

    Parses:
    - $variable: value;
    - $variable: value !default;
    - // Comment descriptions
    - @use module variables
    - Maps: $colors: (primary: #3B82F6, secondary: #10B981);
    """

    @property
    def source_type(self) -> str:
        return "scss"

    async def extract(self, source: str) -> TokenCollection:
        """
        Extract tokens from SCSS file or content.

        Args:
            source: File path or SCSS content string

        Returns:
            TokenCollection with extracted tokens
        """
        if self._is_file_path(source):
            file_path = Path(source)
            if not file_path.exists():
                raise FileNotFoundError(f"SCSS file not found: {source}")
            content = file_path.read_text(encoding="utf-8")
            source_file = str(file_path.absolute())
        else:
            content = source
            source_file = "<inline>"

        tokens = []

        # Extract simple variables
        tokens.extend(self._parse_variables(content, source_file))

        # Extract map variables
        tokens.extend(self._parse_maps(content, source_file))

        return TokenCollection(
            tokens=tokens,
            name=f"SCSS Tokens from {Path(source_file).name if source_file != '<inline>' else 'inline'}",
            sources=[self._create_source_id(source_file)],
        )

    def _is_file_path(self, source: str) -> bool:
        """Check if source looks like a file path."""
        if '$' in source and ':' in source:
            return False
        if source.endswith('.scss') or source.endswith('.sass'):
            return True
        return Path(source).exists()

    def _parse_variables(self, content: str, source_file: str) -> List[DesignToken]:
        """Parse simple $variable declarations."""
        tokens = []
        lines = content.split('\n')

        # Pattern for variable declarations
        var_pattern = re.compile(
            r'^\s*'
            r'(\$[\w-]+)\s*:\s*'  # Variable name
            r'([^;!]+)'           # Value
            r'(\s*!default)?'     # Optional !default
            r'\s*;',
            re.MULTILINE
        )

        # Track comments for descriptions
        prev_comment = ""

        for i, line in enumerate(lines, 1):
            # Check for comment
            comment_match = re.match(r'^\s*//\s*(.+)$', line)
            if comment_match:
                prev_comment = comment_match.group(1).strip()
                continue

            # Check for variable
            var_match = var_pattern.match(line)
            if var_match:
                var_name = var_match.group(1)
                var_value = var_match.group(2).strip()
                is_default = bool(var_match.group(3))

                # Skip if value is a map (handled separately)
                if var_value.startswith('(') and var_value.endswith(')'):
                    prev_comment = ""
                    continue

                # Values that reference another variable ($foo: $bar;) are kept
                # verbatim; SCSSVariableResolver below can resolve them later.

                token = DesignToken(
                    name=self._normalize_var_name(var_name),
                    value=self._process_value(var_value),
                    description=prev_comment,
                    source=self._create_source_id(source_file, i),
                    source_file=source_file,
                    source_line=i,
                    original_name=var_name,
                    original_value=var_value,
                )

                if is_default:
                    token.tags.append("default")

                tokens.append(token)
                prev_comment = ""
            else:
                # Reset comment if line doesn't match
                if line.strip() and not line.strip().startswith('//'):
                    prev_comment = ""

        return tokens

    def _parse_maps(self, content: str, source_file: str) -> List[DesignToken]:
        """Parse SCSS map declarations."""
        tokens = []

        # Pattern for map declarations (handles multi-line)
        map_pattern = re.compile(
            r'\$(\w[\w-]*)\s*:\s*\(([\s\S]*?)\)\s*;',
            re.MULTILINE
        )

        for match in map_pattern.finditer(content):
            map_name = match.group(1)
            map_content = match.group(2)

            # Get line number
            line_num = content[:match.start()].count('\n') + 1

            # Parse map entries
            entries = self._parse_map_entries(map_content)

            for key, value in entries.items():
                token = DesignToken(
                    name=f"{self._normalize_var_name('$' + map_name)}.{key}",
                    value=self._process_value(value),
                    source=self._create_source_id(source_file, line_num),
                    source_file=source_file,
                    source_line=line_num,
                    original_name=f"${map_name}.{key}",
                    original_value=value,
                )
                token.tags.append("from-map")
                tokens.append(token)

        return tokens

    def _parse_map_entries(self, map_content: str) -> Dict[str, str]:
        """Parse entries from a SCSS map."""
        entries = {}

        # Handle nested maps and simple key-value pairs
        # This is a simplified parser for common cases

        # Remove comments
        map_content = re.sub(r'//[^\n]*', '', map_content)

        # Split by comma (not inside parentheses)
        depth = 0
        current = ""
        parts = []

        for char in map_content:
            if char == '(':
                depth += 1
                current += char
            elif char == ')':
                depth -= 1
                current += char
            elif char == ',' and depth == 0:
                parts.append(current.strip())
                current = ""
            else:
                current += char

        if current.strip():
            parts.append(current.strip())

        # Parse each part
        for part in parts:
            if ':' in part:
                key, value = part.split(':', 1)
                key = key.strip().strip('"\'')
                value = value.strip()
                entries[key] = value

        return entries

    def _normalize_var_name(self, var_name: str) -> str:
        """Convert SCSS variable name to token name."""
        # Remove $ prefix
        name = var_name.lstrip('$')
        # Convert kebab-case and underscores to dots
        name = re.sub(r'[-_]', '.', name)
        return name.lower()

    def _process_value(self, value: str) -> str:
        """Process SCSS value for token storage."""
        value = value.strip()

        # Handle function calls (keep as-is for now)
        if '(' in value and ')' in value:
            return value

        # Handle quotes
        if (value.startswith('"') and value.endswith('"')) or \
           (value.startswith("'") and value.endswith("'")):
            return value[1:-1]

        return value
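
# Illustrative sketch (not part of the original commit): extracting tokens
# from inline SCSS. extract() is async, so the demo drives it with asyncio;
# assumes TokenSource imposes no further abstract methods.
def _demo_scss_extract():
    import asyncio
    scss = "// Brand color\n$brand-primary: #3B82F6;\n$space: (sm: 4px, md: 8px);"
    collection = asyncio.run(SCSSTokenSource().extract(scss))
    return [t.name for t in collection.tokens]  # ["brand.primary", "space.sm", "space.md"]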


class SCSSVariableResolver:
    """
    Resolve SCSS variable references.

    Builds a dependency graph and resolves $var references to actual values.
    """

    def __init__(self):
        self.variables: Dict[str, str] = {}
        self.resolved: Dict[str, str] = {}
        # Names currently being resolved; guards against circular references
        # (e.g. $a: $b; $b: $a;) which would otherwise recurse without bound.
        self._resolving: Set[str] = set()

    def add_variable(self, name: str, value: str) -> None:
        """Add a variable to the resolver."""
        self.variables[name] = value

    def resolve(self, name: str) -> Optional[str]:
        """Resolve a variable to its final value."""
        if name in self.resolved:
            return self.resolved[name]

        # A name already in flight means a circular reference; bail out.
        if name in self._resolving:
            return None

        value = self.variables.get(name)
        if not value:
            return None

        self._resolving.add(name)
        try:
            # Check if value references other variables
            if '$' in value:
                value = self._resolve_references(value)
        finally:
            self._resolving.discard(name)

        self.resolved[name] = value
        return value

    def _resolve_references(self, value: str, depth: int = 0) -> str:
        """Recursively resolve variable references in a value."""
        if depth > 10:  # Prevent infinite loops
            return value

        # Find variable references
        var_pattern = re.compile(r'\$[\w-]+')

        def replace_var(match):
            var_name = match.group(0)
            resolved = self.resolve(var_name.lstrip('$'))
            return resolved if resolved else var_name

        return var_pattern.sub(replace_var, value)

    def resolve_all(self) -> Dict[str, str]:
        """Resolve all variables."""
        for name in self.variables:
            self.resolve(name)
        return self.resolved
330
demo/tools/ingest/tailwind.py
Normal file
@@ -0,0 +1,330 @@
"""
Tailwind Token Source

Extracts design tokens from Tailwind CSS configuration files.
Supports tailwind.config.js/ts and CSS-based Tailwind v4 configurations.
"""

import re
import json
from pathlib import Path
from typing import List, Dict, Any, Optional
from .base import DesignToken, TokenCollection, TokenSource, TokenCategory


class TailwindTokenSource(TokenSource):
    """
    Extract tokens from Tailwind CSS configuration.

    Parses:
    - tailwind.config.js/ts (theme and extend sections)
    - Tailwind v4 CSS-based configuration
    - CSS custom properties from Tailwind output
    """

    # Tailwind category mappings
    TAILWIND_CATEGORIES = {
        'colors': TokenCategory.COLORS,
        'backgroundColor': TokenCategory.COLORS,
        'textColor': TokenCategory.COLORS,
        'borderColor': TokenCategory.COLORS,
        'spacing': TokenCategory.SPACING,
        'padding': TokenCategory.SPACING,
        'margin': TokenCategory.SPACING,
        'gap': TokenCategory.SPACING,
        'fontSize': TokenCategory.TYPOGRAPHY,
        'fontFamily': TokenCategory.TYPOGRAPHY,
        'fontWeight': TokenCategory.TYPOGRAPHY,
        'lineHeight': TokenCategory.TYPOGRAPHY,
        'letterSpacing': TokenCategory.TYPOGRAPHY,
        'width': TokenCategory.SIZING,
        'height': TokenCategory.SIZING,
        'maxWidth': TokenCategory.SIZING,
        'maxHeight': TokenCategory.SIZING,
        'minWidth': TokenCategory.SIZING,
        'minHeight': TokenCategory.SIZING,
        'borderRadius': TokenCategory.BORDERS,
        'borderWidth': TokenCategory.BORDERS,
        'boxShadow': TokenCategory.SHADOWS,
        'dropShadow': TokenCategory.SHADOWS,
        'opacity': TokenCategory.OPACITY,
        'zIndex': TokenCategory.Z_INDEX,
        'transitionDuration': TokenCategory.MOTION,
        'transitionTimingFunction': TokenCategory.MOTION,
        'animation': TokenCategory.MOTION,
        'screens': TokenCategory.BREAKPOINTS,
    }

    @property
    def source_type(self) -> str:
        return "tailwind"

    async def extract(self, source: str) -> TokenCollection:
        """
        Extract tokens from Tailwind config.

        Args:
            source: Path to tailwind.config.js/ts or directory containing it

        Returns:
            TokenCollection with extracted tokens
        """
        config_path = self._find_config(source)
        if not config_path:
            raise FileNotFoundError(f"Tailwind config not found in: {source}")

        content = config_path.read_text(encoding="utf-8")
        source_file = str(config_path.absolute())

        # Parse based on file type
        if config_path.suffix in ('.js', '.cjs', '.mjs', '.ts'):
            tokens = self._parse_js_config(content, source_file)
        elif config_path.suffix == '.css':
            tokens = self._parse_css_config(content, source_file)
        else:
            tokens = []

        return TokenCollection(
            tokens=tokens,
            name=f"Tailwind Tokens from {config_path.name}",
            sources=[self._create_source_id(source_file)],
        )

    def _find_config(self, source: str) -> Optional[Path]:
        """Find Tailwind config file."""
        path = Path(source)

        # If it's a file, use it directly
        if path.is_file():
            return path

        # If it's a directory, look for config files
        if path.is_dir():
            config_names = [
                'tailwind.config.js',
                'tailwind.config.cjs',
                'tailwind.config.mjs',
                'tailwind.config.ts',
            ]
            for name in config_names:
                config_path = path / name
                if config_path.exists():
                    return config_path

        return None

    def _parse_js_config(self, content: str, source_file: str) -> List[DesignToken]:
        """Parse JavaScript/TypeScript Tailwind config."""
        tokens = []

        # Extract theme object using regex (simplified parsing)
        # This handles common patterns but may not cover all edge cases

        # Look for theme: { ... } or theme.extend: { ... }
        theme_match = re.search(
            r'theme\s*:\s*\{([\s\S]*?)\n\s*\}(?=\s*[,}])',
            content
        )

        extend_match = re.search(
            r'extend\s*:\s*\{([\s\S]*?)\n\s{4}\}',
            content
        )

        if extend_match:
            theme_content = extend_match.group(1)
            tokens.extend(self._parse_theme_object(theme_content, source_file, "extend"))

        if theme_match and not extend_match:
            theme_content = theme_match.group(1)
            tokens.extend(self._parse_theme_object(theme_content, source_file, "theme"))

        return tokens

    def _parse_theme_object(self, content: str, source_file: str, prefix: str) -> List[DesignToken]:
        """Parse theme object content."""
        tokens = []

        # Find property blocks like: colors: { primary: '#3B82F6', ... }
        prop_pattern = re.compile(
            r"(\w+)\s*:\s*\{([^{}]*(?:\{[^{}]*\}[^{}]*)*)\}",
            re.MULTILINE
        )

        for match in prop_pattern.finditer(content):
            category_name = match.group(1)
            category_content = match.group(2)

            category = self.TAILWIND_CATEGORIES.get(
                category_name, TokenCategory.OTHER
            )

            # Parse values in this category
            tokens.extend(
                self._parse_category_values(
                    category_name,
                    category_content,
                    source_file,
                    category
                )
            )

        return tokens

    def _parse_category_values(
        self,
        category_name: str,
        content: str,
        source_file: str,
        category: TokenCategory
    ) -> List[DesignToken]:
        """Parse values within a category."""
        tokens = []

        # Match key: value pairs
        # Handles: key: 'value', key: "value", key: value, 'key': value
        value_pattern = re.compile(
            r"['\"]?(\w[\w-]*)['\"]?\s*:\s*['\"]?([^,'\"}\n]+)['\"]?",
        )

        for match in value_pattern.finditer(content):
            key = match.group(1)
            value = match.group(2).strip()

            # Skip function calls and complex values for now
            if '(' in value or '{' in value:
                continue

            # Skip references to other values
            if value.startswith('colors.') or value.startswith('theme('):
                continue

            token = DesignToken(
                name=f"{category_name}.{key}",
                value=value,
                source=self._create_source_id(source_file),
                source_file=source_file,
                original_name=f"{category_name}.{key}",
                original_value=value,
                category=category,
            )
            token.tags.append("tailwind")
            tokens.append(token)

        return tokens

    def _parse_css_config(self, content: str, source_file: str) -> List[DesignToken]:
        """Parse Tailwind v4 CSS-based configuration."""
        tokens = []

        # Tailwind v4 uses @theme directive
        theme_match = re.search(
            r'@theme\s*\{([\s\S]*?)\}',
            content
        )

        if theme_match:
            theme_content = theme_match.group(1)

            # Parse CSS custom properties
            var_pattern = re.compile(
                r'(--[\w-]+)\s*:\s*([^;]+);'
            )

            for match in var_pattern.finditer(theme_content):
                var_name = match.group(1)
                var_value = match.group(2).strip()

                # Determine category from variable name
                category = self._category_from_var_name(var_name)

                token = DesignToken(
                    name=self._normalize_var_name(var_name),
                    value=var_value,
                    source=self._create_source_id(source_file),
                    source_file=source_file,
                    original_name=var_name,
                    original_value=var_value,
                    category=category,
                )
                token.tags.append("tailwind-v4")
                tokens.append(token)

        return tokens

    def _normalize_var_name(self, var_name: str) -> str:
        """Convert CSS variable name to token name."""
        name = var_name.lstrip('-')
        name = name.replace('-', '.')
        return name.lower()

    def _category_from_var_name(self, var_name: str) -> TokenCategory:
        """Determine category from variable name."""
        name_lower = var_name.lower()

        if 'color' in name_lower or 'bg' in name_lower:
            return TokenCategory.COLORS
        if 'spacing' in name_lower or 'gap' in name_lower:
            return TokenCategory.SPACING
        if 'font' in name_lower or 'text' in name_lower:
            return TokenCategory.TYPOGRAPHY
        if 'radius' in name_lower or 'border' in name_lower:
            return TokenCategory.BORDERS
        if 'shadow' in name_lower:
            return TokenCategory.SHADOWS

        return TokenCategory.OTHER
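
# Illustrative sketch (not part of the original commit): pointing the source
# at a hypothetical project directory containing a tailwind.config.js.
def _demo_tailwind_extract():
    import asyncio
    collection = asyncio.run(TailwindTokenSource().extract("./my-app"))
    for token in collection.tokens:
        print(token.name, token.value, token.category)  # e.g. colors.primary #3B82F6 ...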


class TailwindClassExtractor:
    """
    Extract Tailwind class usage from source files.

    Identifies Tailwind utility classes for analysis and migration.
    """

    # Common Tailwind class prefixes
    TAILWIND_PREFIXES = [
        'bg-', 'text-', 'border-', 'ring-',
        'p-', 'px-', 'py-', 'pt-', 'pr-', 'pb-', 'pl-',
        'm-', 'mx-', 'my-', 'mt-', 'mr-', 'mb-', 'ml-',
        'w-', 'h-', 'min-w-', 'min-h-', 'max-w-', 'max-h-',
        'flex-', 'grid-', 'gap-',
        'font-', 'leading-', 'tracking-',
        'rounded-', 'shadow-', 'opacity-',
        'z-', 'transition-', 'duration-', 'ease-',
    ]

    async def extract_usage(self, source: str) -> Dict[str, List[str]]:
        """
        Extract Tailwind class usage from file.

        Returns dict mapping class categories to list of used classes.
        """
        if Path(source).exists():
            content = Path(source).read_text(encoding="utf-8")
        else:
            content = source

        usage: Dict[str, List[str]] = {}

        # Find className or class attributes
        class_pattern = re.compile(
            r'(?:className|class)\s*=\s*["\']([^"\']+)["\']'
        )

        for match in class_pattern.finditer(content):
            classes = match.group(1).split()

            for cls in classes:
                # Check if it's a Tailwind class
                for prefix in self.TAILWIND_PREFIXES:
                    if cls.startswith(prefix):
                        category = prefix.rstrip('-')
                        if category not in usage:
                            usage[category] = []
                        if cls not in usage[category]:
                            usage[category].append(cls)
                        break

        return usage
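
# Illustrative sketch (not part of the original commit): scanning a JSX
# snippet for utility classes.
def _demo_class_usage():
    import asyncio
    jsx = '<button className="bg-blue-500 text-white px-4 rounded-md">Save</button>'
    usage = asyncio.run(TailwindClassExtractor().extract_usage(jsx))
    return usage  # {"bg": ["bg-blue-500"], "text": ["text-white"], "px": ["px-4"], "rounded": ["rounded-md"]}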
1332
demo/tools/storage/database.py
Normal file
File diff suppressed because it is too large
26
demo/tools/storybook/__init__.py
Normal file
@@ -0,0 +1,26 @@
"""
DSS Storybook Integration Module

Provides tools for:
- Scanning existing Storybook stories
- Generating stories from React components
- Creating themed Storybook configurations
- Syncing documentation with design tokens
"""

from .scanner import StorybookScanner, StoryInfo, StorybookConfig
from .generator import StoryGenerator, StoryTemplate
from .theme import ThemeGenerator, StorybookTheme

__all__ = [
    # Scanner
    "StorybookScanner",
    "StoryInfo",
    "StorybookConfig",
    # Generator
    "StoryGenerator",
    "StoryTemplate",
    # Theme
    "ThemeGenerator",
    "StorybookTheme",
]
433
demo/tools/storybook/generator.py
Normal file
@@ -0,0 +1,433 @@
"""
Storybook Story Generator

Generates Storybook stories from React components.
"""

import re
from pathlib import Path
from typing import List, Dict, Any, Optional
from dataclasses import dataclass, field
from enum import Enum


class StoryTemplate(str, Enum):
    """Available story templates."""
    CSF3 = "csf3"  # Component Story Format 3 (latest)
    CSF2 = "csf2"  # Component Story Format 2
    MDX = "mdx"    # MDX format


@dataclass
class PropInfo:
    """Information about a component prop."""
    name: str
    type: str = "unknown"
    required: bool = False
    default_value: Optional[str] = None
    description: str = ""
    options: List[str] = field(default_factory=list)  # For enum/union types


@dataclass
class ComponentMeta:
    """Metadata about a component for story generation."""
    name: str
    path: str
    props: List[PropInfo] = field(default_factory=list)
    description: str = ""
    has_children: bool = False


class StoryGenerator:
    """
    Generates Storybook stories from component information.
    """

    def __init__(self, root_path: str):
        self.root = Path(root_path).resolve()

    async def generate_story(
        self,
        component_path: str,
        template: StoryTemplate = StoryTemplate.CSF3,
        include_variants: bool = True,
        output_path: Optional[str] = None,
    ) -> str:
        """
        Generate a Storybook story for a component.

        Args:
            component_path: Path to the component file
            template: Story template format
            include_variants: Generate variant stories
            output_path: Optional path to write the story file

        Returns:
            Generated story code
        """
        # Parse component
        meta = await self._parse_component(component_path)

        # Generate story based on template
        if template == StoryTemplate.CSF3:
            story = self._generate_csf3(meta, include_variants)
        elif template == StoryTemplate.CSF2:
            story = self._generate_csf2(meta, include_variants)
        else:
            story = self._generate_mdx(meta, include_variants)

        # Write to file if output path provided
        if output_path:
            output = Path(output_path)
            output.parent.mkdir(parents=True, exist_ok=True)
            output.write_text(story)

        return story

    async def _parse_component(self, component_path: str) -> ComponentMeta:
        """Parse a React component to extract metadata."""
        path = self.root / component_path if not Path(component_path).is_absolute() else Path(component_path)
        content = path.read_text(encoding="utf-8", errors="ignore")

        component_name = path.stem
        props = []

        # Extract props from interface/type
        # interface ButtonProps { variant?: 'primary' | 'secondary'; ... }
        props_pattern = re.compile(
            r'(?:interface|type)\s+\w*Props\s*(?:=\s*)?\{([^}]+)\}',
            re.DOTALL
        )

        props_match = props_pattern.search(content)
        if props_match:
            props_content = props_match.group(1)

            # Parse each prop line
            for line in props_content.split('\n'):
                line = line.strip()
                if not line or line.startswith('//'):
                    continue

                # Match: propName?: type; or propName: type;
                prop_match = re.match(
                    r'(\w+)(\?)?:\s*([^;/]+)',
                    line
                )
                if prop_match:
                    prop_name = prop_match.group(1)
                    is_optional = prop_match.group(2) == '?'
                    prop_type = prop_match.group(3).strip()

                    # Extract options from union types
                    options = []
                    if '|' in prop_type:
                        # 'primary' | 'secondary' | 'ghost'
                        options = [
                            o.strip().strip("'\"")
                            for o in prop_type.split('|')
                            if o.strip().startswith(("'", '"'))
                        ]

                    props.append(PropInfo(
                        name=prop_name,
                        type=prop_type,
                        required=not is_optional,
                        options=options,
                    ))

        # Check if component uses children
        has_children = 'children' in content.lower() and (
            'React.ReactNode' in content or
            'ReactNode' in content or
            '{children}' in content
        )

        # Extract component description from JSDoc
        description = ""
        jsdoc_match = re.search(r'/\*\*\s*\n\s*\*\s*([^\n*]+)', content)
        if jsdoc_match:
            description = jsdoc_match.group(1).strip()

        return ComponentMeta(
            name=component_name,
            path=component_path,
            props=props,
            description=description,
            has_children=has_children,
        )

    def _generate_csf3(self, meta: ComponentMeta, include_variants: bool) -> str:
        """Generate CSF3 format story."""
        lines = [
            f"import type {{ Meta, StoryObj }} from '@storybook/react';",
            f"import {{ {meta.name} }} from './{meta.name}';",
            "",
            f"const meta: Meta<typeof {meta.name}> = {{",
            f"  title: 'Components/{meta.name}',",
            f"  component: {meta.name},",
            "  parameters: {",
            "    layout: 'centered',",
            "  },",
            "  tags: ['autodocs'],",
        ]

        # Add argTypes for props with options
        arg_types = []
        for prop in meta.props:
            if prop.options:
                arg_types.append(
                    f"    {prop.name}: {{\n"
                    f"      options: {prop.options},\n"
                    f"      control: {{ type: 'select' }},\n"
                    f"    }},"
                )

        if arg_types:
            lines.append("  argTypes: {")
            lines.extend(arg_types)
            lines.append("  },")

        lines.extend([
            "};",
            "",
            "export default meta;",
            f"type Story = StoryObj<typeof {meta.name}>;",
            "",
        ])

        # Generate default story
        default_args = self._get_default_args(meta)
        lines.extend([
            "export const Default: Story = {",
            "  args: {",
        ])
        for key, value in default_args.items():
            lines.append(f"    {key}: {value},")
        lines.extend([
            "  },",
            "};",
        ])

        # Generate variant stories
        if include_variants:
            variant_prop = next(
                (p for p in meta.props if p.name == 'variant' and p.options),
                None
            )
            if variant_prop:
                for variant in variant_prop.options:
                    story_name = variant.title().replace('-', '').replace('_', '')
                    lines.extend([
                        "",
                        f"export const {story_name}: Story = {{",
                        "  args: {",
                        f"    ...Default.args,",
                        f"    variant: '{variant}',",
                        "  },",
                        "};",
                    ])

            # Size variants
            size_prop = next(
                (p for p in meta.props if p.name == 'size' and p.options),
                None
            )
            if size_prop:
                for size in size_prop.options:
                    story_name = f"Size{size.title()}"
                    lines.extend([
                        "",
                        f"export const {story_name}: Story = {{",
                        "  args: {",
                        f"    ...Default.args,",
                        f"    size: '{size}',",
                        "  },",
                        "};",
                    ])

            # Disabled state
            disabled_prop = next(
                (p for p in meta.props if p.name == 'disabled'),
                None
            )
            if disabled_prop:
                lines.extend([
                    "",
                    "export const Disabled: Story = {",
                    "  args: {",
                    "    ...Default.args,",
                    "    disabled: true,",
                    "  },",
                    "};",
                ])

        return "\n".join(lines)

    def _generate_csf2(self, meta: ComponentMeta, include_variants: bool) -> str:
        """Generate CSF2 format story."""
        lines = [
            f"import React from 'react';",
            f"import {{ {meta.name} }} from './{meta.name}';",
            "",
            "export default {",
            f"  title: 'Components/{meta.name}',",
            f"  component: {meta.name},",
            "};",
            "",
            f"const Template = (args) => <{meta.name} {{...args}} />;",
            "",
            "export const Default = Template.bind({});",
            "Default.args = {",
        ]

        default_args = self._get_default_args(meta)
        for key, value in default_args.items():
            lines.append(f"  {key}: {value},")

        lines.append("};")

        # Generate variant stories
        if include_variants:
            variant_prop = next(
                (p for p in meta.props if p.name == 'variant' and p.options),
                None
            )
            if variant_prop:
                for variant in variant_prop.options:
                    story_name = variant.title().replace('-', '').replace('_', '')
                    lines.extend([
                        "",
                        f"export const {story_name} = Template.bind({{}});",
                        f"{story_name}.args = {{",
                        f"  ...Default.args,",
                        f"  variant: '{variant}',",
                        "};",
                    ])

        return "\n".join(lines)

    def _generate_mdx(self, meta: ComponentMeta, include_variants: bool) -> str:
        """Generate MDX format story."""
        lines = [
            f"import {{ Meta, Story, Canvas, ArgsTable }} from '@storybook/blocks';",
            f"import {{ {meta.name} }} from './{meta.name}';",
            "",
            f"<Meta title=\"Components/{meta.name}\" component={{{meta.name}}} />",
            "",
            f"# {meta.name}",
            "",
        ]

        if meta.description:
            lines.extend([meta.description, ""])

        lines.extend([
            "## Default",
            "",
            "<Canvas>",
            f"  <Story name=\"Default\">",
            f"    <{meta.name}",
        ])

        default_args = self._get_default_args(meta)
        for key, value in default_args.items():
            lines.append(f"      {key}={value}")

        lines.extend([
            f"    />",
            "  </Story>",
            "</Canvas>",
            "",
            "## Props",
            "",
            f"<ArgsTable of={{{meta.name}}} />",
        ])

        return "\n".join(lines)

    def _get_default_args(self, meta: ComponentMeta) -> Dict[str, str]:
        """Get default args for a component."""
        args = {}

        for prop in meta.props:
            if prop.name == 'children' and meta.has_children:
                args['children'] = f"'{meta.name}'"
            elif prop.name == 'variant' and prop.options:
                args['variant'] = f"'{prop.options[0]}'"
            elif prop.name == 'size' and prop.options:
                args['size'] = f"'{prop.options[0]}'"
            elif prop.name == 'disabled':
                args['disabled'] = 'false'
            elif prop.name == 'onClick':
                args['onClick'] = '() => console.log("clicked")'
            elif prop.required and prop.default_value:
                args[prop.name] = prop.default_value

        # Ensure children for button-like components
        if meta.has_children and 'children' not in args:
            args['children'] = f"'{meta.name}'"

        return args

    async def generate_stories_for_directory(
        self,
        directory: str,
        template: StoryTemplate = StoryTemplate.CSF3,
        dry_run: bool = True,
    ) -> List[Dict[str, str]]:
        """
        Generate stories for all components in a directory.

        Args:
            directory: Path to component directory
            template: Story template format
            dry_run: If True, only return what would be generated

        Returns:
            List of dicts with component path and generated story
        """
        results = []
        dir_path = self.root / directory

        if not dir_path.exists():
            return results

        # Find component files
        for pattern in ['*.tsx', '*.jsx']:
            for comp_path in dir_path.glob(pattern):
                # Skip story files, test files, index files
                if any(x in comp_path.name.lower() for x in ['.stories.', '.test.', '.spec.', 'index.']):
                    continue

                # Skip non-component files (not PascalCase)
                if not comp_path.stem[0].isupper():
                    continue

                try:
                    rel_path = str(comp_path.relative_to(self.root))
                    story = await self.generate_story(rel_path, template)

                    # Determine story output path
                    story_path = comp_path.with_suffix('.stories.tsx')

                    result = {
                        'component': rel_path,
                        'story_path': str(story_path.relative_to(self.root)),
                        'story': story,
                    }

                    if not dry_run:
                        story_path.write_text(story)
                        result['written'] = True

                    results.append(result)

                except Exception as e:
                    results.append({
                        'component': str(comp_path),
                        'error': str(e),
                    })

        return results
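
# Illustrative sketch (not part of the original commit): generating a CSF3
# story for a hypothetical src/components/Button.tsx and writing it next to
# the component.
def _demo_generate_story():
    import asyncio
    generator = StoryGenerator("/path/to/project")
    story = asyncio.run(generator.generate_story(
        "src/components/Button.tsx",
        template=StoryTemplate.CSF3,
        output_path="src/components/Button.stories.tsx",
    ))
    print(story)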
357
demo/tools/storybook/scanner.py
Normal file
@@ -0,0 +1,357 @@
"""
Storybook Scanner

Discovers and analyzes existing Storybook stories in a project.
"""

import re
import json
from pathlib import Path
from typing import List, Dict, Any, Optional, Set
from dataclasses import dataclass, field


@dataclass
class StoryInfo:
    """Information about a Storybook story."""
    name: str       # Story name (e.g., "Primary")
    title: str      # Story title (e.g., "Components/Button")
    component: str  # Component name
    file_path: str  # Path to story file
    args: Dict[str, Any] = field(default_factory=dict)  # Default args
    parameters: Dict[str, Any] = field(default_factory=dict)
    decorators: List[str] = field(default_factory=list)
    tags: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "name": self.name,
            "title": self.title,
            "component": self.component,
            "file_path": self.file_path,
            "args": self.args,
            "parameters": self.parameters,
            "decorators": self.decorators,
            "tags": self.tags,
        }


@dataclass
class StorybookConfig:
    """Storybook configuration details."""
    version: str = ""
    framework: str = ""  # react, vue, angular, etc.
    builder: str = ""    # vite, webpack5, etc.
    addons: List[str] = field(default_factory=list)
    stories_patterns: List[str] = field(default_factory=list)
    static_dirs: List[str] = field(default_factory=list)
    config_path: str = ""

    def to_dict(self) -> Dict[str, Any]:
        return {
            "version": self.version,
            "framework": self.framework,
            "builder": self.builder,
            "addons": self.addons,
            "stories_patterns": self.stories_patterns,
            "static_dirs": self.static_dirs,
            "config_path": self.config_path,
        }
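
# Illustrative sketch (not part of the original commit): running the scanner
# defined below against a project root and printing basic coverage figures.
def _demo_scan():
    import asyncio
    scanner = StorybookScanner("/path/to/project")
    result = asyncio.run(scanner.scan())
    print(result["stories_count"], "stories across",
          result["components_with_stories"], "components")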
|
||||
|
||||
class StorybookScanner:
|
||||
"""
|
||||
Scans a project for Storybook configuration and stories.
|
||||
"""
|
||||
|
||||
# Common story file patterns
|
||||
STORY_PATTERNS = [
|
||||
'*.stories.tsx',
|
||||
'*.stories.ts',
|
||||
'*.stories.jsx',
|
||||
'*.stories.js',
|
||||
'*.stories.mdx',
|
||||
]
|
||||
|
||||
def __init__(self, root_path: str):
|
||||
self.root = Path(root_path).resolve()
|
||||
|
||||
async def scan(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Perform full Storybook scan.
|
||||
|
||||
Returns:
|
||||
Dict with configuration and story inventory
|
||||
"""
|
||||
config = await self._find_config()
|
||||
stories = await self._find_stories()
|
||||
|
||||
# Group stories by component
|
||||
by_component: Dict[str, List[StoryInfo]] = {}
|
||||
for story in stories:
|
||||
if story.component not in by_component:
|
||||
by_component[story.component] = []
|
||||
by_component[story.component].append(story)
|
||||
|
||||
return {
|
||||
"config": config.to_dict() if config else None,
|
||||
"stories_count": len(stories),
|
||||
"components_with_stories": len(by_component),
|
||||
"stories": [s.to_dict() for s in stories],
|
||||
"by_component": {
|
||||
comp: [s.to_dict() for s in stories_list]
|
||||
for comp, stories_list in by_component.items()
|
||||
},
|
||||
}
|
||||
|
||||
async def _find_config(self) -> Optional[StorybookConfig]:
|
||||
"""Find and parse Storybook configuration."""
|
||||
# Look for .storybook directory
|
||||
storybook_dir = self.root / ".storybook"
|
||||
if not storybook_dir.exists():
|
||||
# Try alternative locations
|
||||
for alt in ["storybook", ".storybook"]:
|
||||
alt_path = self.root / alt
|
||||
if alt_path.exists():
|
||||
storybook_dir = alt_path
|
||||
break
|
||||
else:
|
||||
return None
|
||||
|
||||
config = StorybookConfig(config_path=str(storybook_dir))
|
||||
|
||||
# Parse main.js/ts
|
||||
for main_file in ["main.ts", "main.js", "main.mjs"]:
|
||||
main_path = storybook_dir / main_file
|
||||
if main_path.exists():
|
||||
await self._parse_main_config(main_path, config)
|
||||
break
|
||||
|
||||
# Check package.json for Storybook version
|
||||
pkg_json = self.root / "package.json"
|
||||
if pkg_json.exists():
|
||||
try:
|
||||
pkg = json.loads(pkg_json.read_text())
|
||||
deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})}
|
||||
|
||||
# Get Storybook version
|
||||
for pkg_name in ["@storybook/react", "@storybook/vue3", "@storybook/angular"]:
|
||||
if pkg_name in deps:
|
||||
config.version = deps[pkg_name].lstrip("^~")
|
||||
config.framework = pkg_name.split("/")[1]
|
||||
break
|
||||
|
||||
# Get builder
|
||||
if "@storybook/builder-vite" in deps:
|
||||
config.builder = "vite"
|
||||
elif "@storybook/builder-webpack5" in deps:
|
||||
config.builder = "webpack5"
|
||||
|
||||
# Get addons
|
||||
config.addons = [
|
||||
pkg for pkg in deps.keys()
|
||||
if pkg.startswith("@storybook/addon-")
|
||||
]
|
||||
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
pass
|
||||
|
||||
return config
|
||||
|
||||
async def _parse_main_config(self, main_path: Path, config: StorybookConfig) -> None:
|
||||
"""Parse main.js/ts for configuration."""
|
||||
try:
|
||||
content = main_path.read_text(encoding="utf-8")
|
||||
|
||||
# Extract stories patterns
|
||||
stories_match = re.search(
|
||||
r'stories\s*:\s*\[([^\]]+)\]',
|
||||
content,
|
||||
re.DOTALL
|
||||
)
|
||||
if stories_match:
|
||||
patterns_str = stories_match.group(1)
|
||||
patterns = re.findall(r'["\']([^"\']+)["\']', patterns_str)
|
||||
config.stories_patterns = patterns
|
||||
|
||||
# Extract static dirs
|
||||
static_match = re.search(
|
||||
r'staticDirs\s*:\s*\[([^\]]+)\]',
|
||||
content,
|
||||
re.DOTALL
|
||||
)
|
||||
if static_match:
|
||||
dirs_str = static_match.group(1)
|
||||
dirs = re.findall(r'["\']([^"\']+)["\']', dirs_str)
|
||||
config.static_dirs = dirs
|
||||
|
||||
# Extract framework
|
||||
framework_match = re.search(
|
||||
r'framework\s*:\s*["\'](@storybook/[^"\']+)["\']',
|
||||
content
|
||||
)
|
||||
if framework_match:
|
||||
config.framework = framework_match.group(1)
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
    async def _find_stories(self) -> List[StoryInfo]:
        """Find all story files in the project."""
        stories = []
        skip_dirs = {'node_modules', '.git', 'dist', 'build'}

        for pattern in self.STORY_PATTERNS:
            for story_path in self.root.rglob(pattern):
                if any(skip in story_path.parts for skip in skip_dirs):
                    continue

                try:
                    file_stories = await self._parse_story_file(story_path)
                    stories.extend(file_stories)
                except Exception:
                    continue

        return stories

    async def _parse_story_file(self, story_path: Path) -> List[StoryInfo]:
        """Parse a story file to extract story information."""
        content = story_path.read_text(encoding="utf-8", errors="ignore")
        rel_path = str(story_path.relative_to(self.root))
        stories = []

        # Extract meta/default export
        title = ""
        component = ""

        # CSF3 format: const meta = { title: '...', component: ... }
        # or: export default { title: '...', component: ... }
        meta_match = re.search(
            r'(?:const\s+meta(?::\s*[^=\n]+)?\s*=|export\s+default)\s*\{([^}]+)\}',
            content,
            re.DOTALL
        )
        if meta_match:
            meta_content = meta_match.group(1)

            title_match = re.search(r'title\s*:\s*["\']([^"\']+)["\']', meta_content)
            if title_match:
                title = title_match.group(1)

            comp_match = re.search(r'component\s*:\s*(\w+)', meta_content)
            if comp_match:
                component = comp_match.group(1)

        # If no title, derive from the file name
        if not title:
            # The stem carries no path separators, so this yields e.g.
            # Button.stories.tsx -> "Button"
            parts = story_path.stem.replace('.stories', '').split('/')
            title = '/'.join(p.title() for p in parts[-2:] if p)

        if not component:
            component = story_path.stem.replace('.stories', '')

        # Find exported stories (CSF3 format)
        # export const Primary: Story = { ... }
        # The body is matched lazily up to a closing brace at end of line,
        # so one level of nesting (e.g. an args object) is captured.
        story_pattern = re.compile(
            r'export\s+const\s+(\w+)\s*(?::\s*[\w<>,\[\]\s]+?)?\s*=\s*\{(.*?)\}\s*;?\s*$',
            re.DOTALL | re.MULTILINE
        )

        for match in story_pattern.finditer(content):
            story_name = match.group(1)
            story_content = match.group(2)

            # Skip meta export
            if story_name.lower() in ['meta', 'default']:
                continue

            # Parse args
            args = {}
            args_match = re.search(r'args\s*:\s*\{([^}]*)\}', story_content)
            if args_match:
                args_str = args_match.group(1)
                # Simple key-value extraction; nested values are not recursed into
                for kv_match in re.finditer(r'(\w+)\s*:\s*["\']?([^,\n"\']+)["\']?', args_str):
                    args[kv_match.group(1)] = kv_match.group(2).strip()

            stories.append(StoryInfo(
                name=story_name,
                title=title,
                component=component,
                file_path=rel_path,
                args=args,
            ))

        # Also check for the older CSF2 format
        # export const Primary = Template.bind({})
        csf2_pattern = re.compile(
            r'export\s+const\s+(\w+)\s*=\s*Template\.bind\(\{\}\)'
        )
        for match in csf2_pattern.finditer(content):
            story_name = match.group(1)
            if not any(s.name == story_name for s in stories):
                stories.append(StoryInfo(
                    name=story_name,
                    title=title,
                    component=component,
                    file_path=rel_path,
                ))

        return stories
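    # Illustrative (not part of the original file): a story file containing
    #     export const Primary: Story = { args: { label: 'Click me' } };
    # would be parsed into
    #     StoryInfo(name="Primary", args={"label": "Click me"}, ...)
    # The args extraction is intentionally shallow: only flat key-value
    # pairs are recovered, not nested objects.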
    async def get_components_without_stories(
        self,
        component_files: List[str]
    ) -> List[str]:
        """
        Find components that don't have Storybook stories.

        Args:
            component_files: List of component file paths

        Returns:
            List of component paths without stories
        """
        # Get all components with stories
        result = await self.scan()
        components_with_stories = set(result.get("by_component", {}).keys())

        # Find components without stories
        without_stories = []
        for comp_path in component_files:
            # Extract component name from path
            comp_name = Path(comp_path).stem
            if comp_name not in components_with_stories:
                without_stories.append(comp_path)

        return without_stories
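    # Illustrative (not part of the original file):
    #     await scanner.get_components_without_stories(
    #         ["src/components/Button.tsx", "src/components/Badge.tsx"]
    #     )
    # returns the subset whose stem (e.g. "Badge") has no entry in the
    # scan's by_component index.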
    async def get_story_coverage(self) -> Dict[str, Any]:
        """
        Calculate story coverage statistics.

        Returns:
            Coverage statistics: total story count, covered-component count,
            and stories-per-component averages
        """
        result = await self.scan()

        stories_count = result.get("stories_count", 0)
        components_count = result.get("components_with_stories", 0)

        # Count stories per component
        by_component = result.get("by_component", {})
        stories_per_component = {
            comp: len(stories) for comp, stories in by_component.items()
        }

        avg_stories = (
            sum(stories_per_component.values()) / len(stories_per_component)
            if stories_per_component else 0
        )

        return {
            "total_stories": stories_count,
            "components_covered": components_count,
            "average_stories_per_component": round(avg_stories, 1),
            "stories_per_component": stories_per_component,
        }
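A minimal usage sketch for the scanner above (illustrative only: the StorybookScanner name and constructor signature are assumptions; scan() and the coverage keys come from the methods shown):

    import asyncio

    async def main() -> None:
        scanner = StorybookScanner("/path/to/project")  # hypothetical name/ctor
        coverage = await scanner.get_story_coverage()
        print(coverage["total_stories"], coverage["average_stories_per_component"])

    asyncio.run(main())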
374
demo/tools/storybook/theme.py
Normal file
@@ -0,0 +1,374 @@
"""
Storybook Theme Generator

Generates Storybook theme configurations from design tokens.
"""

import json
from pathlib import Path
from typing import List, Dict, Any, Optional
from dataclasses import dataclass


@dataclass
class StorybookTheme:
    """Storybook theme configuration."""
    name: str = "dss-theme"
    base: str = "light"  # 'light' or 'dark'

    # Brand
    brand_title: str = "Design System"
    brand_url: str = ""
    brand_image: str = ""
    brand_target: str = "_self"

    # Colors
    color_primary: str = "#3B82F6"
    color_secondary: str = "#10B981"

    # UI colors
    app_bg: str = "#FFFFFF"
    app_content_bg: str = "#FFFFFF"
    app_border_color: str = "#E5E7EB"

    # Text colors
    text_color: str = "#1F2937"
    text_inverse_color: str = "#FFFFFF"
    text_muted_color: str = "#6B7280"

    # Toolbar
    bar_text_color: str = "#6B7280"
    bar_selected_color: str = "#3B82F6"
    bar_bg: str = "#FFFFFF"

    # Form colors
    input_bg: str = "#FFFFFF"
    input_border: str = "#D1D5DB"
    input_text_color: str = "#1F2937"
    input_border_radius: int = 4

    # Typography
    font_base: str = '"Inter", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif'
    font_code: str = '"Fira Code", "Monaco", monospace'

    def to_dict(self) -> Dict[str, Any]:
        """Map snake_case fields to Storybook's camelCase theme keys."""
        return {
            "base": self.base,
            "brandTitle": self.brand_title,
            "brandUrl": self.brand_url,
            "brandImage": self.brand_image,
            "brandTarget": self.brand_target,
            "colorPrimary": self.color_primary,
            "colorSecondary": self.color_secondary,
            "appBg": self.app_bg,
            "appContentBg": self.app_content_bg,
            "appBorderColor": self.app_border_color,
            "textColor": self.text_color,
            "textInverseColor": self.text_inverse_color,
            "textMutedColor": self.text_muted_color,
            "barTextColor": self.bar_text_color,
            "barSelectedColor": self.bar_selected_color,
            "barBg": self.bar_bg,
            "inputBg": self.input_bg,
            "inputBorder": self.input_border,
            "inputTextColor": self.input_text_color,
            "inputBorderRadius": self.input_border_radius,
            "fontBase": self.font_base,
            "fontCode": self.font_code,
        }
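# Illustrative (not part of the original file):
#     StorybookTheme(brand_title="Acme").to_dict()["brandTitle"]  # -> "Acme"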
class ThemeGenerator:
    """
    Generates Storybook theme configurations from design tokens.
    """

    # Token name mappings to Storybook theme properties
    TOKEN_MAPPINGS = {
        # Primary/Secondary
        "color.primary.500": "color_primary",
        "color.primary.600": "color_primary",
        "color.secondary.500": "color_secondary",
        "color.accent.500": "color_secondary",

        # Backgrounds
        "color.neutral.50": "app_bg",
        "color.background": "app_bg",
        "color.surface": "app_content_bg",

        # Borders
        "color.neutral.200": "app_border_color",
        "color.border": "app_border_color",

        # Text
        "color.neutral.900": "text_color",
        "color.neutral.800": "text_color",
        "color.foreground": "text_color",
        "color.neutral.500": "text_muted_color",
        "color.muted": "text_muted_color",

        # Input
        "color.neutral.300": "input_border",
        "radius.md": "input_border_radius",
    }
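    # Illustrative (not part of the original file): a token named
    # "color.primary.500" with value "#E11D48" is applied directly to
    # StorybookTheme.color_primary via the mapping above.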
    def generate_from_tokens(
        self,
        tokens: List[Dict[str, Any]],
        brand_title: str = "Design System",
        base: str = "light",
    ) -> StorybookTheme:
        """
        Generate a Storybook theme from design tokens.

        Args:
            tokens: List of token dicts with 'name' and 'value'
            brand_title: Brand title for Storybook
            base: Base theme ('light' or 'dark')

        Returns:
            StorybookTheme configured from tokens
        """
        theme = StorybookTheme(
            name="dss-theme",
            base=base,
            brand_title=brand_title,
        )

        # Map tokens to theme properties
        for token in tokens:
            name = token.get("name", "")
            value = token.get("value", "")

            # Check direct mappings
            if name in self.TOKEN_MAPPINGS:
                prop = self.TOKEN_MAPPINGS[name]
                setattr(theme, prop, value)
                continue

            # Check partial matches
            name_lower = name.lower()

            if "primary" in name_lower and "500" in name_lower:
                theme.color_primary = value
            elif "secondary" in name_lower and "500" in name_lower:
                theme.color_secondary = value
            elif "background" in name_lower and self._is_light_color(value):
                theme.app_bg = value
            elif "foreground" in name_lower or ("text" in name_lower and "color" in name_lower):
                theme.text_color = value

        # Adjust for dark mode
        if base == "dark":
            theme = self._adjust_for_dark_mode(theme)

        return theme
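    # Illustrative (not part of the original file):
    #     generator = ThemeGenerator()
    #     theme = generator.generate_from_tokens(
    #         [{"name": "color.primary.500", "value": "#E11D48"}]
    #     )
    #     theme.color_primary  # -> "#E11D48"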
    def _is_light_color(self, value: str) -> bool:
        """Check if a color value is light (for background suitability)."""
        if not value.startswith("#"):
            return True  # Assume light if not hex

        # Parse hex color, expanding shorthand (#abc -> #aabbcc)
        hex_color = value.lstrip("#")
        if len(hex_color) == 3:
            hex_color = "".join(c * 2 for c in hex_color)

        try:
            r = int(hex_color[0:2], 16)
            g = int(hex_color[2:4], 16)
            b = int(hex_color[4:6], 16)
            # Perceived luminance (ITU-R BT.601 weights), normalized to 0..1
            luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
            return luminance > 0.5
        except (ValueError, IndexError):
            return True
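    # Illustrative (not part of the original file):
    #     "#FFFFFF" -> luminance 1.0   -> True (light)
    #     "#1F2937" -> luminance ~0.16 -> False (dark)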
    def _adjust_for_dark_mode(self, theme: StorybookTheme) -> StorybookTheme:
        """Adjust theme for dark mode if colors aren't already dark."""
        # Swap in a dark palette if the background is still light
        if self._is_light_color(theme.app_bg):
            theme.app_bg = "#1F2937"
            theme.app_content_bg = "#111827"
            theme.app_border_color = "#374151"
            theme.text_color = "#F9FAFB"
            theme.text_muted_color = "#9CA3AF"
            theme.bar_bg = "#1F2937"
            theme.bar_text_color = "#9CA3AF"
            theme.input_bg = "#374151"
            theme.input_border = "#4B5563"
            theme.input_text_color = "#F9FAFB"

        return theme
    def generate_theme_file(
        self,
        theme: StorybookTheme,
        format: str = "ts",
    ) -> str:
        """
        Generate Storybook theme file content.

        Args:
            theme: StorybookTheme to export
            format: Output format ('ts', 'js', 'json')

        Returns:
            Theme file content as string
        """
        if format == "json":
            return json.dumps(theme.to_dict(), indent=2)

        theme_dict = theme.to_dict()

        if format == "ts":
            lines = [
                "import { create } from '@storybook/theming/create';",
                "",
                "export const dssTheme = create({",
            ]
        else:  # js
            lines = [
                "const { create } = require('@storybook/theming/create');",
                "",
                "module.exports = create({",
            ]

        for key, value in theme_dict.items():
            if isinstance(value, str):
                # Escape single quotes so the generated JS/TS stays valid
                escaped = value.replace("'", "\\'")
                lines.append(f"  {key}: '{escaped}',")
            else:
                lines.append(f"  {key}: {value},")

        lines.extend([
            "});",
            "",
        ])

        return "\n".join(lines)
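    # Illustrative output for format="ts" (abbreviated):
    #     import { create } from '@storybook/theming/create';
    #
    #     export const dssTheme = create({
    #       base: 'light',
    #       brandTitle: 'Design System',
    #       ...
    #     });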
    def generate_manager_file(self, theme_import: str = "./dss-theme") -> str:
        """
        Generate Storybook manager.ts file.

        Args:
            theme_import: Import path for theme

        Returns:
            Manager file content
        """
        return f"""import {{ addons }} from '@storybook/manager-api';
import {{ dssTheme }} from '{theme_import}';

addons.setConfig({{
  theme: dssTheme,
}});
"""
    def generate_preview_file(
        self,
        tokens: List[Dict[str, Any]],
        include_css_vars: bool = True,
    ) -> str:
        """
        Generate Storybook preview.ts file with token CSS variables.

        Args:
            tokens: List of token dicts
            include_css_vars: Include CSS variable injection

        Returns:
            Preview file content
        """
        lines = [
            "import type { Preview } from '@storybook/react';",
            "",
        ]

        if include_css_vars:
            # Generate CSS variables from tokens
            css_vars = []
            for token in tokens:
                name = token.get("name", "").replace(".", "-")
                value = token.get("value", "")
                css_vars.append(f"  --{name}: {value};")

            lines.extend([
                "// Inject design tokens as CSS variables",
                "const tokenStyles = `",
                ":root {",
            ])
            lines.extend(css_vars)
            lines.extend([
                "}",
                "`;",
                "",
                "// Add styles to document",
                "const styleSheet = document.createElement('style');",
                "styleSheet.textContent = tokenStyles;",
                "document.head.appendChild(styleSheet);",
                "",
            ])

        lines.extend([
            "const preview: Preview = {",
            "  parameters: {",
            "    controls: {",
            "      matchers: {",
            "        color: /(background|color)$/i,",
            "        date: /Date$/i,",
            "      },",
            "    },",
            "    backgrounds: {",
            "      default: 'light',",
            "      values: [",
            "        { name: 'light', value: '#FFFFFF' },",
            "        { name: 'dark', value: '#1F2937' },",
            "      ],",
            "    },",
            "  },",
            "};",
            "",
            "export default preview;",
        ])

        return "\n".join(lines)
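    # Illustrative (not part of the original file): a token named
    # "color.primary.500" is emitted into the injected :root block as
    #     --color-primary-500: <value>;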
    def generate_full_config(
        self,
        tokens: List[Dict[str, Any]],
        brand_title: str = "Design System",
        output_dir: Optional[str] = None,
    ) -> Dict[str, str]:
        """
        Generate complete Storybook configuration files.

        Args:
            tokens: List of token dicts
            brand_title: Brand title
            output_dir: Optional directory to write files

        Returns:
            Dict mapping filenames to content
        """
        # Generate theme
        theme = self.generate_from_tokens(tokens, brand_title)

        files = {
            "dss-theme.ts": self.generate_theme_file(theme, "ts"),
            "manager.ts": self.generate_manager_file(),
            "preview.ts": self.generate_preview_file(tokens),
        }

        # Write files if output_dir provided
        if output_dir:
            out_path = Path(output_dir)
            out_path.mkdir(parents=True, exist_ok=True)

            for filename, content in files.items():
                (out_path / filename).write_text(content)

        return files
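A minimal end-to-end sketch for the generator above (illustrative only: the token names/values and the .storybook output directory are assumptions):

    tokens = [
        {"name": "color.primary.500", "value": "#E11D48"},
        {"name": "color.background", "value": "#FFFFFF"},
    ]
    generator = ThemeGenerator()
    files = generator.generate_full_config(
        tokens, brand_title="Acme DS", output_dir=".storybook"
    )
    print(sorted(files))  # -> ['dss-theme.ts', 'manager.ts', 'preview.ts']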