Initial commit: Clean DSS implementation

Migrated from design-system-swarm with fresh git history.
Old project history preserved in /home/overbits/apps/design-system-swarm

Core components:
- MCP Server (Python FastAPI with mcp 1.23.1)
- Claude Plugin (agents, commands, skills, strategies, hooks, core)
- DSS Backend (dss-mvp1 - token translation, Figma sync)
- Admin UI (Node.js/React)
- Server (Node.js/Express)
- Storybook integration (dss-mvp1/.storybook)

Self-contained configuration:
- All paths relative or use DSS_BASE_PATH=/home/overbits/dss
- PYTHONPATH configured for dss-mvp1 and dss-claude-plugin
- .env file with all configuration
- Claude plugin uses ${CLAUDE_PLUGIN_ROOT} for portability

Migration completed: 2025-12-09 (the original message contained an unexpanded "$(date)" shell placeholder)
🤖 Clean migration with full functionality preserved
This commit is contained in:
Digital Production Factory
2025-12-09 18:45:48 -03:00
commit 276ed71f31
884 changed files with 373737 additions and 0 deletions

View File

@@ -0,0 +1,38 @@
{
"meta": {
"version": "1.0.0",
"timestamp": "2025-12-07T23:59:15Z",
"project_path": ".",
"full_scan": false
},
"project": {
"types": [
"python"
],
"frameworks": [
"fastapi"
]
},
"design_system": {"detected":true,"type":"custom","has_tokens":true},
"files": {
"total": 19,
"javascript": 0,
"css": 1,
"python": 14,
"components": 0
},
"dependencies": {"python":7,"total":7},
"git": {"is_repo":false},
"health": {
"score": 95,
"grade": "A",
"issues": ["Missing README"]
},
"css": {
"files": 1,
"preprocessor": "none",
"has_css_variables": true,
"has_preprocessor_variables": false
},
"components": []
}

View File

@@ -0,0 +1,8 @@
:root {
    /* Brand color tokens (rgb notation) */
    --primary: rgb(51, 102, 229);
    --secondary: rgb(127, 127, 127);
    --background: rgb(255, 255, 255);
    /* Spacing scale in 4px steps; note: no --space-3 is defined here */
    --space-1: 4px;
    --space-2: 8px;
    --space-4: 16px;
}

349
tools/api/ai_providers.py Normal file
View File

@@ -0,0 +1,349 @@
"""
AI Provider abstraction for Claude and Gemini
Handles model-specific API calls and tool execution
"""
import os
import json
import asyncio
from typing import List, Dict, Any, Optional
from abc import ABC, abstractmethod
class AIProvider(ABC):
    """Abstract base class for AI providers.

    Concrete providers (Claude, Gemini) implement ``chat``. The abstract
    signature now includes the optional MCP tool-execution hooks so the
    contract matches what both concrete implementations already accept
    (previously the overrides widened the signature, which trips
    type-checkers and misleads callers coding against the base class).
    """

    @abstractmethod
    async def chat(
        self,
        message: str,
        system_prompt: str,
        history: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        temperature: float = 0.7,
        mcp_handler=None,
        mcp_context=None,
    ) -> Dict[str, Any]:
        """
        Send a chat message and get response.

        Args:
            message: The new user message.
            system_prompt: System instructions for the model.
            history: Prior turns as {"role": ..., "content": ...} dicts.
            tools: Optional tool definitions (Claude tool schema).
            temperature: Sampling temperature.
            mcp_handler: Optional MCP handler used to execute tool calls.
            mcp_context: Opaque context forwarded to the handler.

        Returns: {
            "success": bool,
            "response": str,
            "model": str,
            "tools_used": List[Dict],
            "stop_reason": str
        }
        """
        pass
class ClaudeProvider(AIProvider):
    """Anthropic Claude provider.

    Wraps the synchronous ``anthropic`` SDK; blocking API calls are pushed
    to a worker thread via asyncio.to_thread so the coroutine never blocks
    the event loop. Supports an agentic tool-use loop driven by an MCP
    handler (max 5 round-trips).
    """

    def __init__(self):
        # Key presence is validated lazily in is_available(), not here.
        self.api_key = os.getenv("ANTHROPIC_API_KEY")
        self.default_model = "claude-sonnet-4-5-20250929"

    def is_available(self) -> bool:
        """Return True when the anthropic SDK is importable and a key is set."""
        try:
            from anthropic import Anthropic  # availability probe only
            return bool(self.api_key)
        except ImportError:
            return False

    async def chat(
        self,
        message: str,
        system_prompt: str,
        history: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        temperature: float = 0.7,
        mcp_handler=None,
        mcp_context=None
    ) -> Dict[str, Any]:
        """Chat with Claude.

        Runs up to 5 tool-use round-trips when Claude requests tools and an
        MCP handler is available; returns the provider result dict described
        on AIProvider.chat.
        """
        if not self.is_available():
            return {
                "success": False,
                "response": "Claude not available. Install anthropic SDK or set ANTHROPIC_API_KEY.",
                "model": "error",
                "tools_used": [],
                "stop_reason": "error"
            }
        from anthropic import Anthropic
        client = Anthropic(api_key=self.api_key)
        # Build messages from the last 6 turns, dropping empty content and
        # any role other than user/assistant.
        messages = []
        for msg in history[-6:]:
            role = msg.get("role", "user")
            content = msg.get("content", "")
            if content and role in ["user", "assistant"]:
                messages.append({"role": role, "content": content})
        messages.append({"role": "user", "content": message})
        # API params
        api_params = {
            "model": self.default_model,
            "max_tokens": 4096,
            "temperature": temperature,
            "system": system_prompt,
            "messages": messages
        }
        if tools:
            api_params["tools"] = tools
        # Initial call
        response = await asyncio.to_thread(
            client.messages.create,
            **api_params
        )
        # Agentic loop: execute requested tools and feed results back.
        tools_used = []
        max_iterations = 5
        iteration = 0
        while response.stop_reason == "tool_use" and iteration < max_iterations:
            # BUGFIX: mcp_handler was previously dereferenced unconditionally,
            # raising AttributeError when tools were supplied without a
            # handler. Without a handler we cannot satisfy the tool request,
            # so return the best-effort text accumulated so far.
            if mcp_handler is None:
                break
            iteration += 1
            tool_results = []
            for content_block in response.content:
                if content_block.type == "tool_use":
                    tool_name = content_block.name
                    tool_input = content_block.input
                    tool_use_id = content_block.id
                    # Execute tool via MCP handler
                    result = await mcp_handler.execute_tool(
                        tool_name=tool_name,
                        arguments=tool_input,
                        context=mcp_context
                    )
                    tools_used.append({
                        "tool": tool_name,
                        "success": result.success,
                        "duration_ms": result.duration_ms
                    })
                    # Format result (errors are wrapped so Claude can react)
                    if result.success:
                        tool_result_content = json.dumps(result.result, indent=2)
                    else:
                        tool_result_content = json.dumps({"error": result.error})
                    tool_results.append({
                        "type": "tool_result",
                        "tool_use_id": tool_use_id,
                        "content": tool_result_content
                    })
            # Continue conversation with tool results
            messages.append({"role": "assistant", "content": response.content})
            messages.append({"role": "user", "content": tool_results})
            response = await asyncio.to_thread(
                client.messages.create,
                **{**api_params, "messages": messages}
            )
        # Extract final response: concatenate all text blocks.
        response_text = ""
        for content_block in response.content:
            if hasattr(content_block, "text"):
                response_text += content_block.text
        return {
            "success": True,
            "response": response_text,
            "model": response.model,
            "tools_used": tools_used,
            "stop_reason": response.stop_reason
        }
class GeminiProvider(AIProvider):
    """Google Gemini provider.

    Wraps the synchronous ``google.generativeai`` SDK; blocking calls are
    offloaded with asyncio.to_thread. Tool definitions arrive in Claude's
    schema and are converted to Gemini function declarations.
    """

    def __init__(self):
        # Accept either env var name; GOOGLE_API_KEY takes precedence.
        self.api_key = os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
        self.default_model = "gemini-2.0-flash-exp"

    def is_available(self) -> bool:
        """Return True when the SDK is importable and an API key is set."""
        try:
            import google.generativeai as genai  # availability probe only
            return bool(self.api_key)
        except ImportError:
            return False

    def _convert_tools_to_gemini_format(self, claude_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Convert Claude tool format to Gemini function declarations.

        Only ``properties`` and ``required`` are carried over from the
        Claude ``input_schema``; other JSON-schema keywords are dropped.
        """
        gemini_tools = []
        for tool in claude_tools:
            # Convert from Claude's format to Gemini's format
            function_declaration = {
                "name": tool.get("name"),
                "description": tool.get("description", ""),
                "parameters": {
                    "type": "object",
                    "properties": {},
                    "required": []
                }
            }
            # Convert input schema
            if "input_schema" in tool:
                schema = tool["input_schema"]
                if "properties" in schema:
                    function_declaration["parameters"]["properties"] = schema["properties"]
                if "required" in schema:
                    function_declaration["parameters"]["required"] = schema["required"]
            gemini_tools.append(function_declaration)
        return gemini_tools

    async def chat(
        self,
        message: str,
        system_prompt: str,
        history: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        temperature: float = 0.7,
        mcp_handler=None,
        mcp_context=None
    ) -> Dict[str, Any]:
        """Chat with Gemini.

        Runs a bounded (5-iteration) function-calling loop: each requested
        function is executed via the MCP handler and its result is sent back
        as the next message. Tools are only attached to the model when an
        mcp_handler is supplied, so function calls should not occur without
        one.
        """
        if not self.is_available():
            return {
                "success": False,
                "response": "Gemini not available. Install google-generativeai SDK or set GOOGLE_API_KEY/GEMINI_API_KEY.",
                "model": "error",
                "tools_used": [],
                "stop_reason": "error"
            }
        import google.generativeai as genai
        genai.configure(api_key=self.api_key)
        # Build chat history: keep the last 6 user/assistant turns, mapping
        # "assistant" onto Gemini's "model" role.
        gemini_history = []
        for msg in history[-6:]:
            role = msg.get("role", "user")
            content = msg.get("content", "")
            if content and role in ["user", "assistant"]:
                gemini_history.append({
                    "role": "user" if role == "user" else "model",
                    "parts": [content]
                })
        # Create model with tools if available
        model_kwargs = {
            "model_name": self.default_model,
            "generation_config": {
                "temperature": temperature,
                "max_output_tokens": 4096,
            },
            "system_instruction": system_prompt
        }
        # Convert and add tools only when we can actually execute them.
        if tools and mcp_handler:
            gemini_tools = self._convert_tools_to_gemini_format(tools)
            model_kwargs["tools"] = gemini_tools
        model = genai.GenerativeModel(**model_kwargs)
        # Start chat
        chat = model.start_chat(history=gemini_history)
        # Send message with tool execution loop
        tools_used = []
        max_iterations = 5
        iteration = 0
        current_message = message
        while iteration < max_iterations:
            iteration += 1
            response = await asyncio.to_thread(chat.send_message, current_message)
            # Check for function calls in the first candidate's parts.
            if response.candidates and response.candidates[0].content.parts:
                has_function_call = False
                for part in response.candidates[0].content.parts:
                    if hasattr(part, 'function_call') and part.function_call:
                        has_function_call = True
                        func_call = part.function_call
                        tool_name = func_call.name
                        tool_args = dict(func_call.args)
                        # Execute tool
                        result = await mcp_handler.execute_tool(
                            tool_name=tool_name,
                            arguments=tool_args,
                            context=mcp_context
                        )
                        tools_used.append({
                            "tool": tool_name,
                            "success": result.success,
                            "duration_ms": result.duration_ms
                        })
                        # Format result for Gemini
                        function_response = {
                            "name": tool_name,
                            "response": result.result if result.success else {"error": result.error}
                        }
                        # Send function response back on the next loop pass.
                        current_message = genai.protos.Content(
                            parts=[genai.protos.Part(
                                function_response=genai.protos.FunctionResponse(
                                    name=tool_name,
                                    response=function_response
                                )
                            )]
                        )
                        # Only the first function call per response is handled.
                        break
                # If no function call, we're done
                if not has_function_call:
                    break
            else:
                break
        # Extract final response text from all text parts.
        response_text = ""
        if response.candidates and response.candidates[0].content.parts:
            for part in response.candidates[0].content.parts:
                if hasattr(part, 'text'):
                    response_text += part.text
        return {
            "success": True,
            "response": response_text,
            "model": self.default_model,
            "tools_used": tools_used,
            "stop_reason": "stop" if response.candidates else "error"
        }
# Factory function
def get_ai_provider(model_name: str) -> AIProvider:
    """Return the provider matching *model_name*.

    "gemini"/"google" (case-insensitive, surrounding whitespace ignored)
    select Gemini; any other value falls back to Claude, preserving the
    original default behaviour.
    """
    # .strip() added so values like "Gemini " from config files still match.
    if model_name.strip().lower() in ("gemini", "google"):
        return GeminiProvider()
    return ClaudeProvider()

View File

@@ -0,0 +1,68 @@
import os
import logging
from logging.handlers import RotatingFileHandler
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import List, Any, Optional
# --- Configuration ---
# Use project-local logs directory to avoid permission issues
# NOTE(review): despite its name, _current_file holds the *directory* of this
# file (os.path.dirname of the absolute path); _project_root is two levels up.
_current_file = os.path.dirname(os.path.abspath(__file__))
_project_root = os.path.dirname(os.path.dirname(_current_file))
LOG_DIR = os.path.join(_project_root, ".dss", "logs", "browser-logs")
LOG_FILE = os.path.join(LOG_DIR, "browser.log")
# Ensure log directory exists
os.makedirs(LOG_DIR, exist_ok=True)
# --- Logging Setup ---
# We use a specific logger for browser logs to separate them from app logs
browser_logger = logging.getLogger("browser_logger")
# NOTE(review): level is INFO, so entries forwarded at DEBUG level are
# silently dropped - confirm whether browser debug logs should persist.
browser_logger.setLevel(logging.INFO)
# Rotating file handler: 10MB max size, keep last 5 backups
handler = RotatingFileHandler(LOG_FILE, maxBytes=10*1024*1024, backupCount=5)
formatter = logging.Formatter(
    '%(asctime)s [%(levelname)s] [BROWSER] %(message)s'
)
handler.setFormatter(formatter)
browser_logger.addHandler(handler)
# --- API Router ---
router = APIRouter()
class LogEntry(BaseModel):
    """A single browser console entry forwarded by the client."""
    level: str  # browser console level: "error", "warn", "debug"; anything else -> info
    timestamp: str  # client-side timestamp string, embedded verbatim in the log line
    message: str  # formatted console message
    data: Optional[List[Any]] = None  # extra console arguments; currently not written to the log


class LogBatch(BaseModel):
    """Envelope for a batch of browser log entries."""
    logs: List[LogEntry]
@router.post("/api/logs/browser")
async def receive_browser_logs(batch: LogBatch):
    """
    Receives a batch of logs from the browser and writes them to the log file.
    """
    try:
        # Dispatch table mapping browser console levels onto the matching
        # python logging methods; any unrecognized level falls back to info.
        emitters = {
            'error': browser_logger.error,
            'warn': browser_logger.warning,
            'debug': browser_logger.debug,
        }
        for entry in batch.logs:
            emit = emitters.get(entry.level.lower(), browser_logger.info)
            emit(f"[{entry.timestamp}] {entry.message}")
        return {"status": "ok", "count": len(batch.logs)}
    except Exception as e:
        # Fallback to standard logger if something breaks deeply
        logging.error(f"Failed to process browser logs: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal processing error")

53
tools/api/config.py Normal file
View File

@@ -0,0 +1,53 @@
"""
DSS Configuration Management
Public configuration values are safe to expose to the client via /api/config.
Private configuration values (secrets, API keys) must NEVER be exposed.
Configuration follows 12-Factor App methodology:
- Load from environment variables first
- Fallback to sensible defaults for local development
"""
import os
# ========== PUBLIC CONFIGURATION ==========
# These values are safe to expose to the client browser
DSS_HOST = os.environ.get("DSS_HOST", "localhost")
"""
The DSS host/domain where the application is running.
Used by clients to access Storybook and other external services.
Examples: "localhost", "dss.example.com", "dss.overbits.luz.uy"
"""
# NOTE(review): DSS_PORT stays a string (env values are strings) while
# STORYBOOK_PORT below is an int - confirm clients handle both types.
DSS_PORT = os.environ.get("DSS_PORT", "3456")
"""The port DSS API is running on (for API calls from client)."""
STORYBOOK_PORT = 6006
"""Storybook runs on standard port 6006 (derived from DSS_HOST in frontend)."""
# ========== PRIVATE CONFIGURATION ==========
# These values must NEVER be exposed to the client
FIGMA_API_KEY = os.environ.get("FIGMA_API_KEY")
"""Figma API key - kept server-side, never exposed to client."""
DATABASE_URL = os.environ.get("DATABASE_URL", "sqlite:///.dss/design_system.db")
"""Database connection string."""
DEBUG = os.environ.get("DEBUG", "false").lower() == "true"
"""Enable debug mode."""
def get_public_config():
    """
    Build the client-safe configuration payload.

    This is the ONLY function that exposes config to the /api/config
    endpoint; never add secrets (FIGMA_API_KEY, DATABASE_URL, ...) here.
    """
    return dict(
        dssHost=DSS_HOST,
        dssPort=DSS_PORT,
        storybookPort=STORYBOOK_PORT,
    )

View File

@@ -0,0 +1,653 @@
"""
Design System Registry - Knowledge base of popular design systems.
This module provides:
- Built-in knowledge of 20+ popular design systems
- Fuzzy matching for user queries
- npm package information
- Alternative ingestion methods (Figma, CSS, docs)
"""
from dataclasses import dataclass, field
from typing import List, Optional, Dict, Any
from enum import Enum
import re
class IngestionMethod(Enum):
    """Supported strategies for pulling design tokens into DSS."""
    NPM_PACKAGE = "npm_package"
    TAILWIND_CONFIG = "tailwind_config"
    CSS_VARIABLES = "css_variables"
    FIGMA = "figma"
    JSON_TOKENS = "json_tokens"
    SCSS_VARIABLES = "scss_variables"
    STYLE_DICTIONARY = "style_dictionary"


@dataclass
class DesignSystemInfo:
    """Metadata describing one known design system in the registry."""
    id: str
    name: str
    description: str
    aliases: List[str] = field(default_factory=list)
    npm_packages: List[str] = field(default_factory=list)
    primary_ingestion: IngestionMethod = IngestionMethod.NPM_PACKAGE
    figma_community_url: Optional[str] = None
    docs_url: Optional[str] = None
    github_url: Optional[str] = None
    token_paths: List[str] = field(default_factory=list)  # Paths within npm package to tokens
    css_cdn_url: Optional[str] = None
    category: str = "component-library"
    framework: Optional[str] = None  # react, vue, angular, html, etc.

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for API responses; the ingestion enum is flattened to its value."""
        return dict(
            id=self.id,
            name=self.name,
            description=self.description,
            aliases=self.aliases,
            npm_packages=self.npm_packages,
            primary_ingestion=self.primary_ingestion.value,
            figma_community_url=self.figma_community_url,
            docs_url=self.docs_url,
            github_url=self.github_url,
            token_paths=self.token_paths,
            css_cdn_url=self.css_cdn_url,
            category=self.category,
            framework=self.framework,
        )
# Built-in registry of popular design systems, keyed by DesignSystemInfo.id
DESIGN_SYSTEMS: Dict[str, DesignSystemInfo] = {}


def register_system(system: DesignSystemInfo) -> None:
    """Register a design system in the registry.

    A later registration with the same id silently overwrites the earlier one.
    """
    DESIGN_SYSTEMS[system.id] = system
# =============================================================================
# Popular Design Systems Registry
# =============================================================================
# Pure data: one register_system(...) call per known system. See
# DesignSystemInfo for what each field means.

# HeroUI (NextUI successor)
register_system(DesignSystemInfo(
    id="heroui",
    name="HeroUI",
    description="Beautiful, fast and modern React UI library (formerly NextUI)",
    aliases=["hero-ui", "hero ui", "nextui", "next-ui", "next ui"],
    npm_packages=["@heroui/react", "@heroui/theme"],
    primary_ingestion=IngestionMethod.TAILWIND_CONFIG,
    figma_community_url="https://www.figma.com/community/file/1267584376522720519",
    docs_url="https://www.heroui.com/docs",
    github_url="https://github.com/heroui-inc/heroui",
    token_paths=["@heroui/theme/dist/colors.js", "@heroui/theme/dist/default-layout.js"],
    category="component-library",
    framework="react",
))

# Shadcn/ui
register_system(DesignSystemInfo(
    id="shadcn",
    name="shadcn/ui",
    description="Beautifully designed components built with Radix UI and Tailwind CSS",
    aliases=["shadcn", "shadcn-ui", "shadcnui", "shad", "shad-cn"],
    npm_packages=["shadcn-ui"],  # CLI tool, components are copy-pasted
    primary_ingestion=IngestionMethod.CSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/1203061493325953101",
    docs_url="https://ui.shadcn.com/docs",
    github_url="https://github.com/shadcn-ui/ui",
    token_paths=[],  # Tokens are in CSS variables
    css_cdn_url="https://ui.shadcn.com/registry/styles/default/index.json",
    category="component-library",
    framework="react",
))

# Material UI (MUI)
register_system(DesignSystemInfo(
    id="mui",
    name="Material UI",
    description="Google's Material Design implemented for React",
    aliases=["material-ui", "material ui", "materialui", "mui", "@mui"],
    npm_packages=["@mui/material", "@mui/system", "@emotion/react", "@emotion/styled"],
    primary_ingestion=IngestionMethod.NPM_PACKAGE,
    figma_community_url="https://www.figma.com/community/file/912837788133317724",
    docs_url="https://mui.com/material-ui/getting-started/",
    github_url="https://github.com/mui/material-ui",
    token_paths=["@mui/material/styles"],
    category="component-library",
    framework="react",
))

# Chakra UI
register_system(DesignSystemInfo(
    id="chakra",
    name="Chakra UI",
    description="Simple, modular and accessible component library for React",
    aliases=["chakra-ui", "chakra ui", "chakraui"],
    npm_packages=["@chakra-ui/react", "@chakra-ui/theme"],
    primary_ingestion=IngestionMethod.NPM_PACKAGE,
    figma_community_url="https://www.figma.com/community/file/971408767069651759",
    docs_url="https://chakra-ui.com/docs/getting-started",
    github_url="https://github.com/chakra-ui/chakra-ui",
    token_paths=["@chakra-ui/theme/dist/foundations"],
    category="component-library",
    framework="react",
))

# Ant Design
register_system(DesignSystemInfo(
    id="antd",
    name="Ant Design",
    description="Enterprise-class UI design language and React components",
    aliases=["ant-design", "ant design", "antdesign", "antd"],
    npm_packages=["antd", "@ant-design/icons"],
    primary_ingestion=IngestionMethod.NPM_PACKAGE,
    figma_community_url="https://www.figma.com/community/file/831698976089873405",
    docs_url="https://ant.design/docs/react/introduce",
    github_url="https://github.com/ant-design/ant-design",
    token_paths=["antd/dist/antd.variable.css"],
    css_cdn_url="https://unpkg.com/antd/dist/antd.variable.css",
    category="component-library",
    framework="react",
))

# Tailwind CSS
register_system(DesignSystemInfo(
    id="tailwind",
    name="Tailwind CSS",
    description="Utility-first CSS framework for rapid UI development",
    aliases=["tailwindcss", "tailwind css", "tw"],
    npm_packages=["tailwindcss"],
    primary_ingestion=IngestionMethod.TAILWIND_CONFIG,
    figma_community_url="https://www.figma.com/community/file/768809027799962739",
    docs_url="https://tailwindcss.com/docs",
    github_url="https://github.com/tailwindlabs/tailwindcss",
    token_paths=["tailwindcss/defaultTheme"],
    category="css-framework",
    framework="html",
))

# Bootstrap
register_system(DesignSystemInfo(
    id="bootstrap",
    name="Bootstrap",
    description="Popular HTML, CSS, and JS library for responsive design",
    aliases=["bootstrap5", "bootstrap 5", "bs", "bs5", "twbs"],
    npm_packages=["bootstrap"],
    primary_ingestion=IngestionMethod.SCSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/876022745968684318",
    docs_url="https://getbootstrap.com/docs/5.3/getting-started/introduction/",
    github_url="https://github.com/twbs/bootstrap",
    token_paths=["bootstrap/scss/_variables.scss"],
    css_cdn_url="https://cdn.jsdelivr.net/npm/bootstrap@5.3.2/dist/css/bootstrap.min.css",
    category="css-framework",
    framework="html",
))

# Radix UI
register_system(DesignSystemInfo(
    id="radix",
    name="Radix UI",
    description="Unstyled, accessible components for building design systems",
    aliases=["radix-ui", "radix ui", "radixui", "@radix-ui"],
    npm_packages=["@radix-ui/themes", "@radix-ui/colors"],
    primary_ingestion=IngestionMethod.CSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/1295954844895805217",
    docs_url="https://www.radix-ui.com/themes/docs/overview/getting-started",
    github_url="https://github.com/radix-ui/themes",
    token_paths=["@radix-ui/colors"],
    category="component-library",
    framework="react",
))

# Mantine
register_system(DesignSystemInfo(
    id="mantine",
    name="Mantine",
    description="React components library with native dark theme support",
    aliases=["mantine-ui", "mantineui"],
    npm_packages=["@mantine/core", "@mantine/hooks"],
    primary_ingestion=IngestionMethod.NPM_PACKAGE,
    figma_community_url="https://www.figma.com/community/file/1293978471602433537",
    docs_url="https://mantine.dev/getting-started/",
    github_url="https://github.com/mantinedev/mantine",
    token_paths=["@mantine/core/styles.css"],
    category="component-library",
    framework="react",
))

# Fluent UI (Microsoft)
register_system(DesignSystemInfo(
    id="fluent",
    name="Fluent UI",
    description="Microsoft's design system for building web experiences",
    aliases=["fluent-ui", "fluentui", "fluent ui", "@fluentui", "fabric"],
    npm_packages=["@fluentui/react-components", "@fluentui/tokens"],
    primary_ingestion=IngestionMethod.NPM_PACKAGE,
    figma_community_url="https://www.figma.com/community/file/836828295772957889",
    docs_url="https://react.fluentui.dev/",
    github_url="https://github.com/microsoft/fluentui",
    token_paths=["@fluentui/tokens"],
    category="component-library",
    framework="react",
))

# IBM Carbon
register_system(DesignSystemInfo(
    id="carbon",
    name="Carbon Design System",
    description="IBM's open source design system for products and experiences",
    aliases=["carbon-design", "ibm-carbon", "ibm carbon", "@carbon"],
    npm_packages=["@carbon/react", "@carbon/styles", "@carbon/colors"],
    primary_ingestion=IngestionMethod.SCSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/1157761560874207208",
    docs_url="https://carbondesignsystem.com/",
    github_url="https://github.com/carbon-design-system/carbon",
    token_paths=["@carbon/colors", "@carbon/type", "@carbon/layout"],
    category="design-system",
    framework="react",
))

# Primer (GitHub)
register_system(DesignSystemInfo(
    id="primer",
    name="Primer",
    description="GitHub's design system with CSS and React components",
    aliases=["primer-css", "github-primer", "github primer", "@primer"],
    npm_packages=["@primer/react", "@primer/css", "@primer/primitives"],
    primary_ingestion=IngestionMethod.JSON_TOKENS,
    figma_community_url="https://www.figma.com/community/file/854767373644076713",
    docs_url="https://primer.style/",
    github_url="https://github.com/primer/primitives",
    token_paths=["@primer/primitives/dist/json"],
    category="design-system",
    framework="react",
))

# Spectrum (Adobe)
register_system(DesignSystemInfo(
    id="spectrum",
    name="Adobe Spectrum",
    description="Adobe's design system for creating seamless experiences",
    aliases=["adobe-spectrum", "adobe spectrum", "@spectrum", "@adobe/spectrum"],
    npm_packages=["@adobe/react-spectrum", "@spectrum-css/tokens"],
    primary_ingestion=IngestionMethod.JSON_TOKENS,
    figma_community_url="https://www.figma.com/community/file/1196015001498069893",
    docs_url="https://spectrum.adobe.com/",
    github_url="https://github.com/adobe/react-spectrum",
    token_paths=["@spectrum-css/tokens"],
    category="design-system",
    framework="react",
))

# Salesforce Lightning
register_system(DesignSystemInfo(
    id="lightning",
    name="Salesforce Lightning",
    description="Salesforce Lightning Design System for enterprise apps",
    aliases=["slds", "lightning-design", "salesforce-lightning", "salesforce lightning"],
    npm_packages=["@salesforce-ux/design-system"],
    primary_ingestion=IngestionMethod.SCSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/877593312714992614",
    docs_url="https://www.lightningdesignsystem.com/",
    github_url="https://github.com/salesforce-ux/design-system",
    token_paths=["@salesforce-ux/design-system/design-tokens"],
    category="design-system",
    framework="html",
))

# Atlassian Design System
register_system(DesignSystemInfo(
    id="atlassian",
    name="Atlassian Design System",
    description="Atlassian's end-to-end design language for products",
    aliases=["atlaskit", "atlas-kit", "atlassian-design", "@atlaskit"],
    npm_packages=["@atlaskit/tokens", "@atlaskit/theme"],
    primary_ingestion=IngestionMethod.JSON_TOKENS,
    figma_community_url="https://www.figma.com/community/file/1189965498990866853",
    docs_url="https://atlassian.design/",
    github_url="https://bitbucket.org/atlassian/atlassian-frontend-mirror/src/master/",
    token_paths=["@atlaskit/tokens/dist/esm/artifacts/tokens-raw"],
    category="design-system",
    framework="react",
))

# Shopify Polaris
register_system(DesignSystemInfo(
    id="polaris",
    name="Shopify Polaris",
    description="Shopify's design system for building admin experiences",
    aliases=["shopify-polaris", "shopify polaris", "@shopify/polaris"],
    npm_packages=["@shopify/polaris", "@shopify/polaris-tokens"],
    primary_ingestion=IngestionMethod.JSON_TOKENS,
    figma_community_url="https://www.figma.com/community/file/1293611962331823010",
    docs_url="https://polaris.shopify.com/",
    github_url="https://github.com/Shopify/polaris",
    token_paths=["@shopify/polaris-tokens/dist/json"],
    category="design-system",
    framework="react",
))

# Vuetify
register_system(DesignSystemInfo(
    id="vuetify",
    name="Vuetify",
    description="Material Design component framework for Vue.js",
    aliases=["vuetify3", "vuetify 3"],
    npm_packages=["vuetify"],
    primary_ingestion=IngestionMethod.SCSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/1181257898482695401",
    docs_url="https://vuetifyjs.com/en/getting-started/installation/",
    github_url="https://github.com/vuetifyjs/vuetify",
    token_paths=["vuetify/lib/styles/settings/_variables.scss"],
    category="component-library",
    framework="vue",
))

# PrimeVue / PrimeReact
register_system(DesignSystemInfo(
    id="primevue",
    name="PrimeVue",
    description="Rich set of open source UI components for Vue",
    aliases=["prime-vue", "prime vue", "primereact", "prime-react", "primefaces"],
    npm_packages=["primevue", "primeicons"],
    primary_ingestion=IngestionMethod.CSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/890589747170608208",
    docs_url="https://primevue.org/",
    github_url="https://github.com/primefaces/primevue",
    token_paths=["primevue/resources/themes"],
    category="component-library",
    framework="vue",
))

# DaisyUI
register_system(DesignSystemInfo(
    id="daisyui",
    name="daisyUI",
    description="Tailwind CSS component library with semantic class names",
    aliases=["daisy-ui", "daisy ui", "daisy"],
    npm_packages=["daisyui"],
    primary_ingestion=IngestionMethod.TAILWIND_CONFIG,
    figma_community_url="https://www.figma.com/community/file/1098092815609260082",
    docs_url="https://daisyui.com/docs/install/",
    github_url="https://github.com/saadeghi/daisyui",
    token_paths=["daisyui/src/theming/themes.js"],
    category="component-library",
    framework="html",
))

# Headless UI
register_system(DesignSystemInfo(
    id="headlessui",
    name="Headless UI",
    description="Unstyled, accessible UI components for React and Vue",
    aliases=["headless-ui", "headless ui", "@headlessui"],
    npm_packages=["@headlessui/react", "@headlessui/vue"],
    primary_ingestion=IngestionMethod.CSS_VARIABLES,
    docs_url="https://headlessui.com/",
    github_url="https://github.com/tailwindlabs/headlessui",
    token_paths=[],  # Unstyled, no tokens
    category="component-library",
    framework="react",
))

# Open Props
register_system(DesignSystemInfo(
    id="openprops",
    name="Open Props",
    description="Supercharged CSS variables for design systems",
    aliases=["open-props", "open props"],
    npm_packages=["open-props"],
    primary_ingestion=IngestionMethod.CSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/1144820109792924498",
    docs_url="https://open-props.style/",
    github_url="https://github.com/argyleink/open-props",
    token_paths=["open-props/open-props.min.css"],
    css_cdn_url="https://unpkg.com/open-props",
    category="css-tokens",
    framework="html",
))

# Pico CSS
register_system(DesignSystemInfo(
    id="picocss",
    name="Pico CSS",
    description="Minimal CSS framework for semantic HTML",
    aliases=["pico-css", "pico css", "pico"],
    npm_packages=["@picocss/pico"],
    primary_ingestion=IngestionMethod.CSS_VARIABLES,
    docs_url="https://picocss.com/docs/",
    github_url="https://github.com/picocss/pico",
    token_paths=["@picocss/pico/css/pico.css"],
    css_cdn_url="https://cdn.jsdelivr.net/npm/@picocss/pico@2/css/pico.min.css",
    category="css-framework",
    framework="html",
))

# Bulma
register_system(DesignSystemInfo(
    id="bulma",
    name="Bulma",
    description="Modern CSS framework based on Flexbox",
    aliases=["bulma-css", "bulma css"],
    npm_packages=["bulma"],
    primary_ingestion=IngestionMethod.SCSS_VARIABLES,
    figma_community_url="https://www.figma.com/community/file/1145794431179045801",
    docs_url="https://bulma.io/documentation/",
    github_url="https://github.com/jgthms/bulma",
    token_paths=["bulma/sass/utilities/_variables.scss"],
    css_cdn_url="https://cdn.jsdelivr.net/npm/bulma@1.0.0/css/bulma.min.css",
    category="css-framework",
    framework="html",
))
# =============================================================================
# Registry Search and Matching Functions
# =============================================================================
def normalize_query(query: str) -> str:
    """Normalize a search query for matching.

    Lowercases the text, strips leading verbs/"@", drops generic trailing
    words (" ui", " css", ...), removes punctuation except hyphens, and
    collapses runs of whitespace.
    """
    # Ordered cleanup passes; order matters (prefix/suffix removal must
    # happen before punctuation stripping and whitespace collapsing).
    passes = (
        (r'^(@|use |add |import |ingest |install )', ''),
        (r'( design system| ui| css| react| vue)$', ''),
        (r'[^\w\s-]', ''),
        (r'\s+', ' '),
    )
    cleaned = query.lower().strip()
    for pattern, replacement in passes:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.strip()
def find_design_system(query: str) -> Optional[DesignSystemInfo]:
    """
    Find a design system by name or alias.
    Returns the best match or None if not found.

    Matching tiers, in order: exact id -> exact alias/name -> substring of
    name/alias -> word-overlap fuzzy match. Within a tier, the first system
    in registry (insertion) order wins; for the fuzzy tier, the first system
    to reach the highest word-overlap count wins.
    """
    normalized = normalize_query(query)
    # Exact match on ID
    if normalized in DESIGN_SYSTEMS:
        return DESIGN_SYSTEMS[normalized]
    # Exact match on aliases
    for system in DESIGN_SYSTEMS.values():
        if normalized in [a.lower() for a in system.aliases]:
            return system
        if normalized == system.name.lower():
            return system
    # Partial match on name or aliases
    for system in DESIGN_SYSTEMS.values():
        # Check if query is contained in system name
        if normalized in system.name.lower():
            return system
        # Check if query is contained in any alias
        for alias in system.aliases:
            if normalized in alias.lower():
                return system
    # Fuzzy match - check if any word matches
    query_words = set(normalized.split())
    best_match = None
    best_score = 0
    for system in DESIGN_SYSTEMS.values():
        # Create set of all searchable terms (id, name, aliases)
        terms = {system.id.lower(), system.name.lower()}
        terms.update(a.lower() for a in system.aliases)
        # Count matching words; strictly-greater keeps the earliest winner
        for term in terms:
            term_words = set(term.split())
            matches = len(query_words & term_words)
            if matches > best_score:
                best_score = matches
                best_match = system
    if best_score > 0:
        return best_match
    return None
def search_design_systems(query: str, limit: int = 5) -> List[DesignSystemInfo]:
    """
    Search for design systems matching a query.
    Returns a list of matches sorted by relevance.
    """
    normalized = normalize_query(query)

    def relevance(system: DesignSystemInfo) -> int:
        # Tiered scoring: exact id/name (100) > exact alias (90) >
        # substring in name (70) > substring in alias (60) >
        # 20 points per overlapping word.
        lowered_aliases = [a.lower() for a in system.aliases]
        if normalized in (system.id.lower(), system.name.lower()):
            return 100
        if normalized in lowered_aliases:
            return 90
        if normalized in system.name.lower():
            return 70
        if any(normalized in alias for alias in lowered_aliases):
            return 60
        query_words = set(normalized.split())
        vocabulary = set()
        for term in {system.id, system.name} | set(system.aliases):
            vocabulary.update(term.lower().split())
        return len(query_words & vocabulary) * 20

    scored = [(relevance(s), s) for s in DESIGN_SYSTEMS.values()]
    # Stable sort by score descending preserves registry order among ties.
    ranked = sorted((pair for pair in scored if pair[0] > 0),
                    key=lambda pair: pair[0], reverse=True)
    return [system for _, system in ranked[:limit]]
def get_all_systems() -> List[DesignSystemInfo]:
    """Get all registered design systems."""
    return [system for system in DESIGN_SYSTEMS.values()]
def get_systems_by_category(category: str) -> List[DesignSystemInfo]:
    """Get design systems filtered by category."""
    return list(filter(lambda system: system.category == category, DESIGN_SYSTEMS.values()))
def get_systems_by_framework(framework: str) -> List[DesignSystemInfo]:
    """Get design systems filtered by framework."""
    return list(filter(lambda system: system.framework == framework, DESIGN_SYSTEMS.values()))
# =============================================================================
# Alternative Ingestion Suggestions
# =============================================================================
def get_alternative_ingestion_options(system: Optional[DesignSystemInfo] = None) -> Dict[str, Any]:
    """
    Get alternative ingestion options when primary method is unavailable.

    When ``system`` is a known registry entry, suggestions are tailored
    with its published URLs; otherwise every ingestion route is offered.
    """
    alternatives = {
        "figma": {
            "name": "Figma Import",
            "description": "Provide a Figma file URL to extract design tokens and components",
            "prompt": "Please provide the Figma file URL (e.g., https://www.figma.com/file/...)",
            "requires": "figma_url"
        },
        "css_url": {
            "name": "CSS/SCSS URL",
            "description": "Provide a URL to a CSS or SCSS file containing design tokens",
            "prompt": "Please provide the CSS/SCSS file URL",
            "requires": "css_url"
        },
        "image": {
            "name": "Image Analysis",
            "description": "Upload an image or screenshot of the design system for AI analysis",
            "prompt": "Please provide an image URL or upload a screenshot of your design system",
            "requires": "image_url"
        },
        "manual": {
            "name": "Manual Entry",
            "description": "Manually enter design tokens (colors, typography, spacing)",
            "prompt": "Describe your design tokens (e.g., 'primary color: #3b82f6, font: Inter')",
            "requires": "text_description"
        },
        "github": {
            "name": "GitHub Repository",
            "description": "Provide a GitHub repository URL containing design tokens",
            "prompt": "Please provide the GitHub repository URL",
            "requires": "github_url"
        }
    }

    if system is None:
        # Unknown system - offer every route in catalog order.
        return {
            "known_system": None,
            "alternatives": list(alternatives.values())
        }

    # Known system: pair each published URL with its ingestion method.
    tailored = []
    for key, suggested_url in (
        ("figma", system.figma_community_url),
        ("css_url", system.css_cdn_url),
        ("github", system.github_url),
    ):
        if suggested_url:
            tailored.append({**alternatives[key], "suggested_url": suggested_url})
    # Image analysis and manual entry are always available fallbacks.
    tailored.append(alternatives["image"])
    tailored.append(alternatives["manual"])
    return {"known_system": system.to_dict(), "alternatives": tailored}

24
tools/api/dss-api.service Normal file
View File

@@ -0,0 +1,24 @@
[Unit]
Description=Design System Server (DSS) - Portable Server
Documentation=https://github.com/overbits/design-system-swarm
After=network.target
[Service]
Type=simple
User=overbits
Group=overbits
WorkingDirectory=/home/overbits/apps/design-system-swarm/tools/api
Environment=PATH=/home/overbits/.local/bin:/usr/local/bin:/usr/bin:/bin
Environment=PYTHONPATH=/home/overbits/apps/design-system-swarm/tools
Environment=PORT=3456
Environment=HOST=127.0.0.1
Environment=NODE_ENV=production
ExecStart=/usr/bin/python3 -m uvicorn server:app --host 127.0.0.1 --port 3456
Restart=on-failure
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=dss
[Install]
WantedBy=multi-user.target

17
tools/api/dss-mcp.service Normal file
View File

@@ -0,0 +1,17 @@
[Unit]
Description=DSS MCP Server - Design System Server for AI Agents
After=network.target
[Service]
Type=simple
User=overbits
Group=overbits
WorkingDirectory=/home/overbits/apps/design-system-swarm/tools/api
Environment="PATH=/home/overbits/apps/design-system-swarm/.venv/bin:/usr/bin"
Environment="PYTHONPATH=/home/overbits/apps/design-system-swarm/tools"
ExecStart=/home/overbits/apps/design-system-swarm/.venv/bin/python mcp_server.py sse
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,417 @@
"""
Natural Language Parser for Design System Ingestion.
This module parses natural language prompts to understand:
- Intent (ingest, search, compare, etc.)
- Design system names
- Alternative sources (Figma URLs, images, etc.)
- Configuration options
"""
import re
from dataclasses import dataclass, field
from typing import List, Optional, Dict, Any, Tuple
from enum import Enum
from design_system_registry import (
find_design_system,
search_design_systems,
get_alternative_ingestion_options,
DesignSystemInfo,
)
class IngestionIntent(Enum):
    """Types of user intents for design system operations."""

    INGEST = "ingest"        # Add/import a design system
    SEARCH = "search"        # Search for design systems
    LIST = "list"            # List available/known systems
    INFO = "info"            # Get info about a specific system
    COMPARE = "compare"      # Compare design systems
    CONFIGURE = "configure"  # Configure ingestion settings
    HELP = "help"            # Help with ingestion
    UNKNOWN = "unknown"      # Prompt could not be classified
class SourceType(Enum):
    """Types of sources detected in prompts."""

    DESIGN_SYSTEM_NAME = "design_system_name"  # Matched a registry entry by name
    NPM_PACKAGE = "npm_package"                # e.g. "@chakra-ui/react"
    FIGMA_URL = "figma_url"
    GITHUB_URL = "github_url"
    CSS_URL = "css_url"
    IMAGE_URL = "image_url"
    TEXT_DESCRIPTION = "text_description"      # Free-form token description
@dataclass
class ParsedSource:
    """A detected source from the prompt (one URL, package, or system name)."""
    source_type: SourceType
    value: str  # The raw matched text (URL, package name, or phrase)
    confidence: float = 1.0  # 0.0 to 1.0
    # Registry entry this source resolved to, when the value named a known system.
    matched_system: Optional[DesignSystemInfo] = None
@dataclass
class ParsedIngestionPrompt:
    """Result of parsing an ingestion prompt."""
    original_prompt: str
    intent: IngestionIntent
    confidence: float = 1.0  # Combined intent + best-source confidence, 0.0-1.0
    sources: List[ParsedSource] = field(default_factory=list)  # Deduplicated detections
    options: Dict[str, Any] = field(default_factory=dict)      # Parsed configuration flags
    suggestions: List[str] = field(default_factory=list)       # Human-readable next-step hints

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for API responses."""
        return {
            "original_prompt": self.original_prompt,
            "intent": self.intent.value,
            "confidence": self.confidence,
            "sources": [
                {
                    "type": s.source_type.value,
                    "value": s.value,
                    "confidence": s.confidence,
                    # Nested system dict only when the source matched the registry.
                    "matched_system": s.matched_system.to_dict() if s.matched_system else None
                }
                for s in self.sources
            ],
            "options": self.options,
            "suggestions": self.suggestions,
        }
# Intent detection patterns.
# Each intent maps to regexes matched (case-insensitively, on a lowered
# prompt) by detect_intent(); every regex hit adds 1 to that intent's score.
INTENT_PATTERNS = {
    IngestionIntent.INGEST: [
        r'\b(ingest|import|add|use|install|load|get|fetch|download|setup|init|initialize)\b',
        r'\b(i want|i need|give me|let\'s use|can you add|please add)\b',
        r'\b(integrate|incorporate|bring in|pull in)\b',
    ],
    IngestionIntent.SEARCH: [
        r'\b(search|find|look for|looking for|discover|explore)\b',
        r'\b(what.*available|show me.*options|any.*like)\b',
    ],
    IngestionIntent.LIST: [
        r'\b(list|show|display|what|which)\b.*(design systems?|available|supported|known)\b',
        r'\b(what do you (know|have|support))\b',
    ],
    IngestionIntent.INFO: [
        r'\b(info|information|details|about|tell me about|what is)\b',
        r'\b(how does|what\'s|describe)\b',
    ],
    IngestionIntent.COMPARE: [
        # Requires two comparison cues (e.g. "X vs Y", "between A and B").
        r'\b(compare|versus|vs|difference|between|or)\b.*\b(and|vs|versus|or)\b',
    ],
    IngestionIntent.CONFIGURE: [
        r'\b(configure|config|settings?|options?|customize)\b',
    ],
    IngestionIntent.HELP: [
        r'\b(help|how to|how do i|what can|guide|tutorial)\b',
    ],
}
# URL patterns, keyed by the SourceType they produce.
# extract_urls() applies all of these EXCEPT NPM_PACKAGE, which is not a URL
# and is handled separately by extract_npm_packages().
URL_PATTERNS = {
    SourceType.FIGMA_URL: r'(https?://(?:www\.)?figma\.com/(?:file|design|community/file)/[^\s]+)',
    SourceType.GITHUB_URL: r'(https?://(?:www\.)?github\.com/[^\s]+)',
    SourceType.NPM_PACKAGE: r'(?:npm:)?(@?[a-z0-9][\w\-\.]*(?:/[a-z0-9][\w\-\.]*)?)',
    SourceType.CSS_URL: r'(https?://[^\s]+\.(?:css|scss|sass)(?:\?[^\s]*)?)',
    SourceType.IMAGE_URL: r'(https?://[^\s]+\.(?:png|jpg|jpeg|gif|webp|svg)(?:\?[^\s]*)?)',
}
def detect_intent(prompt: str) -> Tuple[IngestionIntent, float]:
    """
    Detect the user's intent from their prompt.
    Returns (intent, confidence).
    """
    lowered = prompt.lower()
    # One point per regex hit, accumulated per intent.
    scores = {
        intent: sum(len(re.findall(pattern, lowered)) for pattern in patterns)
        for intent, patterns in INTENT_PATTERNS.items()
    }
    if not any(scores.values()):
        # No pattern fired; assume ingestion at low confidence so a bare
        # system name like "heroui" still works.
        return IngestionIntent.INGEST, 0.5
    winner = max(scores, key=scores.get)
    # Confidence grows with match count but is capped at 1.0.
    confidence = min(1.0, scores[winner] * 0.3 + 0.4)
    return winner, confidence
def extract_urls(prompt: str) -> List[ParsedSource]:
    """Extract URL-based sources (Figma, GitHub, CSS, image) from the prompt."""
    found: List[ParsedSource] = []
    for source_type, pattern in URL_PATTERNS.items():
        # npm packages are not URLs; extract_npm_packages() owns them.
        if source_type == SourceType.NPM_PACKAGE:
            continue
        for url in re.findall(pattern, prompt, re.IGNORECASE):
            found.append(ParsedSource(
                source_type=source_type,
                value=url,
                confidence=0.95
            ))
    return found
def extract_design_systems(prompt: str) -> List[ParsedSource]:
    """
    Extract design system names from the prompt.

    Scans 1-3 word n-grams (longest first) against the registry and keeps
    the first matching phrase per distinct system.
    """
    # Strip URLs first so e.g. "figma.com/material" cannot match a name.
    without_urls = re.sub(r'https?://[^\s]+', '', prompt)
    noise_words = ['the', 'a', 'an', 'from', 'to', 'with', 'for', 'and', 'or', 'in', 'on', 'at']
    words = without_urls.lower().split()

    matches: List[ParsedSource] = []
    seen_ids = set()
    for n in range(3, 0, -1):
        for start in range(len(words) - n + 1):
            phrase = ' '.join(words[start:start + n])
            # Skip phrases made entirely of noise words.
            if all(word in noise_words for word in phrase.split()):
                continue
            system = find_design_system(phrase)
            if system is None or system.id in seen_ids:
                continue
            seen_ids.add(system.id)
            matches.append(ParsedSource(
                source_type=SourceType.DESIGN_SYSTEM_NAME,
                value=phrase,
                # Multi-word matches are less likely to be accidental.
                confidence=0.9 if n > 1 else 0.7,
                matched_system=system
            ))
    return matches
def extract_npm_packages(prompt: str) -> List[ParsedSource]:
    """Extract explicit npm package references (e.g. "@scope/pkg", "pkg-name")."""
    # Match @scope/package or package-name patterns, optionally prefixed "npm:".
    npm_pattern = r'(?:npm[:\s]+)?(@[a-z0-9][\w\-\.]+/[\w\-\.]+|[a-z][\w\-\.]*(?:/[\w\-\.]+)?)'
    stop_words = {'design', 'system', 'use', 'the', 'and', 'for', 'from'}
    packages: List[ParsedSource] = []
    for candidate in re.findall(npm_pattern, prompt.lower()):
        # Drop common English words that the broad regex also matches.
        if candidate in stop_words:
            continue
        # Keep only tokens that structurally look like npm packages.
        if any(marker in candidate for marker in ('@', '/', '-')):
            packages.append(ParsedSource(
                source_type=SourceType.NPM_PACKAGE,
                value=candidate,
                confidence=0.8
            ))
    return packages
def generate_suggestions(parsed: ParsedIngestionPrompt) -> List[str]:
    """Generate helpful suggestions based on parsed prompt.

    Returns human-readable hint strings tailored to the detected intent;
    intents other than INGEST/SEARCH/HELP produce no suggestions.
    """
    suggestions = []
    if parsed.intent == IngestionIntent.INGEST:
        if not parsed.sources:
            # Nothing detected: steer the user toward a recognizable input.
            suggestions.append("No design system detected. Try specifying a name like 'heroui', 'shadcn', or 'mui'")
            suggestions.append("You can also provide a Figma URL, npm package, or GitHub repository")
        else:
            # Summarize each registry match and its available assets.
            for source in parsed.sources:
                if source.matched_system:
                    system = source.matched_system
                    suggestions.append(f"Found '{system.name}' - {system.description}")
                    if system.npm_packages:
                        suggestions.append(f"Will install: {', '.join(system.npm_packages)}")
                    if system.figma_community_url:
                        suggestions.append(f"Figma kit available: {system.figma_community_url}")
    elif parsed.intent == IngestionIntent.SEARCH:
        suggestions.append("I can search npm registry for design systems")
        suggestions.append("Try being more specific, like 'search for material design components'")
    elif parsed.intent == IngestionIntent.HELP:
        suggestions.append("I can ingest design systems from: npm packages, Figma, GitHub, CSS files, or images")
        suggestions.append("Try: 'add heroui' or 'ingest from figma.com/file/...'")
    return suggestions
def parse_ingestion_prompt(prompt: str) -> ParsedIngestionPrompt:
    """
    Parse a natural language prompt for design system ingestion.
    Examples:
        "add heroui" -> Detects HeroUI design system
        "ingest material ui for our project" -> Detects MUI
        "import from figma.com/file/abc123" -> Extracts Figma URL
        "use @chakra-ui/react" -> Detects npm package
        "what design systems do you support?" -> LIST intent
    """
    intent, intent_confidence = detect_intent(prompt)
    parsed = ParsedIngestionPrompt(
        original_prompt=prompt,
        intent=intent,
        confidence=intent_confidence,
    )

    # Gather candidates from every extractor.
    candidates = extract_urls(prompt) + extract_design_systems(prompt) + extract_npm_packages(prompt)

    # Deduplicate on (type, lowered value), keeping the highest-confidence
    # instance; dict insertion order preserves the confidence ranking.
    deduped = {}
    for source in sorted(candidates, key=lambda s: s.confidence, reverse=True):
        deduped.setdefault((source.source_type, source.value.lower()), source)
    parsed.sources = list(deduped.values())

    parsed.suggestions = generate_suggestions(parsed)

    # Blend intent confidence with the strongest source detection.
    if parsed.sources:
        best_source_confidence = max(s.confidence for s in parsed.sources)
        parsed.confidence = (intent_confidence + best_source_confidence) / 2
    return parsed
def parse_and_suggest(prompt: str) -> Dict[str, Any]:
    """
    Parse a prompt and provide suggestions for next steps.
    This is the main entry point for the ingestion parser.

    Returns the ParsedIngestionPrompt dict plus a "next_steps" list of
    actionable items ({action, message, ...payload}) for the caller/UI.
    """
    parsed = parse_ingestion_prompt(prompt)
    response = parsed.to_dict()
    # Add next steps based on what was found
    next_steps = []
    if parsed.intent == IngestionIntent.INGEST:
        if parsed.sources:
            # Found something to ingest: emit one step per actionable source.
            for source in parsed.sources:
                if source.source_type == SourceType.DESIGN_SYSTEM_NAME and source.matched_system:
                    system = source.matched_system
                    next_steps.append({
                        "action": "confirm_ingestion",
                        "system": system.to_dict(),
                        "message": f"Ready to ingest '{system.name}'. Confirm to proceed?"
                    })
                elif source.source_type == SourceType.FIGMA_URL:
                    next_steps.append({
                        "action": "ingest_figma",
                        "url": source.value,
                        "message": "Figma URL detected. Ready to extract design tokens?"
                    })
                elif source.source_type == SourceType.NPM_PACKAGE:
                    next_steps.append({
                        "action": "search_npm",
                        "package": source.value,
                        "message": f"Will search npm for '{source.value}'"
                    })
        else:
            # Nothing found - offer alternatives
            alternatives = get_alternative_ingestion_options()
            next_steps.append({
                "action": "request_source",
                "alternatives": alternatives["alternatives"],
                "message": "No design system detected. Please provide more details:"
            })
    elif parsed.intent == IngestionIntent.SEARCH:
        # Extract search terms by stripping the search verbs themselves.
        search_terms = re.sub(r'\b(search|find|look for)\b', '', prompt.lower()).strip()
        if search_terms:
            matches = search_design_systems(search_terms)
            if matches:
                next_steps.append({
                    "action": "show_search_results",
                    "results": [m.to_dict() for m in matches],
                    "message": f"Found {len(matches)} matching design systems"
                })
            else:
                # Registry miss: fall back to a live npm search.
                next_steps.append({
                    "action": "search_npm",
                    "query": search_terms,
                    "message": f"No built-in match. Will search npm for '{search_terms}'"
                })
    elif parsed.intent == IngestionIntent.LIST:
        # Local import avoids widening the module-level import surface.
        from design_system_registry import get_all_systems
        all_systems = get_all_systems()
        next_steps.append({
            "action": "show_all_systems",
            "count": len(all_systems),
            "categories": list(set(s.category for s in all_systems)),
            "message": f"I know about {len(all_systems)} design systems"
        })
    elif parsed.intent == IngestionIntent.INFO:
        for source in parsed.sources:
            if source.matched_system:
                system = source.matched_system
                alternatives = get_alternative_ingestion_options(system)
                next_steps.append({
                    "action": "show_info",
                    "system": system.to_dict(),
                    "alternatives": alternatives,
                    "message": f"Information about {system.name}"
                })
    response["next_steps"] = next_steps
    return response
# Convenience function for quick parsing
def quick_parse(prompt: str) -> Tuple[Optional[DesignSystemInfo], IngestionIntent, float]:
    """
    Quick parse that returns the most likely design system and intent.
    Useful for simple lookups.
    """
    parsed = parse_ingestion_prompt(prompt)
    # Sources are confidence-ordered, so the first registry match wins.
    matched = next(
        (source.matched_system for source in parsed.sources if source.matched_system),
        None,
    )
    return matched, parsed.intent, parsed.confidence

1490
tools/api/mcp_server.py Normal file

File diff suppressed because it is too large Load Diff

352
tools/api/npm_search.py Normal file
View File

@@ -0,0 +1,352 @@
"""
npm Registry Search for Design Systems.
This module provides npm registry search capabilities to find
design system packages when they're not in our built-in registry.
"""
import asyncio
import aiohttp
from dataclasses import dataclass
from typing import List, Optional, Dict, Any
import re
NPM_REGISTRY_URL = "https://registry.npmjs.org"
NPM_SEARCH_URL = "https://registry.npmjs.org/-/v1/search"
@dataclass
class NpmPackageInfo:
    """Information about an npm package."""
    name: str
    version: str
    description: str
    keywords: List[str]
    homepage: Optional[str]
    repository: Optional[str]
    npm_url: str
    downloads_weekly: int = 0          # Weekly download count, 0 when unknown
    is_design_system: bool = False     # True when confidence_score >= 0.3
    confidence_score: float = 0.0      # Design-system likelihood, 0.0-1.0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize all fields for API responses."""
        return dict(
            name=self.name,
            version=self.version,
            description=self.description,
            keywords=self.keywords,
            homepage=self.homepage,
            repository=self.repository,
            npm_url=self.npm_url,
            downloads_weekly=self.downloads_weekly,
            is_design_system=self.is_design_system,
            confidence_score=self.confidence_score,
        )
# Keywords that indicate a design system package.
# Matched as SUBSTRINGS of a package's keywords (so "ui" also fires inside
# "ui-kit"); weights are summed and later normalized by /10 in
# calculate_design_system_score().
DESIGN_SYSTEM_KEYWORDS = {
    # High confidence
    "design-system": 3.0,
    "design-tokens": 3.0,
    "ui-kit": 2.5,
    "component-library": 2.5,
    "design tokens": 3.0,
    # Medium confidence
    "ui-components": 2.0,
    "react-components": 1.5,
    "vue-components": 1.5,
    "css-framework": 2.0,
    "theme": 1.5,
    "theming": 1.5,
    "tokens": 1.5,
    "styles": 1.0,
    "components": 1.0,
    # Low confidence (common in design systems)
    "ui": 0.5,
    "react": 0.3,
    "vue": 0.3,
    "css": 0.3,
    "scss": 0.3,
    "tailwind": 1.5,
    "material": 1.0,
    "bootstrap": 1.0,
}
def calculate_design_system_score(package_info: Dict[str, Any]) -> float:
    """
    Calculate a confidence score that a package is a design system.
    Returns a score from 0.0 to 1.0.
    """
    score = 0.0

    # Keyword weights: substring match, so "ui" also fires inside "ui-kit".
    for keyword in (package_info.get("keywords", []) or []):
        lowered = keyword.lower()
        for marker, weight in DESIGN_SYSTEM_KEYWORDS.items():
            if marker in lowered:
                score += weight

    # Phrase weights in the free-text description.
    description = (package_info.get("description", "") or "").lower()
    for phrase, weight in (
        ("design system", 3.0),
        ("design tokens", 2.5),
        ("component library", 2.0),
        ("ui components", 1.5),
        ("ui kit", 2.0),
    ):
        if phrase in description:
            score += weight
    if any(framework in description for framework in ("react", "vue", "angular", "svelte")):
        score += 0.5
    if "css" in description:
        score += 0.3

    # Name heuristics.
    name = package_info.get("name", "").lower()
    if any(term in name for term in ("design", "theme", "ui", "components", "tokens")):
        score += 1.0
    if name.startswith("@"):  # Scoped packages often are design systems
        score += 0.5

    # Normalize to 0-1 range.
    return min(1.0, score / 10.0)
async def search_npm(
    query: str,
    limit: int = 10,
    design_systems_only: bool = True
) -> List[NpmPackageInfo]:
    """
    Search npm registry for packages matching the query.

    Args:
        query: Search query
        limit: Maximum number of results
        design_systems_only: If True, filter to likely design system packages

    Returns:
        List of NpmPackageInfo objects sorted by confidence score
        (descending); [] on any network or HTTP error.
    """
    params = {
        "text": query,
        # Over-fetch when filtering, since non-design-system hits are dropped.
        "size": limit * 3 if design_systems_only else limit,
    }
    results = []
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(NPM_SEARCH_URL, params=params) as response:
                if response.status != 200:
                    return []
                data = await response.json()
                for obj in data.get("objects", []):
                    package = obj.get("package", {})
                    # Heuristic score; >= 0.3 counts as a design system.
                    ds_score = calculate_design_system_score(package)
                    is_design_system = ds_score >= 0.3
                    if design_systems_only and not is_design_system:
                        continue
                    # Repository URL may live under links or the raw
                    # repository field (dict or string form).
                    repo = package.get("links", {}).get("repository")
                    if not repo:
                        repo_info = package.get("repository")
                        if isinstance(repo_info, dict):
                            repo = repo_info.get("url", "")
                        elif isinstance(repo_info, str):
                            repo = repo_info
                    info = NpmPackageInfo(
                        name=package.get("name", ""),
                        version=package.get("version", ""),
                        description=package.get("description", ""),
                        keywords=package.get("keywords", []) or [],
                        homepage=package.get("links", {}).get("homepage"),
                        repository=repo,
                        npm_url=f"https://www.npmjs.com/package/{package.get('name', '')}",
                        downloads_weekly=obj.get("downloads", {}).get("weekly", 0) if obj.get("downloads") else 0,
                        is_design_system=is_design_system,
                        confidence_score=ds_score,
                    )
                    results.append(info)
                    if len(results) >= limit:
                        break
    except Exception as e:
        # Best-effort: a failed search degrades to "no results".
        print(f"npm search error: {e}")
        return []
    # Sort by confidence score
    results.sort(key=lambda x: x.confidence_score, reverse=True)
    return results
async def get_package_info(package_name: str) -> Optional[NpmPackageInfo]:
    """
    Get detailed information about a specific npm package.

    Fetches the full registry document and returns info for the "latest"
    dist-tag; None on any network/HTTP error or unknown package.
    """
    url = f"{NPM_REGISTRY_URL}/{package_name}"
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status != 200:
                    return None
                data = await response.json()
                # Get latest version info
                latest_version = data.get("dist-tags", {}).get("latest", "")
                version_info = data.get("versions", {}).get(latest_version, {})
                # Extract repository URL (dict or string form).
                repo = data.get("repository")
                repo_url = None
                if isinstance(repo, dict):
                    repo_url = repo.get("url", "")
                elif isinstance(repo, str):
                    repo_url = repo
                # Normalize VCS-style URLs (git+https, git://, ssh) to https.
                if repo_url:
                    repo_url = re.sub(r'^git\+', '', repo_url)
                    repo_url = re.sub(r'\.git$', '', repo_url)
                    repo_url = repo_url.replace('git://', 'https://')
                    repo_url = repo_url.replace('ssh://git@', 'https://')
                ds_score = calculate_design_system_score({
                    "name": data.get("name", ""),
                    "description": data.get("description", ""),
                    "keywords": data.get("keywords", []),
                })
                return NpmPackageInfo(
                    name=data.get("name", ""),
                    version=latest_version,
                    description=data.get("description", ""),
                    keywords=data.get("keywords", []) or [],
                    homepage=data.get("homepage"),
                    repository=repo_url,
                    npm_url=f"https://www.npmjs.com/package/{package_name}",
                    is_design_system=ds_score >= 0.3,
                    confidence_score=ds_score,
                )
    except Exception as e:
        # Best-effort: treat any failure as "package info unavailable".
        print(f"npm package info error: {e}")
        return None
async def verify_package_exists(package_name: str) -> bool:
    """Check if an npm package exists.

    Issues a HEAD request against the registry; any network failure is
    treated as "does not exist".
    """
    url = f"{NPM_REGISTRY_URL}/{package_name}"
    try:
        async with aiohttp.ClientSession() as session:
            async with session.head(url) as response:
                return response.status == 200
    except Exception:
        return False
async def get_package_files(package_name: str, version: str = "latest") -> List[str]:
    """
    Get list of files in an npm package (for finding token files).

    Uses unpkg.com's ``?meta`` endpoint, which returns a nested directory
    tree where every node already carries its full path from the package
    root (e.g. "/dist/tokens.json").

    Args:
        package_name: npm package name (may be scoped, e.g. "@scope/pkg")
        version: Version or dist-tag to inspect (default "latest")

    Returns:
        Flat list of file paths, or [] on any network/parse error.
    """
    url = f"https://unpkg.com/{package_name}@{version}/?meta"
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status != 200:
                    return []
                data = await response.json()

                def extract_files(node: Dict) -> List[str]:
                    # Each node's "path" is already absolute within the
                    # package, so no prefix bookkeeping is needed. (The old
                    # version prepended "/" to an already-absolute path and
                    # produced "//file" results.)
                    files = []
                    if node.get("type") == "file":
                        files.append(node.get("path", ""))
                    elif node.get("type") == "directory":
                        for child in node.get("files", []):
                            files.extend(extract_files(child))
                    return files

                return extract_files(data)
    except Exception as e:
        # Best-effort: a failed listing degrades to "no files found".
        print(f"Error getting package files: {e}")
        return []
def find_token_files(file_list: List[str]) -> Dict[str, List[str]]:
    """
    Identify potential design token files from a list of package files.

    Args:
        file_list: Flat list of file paths within a package.

    Returns:
        Dict bucketing paths into: json_tokens, css_variables,
        scss_variables, js_tokens, and style_dictionary. Each path lands
        in at most one bucket.
    """
    token_files: Dict[str, List[str]] = {
        "json_tokens": [],
        "css_variables": [],
        "scss_variables": [],
        "js_tokens": [],
        "style_dictionary": [],
    }
    for file_path in file_list:
        lower_path = file_path.lower()
        # Style Dictionary trees are checked FIRST: previously this test sat
        # in a trailing elif, where any *.json path had already been consumed
        # by the json branch, making the bucket unreachable for JSON files.
        if "style-dictionary" in lower_path:
            token_files["style_dictionary"].append(file_path)
        # JSON tokens
        elif lower_path.endswith(".json"):
            if any(term in lower_path for term in ["token", "theme", "color", "spacing", "typography"]):
                token_files["json_tokens"].append(file_path)
        # CSS variables
        elif lower_path.endswith(".css"):
            if any(term in lower_path for term in ["variables", "tokens", "theme", "custom-properties"]):
                token_files["css_variables"].append(file_path)
        # SCSS variables
        elif lower_path.endswith((".scss", ".sass")):
            if any(term in lower_path for term in ["variables", "tokens", "_variables", "_tokens"]):
                token_files["scss_variables"].append(file_path)
        # JS/TS tokens
        elif lower_path.endswith((".js", ".ts", ".mjs")):
            if any(term in lower_path for term in ["theme", "tokens", "colors", "spacing"]):
                token_files["js_tokens"].append(file_path)
    return token_files
# Synchronous wrapper for use in non-async contexts
def search_npm_sync(query: str, limit: int = 10, design_systems_only: bool = True) -> List[NpmPackageInfo]:
    """Blocking convenience wrapper around :func:`search_npm`.

    Runs the coroutine on a fresh event loop; do not call from async code.
    """
    return asyncio.run(search_npm(query, limit, design_systems_only))


def get_package_info_sync(package_name: str) -> Optional[NpmPackageInfo]:
    """Blocking convenience wrapper around :func:`get_package_info`.

    Runs the coroutine on a fresh event loop; do not call from async code.
    """
    return asyncio.run(get_package_info(package_name))

View File

@@ -0,0 +1,7 @@
fastapi>=0.100.0
uvicorn[standard]>=0.23.0
httpx>=0.24.0
python-dotenv>=1.0.0
pydantic>=2.0.0
mcp>=1.0.0
google-generativeai>=0.3.0

3106
tools/api/server.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,14 @@
"""
DSS Services - Core business logic for the Design System Swarm
Services:
- SandboxedFS: Secure file system operations within project boundaries
- ProjectManager: Project registry and validation
- ConfigService: Project configuration loading and saving
"""
from .sandboxed_fs import SandboxedFS
from .project_manager import ProjectManager
from .config_service import ConfigService, DSSConfig
__all__ = ['SandboxedFS', 'ProjectManager', 'ConfigService', 'DSSConfig']

View File

@@ -0,0 +1,170 @@
"""
ConfigService - Project Configuration Management
Handles loading, saving, and validating project-specific .dss/config.json files.
Uses Pydantic for schema validation with sensible defaults.
"""
import json
import os
from pathlib import Path
from typing import Optional, List, Dict, Any
from pydantic import BaseModel, Field
import logging
logger = logging.getLogger(__name__)
# === Configuration Schema ===
class FigmaConfig(BaseModel):
    """Figma integration settings."""
    # assumed to be the Figma file key and team id used by the sync layer —
    # TODO confirm against the Figma sync code
    file_id: Optional[str] = None
    team_id: Optional[str] = None
class TokensConfig(BaseModel):
    """Design token export settings."""
    # Directory (relative to project root) where token files are written.
    output_path: str = "./tokens"
    format: str = "css"  # css | scss | json | js
class AIConfig(BaseModel):
    """AI assistant behavior settings."""
    # Operations the assistant is permitted to perform on project files.
    allowed_operations: List[str] = Field(default_factory=lambda: ["read", "write"])
    # Files always loaded as context for the assistant.
    context_files: List[str] = Field(default_factory=lambda: ["./README.md"])
    # Upper bound (KB) on a single file the assistant will read.
    max_file_size_kb: int = 500
class DSSConfig(BaseModel):
    """
    Complete DSS project configuration schema.
    Stored in: [project_root]/.dss/config.json
    """
    schema_version: str = "1.0"
    figma: FigmaConfig = Field(default_factory=FigmaConfig)
    tokens: TokensConfig = Field(default_factory=TokensConfig)
    ai: AIConfig = Field(default_factory=AIConfig)

    class Config:
        # Allow extra fields for forward compatibility.
        # NOTE(review): pydantic v1-style inner Config; v2 replaces this with
        # model_config — confirm the pinned pydantic version before upgrading.
        extra = "allow"
# === Config Service ===
class ConfigService:
    """
    Service for managing project configuration files.

    Loads .dss/config.json from project roots, validates against the
    DSSConfig schema, and provides defaults when config is missing or
    unparseable.
    """

    CONFIG_FILENAME = "config.json"
    DSS_FOLDER = ".dss"

    def __init__(self):
        """Initialize config service."""
        logger.info("ConfigService initialized")

    def get_config_path(self, project_root: str) -> Path:
        """Get path to config file for a project."""
        return Path(project_root) / self.DSS_FOLDER / self.CONFIG_FILENAME

    def get_config(self, project_root: str) -> DSSConfig:
        """
        Load configuration for a project.

        Args:
            project_root: Absolute path to project root directory

        Returns:
            DSSConfig object (defaults if config file is missing or invalid)
        """
        config_path = self.get_config_path(project_root)
        if config_path.exists():
            try:
                # Explicit encoding so results don't depend on the locale.
                with open(config_path, encoding="utf-8") as f:
                    data = json.load(f)
                config = DSSConfig(**data)
                logger.debug(f"Loaded config from {config_path}")
                return config
            except Exception as e:
                # Was `except (json.JSONDecodeError, Exception)`: the tuple was
                # redundant since Exception already covers decode and pydantic
                # validation errors. Behavior (fall back to defaults) unchanged.
                logger.warning(f"Failed to parse config at {config_path}: {e}")
                # Fall through to return defaults
        logger.debug(f"Using default config for {project_root}")
        return DSSConfig()

    def save_config(self, project_root: str, config: DSSConfig) -> None:
        """
        Save configuration for a project.

        Args:
            project_root: Absolute path to project root directory
            config: DSSConfig object to save
        """
        config_path = self.get_config_path(project_root)
        # Ensure .dss directory exists
        config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(config_path, 'w', encoding="utf-8") as f:
            json.dump(config.dict(), f, indent=2)
        logger.info(f"Saved config to {config_path}")

    def update_config(self, project_root: str, updates: Dict[str, Any]) -> DSSConfig:
        """
        Update specific fields in project config.

        The merge is one level deep: a dict value updates the matching
        section's keys; any other value replaces the field wholesale.

        Args:
            project_root: Absolute path to project root directory
            updates: Dictionary of fields to update

        Returns:
            Updated DSSConfig object (already persisted to disk)
        """
        config = self.get_config(project_root)
        config_dict = config.dict()
        for key, value in updates.items():
            if isinstance(value, dict) and isinstance(config_dict.get(key), dict):
                config_dict[key].update(value)
            else:
                config_dict[key] = value
        new_config = DSSConfig(**config_dict)
        self.save_config(project_root, new_config)
        return new_config

    def init_config(self, project_root: str) -> DSSConfig:
        """
        Initialize config file for a new project.

        Creates .dss/ folder and config.json with defaults if not exists.

        Args:
            project_root: Absolute path to project root directory

        Returns:
            DSSConfig object (new or existing)
        """
        config_path = self.get_config_path(project_root)
        if config_path.exists():
            logger.debug(f"Config already exists at {config_path}")
            return self.get_config(project_root)
        config = DSSConfig()
        self.save_config(project_root, config)
        logger.info(f"Initialized new config at {config_path}")
        return config

    def config_exists(self, project_root: str) -> bool:
        """Check if config file exists for a project."""
        return self.get_config_path(project_root).exists()

View File

@@ -0,0 +1,295 @@
"""
ProjectManager - Project Registry Service
Manages the server-side registry of projects, including:
- Project registration with path validation
- Root path storage and retrieval
- Project lifecycle management
"""
import os
from pathlib import Path
from typing import Optional, List, Dict, Any
import logging
logger = logging.getLogger(__name__)
class ProjectManager:
"""
Manages project registry with root path validation.
Works with the existing Projects database class to add root_path support.
Validates paths exist and are accessible before registration.
"""
    def __init__(self, projects_db, config_service=None):
        """
        Initialize project manager.

        Args:
            projects_db: Projects database class (from storage.database);
                must provide create/get/list methods.
            config_service: Optional ConfigService used to create the .dss
                config folder when a project is registered.
        """
        self.db = projects_db
        self.config_service = config_service
        logger.info("ProjectManager initialized")
    def register_project(
        self,
        name: str,
        root_path: str,
        description: str = "",
        figma_file_key: str = ""
    ) -> Dict[str, Any]:
        """
        Register a new project with validated root path.

        Args:
            name: Human-readable project name
            root_path: Absolute path to project directory
            description: Optional project description
            figma_file_key: Optional Figma file key

        Returns:
            Created project dict (including the resolved ``root_path``)

        Raises:
            ValueError: If path doesn't exist, isn't a directory, or is
                already registered to another project
            PermissionError: If no write access to path
        """
        # Resolve and validate path
        root_path = os.path.abspath(root_path)
        if not os.path.isdir(root_path):
            raise ValueError(f"Path does not exist or is not a directory: {root_path}")
        if not os.access(root_path, os.W_OK):
            raise PermissionError(f"No write access to path: {root_path}")
        # Check if path already registered
        existing = self.get_by_path(root_path)
        if existing:
            raise ValueError(f"Path already registered as project: {existing['name']}")
        # Generate project ID: first 8 hex chars of a UUID4.
        import uuid
        project_id = str(uuid.uuid4())[:8]
        # Create project in database
        project = self.db.create(
            id=project_id,
            name=name,
            description=description,
            figma_file_key=figma_file_key
        )
        # Update with root_path (need to add this column)
        self._update_root_path(project_id, root_path)
        project['root_path'] = root_path
        # Initialize .dss folder and config if config_service available;
        # config failure is non-fatal so registration still succeeds.
        if self.config_service:
            try:
                self.config_service.init_config(root_path)
                logger.info(f"Initialized .dss config for project {name}")
            except Exception as e:
                logger.warning(f"Failed to init config for {name}: {e}")
        logger.info(f"Registered project: {name} at {root_path}")
        return project
def get_project(self, project_id: str) -> Optional[Dict[str, Any]]:
"""
Get project by ID with path validation.
Args:
project_id: Project UUID
Returns:
Project dict or None if not found
Raises:
ValueError: If project path no longer exists
"""
project = self.db.get(project_id)
if not project:
return None
root_path = project.get('root_path')
if root_path and not os.path.isdir(root_path):
logger.warning(f"Project path no longer exists: {root_path}")
# Don't raise, just mark it
project['path_valid'] = False
else:
project['path_valid'] = True
return project
def list_projects(self, status: str = None, valid_only: bool = False) -> List[Dict[str, Any]]:
"""
List all projects with optional filtering.
Args:
status: Filter by status (active, archived, etc.)
valid_only: Only return projects with valid paths
Returns:
List of project dicts
"""
projects = self.db.list(status=status)
# Add path validation status
for project in projects:
root_path = project.get('root_path')
project['path_valid'] = bool(root_path and os.path.isdir(root_path))
if valid_only:
projects = [p for p in projects if p.get('path_valid', False)]
return projects
def get_by_path(self, root_path: str) -> Optional[Dict[str, Any]]:
"""
Find project by root path.
Args:
root_path: Absolute path to search for
Returns:
Project dict or None if not found
"""
root_path = os.path.abspath(root_path)
projects = self.list_projects()
for project in projects:
if project.get('root_path') == root_path:
return project
return None
def update_project(
self,
project_id: str,
name: str = None,
description: str = None,
root_path: str = None,
figma_file_key: str = None,
status: str = None
) -> Optional[Dict[str, Any]]:
"""
Update project fields.
Args:
project_id: Project UUID
name: Optional new name
description: Optional new description
root_path: Optional new root path (validated)
figma_file_key: Optional new Figma key
status: Optional new status
Returns:
Updated project dict or None if not found
"""
project = self.db.get(project_id)
if not project:
return None
# Validate new root_path if provided
if root_path:
root_path = os.path.abspath(root_path)
if not os.path.isdir(root_path):
raise ValueError(f"Path does not exist: {root_path}")
if not os.access(root_path, os.W_OK):
raise PermissionError(f"No write access: {root_path}")
self._update_root_path(project_id, root_path)
# Update other fields via existing update method
updates = {}
if name is not None:
updates['name'] = name
if description is not None:
updates['description'] = description
if figma_file_key is not None:
updates['figma_file_key'] = figma_file_key
if status is not None:
updates['status'] = status
if updates:
self.db.update(project_id, **updates)
return self.get_project(project_id)
def delete_project(self, project_id: str, delete_config: bool = False) -> bool:
"""
Delete a project from registry.
Args:
project_id: Project UUID
delete_config: If True, also delete .dss folder
Returns:
True if deleted, False if not found
"""
project = self.db.get(project_id)
if not project:
return False
if delete_config and project.get('root_path'):
import shutil
dss_path = Path(project['root_path']) / '.dss'
if dss_path.exists():
shutil.rmtree(dss_path)
logger.info(f"Deleted .dss folder at {dss_path}")
self.db.delete(project_id)
logger.info(f"Deleted project: {project_id}")
return True
def _update_root_path(self, project_id: str, root_path: str) -> None:
"""
Update root_path in database.
Uses raw SQL since the column may not be in the existing model.
"""
from storage.database import get_connection
with get_connection() as conn:
# Ensure column exists
try:
conn.execute("""
ALTER TABLE projects ADD COLUMN root_path TEXT DEFAULT ''
""")
logger.info("Added root_path column to projects table")
except Exception:
# Column already exists
pass
# Update the value
conn.execute(
"UPDATE projects SET root_path = ? WHERE id = ?",
(root_path, project_id)
)
@staticmethod
def ensure_schema():
"""
Ensure database schema has root_path column.
Call this on startup to migrate existing databases.
"""
from storage.database import get_connection
with get_connection() as conn:
cursor = conn.cursor()
# Check if column exists
cursor.execute("PRAGMA table_info(projects)")
columns = [col[1] for col in cursor.fetchall()]
if 'root_path' not in columns:
cursor.execute("""
ALTER TABLE projects ADD COLUMN root_path TEXT DEFAULT ''
""")
logger.info("Migration: Added root_path column to projects table")
else:
logger.debug("Schema check: root_path column exists")

View File

@@ -0,0 +1,231 @@
"""
SandboxedFS - Secure File System Operations
This service restricts all file operations to within a project's root directory,
preventing path traversal attacks and ensuring AI operations are safely scoped.
Security Features:
- Path resolution with escape detection
- Symlink attack prevention
- Read/write operation logging
"""
import os
from pathlib import Path
from typing import List, Dict, Optional
import logging
logger = logging.getLogger(__name__)
class SandboxedFS:
    """
    File system operations restricted to a project root.

    All paths are validated to ensure they don't escape the sandbox.
    This is critical for AI operations that may receive untrusted input.
    """

    def __init__(self, root_path: str):
        """
        Initialize sandboxed file system.

        Args:
            root_path: Absolute path to project root directory

        Raises:
            ValueError: If root_path doesn't exist or isn't a directory
        """
        # resolve() canonicalizes the root (including symlinks) so later
        # containment checks compare against the real directory.
        self.root = Path(root_path).resolve()
        if not self.root.is_dir():
            raise ValueError(f"Invalid root path: {root_path}")
        logger.info(f"SandboxedFS initialized with root: {self.root}")

    def _validate_path(self, relative_path: str) -> Path:
        """
        Validate and resolve a path within the sandbox.

        Args:
            relative_path: Path relative to project root

        Returns:
            Resolved absolute Path within sandbox

        Raises:
            PermissionError: If path escapes sandbox
        """
        # Normalize the path (collapses "a/../b", duplicate separators, etc.)
        clean_path = os.path.normpath(relative_path)
        # Resolve full path; resolve() also follows symlinks, so a link
        # pointing outside the root is caught by the containment check below.
        full_path = (self.root / clean_path).resolve()
        # Security check: must be within root. relative_to() raises
        # ValueError when full_path is not under self.root.
        try:
            full_path.relative_to(self.root)
        except ValueError:
            logger.warning(f"Path traversal attempt blocked: {relative_path}")
            raise PermissionError(f"Path escapes sandbox: {relative_path}")
        return full_path

    def read_file(self, relative_path: str, max_size_kb: int = 500) -> str:
        """
        Read file content within sandbox.

        Args:
            relative_path: Path relative to project root
            max_size_kb: Maximum file size in KB (default 500KB)

        Returns:
            File content as string (decoded as UTF-8)

        Raises:
            FileNotFoundError: If file doesn't exist
            PermissionError: If path escapes sandbox
            ValueError: If file exceeds max size
        """
        path = self._validate_path(relative_path)
        if not path.is_file():
            raise FileNotFoundError(f"File not found: {relative_path}")
        # Size cap guards against loading huge files into memory/AI context.
        size_kb = path.stat().st_size / 1024
        if size_kb > max_size_kb:
            raise ValueError(f"File too large: {size_kb:.1f}KB > {max_size_kb}KB limit")
        content = path.read_text(encoding='utf-8')
        logger.debug(f"Read file: {relative_path} ({len(content)} chars)")
        return content

    def write_file(self, relative_path: str, content: str) -> None:
        """
        Write file content within sandbox.

        Args:
            relative_path: Path relative to project root
            content: Content to write (encoded as UTF-8)

        Raises:
            PermissionError: If path escapes sandbox
        """
        path = self._validate_path(relative_path)
        # Create parent directories if needed
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(content, encoding='utf-8')
        logger.info(f"Wrote file: {relative_path} ({len(content)} chars)")

    def delete_file(self, relative_path: str) -> None:
        """
        Delete file within sandbox.

        Args:
            relative_path: Path relative to project root

        Raises:
            FileNotFoundError: If file doesn't exist
            PermissionError: If path escapes sandbox
        """
        path = self._validate_path(relative_path)
        if not path.is_file():
            raise FileNotFoundError(f"File not found: {relative_path}")
        path.unlink()
        logger.info(f"Deleted file: {relative_path}")

    def list_directory(self, relative_path: str = ".") -> List[Dict[str, object]]:
        """
        List directory contents within sandbox.

        Args:
            relative_path: Path relative to project root

        Returns:
            List of dicts with name, type, and (for files) size.
            Note: annotation was ``Dict[str, any]`` -- ``any`` is the builtin
            function, not a type; ``object`` is the correct "anything" type.

        Raises:
            NotADirectoryError: If path isn't a directory
            PermissionError: If path escapes sandbox
        """
        path = self._validate_path(relative_path)
        if not path.is_dir():
            raise NotADirectoryError(f"Not a directory: {relative_path}")
        result = []
        for item in sorted(path.iterdir()):
            entry = {
                "name": item.name,
                "type": "directory" if item.is_dir() else "file",
            }
            if item.is_file():
                entry["size"] = item.stat().st_size
            result.append(entry)
        return result

    def file_exists(self, relative_path: str) -> bool:
        """
        Check if file exists within sandbox.

        Args:
            relative_path: Path relative to project root

        Returns:
            True if file exists, False otherwise (including when the path
            escapes the sandbox -- deliberately not an error here)
        """
        try:
            path = self._validate_path(relative_path)
            return path.exists()
        except PermissionError:
            return False

    def get_file_tree(self, max_depth: int = 3, include_hidden: bool = False) -> Dict:
        """
        Get hierarchical file tree for AI context injection.

        Args:
            max_depth: Maximum directory depth to traverse
            include_hidden: Include hidden files (starting with .)

        Returns:
            Nested dict representing file tree; leaves are file sizes in bytes
        """
        def build_tree(path: Path, depth: int) -> Dict:
            # Depth cap keeps the tree small enough for prompt context.
            if depth > max_depth:
                return {"...": "truncated"}
            result = {}
            try:
                items = sorted(path.iterdir())
            except PermissionError:
                return {"error": "permission denied"}
            for item in items:
                # Skip hidden files unless requested
                if not include_hidden and item.name.startswith('.'):
                    # Always include .dss config folder
                    if item.name != '.dss':
                        continue
                # Skip common non-essential directories (noted, not recursed)
                if item.name in ('node_modules', '__pycache__', '.git', 'dist', 'build'):
                    result[item.name + "/"] = {"...": "skipped"}
                    continue
                if item.is_dir():
                    result[item.name + "/"] = build_tree(item, depth + 1)
                else:
                    result[item.name] = item.stat().st_size
            return result

        return build_tree(self.root, 0)

    def get_root_path(self) -> str:
        """Get the absolute root path of this sandbox."""
        return str(self.root)

102
tools/api/tests/conftest.py Normal file
View File

@@ -0,0 +1,102 @@
"""
Pytest configuration and shared fixtures for API tests
"""
import pytest
import os
@pytest.fixture(scope="session")
def figma_config():
    """Session-scoped Figma credentials pulled from the environment."""
    env = os.environ
    api_key = env.get('FIGMA_API_KEY')
    file_key = env.get('DSS_FIGMA_FILE_KEY')
    if api_key and file_key:
        return {'api_key': api_key, 'file_key': file_key}
    # Skip (not fail) the dependent tests when credentials are absent.
    pytest.skip('FIGMA_API_KEY or DSS_FIGMA_FILE_KEY not set')
@pytest.fixture
def figma_client(figma_config):
    """Initialize Figma client (mocked for now)"""

    # Canned payload shaped like a real variables response: colors,
    # typography and spacing categories plus a token count.
    def _mock_extract_variables(file_key, format='json'):
        """Mock variable extraction"""
        return {
            'status': 'success',
            'file_key': file_key,
            'format': format,
            'variables': {
                'colors': {
                    'primary': '#0066FF',
                    'secondary': '#FF6B00',
                    'success': '#00B600',
                    'warning': '#FFB800',
                    'danger': '#FF0000',
                },
                'typography': {
                    'heading-1': {'fontSize': 32, 'fontWeight': 700},
                    'heading-2': {'fontSize': 24, 'fontWeight': 700},
                    'body': {'fontSize': 16, 'fontWeight': 400},
                    'caption': {'fontSize': 12, 'fontWeight': 400},
                },
                'spacing': {
                    'xs': 4,
                    'sm': 8,
                    'md': 16,
                    'lg': 24,
                    'xl': 32,
                },
            },
            'tokens_count': 14
        }

    # Three representative components with description/variants/properties
    # metadata, mirroring what a component-extraction endpoint would return.
    def _mock_extract_components(file_key):
        """Mock component extraction"""
        return {
            'status': 'success',
            'file_key': file_key,
            'components': {
                'Button': {
                    'description': 'Primary action button',
                    'variants': ['primary', 'secondary', 'small', 'large'],
                    'properties': ['onClick', 'disabled', 'loading']
                },
                'Card': {
                    'description': 'Content container',
                    'variants': ['elevated', 'outlined'],
                    'properties': ['spacing', 'border']
                },
                'TextField': {
                    'description': 'Text input field',
                    'variants': ['default', 'error', 'disabled'],
                    'properties': ['placeholder', 'value', 'onChange']
                },
            },
            'components_count': 3
        }

    # Flat lists of style names per category (colors/fills/typography/effects).
    def _mock_extract_styles(file_key):
        """Mock style extraction"""
        return {
            'status': 'success',
            'file_key': file_key,
            'styles': {
                'colors': ['primary', 'secondary', 'success', 'warning', 'danger'],
                'fills': ['solid-primary', 'solid-secondary', 'gradient-main'],
                'typography': ['heading-1', 'heading-2', 'body', 'caption'],
                'effects': ['shadow-sm', 'shadow-md', 'shadow-lg'],
            },
            'styles_count': 14
        }

    # The "client" is a plain dict of callables so tests can invoke each
    # extraction independently (and swap in real implementations later).
    return {
        'config': figma_config,
        'extract_variables': _mock_extract_variables,
        'extract_components': _mock_extract_components,
        'extract_styles': _mock_extract_styles,
    }

View File

@@ -0,0 +1,246 @@
"""
Figma Integration Tests - Real DSS Design File Connection
Tests the complete flow:
1. Connect to real Figma file (DSS main design system)
2. Extract design variables (colors, typography, spacing)
3. Extract components
4. Extract styles and assets
5. Generate multiple output formats
6. Verify token consistency
Requires environment variables:
- FIGMA_API_KEY: Your Figma Personal Access Token
- DSS_FIGMA_FILE_KEY: File key for main DSS design file
"""
import pytest
import os
import json
from pathlib import Path
# These imports would come from your DSS package
# from figma.figma_tools import FigmaToolSuite
# from dss.tokens import DesignToken
class TestFigmaIntegration:
    """Test real Figma file integration"""
    # Fixtures now defined in conftest.py for shared access

    # ===== TEST CASES =====

    def test_figma_api_key_configured(self, figma_config):
        """Verify Figma API key is configured"""
        assert figma_config['api_key'], "FIGMA_API_KEY not set"
        # Figma Personal Access Tokens start with the 'figd_' prefix.
        assert figma_config['api_key'].startswith('figd_'), "Invalid API key format"

    def test_dss_file_key_configured(self, figma_config):
        """Verify DSS file key is configured"""
        assert figma_config['file_key'], "DSS_FIGMA_FILE_KEY not set"
        assert len(figma_config['file_key']) > 0, "File key is empty"

    def test_extract_variables_returns_dict(self, figma_client, figma_config):
        """Test variable extraction returns structured data"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        assert isinstance(result, dict)
        assert 'variables' in result
        assert result['status'] == 'success'

    def test_extracted_variables_have_colors(self, figma_client, figma_config):
        """Test colors are extracted"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        variables = result['variables']
        assert 'colors' in variables
        assert len(variables['colors']) > 0
        assert 'primary' in variables['colors']

    def test_extracted_variables_have_typography(self, figma_client, figma_config):
        """Test typography tokens are extracted"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        variables = result['variables']
        assert 'typography' in variables
        assert len(variables['typography']) > 0

    def test_extracted_variables_have_spacing(self, figma_client, figma_config):
        """Test spacing tokens are extracted"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        variables = result['variables']
        assert 'spacing' in variables
        assert len(variables['spacing']) > 0

    def test_extract_components(self, figma_client, figma_config):
        """Test component extraction"""
        result = figma_client['extract_components'](figma_config['file_key'])
        assert result['status'] == 'success'
        assert 'components' in result
        assert len(result['components']) > 0

    def test_components_have_metadata(self, figma_client, figma_config):
        """Test components have required metadata"""
        result = figma_client['extract_components'](figma_config['file_key'])
        components = result['components']
        # Every component entry must carry the full metadata triple.
        for name, component in components.items():
            assert 'description' in component
            assert 'variants' in component
            assert 'properties' in component

    def test_extract_styles(self, figma_client, figma_config):
        """Test style extraction"""
        result = figma_client['extract_styles'](figma_config['file_key'])
        assert result['status'] == 'success'
        assert 'styles' in result

    def test_extract_all_assets_if_blank(self, figma_client, figma_config):
        """Test full extraction: if no cached data, get everything"""
        # Get all asset types
        variables = figma_client['extract_variables'](figma_config['file_key'])
        components = figma_client['extract_components'](figma_config['file_key'])
        styles = figma_client['extract_styles'](figma_config['file_key'])
        # Should have data from all categories
        assert bool(variables.get('variables'))
        assert bool(components.get('components'))
        assert bool(styles.get('styles'))

    def test_tokens_match_dss_structure(self, figma_client, figma_config):
        """Verify extracted tokens match DSS token structure"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        variables = result['variables']
        # Should have standard DSS token categories
        standard_categories = ['colors', 'typography', 'spacing']
        for category in standard_categories:
            assert category in variables, f"Missing {category} category"

    def test_variable_formats(self, figma_client, figma_config):
        """Test variables can be exported in different formats"""
        for fmt in ['json', 'css', 'typescript', 'scss']:
            result = figma_client['extract_variables'](
                figma_config['file_key'],
                format=fmt
            )
            assert result['status'] == 'success'

    def test_color_values_are_valid_hex(self, figma_client, figma_config):
        """Test color values are valid hex codes"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        colors = result['variables']['colors']
        import re
        # Accepts #RRGGBB and #RRGGBBAA forms.
        hex_pattern = r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{8})$'
        for name, color in colors.items():
            assert re.match(hex_pattern, color), f"Invalid hex color: {color}"

    def test_spacing_values_are_numbers(self, figma_client, figma_config):
        """Test spacing values are numeric"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        spacing = result['variables']['spacing']
        for name, value in spacing.items():
            assert isinstance(value, (int, float)), f"Non-numeric spacing: {value}"

    def test_typography_has_required_properties(self, figma_client, figma_config):
        """Test typography tokens have required properties"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        typography = result['variables']['typography']
        required = ['fontSize', 'fontWeight']
        for name, props in typography.items():
            for req in required:
                assert req in props, f"{name} missing {req}"

    def test_error_handling_invalid_file(self, figma_client):
        """Test error handling for invalid file key"""
        result = figma_client['extract_variables']('invalid-key')
        # Should still return dict (with error status)
        assert isinstance(result, dict)

    def test_error_handling_network_error(self, figma_client):
        """Test error handling for network issues"""
        # Would be tested with actual network errors
        # For now, just verify error handling structure
        assert True

    def test_token_count_matches_actual(self, figma_client, figma_config):
        """Test token count matches extracted tokens"""
        result = figma_client['extract_variables'](figma_config['file_key'])
        # Count should match actual tokens
        token_count = sum(len(tokens) for tokens in result['variables'].values())
        assert token_count > 0

    def test_components_count_accurate(self, figma_client, figma_config):
        """Test component count is accurate"""
        result = figma_client['extract_components'](figma_config['file_key'])
        actual_count = len(result['components'])
        assert result['components_count'] == actual_count
class TestTokenConsistency:
"""Test token naming and structure consistency"""
def test_token_naming_conventions(self, figma_client, figma_config):
"""Test tokens follow naming conventions"""
result = figma_client['extract_variables'](figma_config['file_key'])
variables = result['variables']
# Colors should be kebab-case
colors = variables['colors']
for name in colors.keys():
assert name.islower() and '-' in name or name.islower()
def test_no_duplicate_token_names(self, figma_client, figma_config):
"""Test no duplicate token names across categories"""
result = figma_client['extract_variables'](figma_config['file_key'])
variables = result['variables']
all_names = []
for category_tokens in variables.values():
all_names.extend(category_tokens.keys())
# Check for duplicates
assert len(all_names) == len(set(all_names)), "Duplicate token names found"
class TestFigmaSync:
"""Test Figma sync and token database storage"""
def test_tokens_can_be_saved(self, figma_client, figma_config, tmp_path):
"""Test tokens can be saved to file"""
result = figma_client['extract_variables'](figma_config['file_key'])
# Write to temp file
output_file = tmp_path / "tokens.json"
with open(output_file, 'w') as f:
json.dump(result, f)
# Verify file was created
assert output_file.exists()
assert output_file.stat().st_size > 0
def test_exported_tokens_can_be_read(self, figma_client, figma_config, tmp_path):
"""Test exported tokens can be read back"""
result = figma_client['extract_variables'](figma_config['file_key'])
# Write to temp file
output_file = tmp_path / "tokens.json"
with open(output_file, 'w') as f:
json.dump(result, f)
# Read back
with open(output_file, 'r') as f:
loaded = json.load(f)
assert loaded['variables'] == result['variables']

View File

@@ -0,0 +1,374 @@
"""
Token Format Exporters
Export design tokens extracted from Figma in multiple formats:
- CSS Variables
- JSON
- TypeScript
- SCSS
- JavaScript
"""
import json
from typing import Dict, Any, List
from pathlib import Path
class TokenExporter:
    """Common base for all token exporters.

    Holds the token mapping; subclasses implement :meth:`export` to render
    it into a format-specific string.
    """

    def __init__(self, tokens: Dict[str, Any]):
        """Store the token mapping (and an empty output scratch buffer)."""
        self.output = ""
        self.tokens = tokens

    def export(self) -> str:
        """Render the tokens; must be overridden by each concrete exporter."""
        raise NotImplementedError
class CSSVariableExporter(TokenExporter):
    """Export tokens as CSS custom properties"""

    def export(self) -> str:
        """Render the token set as a ``:root`` block of CSS variables."""
        out = [":root {"]
        # Each category maps to a --<category>-<name> custom property.
        for name, value in self.tokens.get("colors", {}).items():
            out.append(f" --color-{name}: {value};")
        for name, value in self.tokens.get("spacing", {}).items():
            out.append(f" --spacing-{name}: {value}px;")
        for name, props in self.tokens.get("typography", {}).items():
            if isinstance(props, dict):
                # Composite typography tokens expand one property per line.
                out.extend(f" --typography-{name}-{prop}: {val};" for prop, val in props.items())
            else:
                out.append(f" --typography-{name}: {props};")
        out.append("}")
        return "\n".join(out)
class JSONExporter(TokenExporter):
    """Export tokens as JSON"""
    def export(self) -> str:
        """Export as JSON"""
        # Pretty-print with 2-space indent so the artifact is diff-friendly.
        return json.dumps(self.tokens, indent=2)
class TypeScriptExporter(TokenExporter):
    """Export tokens as TypeScript constants"""
    def export(self) -> str:
        """Export as TypeScript"""
        # NOTE(review): the nesting indent in the emitted TS source appears
        # flattened to a single space -- confirm intended output formatting.
        lines = [
            "/**",
            " * Design System Tokens",
            " * Auto-generated from Figma",
            " */",
            "",
            "export const DSSTokens = {"
        ]
        # Colors: emitted as quoted string values.
        if "colors" in self.tokens:
            lines.append(" colors: {")
            for name, value in self.tokens["colors"].items():
                lines.append(f" {name}: '{value}',")
            lines.append(" },")
        # Spacing: emitted as bare numbers (pixel units implied by consumers).
        if "spacing" in self.tokens:
            lines.append(" spacing: {")
            for name, value in self.tokens["spacing"].items():
                lines.append(f" {name}: {value},")
            lines.append(" },")
        # Typography: dict tokens become nested objects; scalars become strings.
        if "typography" in self.tokens:
            lines.append(" typography: {")
            for name, props in self.tokens["typography"].items():
                if isinstance(props, dict):
                    lines.append(f" {name}: {{")
                    for prop, val in props.items():
                        # Quote string values, leave numbers unquoted.
                        if isinstance(val, str):
                            lines.append(f" {prop}: '{val}',")
                        else:
                            lines.append(f" {prop}: {val},")
                    lines.append(" },")
                else:
                    lines.append(f" {name}: '{props}',")
            lines.append(" },")
        lines.append("};")
        lines.append("")
        # Convenience type alias for consumers indexing into the token object.
        lines.append("export type TokenKey = keyof typeof DSSTokens;")
        return "\n".join(lines)
class SCSSExporter(TokenExporter):
    """Export tokens as SCSS variables"""

    def export(self) -> str:
        """Render tokens as ``$``-prefixed SCSS variables, grouped by category."""
        out = ["// Design System Tokens - SCSS Variables", ""]
        if "colors" in self.tokens:
            out.append("// Colors")
            out.extend(f"$color-{name}: {value};" for name, value in self.tokens["colors"].items())
            out.append("")
        if "spacing" in self.tokens:
            out.append("// Spacing")
            out.extend(f"$spacing-{name}: {value}px;" for name, value in self.tokens["spacing"].items())
            out.append("")
        if "typography" in self.tokens:
            out.append("// Typography")
            for name, props in self.tokens["typography"].items():
                if isinstance(props, dict):
                    # Composite tokens expand into one variable per property.
                    out.extend(f"$typography-{name}-{prop}: {val};" for prop, val in props.items())
                else:
                    out.append(f"$typography-{name}: {props};")
            out.append("")
        return "\n".join(out)
class JavaScriptExporter(TokenExporter):
    """Export tokens as JavaScript object"""
    def export(self) -> str:
        """Export as JavaScript"""
        # Mirrors TypeScriptExporter but emits a CommonJS module instead of
        # an ES export (no type alias at the end).
        lines = [
            "/**",
            " * Design System Tokens",
            " * Auto-generated from Figma",
            " */",
            "",
            "const DSSTokens = {"
        ]
        # Colors: emitted as quoted string values.
        if "colors" in self.tokens:
            lines.append(" colors: {")
            for name, value in self.tokens["colors"].items():
                lines.append(f" {name}: '{value}',")
            lines.append(" },")
        # Spacing: emitted as bare numbers.
        if "spacing" in self.tokens:
            lines.append(" spacing: {")
            for name, value in self.tokens["spacing"].items():
                lines.append(f" {name}: {value},")
            lines.append(" },")
        # Typography: dict tokens become nested objects; scalars become strings.
        if "typography" in self.tokens:
            lines.append(" typography: {")
            for name, props in self.tokens["typography"].items():
                if isinstance(props, dict):
                    lines.append(f" {name}: {{")
                    for prop, val in props.items():
                        # Quote string values, leave numbers unquoted.
                        if isinstance(val, str):
                            lines.append(f" {prop}: '{val}',")
                        else:
                            lines.append(f" {prop}: {val},")
                    lines.append(" },")
                else:
                    lines.append(f" {name}: '{props}',")
            lines.append(" },")
        lines.append("};")
        lines.append("")
        lines.append("module.exports = DSSTokens;")
        return "\n".join(lines)
class FigmaExporter(TokenExporter):
    """Export tokens in Figma sync format"""

    def export(self) -> str:
        """Convert tokens into Figma's list-of-entries sync payload (JSON)."""
        # Figma expects flat lists of {name, value, type} records per group;
        # note spacing tokens land under the "sizing" key.
        payload = {
            "colors": [],
            "typography": [],
            "sizing": []
        }
        if "colors" in self.tokens:
            payload["colors"] = [
                {"name": name, "value": value, "type": "color"}
                for name, value in self.tokens["colors"].items()
            ]
        if "spacing" in self.tokens:
            payload["sizing"] = [
                {"name": name, "value": f"{value}px", "type": "size"}
                for name, value in self.tokens["spacing"].items()
            ]
        if "typography" in self.tokens:
            payload["typography"] = [
                {"name": name, "value": props, "type": "typography"}
                for name, props in self.tokens["typography"].items()
            ]
        return json.dumps(payload, indent=2)
class TailwindExporter(TokenExporter):
    """Export tokens as Tailwind configuration"""

    def export(self) -> str:
        """Render colors and spacing as a ``tailwind.config.js`` theme extension."""
        out = [
            "/**",
            " * Tailwind Configuration",
            " * Auto-generated from Design System tokens",
            " */",
            "",
            "module.exports = {",
            " theme: {",
            " extend: {",
        ]
        if "colors" in self.tokens:
            out.append(" colors: {")
            out.extend(f" '{name}': '{value}'," for name, value in self.tokens["colors"].items())
            out.append(" },")
        if "spacing" in self.tokens:
            out.append(" spacing: {")
            out.extend(f" '{name}': '{value}px'," for name, value in self.tokens["spacing"].items())
            out.append(" },")
        # Close extend / theme / module.exports.
        out.append(" },")
        out.append(" },")
        out.append("};")
        return "\n".join(out)
class TokenExporterFactory:
    """Factory for creating exporters"""

    # Registry of format name -> exporter class (aliases included).
    exporters = {
        "css": CSSVariableExporter,
        "json": JSONExporter,
        "typescript": TypeScriptExporter,
        "ts": TypeScriptExporter,
        "scss": SCSSExporter,
        "javascript": JavaScriptExporter,
        "js": JavaScriptExporter,
        "figma": FigmaExporter,
        "tailwind": TailwindExporter,
    }

    @classmethod
    def create(cls, format: str, tokens: Dict[str, Any]) -> TokenExporter:
        """Instantiate the exporter registered for *format* (case-insensitive)."""
        handler = cls.exporters.get(format.lower())
        if handler is None:
            raise ValueError(f"Unknown export format: {format}")
        return handler(tokens)

    @classmethod
    def export(cls, format: str, tokens: Dict[str, Any]) -> str:
        """One-shot helper: create the exporter and return its rendered output."""
        return cls.create(format, tokens).export()

    @classmethod
    def export_all(cls, tokens: Dict[str, Any], output_dir: Path) -> Dict[str, Path]:
        """Write every known format into *output_dir*; return format -> path map."""
        output_dir.mkdir(parents=True, exist_ok=True)
        extensions = {
            "css": ".css",
            "json": ".json",
            "typescript": ".ts",
            "scss": ".scss",
            "javascript": ".js",
            "figma": ".figma.json",
            "tailwind": ".config.js",
        }
        written = {}
        for fmt, suffix in extensions.items():
            # Best-effort: a failure in one format must not block the others.
            try:
                destination = output_dir / f"tokens{suffix}"
                destination.write_text(cls.export(fmt, tokens))
                written[fmt] = destination
            except Exception as e:
                print(f"Error exporting {fmt}: {e}")
        return written
# Convenience functions: thin module-level wrappers around
# TokenExporterFactory so callers don't need to touch the factory API.
def export_tokens_css(tokens: Dict[str, Any]) -> str:
    """Export tokens as CSS variables"""
    return TokenExporterFactory.export("css", tokens)
def export_tokens_json(tokens: Dict[str, Any]) -> str:
    """Export tokens as JSON"""
    return TokenExporterFactory.export("json", tokens)
def export_tokens_typescript(tokens: Dict[str, Any]) -> str:
    """Export tokens as TypeScript"""
    return TokenExporterFactory.export("typescript", tokens)
def export_tokens_scss(tokens: Dict[str, Any]) -> str:
    """Export tokens as SCSS"""
    return TokenExporterFactory.export("scss", tokens)
def export_tokens_javascript(tokens: Dict[str, Any]) -> str:
    """Export tokens as JavaScript"""
    return TokenExporterFactory.export("javascript", tokens)
def export_tokens_tailwind(tokens: Dict[str, Any]) -> str:
    """Export tokens for Tailwind"""
    return TokenExporterFactory.export("tailwind", tokens)
def export_all_formats(tokens: Dict[str, Any], output_dir: str) -> Dict[str, Path]:
    """Export tokens in all formats; accepts the output directory as a string."""
    return TokenExporterFactory.export_all(tokens, Path(output_dir))