diff --git a/dss-claude-plugin/.claude-plugin/plugin.json b/dss-claude-plugin/.claude-plugin/plugin.json new file mode 100644 index 0000000..1c4e001 --- /dev/null +++ b/dss-claude-plugin/.claude-plugin/plugin.json @@ -0,0 +1,20 @@ +{ + "name": "dss-claude-plugin", + "version": "1.0.0", + "description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components", + "author": { + "name": "overbits", + "url": "https://github.com/overbits" + }, + "homepage": "https://dss.overbits.luz.uy", + "keywords": [ + "design-system", + "tokens", + "css", + "scss", + "tailwind", + "figma", + "storybook" + ], + "commands": "./commands/" +} diff --git a/dss-claude-plugin/.mcp.json b/dss-claude-plugin/.mcp.json new file mode 100644 index 0000000..9ead03f --- /dev/null +++ b/dss-claude-plugin/.mcp.json @@ -0,0 +1,22 @@ +{ + "x-immutable-notice": { + "protected": true, + "reason": "MCP server configuration - maintains Claude Code integration stability", + "lastModified": "2025-12-09", + "bypassMethod": "Use 'DSS_IMMUTABLE_BYPASS=1 git commit' or commit message '[IMMUTABLE-UPDATE] reason'" + }, + "mcpServers": { + "dss": { + "command": "python3", + "args": ["${CLAUDE_PLUGIN_ROOT}/servers/dss-mcp-server.py"], + "env": { + "PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/..:${CLAUDE_PLUGIN_ROOT}", + "DSS_HOME": "${CLAUDE_PLUGIN_ROOT}/../.dss", + "DSS_DATABASE": "${CLAUDE_PLUGIN_ROOT}/../.dss/dss.db", + "DSS_CACHE": "${CLAUDE_PLUGIN_ROOT}/../.dss/cache", + "DSS_BASE_PATH": "${CLAUDE_PLUGIN_ROOT}/.." + }, + "description": "Design System Server MCP server providing design token and component analysis tools" + } + } +} diff --git a/dss-claude-plugin/agents/dss-architect.md b/dss-claude-plugin/agents/dss-architect.md new file mode 100644 index 0000000..5110a92 --- /dev/null +++ b/dss-claude-plugin/agents/dss-architect.md @@ -0,0 +1,230 @@ +--- +name: dss-architect +description: Design system planning and architecture agent +model: sonnet +--- + +# DSS Architect Agent + +You are a Design System Architect agent specialized in planning and designing design system implementations. + +## Your Role + +You help teams plan, design, and implement design systems. You provide: +- Strategic recommendations for design system adoption +- Architecture decisions for token structures +- Component library planning +- Migration strategies +- Best practices guidance + +## Capabilities + +### 1. Design System Assessment +- Evaluate current codebase state +- Identify design system maturity level +- Assess team readiness +- Recommend adoption approach + +### 2. Token Architecture +- Design token hierarchy +- Plan naming conventions +- Structure token categories +- Handle theming requirements + +### 3. Component Planning +- Define component taxonomy +- Plan component API patterns +- Design variant strategies +- Handle composition patterns + +### 4. Migration Strategy +- Phase migration plans +- Identify dependencies +- Plan rollback strategies +- Define success metrics + +## Available Tools + +You have access to DSS tools: +- `dss_analyze_project` - Analyze codebase +- `dss_extract_tokens` - Extract existing tokens +- `dss_audit_components` - Audit components +- `dss_find_quick_wins` - Find quick wins +- `dss_get_status` - Check DSS status + +## Workflow + +When invoked, follow this process: + +1. **Understand Context** + - What is the project type? + - What frameworks are used? + - What is the team size? + - What are the goals? + +2. 
**Analyze Current State** + - Run project analysis + - Check for existing patterns + - Identify pain points + - Assess complexity + +3. **Design Architecture** + - Propose token structure + - Recommend component patterns + - Plan integration approach + - Define boundaries + +4. **Create Plan** + - Phase implementation + - Identify milestones + - Estimate effort + - Define success criteria + +5. **Provide Recommendations** + - Best practices + - Pitfalls to avoid + - Tools to consider + - Resources for learning + +## Response Format + +Structure your responses as: + +``` +DESIGN SYSTEM ARCHITECTURE PLAN + +EXECUTIVE SUMMARY +[Brief overview of recommendations] + +CURRENT STATE ASSESSMENT +[Analysis of existing codebase] + +PROPOSED ARCHITECTURE + +Token Structure: +[Token hierarchy and naming] + +Component Strategy: +[Component organization and patterns] + +IMPLEMENTATION PHASES + +Phase 1: Foundation +[Core tokens and utilities] + +Phase 2: Core Components +[Essential component library] + +Phase 3: Extended System +[Full design system rollout] + +RECOMMENDATIONS +[Specific actionable items] + +RISKS AND MITIGATIONS +[Potential issues and solutions] + +NEXT STEPS +[Immediate actions to take] +``` + +## Example Interaction + +**User:** "Help me plan a design system for our React application" + +**Agent Response:** + +``` +DESIGN SYSTEM ARCHITECTURE PLAN + +Let me analyze your project first... + +[Uses dss_analyze_project] + +EXECUTIVE SUMMARY + +Based on analysis, I recommend a phased approach starting with +token extraction and color consolidation, followed by core +component development. + +CURRENT STATE ASSESSMENT + +Project: React + TypeScript +Components: 45 identified +Style approach: Mix of CSS modules and styled-components +Consistency score: 3/10 (significant variation) + +Key findings: +- 156 unique color values (target: 12-15) +- 34 spacing values (target: 6-8) +- 8 button variants (target: 1 component with variants) + +PROPOSED ARCHITECTURE + +Token Structure: +├── primitives/ +│ ├── colors.json (base colors) +│ ├── typography.json (fonts, scales) +│ └── spacing.json (spacing scale) +├── semantic/ +│ ├── colors.json (primary, secondary, etc.) +│ └── components.json (component-specific tokens) +└── themes/ + ├── light.json + └── dark.json + +Component Strategy: +- Atomic design methodology +- Compound component patterns for complex UI +- TypeScript for type safety +- Storybook for documentation + +IMPLEMENTATION PHASES + +Phase 1: Token Foundation +- Extract and consolidate colors +- Define spacing scale +- Create typography system +- Set up style-dictionary + +Phase 2: Core Components +- Button, Input, Card, Modal +- Layout primitives (Box, Stack, Grid) +- Typography components + +Phase 3: Extended System +- Complex components +- Full documentation +- Testing suite + +RECOMMENDATIONS + +1. Start with color tokens (biggest impact) +2. Use CSS custom properties for flexibility +3. Implement dark mode from start +4. Document as you build + +NEXT STEPS + +1. Run /dss-extract to create initial tokens +2. Review and refine token names +3. Generate theme files with /dss-generate +4. Begin Button component refactor +``` + +## Best Practices + +1. **Be Thorough** + - Always analyze before recommending + - Consider long-term implications + - Account for team capabilities + +2. **Be Practical** + - Recommend achievable phases + - Consider constraints + - Provide concrete next steps + +3. 
**Be Flexible** + - Adapt to project needs + - Offer alternatives + - Explain trade-offs diff --git a/dss-claude-plugin/agents/dss-migrator.md b/dss-claude-plugin/agents/dss-migrator.md new file mode 100644 index 0000000..0ac4726 --- /dev/null +++ b/dss-claude-plugin/agents/dss-migrator.md @@ -0,0 +1,272 @@ +--- +name: dss-migrator +description: Design system migration and refactoring agent +model: sonnet +--- + +# DSS Migrator Agent + +You are a Design System Migration agent specialized in helping teams migrate to and adopt design systems. + +## Your Role + +You help teams migrate existing codebases to use design system tokens and components. You provide: +- Step-by-step migration guidance +- Code refactoring assistance +- Conflict resolution +- Progress tracking +- Rollback strategies + +## Capabilities + +### 1. Migration Planning +- Analyze migration scope +- Identify dependencies +- Plan migration order +- Estimate effort + +### 2. Code Refactoring +- Replace hardcoded values with tokens +- Refactor components to use design system +- Update style files +- Maintain backwards compatibility + +### 3. Progress Tracking +- Track migration status +- Identify blockers +- Report completion metrics +- Celebrate milestones + +### 4. Conflict Resolution +- Handle naming conflicts +- Resolve style conflicts +- Manage breaking changes +- Provide fallback strategies + +## Available Tools + +You have access to DSS tools: +- `dss_analyze_project` - Analyze codebase +- `dss_audit_components` - Audit components +- `dss_extract_tokens` - Extract tokens +- `dss_find_quick_wins` - Find easy migrations +- `dss_get_status` - Check status + +## Workflow + +When invoked, follow this process: + +1. **Assess Scope** + - Analyze what needs migration + - Identify files and components + - Calculate effort + - Prioritize by impact + +2. **Create Migration Plan** + - Define phases + - Set milestones + - Identify risks + - Plan rollback + +3. **Execute Migration** + - Start with quick wins + - Migrate incrementally + - Test continuously + - Document changes + +4. **Validate Results** + - Check for regressions + - Verify token usage + - Test all variants + - Update documentation + +5. **Report Progress** + - Track completion + - Note blockers + - Celebrate wins + - Plan next phase + +## Response Format + +Structure your responses as: + +``` +MIGRATION STATUS REPORT + +OVERVIEW +[Current migration state] + +SCOPE ANALYSIS +Files to migrate: X +Components affected: Y +Estimated effort: Z hours + +MIGRATION PLAN + +Phase 1: [Name] +- [ ] Task 1 +- [ ] Task 2 +Status: Not started / In progress / Complete + +Phase 2: [Name] +- [ ] Task 1 +- [ ] Task 2 +Status: Not started + +CURRENT TASK +[What to migrate now] + +CODE CHANGES +[Specific refactoring needed] + +NEXT STEPS +[What to do after current task] +``` + +## Example Interaction + +**User:** "Help me migrate our button components to use design tokens" + +**Agent Response:** + +``` +BUTTON MIGRATION PLAN + +Let me analyze your button components... 
+ +[Uses dss_audit_components] + +OVERVIEW + +Found 3 button files that need migration: +- src/components/Button/Button.tsx +- src/components/Button/IconButton.tsx +- src/components/Button/ButtonGroup.tsx + +SCOPE ANALYSIS + +Hardcoded values to replace: 23 +- Colors: 8 values +- Spacing: 6 values +- Border radius: 3 values +- Font sizes: 4 values +- Shadows: 2 values + +Estimated effort: 2 hours + +MIGRATION PLAN + +Phase 1: Token Preparation +- [x] Identify all button values +- [ ] Map to existing tokens +- [ ] Create missing tokens +Status: In Progress + +Phase 2: Primary Button +- [ ] Replace color values +- [ ] Replace spacing values +- [ ] Update hover/active states +- [ ] Test all variants +Status: Not started + +Phase 3: Other Buttons +- [ ] Migrate IconButton +- [ ] Migrate ButtonGroup +- [ ] Update documentation +Status: Not started + +CURRENT TASK: Map values to tokens + +Let me create the token mapping: + +| Current Value | Token | Category | +|---------------|-------|----------| +| #0066cc | --color-primary | color | +| #0052a3 | --color-primary-dark | color | +| #ffffff | --color-white | color | +| 16px | --spacing-md | spacing | +| 8px | --spacing-sm | spacing | +| 4px | --radius-sm | radius | +| 14px | --font-size-sm | typography | + +REFACTORING NEEDED + +Before: +```tsx +const Button = styled.button` + background-color: #0066cc; + color: #ffffff; + padding: 8px 16px; + border-radius: 4px; + font-size: 14px; + + &:hover { + background-color: #0052a3; + } +`; +``` + +After: +```tsx +const Button = styled.button` + background-color: var(--color-primary); + color: var(--color-white); + padding: var(--spacing-sm) var(--spacing-md); + border-radius: var(--radius-sm); + font-size: var(--font-size-sm); + + &:hover { + background-color: var(--color-primary-dark); + } +`; +``` + +NEXT STEPS + +1. Confirm token mapping is correct +2. Create any missing tokens +3. Apply refactoring to Button.tsx +4. Test all button variants +5. Move to IconButton + +Would you like me to proceed with the refactoring? +``` + +## Migration Strategies + +### 1. Incremental Migration +- Migrate one component at a time +- Test after each change +- Keep old code working + +### 2. Token-First Migration +- Create all tokens first +- Then update components +- Ensures consistency + +### 3. Component-First Migration +- Migrate related components together +- Update tokens as needed +- Faster for small scopes + +## Best Practices + +1. **Always Test** + - Visual regression testing + - Unit tests for components + - Integration tests + +2. **Document Changes** + - Keep changelog + - Note breaking changes + - Update component docs + +3. **Communicate** + - Notify team of changes + - Get code reviews + - Share progress + +4. **Rollback Ready** + - Keep old code accessible + - Use feature flags if possible + - Have restore plan diff --git a/dss-claude-plugin/commands/dss-analyze.md b/dss-claude-plugin/commands/dss-analyze.md new file mode 100644 index 0000000..6d4b566 --- /dev/null +++ b/dss-claude-plugin/commands/dss-analyze.md @@ -0,0 +1,82 @@ +--- +name: dss-analyze +description: Analyze a project for design system patterns and opportunities +arguments: + - name: path + description: Path to the project directory to analyze + required: false +--- + +# DSS Analyze Command + +Analyze a project directory for design system patterns, component usage, and tokenization opportunities. + +## Usage + +``` +/dss-analyze [path] +``` + +If no path is provided, analyzes the current working directory. + +## What This Does + +1. 
**Scans Project Structure** + - Identifies all style files (CSS, SCSS, Tailwind) + - Locates component files (React, Vue) + - Maps project dependencies + +2. **Analyzes Styles** + - Extracts color values and patterns + - Identifies typography usage + - Finds spacing patterns + - Detects shadows and borders + +3. **Analyzes Components** + - Maps React/Vue components + - Identifies repeated patterns + - Finds hardcoded values + +4. **Generates Report** + - Summary statistics + - Pattern identification + - Recommendations for improvement + +## Instructions for Claude + +When the user runs this command: + +1. Use the `dss_analyze_project` tool with the provided path (or current directory if not specified) +2. Wait for analysis results +3. Present findings in a clear, organized format: + - Summary section with key metrics + - Style patterns found + - Component analysis + - Top recommendations +4. Offer to drill deeper into specific areas +5. Suggest next steps (token extraction, component audit, etc.) + +## Example Output + +``` +Design System Analysis: /path/to/project + +SUMMARY +- Files scanned: 127 +- Style files: 34 +- Components: 23 +- Unique colors: 156 +- Typography variations: 12 + +TOP FINDINGS +1. Color inconsistency: 156 colors could be 12 tokens +2. Spacing: 8 different scales in use +3. 3 button variants that could consolidate + +RECOMMENDATIONS +1. Create color token system +2. Standardize spacing scale +3. Audit button components + +Next: Run /dss-extract to create tokens from these patterns +``` diff --git a/dss-claude-plugin/commands/dss-audit.md b/dss-claude-plugin/commands/dss-audit.md new file mode 100644 index 0000000..86d42a2 --- /dev/null +++ b/dss-claude-plugin/commands/dss-audit.md @@ -0,0 +1,103 @@ +--- +name: dss-audit +description: Audit React components for design system adoption +arguments: + - name: path + description: Path to components directory + required: false +--- + +# DSS Audit Command + +Audit React/Vue components for design system readiness, identifying hardcoded values and consolidation opportunities. + +## Usage + +``` +/dss-audit [path] +``` + +Examples: +``` +/dss-audit +/dss-audit ./src/components +/dss-audit ./src/ui +``` + +## What This Does + +1. **Scans Components** + - Finds all React/Vue components + - Parses component code + - Extracts styling information + +2. **Identifies Issues** + - Hardcoded color values + - Inline spacing values + - Inconsistent styling patterns + - Duplicate component patterns + +3. **Maps Dependencies** + - Component relationships + - Style imports + - Shared utilities + +4. **Generates Report** + - Issues by severity + - Consolidation opportunities + - Refactoring recommendations + +## Instructions for Claude + +When the user runs this command: + +1. Use `dss_audit_components` tool with the provided path +2. Present findings organized by: + - Summary statistics + - Hardcoded values (table format) + - Consolidation opportunities + - Dependency issues +3. Prioritize by impact and effort +4. Provide specific fix recommendations +5. 
Offer to create tokens for hardcoded values + +## Example Output + +``` +Component Audit: /src/components + +SUMMARY +- Components analyzed: 45 +- Hardcoded values: 127 +- Consolidation opportunities: 8 +- Accessibility issues: 23 + +HARDCODED VALUES (Top 10) + +| File | Line | Value | Suggested Token | +|------|------|-------|-----------------| +| Button.tsx | 12 | #0066cc | --color-primary | +| Button.tsx | 15 | 16px | --spacing-md | +| Card.tsx | 8 | #ffffff | --color-surface | +| Card.tsx | 22 | 8px | --radius-md | +| Modal.tsx | 34 | rgba(0,0,0,0.5) | --color-overlay | + +CONSOLIDATION OPPORTUNITIES + +1. Button Components (3 variants) + Files: PrimaryButton.tsx, SecondaryButton.tsx, GhostButton.tsx + Suggestion: Merge into Button.tsx with 'variant' prop + +2. Card Components (2 variants) + Files: Card.tsx, FeaturedCard.tsx + Suggestion: Add 'featured' prop to Card.tsx + +RECOMMENDATIONS + +1. [HIGH] Create color tokens for 89 hardcoded colors +2. [HIGH] Implement spacing scale (34 values) +3. [MEDIUM] Consolidate button variants +4. [LOW] Add TypeScript types for tokens + +Next: Run /dss-extract to create tokens from these values +``` diff --git a/dss-claude-plugin/commands/dss-extract.md b/dss-claude-plugin/commands/dss-extract.md new file mode 100644 index 0000000..9ebe57e --- /dev/null +++ b/dss-claude-plugin/commands/dss-extract.md @@ -0,0 +1,106 @@ +--- +name: dss-extract +description: Extract design tokens from CSS, SCSS, Tailwind, or JSON sources +arguments: + - name: path + description: Path to file or directory containing design tokens + required: false + - name: sources + description: "Comma-separated source types: css,scss,tailwind,json" + required: false +--- + +# DSS Extract Command + +Extract design tokens from various source formats and create a unified token collection. + +## Usage + +``` +/dss-extract [path] [sources] +``` + +Examples: +``` +/dss-extract +/dss-extract ./src/styles +/dss-extract ./src css,scss +/dss-extract ./tailwind.config.js tailwind +``` + +## What This Does + +1. **Scans Sources** + - CSS custom properties and values + - SCSS variables and maps + - Tailwind configuration + - JSON token files + +2. **Extracts Tokens** + - Colors (hex, rgb, hsl) + - Typography (fonts, sizes, weights) + - Spacing (margins, paddings, gaps) + - Sizing (widths, heights, radii) + - Shadows and effects + +3. **Merges Results** + - Combines tokens from all sources + - Resolves conflicts + - Normalizes naming + +4. **Returns Collection** + - Categorized tokens + - Metadata included + - Ready for theme generation + +## Instructions for Claude + +When the user runs this command: + +1. Parse the path argument (default to current directory) +2. Parse sources argument (default to all: css, scss, tailwind, json) +3. Use `dss_extract_tokens` tool with parsed arguments +4. Present extracted tokens organized by category: + - Colors + - Typography + - Spacing + - Sizing + - Shadows + - Other +5. Show token count and source breakdown +6. Offer to generate theme files with `/dss-generate` + +## Example Output + +``` +Token Extraction: /path/to/project + +SOURCES SCANNED +- CSS: 12 files +- SCSS: 8 files +- Tailwind: 1 file + +TOKENS EXTRACTED + +Colors (24 tokens): + primary: #0066cc + secondary: #6c757d + success: #28a745 + error: #dc3545 + ... + +Typography (8 tokens): + font-family-base: "Inter", sans-serif + font-size-base: 16px + ... + +Spacing (6 tokens): + xs: 4px + sm: 8px + md: 16px + ... 
+ +TOTAL: 38 unique tokens + +Next: Run /dss-generate css to create theme files +``` diff --git a/dss-claude-plugin/commands/dss-figma.md b/dss-claude-plugin/commands/dss-figma.md new file mode 100644 index 0000000..5ccc2a0 --- /dev/null +++ b/dss-claude-plugin/commands/dss-figma.md @@ -0,0 +1,138 @@ +--- +name: dss-figma +description: Sync design tokens from Figma files +arguments: + - name: file_key + description: Figma file key from the file URL + required: true +--- + +# DSS Figma Command + +Synchronize design tokens from Figma files using the Figma API. + +## Usage + +``` +/dss-figma +``` + +Example: +``` +/dss-figma abc123xyz456 +``` + +## Finding Your File Key + +The file key is in your Figma URL: +``` +https://www.figma.com/file/abc123xyz456/Design-System + ^^^^^^^^^^^^ + This is the file key +``` + +## Prerequisites + +Set your Figma token as an environment variable: +```bash +export FIGMA_TOKEN=your-figma-personal-access-token +``` + +## What This Does + +1. **Connects to Figma API** + - Authenticates with your token + - Fetches file data + - Handles rate limiting + +2. **Extracts Tokens** + - Colors from fill styles + - Typography from text styles + - Spacing from auto-layout + - Shadows from effects + - Border radii + +3. **Normalizes Output** + - Converts Figma naming to tokens + - Organizes by category + - Adds metadata + +4. **Returns Token Collection** + - Ready for theme generation + - Merge-able with other sources + +## Instructions for Claude + +When the user runs this command: + +1. Check if file_key is provided +2. Use `dss_sync_figma` tool with file_key +3. If error about missing token: + - Explain how to get Figma token + - Show how to set environment variable +4. On success: + - Display extracted tokens by category + - Show token count + - Offer to generate theme files +5. Handle rate limiting gracefully + +## Example Output + +``` +Figma Sync: abc123xyz456 + +CONNECTING TO FIGMA... +File: Design System v2.0 +Last modified: 2024-01-15 + +TOKENS EXTRACTED + +Colors (24 tokens): + primary/500: #0066CC + primary/400: #3385D6 + primary/600: #0052A3 + secondary/500: #6C757D + success/500: #28A745 + warning/500: #FFC107 + error/500: #DC3545 + +Typography (8 styles): + heading/h1: Inter Bold 48px/56px + heading/h2: Inter Bold 36px/44px + heading/h3: Inter SemiBold 24px/32px + body/large: Inter Regular 18px/28px + body/regular: Inter Regular 16px/24px + body/small: Inter Regular 14px/20px + +Spacing (6 values): + xs: 4px + sm: 8px + md: 16px + lg: 24px + xl: 32px + 2xl: 48px + +Effects (3 shadows): + shadow/sm: 0 1px 2px rgba(0,0,0,0.05) + shadow/md: 0 4px 6px rgba(0,0,0,0.1) + shadow/lg: 0 10px 15px rgba(0,0,0,0.1) + +TOTAL: 41 tokens extracted + +Next: Run /dss-generate css to create theme files +``` + +## Error Handling + +``` +ERROR: FIGMA_TOKEN not set + +To sync with Figma, you need a Personal Access Token: + +1. Go to Figma > Settings > Personal Access Tokens +2. Create a new token +3. Set it in your environment: + export FIGMA_TOKEN=your-token-here + +Then run /dss-figma again. 
+``` diff --git a/dss-claude-plugin/commands/dss-generate.md b/dss-claude-plugin/commands/dss-generate.md new file mode 100644 index 0000000..249f040 --- /dev/null +++ b/dss-claude-plugin/commands/dss-generate.md @@ -0,0 +1,113 @@ +--- +name: dss-generate +description: Generate theme files from design tokens +arguments: + - name: format + description: "Output format: css, scss, json, or js" + required: true + - name: name + description: Theme name (default is "default") + required: false +--- + +# DSS Generate Command + +Generate platform-specific theme files from design tokens using style-dictionary. + +## Usage + +``` +/dss-generate [name] +``` + +Examples: +``` +/dss-generate css +/dss-generate scss dark-theme +/dss-generate json +/dss-generate js tokens +``` + +## Supported Formats + +| Format | Output | Use Case | +|--------|--------|----------| +| css | CSS custom properties | Web projects using CSS variables | +| scss | SCSS variables | Projects using Sass | +| json | JSON structure | Framework-agnostic, APIs | +| js | JavaScript module | React, Vue, JS projects | + +## What This Does + +1. **Prepares Tokens** + - Validates token structure + - Organizes by category + - Applies naming conventions + +2. **Transforms via Style Dictionary** + - Applies platform transforms + - Generates output files + - Creates documentation + +3. **Returns Generated Files** + - Theme file content + - Usage instructions + - Integration guide + +## Instructions for Claude + +When the user runs this command: + +1. Verify format is valid (css, scss, json, js) +2. Check if tokens are available (from previous extraction or ask for path) +3. Use `dss_generate_theme` tool with format and theme name +4. Display generated file content +5. Provide integration instructions for the format +6. Offer to generate additional formats + +If no tokens available: +- Ask user to run `/dss-extract` first +- Or ask for path to tokens file + +## Example Output + +``` +Theme Generation: dark-theme (CSS) + +GENERATED FILE: theme-dark.css + +:root { + /* Colors */ + --color-primary: #3385d6; + --color-primary-light: #66a3e0; + --color-primary-dark: #0066cc; + --color-background: #1a1a1a; + --color-surface: #2d2d2d; + --color-text: #ffffff; + + /* Typography */ + --font-family-base: "Inter", sans-serif; + --font-size-sm: 14px; + --font-size-base: 16px; + --font-size-lg: 18px; + + /* Spacing */ + --spacing-xs: 4px; + --spacing-sm: 8px; + --spacing-md: 16px; + --spacing-lg: 24px; +} + +USAGE + +1. Import in your main CSS: + @import 'theme-dark.css'; + +2. Use variables in components: + .button { + background: var(--color-primary); + padding: var(--spacing-md); + } + +Generate another format? Try /dss-generate scss dark-theme +``` diff --git a/dss-claude-plugin/commands/dss-quick-wins.md b/dss-claude-plugin/commands/dss-quick-wins.md new file mode 100644 index 0000000..e22a372 --- /dev/null +++ b/dss-claude-plugin/commands/dss-quick-wins.md @@ -0,0 +1,145 @@ +--- +name: dss-quick-wins +description: Find quick win opportunities for design system adoption +arguments: + - name: path + description: Path to project directory + required: false +--- + +# DSS Quick Wins Command + +Find low-effort, high-impact opportunities for design system adoption. + +## Usage + +``` +/dss-quick-wins [path] +``` + +Examples: +``` +/dss-quick-wins +/dss-quick-wins ./src +``` + +## What This Does + +1. **Analyzes Codebase** + - Scans styles and components + - Identifies patterns + - Measures usage frequency + +2. 
**Finds Opportunities** + - Color consolidation + - Spacing standardization + - Typography cleanup + - Border radius normalization + - Shadow standardization + +3. **Scores by Impact/Effort** + - Calculates potential impact + - Estimates implementation effort + - Ranks by ROI + +4. **Generates Recommendations** + - Prioritized list + - Specific actions + - Expected outcomes + +## Instructions for Claude + +When the user runs this command: + +1. Use `dss_find_quick_wins` tool with path +2. Present quick wins in priority order +3. For each quick win, show: + - Category (colors, spacing, etc.) + - Impact level (high/medium/low) + - Effort level (high/medium/low) + - Specific values to consolidate + - Files affected +4. Provide total time estimate +5. Offer to implement top quick wins + +## Example Output + +``` +Quick Win Analysis: /path/to/project + +TOP QUICK WINS + +1. COLOR CONSOLIDATION + Impact: HIGH | Effort: LOW + + Found 47 color values reducible to 8 tokens + Files affected: 23 + + Consolidate: + #0066cc, #0067cd, #0065cb -> primary + #6c757d, #6b747c, #6d767e -> secondary + + Estimated time: 2 hours + +2. SPACING STANDARDIZATION + Impact: HIGH | Effort: LOW + + Found 34 spacing values reducible to 6 tokens + Files affected: 31 + + Consolidate to 4px grid: + 4px, 8px, 16px, 24px, 32px, 48px + + Estimated time: 3 hours + +3. BORDER RADIUS NORMALIZATION + Impact: MEDIUM | Effort: LOW + + Found 12 radius values reducible to 4 tokens + Files affected: 15 + + Consolidate: + 2px (sm), 4px (md), 8px (lg), 16px (xl) + + Estimated time: 1 hour + +4. SHADOW CLEANUP + Impact: MEDIUM | Effort: LOW + + Found 8 shadow definitions reducible to 3 tokens + Files affected: 12 + + Consolidate: + sm: 0 1px 2px rgba(0,0,0,0.05) + md: 0 4px 6px rgba(0,0,0,0.1) + lg: 0 10px 15px rgba(0,0,0,0.1) + + Estimated time: 1 hour + +5. FONT SIZE SCALE + Impact: HIGH | Effort: MEDIUM + + Found 15 font sizes reducible to 7 tokens + Files affected: 28 + + Consolidate to type scale: + 12px, 14px, 16px, 18px, 24px, 32px, 48px + + Estimated time: 3 hours + +SUMMARY + +Total quick wins: 5 +Total estimated time: 10 hours +Expected impact: 60% reduction in style inconsistency + +RECOMMENDED ORDER + +1. Colors (biggest impact) +2. Spacing (most widespread) +3. Border radius (quick win) +4. Shadows (contained scope) +5. Font sizes (needs coordination) + +Ready to implement? I can create tokens for any of these. +``` diff --git a/dss-claude-plugin/commands/dss-storybook.md b/dss-claude-plugin/commands/dss-storybook.md new file mode 100644 index 0000000..9f352da --- /dev/null +++ b/dss-claude-plugin/commands/dss-storybook.md @@ -0,0 +1,147 @@ +--- +name: dss-storybook +description: Set up and configure Storybook for design system components +arguments: + - name: action + description: "Action to perform: scan, generate, or configure" + required: true + - name: path + description: Path to project directory + required: false +--- + +# DSS Storybook Command + +Set up, configure, and generate Storybook stories for design system documentation. 
+ +## Usage + +``` +/dss-storybook [path] +``` + +Examples: +``` +/dss-storybook scan +/dss-storybook generate ./src/components +/dss-storybook configure +``` + +## Actions + +| Action | Description | +|--------|-------------| +| scan | Scan for existing Storybook setup and components | +| generate | Generate stories for components | +| configure | Configure Storybook theme with design tokens | + +## What This Does + +### Scan +- Checks for existing Storybook installation +- Finds components without stories +- Reports Storybook configuration status + +### Generate +- Creates story files for components +- Generates prop documentation +- Creates variant stories + +### Configure +- Sets up Storybook theme +- Integrates design tokens +- Configures addons + +## Instructions for Claude + +When the user runs this command: + +1. Validate action is valid (scan, generate, configure) +2. Use `dss_setup_storybook` tool with action and path +3. Present results based on action: + +**For scan:** +- Show Storybook status +- List components with/without stories +- Recommend next steps + +**For generate:** +- Show generated story files +- Display story code +- Provide usage instructions + +**For configure:** +- Show configuration changes +- Display theme setup +- Provide run instructions + +## Example Output + +### Scan +``` +Storybook Scan: /path/to/project + +STATUS +- Storybook installed: Yes (v7.6.0) +- Stories found: 12 +- Components without stories: 8 + +MISSING STORIES +- Accordion.tsx +- Avatar.tsx +- Badge.tsx +- Dropdown.tsx +- Pagination.tsx +- Progress.tsx +- Tabs.tsx +- Toast.tsx + +Run /dss-storybook generate to create stories +``` + +### Generate +``` +Storybook Story Generation + +GENERATED STORIES + +Button.stories.tsx: +- Primary variant +- Secondary variant +- Disabled state +- Loading state +- With icon + +Card.stories.tsx: +- Default +- With image +- Interactive + +Input.stories.tsx: +- Default +- With label +- With error +- Disabled + +Run: npm run storybook +``` + +### Configure +``` +Storybook Configuration + +CONFIGURATION CREATED + +.storybook/theme.js: +- Brand colors from tokens +- Typography from tokens +- Custom logo support + +.storybook/preview.js: +- Token CSS imported +- Global decorators added +- Controls configured + +Run: npm run storybook +Access: http://localhost:6006 +``` diff --git a/dss-claude-plugin/core/__init__.py b/dss-claude-plugin/core/__init__.py new file mode 100644 index 0000000..caf5e92 --- /dev/null +++ b/dss-claude-plugin/core/__init__.py @@ -0,0 +1,32 @@ +""" +DSS Core Module - Configuration and Context Management +Extended with Context Compiler for design system context resolution. +""" + +from .config import DSSConfig, DSSMode +from .context import DSSContext +from .compiler import ContextCompiler, EMERGENCY_SKIN +from .mcp_extensions import ( + get_active_context, + resolve_token, + validate_manifest, + list_skins, + get_compiler_status, + with_context, + COMPILER +) + +__all__ = [ + "DSSConfig", + "DSSMode", + "DSSContext", + "ContextCompiler", + "EMERGENCY_SKIN", + "get_active_context", + "resolve_token", + "validate_manifest", + "list_skins", + "get_compiler_status", + "with_context", + "COMPILER" +] diff --git a/dss-claude-plugin/core/compiler.py b/dss-claude-plugin/core/compiler.py new file mode 100644 index 0000000..633e9cc --- /dev/null +++ b/dss-claude-plugin/core/compiler.py @@ -0,0 +1,179 @@ +""" +DSS Context Compiler +Resolves project context via 3-layer cascade: Base -> Skin -> Project +Includes Safe Boot Protocol and Debug Provenance. 
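+
+A minimal usage sketch (illustrative; assumes a ds.config.json manifest and a skins/
+directory exist, and that this package is importable as `core`):
+
+    from core.compiler import ContextCompiler
+
+    compiler = ContextCompiler(skins_dir="./skins")
+    context = compiler.compile("ds.config.json", debug=True)
+    primary = context["tokens"]["colors"]["primary"]
+    provenance = context.get("_provenance", [])  # only present when debug=True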
+""" + +import json +import os +import copy +import logging +from datetime import datetime, timezone +from typing import Dict, Any, Optional, List, Union +from pathlib import Path + +# Setup logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("DSSCompiler") + +# --- SAFE BOOT PROTOCOL --- +# Hardcoded emergency skin in case file system or JSON parsing fails catastrophicly +EMERGENCY_SKIN = { + "meta": {"id": "emergency", "version": "1.0.0"}, + "tokens": { + "colors": { + "primary": "#FF0000", + "background": "#FFFFFF", + "text": "#000000" + }, + "spacing": {"base": "4px"} + }, + "status": "emergency_mode" +} + +class ContextCompiler: + def __init__(self, skins_dir: str = "./skins"): + self.skins_dir = Path(skins_dir) + self.cache: Dict[str, Any] = {} + self._manifest_mtimes: Dict[str, float] = {} # Track file modification times + + def compile(self, manifest_path: str, debug: bool = False, force_refresh: bool = False) -> Dict[str, Any]: + """ + Main entry point. Compiles context by merging: + 1. Base Skin (Implicit or Explicit) + 2. Extended Skin (defined in manifest) + 3. Project Overrides (defined in manifest) + + Args: + manifest_path: Path to ds.config.json + debug: Enable provenance tracking + force_refresh: Bypass cache and recompile (for long-running servers) + """ + try: + # Check cache with mtime validation (unless force_refresh or debug mode) + # Note: Debug mode bypasses cache because provenance must be recalculated + cache_key = f"{manifest_path}:debug={debug}" + if not force_refresh and not debug and cache_key in self.cache: + # Verify manifest hasn't changed + manifest_file = Path(manifest_path) + if manifest_file.exists(): + current_mtime = manifest_file.stat().st_mtime + cached_mtime = self._manifest_mtimes.get(cache_key, 0) + if current_mtime == cached_mtime: + logger.debug(f"Cache hit for {manifest_path}") + return self.cache[cache_key] + else: + logger.info(f"Manifest modified, invalidating cache: {manifest_path}") + + # 1. Load Project Manifest + manifest = self._load_json(manifest_path) + + # 2. Resolve Skin + skin_id = manifest.get("extends", {}).get("skin", "classic") + skin = self._load_skin(skin_id) + + # 3. Resolve Base (Single Inheritance Enforced) + # If the skin extends another, we merge that first. + # Simplified for Phase 1: We assume all skins extend 'base' implicitly unless specified + base_skin = self._load_skin("base") + + # 4. 
Cascade Merge: Base -> Skin -> Project + # Merge Base + Skin + context = self._deep_merge(base_skin, skin, path="base->skin", debug=debug) + + # Merge Result + Project Overrides + # Need to wrap project overrides in same structure as skins + project_overrides_wrapped = { + "tokens": manifest.get("overrides", {}).get("tokens", {}) + } + final_context = self._deep_merge(context, project_overrides_wrapped, path="skin->project", debug=debug) + + # Inject Metadata + final_context["_meta"] = { + "project_id": manifest["project"]["id"], + "compiled_at": datetime.now(timezone.utc).isoformat(), + "debug_enabled": debug, + "compiler_config": manifest.get("compiler", {}) + } + + if debug: + final_context["_provenance"] = self.provenance_log + + # Cache result with mtime tracking (only cache non-debug mode results) + if not debug: + manifest_file = Path(manifest_path) + if manifest_file.exists(): + cache_key = f"{manifest_path}:debug={debug}" + self.cache[cache_key] = final_context + self._manifest_mtimes[cache_key] = manifest_file.stat().st_mtime + logger.debug(f"Cached compilation result for {manifest_path}") + + return final_context + + except Exception as e: + logger.error(f"Compiler specific error: {e}") + logger.warning("Initiating SAFE BOOT PROTOCOL") + return self._enter_safe_mode(e) + + def _load_skin(self, skin_id: str) -> Dict[str, Any]: + """Loads a skin by ID from the skins directory.""" + # Simple caching strategy + if skin_id in self.cache: + return self.cache[skin_id] + + # Security: Prevent path traversal attacks + path = (self.skins_dir / f"{skin_id}.json").resolve() + if not str(path).startswith(str(self.skins_dir.resolve())): + raise ValueError(f"Invalid skin ID (path traversal detected): {skin_id}") + + if not path.exists(): + logger.warning(f"Skin {skin_id} not found, falling back to base.") + if skin_id == "base": + # Return emergency tokens if base is missing + return EMERGENCY_SKIN + return self._load_skin("base") + + data = self._load_json(str(path)) + self.cache[skin_id] = data + return data + + def _load_json(self, path: str) -> Dict[str, Any]: + with open(path, 'r') as f: + return json.load(f) + + def _deep_merge(self, base: Dict, override: Dict, path: str = "", debug: bool = False, provenance: List[Dict] = None) -> Dict: + """ + Deep merge dictionaries. Replaces arrays. + Populates provenance list if debug is True (thread-safe). 
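+
+        Illustrative example of the merge semantics (values are made up):
+
+            base     = {"colors": {"primary": "#000", "text": "#111"}, "scale": [4, 8]}
+            override = {"colors": {"primary": "#06C"}, "scale": [4, 8, 16]}
+            # result: {"colors": {"primary": "#06C", "text": "#111"}, "scale": [4, 8, 16]}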
+ """ + # Thread-safe: use method parameter instead of instance variable + if provenance is None and debug: + provenance = [] + # Store reference on first call for later retrieval + if not hasattr(self, 'provenance_log'): + self.provenance_log = provenance + + result = copy.deepcopy(base) + + for key, value in override.items(): + if isinstance(value, dict) and key in result and isinstance(result[key], dict): + # Recursive merge - pass provenance down + result[key] = self._deep_merge(result[key], value, path=f"{path}.{key}", debug=debug, provenance=provenance) + else: + # Direct replacement (Primitive or Array) + if debug and provenance is not None: + provenance.append({ + "key": key, + "action": "override", + "layer": path, + "value_type": type(value).__name__ + }) + result[key] = copy.deepcopy(value) + + return result + + def _enter_safe_mode(self, error: Exception) -> Dict[str, Any]: + """Returns the hardcoded emergency skin with error details.""" + safe_context = copy.deepcopy(EMERGENCY_SKIN) + safe_context["_error"] = str(error) + return safe_context diff --git a/dss-claude-plugin/core/config.py b/dss-claude-plugin/core/config.py new file mode 100644 index 0000000..d0aefaf --- /dev/null +++ b/dss-claude-plugin/core/config.py @@ -0,0 +1,161 @@ +""" +DSS Configuration Module +======================== + +Handles configuration management for the Design System Server (DSS) Claude Plugin. +Supports local/remote mode detection, persistent configuration storage, and +environment variable overrides. +""" + +import os +import json +import uuid +import asyncio +import logging +from enum import Enum +from pathlib import Path +from typing import Optional, Union, Any + +import aiohttp +from pydantic import BaseModel, Field, HttpUrl, ValidationError + +# Configure module-level logger +logger = logging.getLogger(__name__) + +CONFIG_DIR = Path.home() / ".dss" +CONFIG_FILE = CONFIG_DIR / "config.json" +DEFAULT_REMOTE_URL = "https://dss.overbits.luz.uy" +DEFAULT_LOCAL_URL = "http://localhost:6006" + + +class DSSMode(str, Enum): + """Operation modes for the DSS plugin.""" + LOCAL = "local" + REMOTE = "remote" + AUTO = "auto" + + +class DSSConfig(BaseModel): + """ + Configuration model for DSS Plugin. + + Attributes: + mode (DSSMode): The configured operation mode (default: AUTO). + remote_url (str): URL for the remote DSS API. + local_url (str): URL for the local DSS API (usually localhost). + session_id (str): Unique identifier for this client instance. + """ + mode: DSSMode = Field(default=DSSMode.AUTO, description="Operation mode preference") + remote_url: str = Field(default=DEFAULT_REMOTE_URL, description="Remote API endpoint") + local_url: str = Field(default=DEFAULT_LOCAL_URL, description="Local API endpoint") + session_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID") + + class Config: + validate_assignment = True + extra = "ignore" # Allow forward compatibility with new config keys + + @classmethod + def load(cls) -> "DSSConfig": + """ + Load configuration from ~/.dss/config.json. + Returns a default instance if the file does not exist or is invalid. 
+ """ + if not CONFIG_FILE.exists(): + logger.debug(f"No config found at {CONFIG_FILE}, using defaults.") + return cls() + + try: + content = CONFIG_FILE.read_text(encoding="utf-8") + data = json.loads(content) + # Ensure complex types are handled by Pydantic validation + return cls.model_validate(data) + except (json.JSONDecodeError, ValidationError) as e: + logger.warning(f"Failed to load config from {CONFIG_FILE}: {e}. Using defaults.") + return cls() + except Exception as e: + logger.error(f"Unexpected error loading config: {e}") + return cls() + + def save(self) -> None: + """ + Save the current configuration to ~/.dss/config.json. + Creates the directory if it does not exist. + """ + try: + CONFIG_DIR.mkdir(parents=True, exist_ok=True) + + # Export using mode='json' to handle enums and urls correctly + json_data = self.model_dump_json(indent=2) + CONFIG_FILE.write_text(json_data, encoding="utf-8") + logger.debug(f"Configuration saved to {CONFIG_FILE}") + except Exception as e: + logger.error(f"Failed to save config to {CONFIG_FILE}: {e}") + raise + + async def get_active_mode(self) -> DSSMode: + """ + Determine the actual runtime mode based on priority rules. + + Priority: + 1. DSS_MODE environment variable + 2. Configured 'mode' (if not AUTO) + 3. Auto-detection (ping local health endpoint) + 4. Fallback to REMOTE + + Returns: + DSSMode: The resolved active mode (LOCAL or REMOTE). + """ + # 1. Check Environment Variable + env_mode = os.getenv("DSS_MODE") + if env_mode: + try: + # Normalize string to enum + return DSSMode(env_mode.lower()) + except ValueError: + logger.warning(f"Invalid DSS_MODE env var '{env_mode}', ignoring.") + + # 2. Check Configuration (if explicit) + if self.mode != DSSMode.AUTO: + return self.mode + + # 3. Auto-detect + logger.info("Auto-detecting DSS mode...") + is_local_healthy = await self._check_local_health() + + if is_local_healthy: + logger.info(f"Local server detected at {self.local_url}. Switching to LOCAL mode.") + return DSSMode.LOCAL + else: + logger.info("Local server unreachable. Fallback to REMOTE mode.") + # 4. Fallback + return DSSMode.REMOTE + + async def _check_local_health(self) -> bool: + """ + Ping the local server health endpoint to check availability. + + Returns: + bool: True if server responds with 200 OK, False otherwise. + """ + health_url = f"{self.local_url.rstrip('/')}/health" + try: + timeout = aiohttp.ClientTimeout(total=2.0) # Short timeout for responsiveness + async with aiohttp.ClientSession(timeout=timeout) as session: + async with session.get(health_url) as response: + if response.status == 200: + return True + logger.debug(f"Local health check returned status {response.status}") + except aiohttp.ClientError as e: + logger.debug(f"Local health check connection failed: {e}") + except Exception as e: + logger.debug(f"Unexpected error during health check: {e}") + + return False + + def get_api_url(self, active_mode: DSSMode) -> str: + """ + Helper to get the correct API URL for the determined mode. + """ + if active_mode == DSSMode.LOCAL: + return self.local_url + return self.remote_url diff --git a/dss-claude-plugin/core/context.py b/dss-claude-plugin/core/context.py new file mode 100644 index 0000000..9843748 --- /dev/null +++ b/dss-claude-plugin/core/context.py @@ -0,0 +1,181 @@ +""" +DSS Context Module +================== + +Singleton context manager for the DSS Plugin. +Handles configuration loading, mode detection, and strategy instantiation. 
+""" + +import asyncio +import logging +from typing import Optional, Dict, Any + +from .config import DSSConfig, DSSMode + +# Logger setup +logger = logging.getLogger(__name__) + +# Protocol/Type placeholder for Strategies (to be replaced by base class in next steps) +Strategy = Any + + +class DSSContext: + """ + Singleton context manager for the DSS Plugin. + + Handles configuration loading, mode detection (Local/Remote), + and strategy instantiation. + """ + _instance: Optional['DSSContext'] = None + _lock: asyncio.Lock = asyncio.Lock() + + def __init__(self) -> None: + """ + Private initializer. Use get_instance() instead. + """ + if DSSContext._instance is not None: + raise RuntimeError("DSSContext is a singleton. Use get_instance() to access it.") + + self.config: Optional[DSSConfig] = None + self.active_mode: DSSMode = DSSMode.REMOTE # Default safe fallback + self._capabilities: Dict[str, bool] = {} + self._strategy_cache: Dict[str, Strategy] = {} + self.session_id: Optional[str] = None + + @classmethod + async def get_instance(cls) -> 'DSSContext': + """ + Async factory method to get the singleton instance. + Ensures config is loaded and mode is detected before returning. + """ + if not cls._instance: + async with cls._lock: + # Double-check locking pattern + if not cls._instance: + instance = cls() + await instance._initialize() + cls._instance = instance + + return cls._instance + + @classmethod + def reset(cls) -> None: + """ + Resets the singleton instance. Useful for testing. + """ + cls._instance = None + + async def _initialize(self) -> None: + """ + Internal initialization logic: + 1. Load Config + 2. Detect Mode + 3. Cache Capabilities + """ + try: + # 1. Load Configuration + self.config = DSSConfig.load() + self.session_id = self.config.session_id + + # 2. Detect Mode (Async check) + self.active_mode = await self.config.get_active_mode() + + logger.info(f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}") + + # 3. Cache Capabilities + self._cache_capabilities() + + except Exception as e: + logger.error(f"Failed to initialize DSSContext: {e}") + # Fallback to defaults if initialization fails + self.active_mode = DSSMode.REMOTE + self._capabilities = {"limited": True} + + def _cache_capabilities(self) -> None: + """ + Determines what the plugin can do based on the active mode. + """ + # Base capabilities + caps = { + "can_read_files": False, + "can_execute_browser": False, + "can_screenshot": False, + "can_connect_remote": True + } + + if self.active_mode == DSSMode.LOCAL: + # Local mode allows direct filesystem access and local browser control + caps["can_read_files"] = True + caps["can_execute_browser"] = True + caps["can_screenshot"] = True + elif self.active_mode == DSSMode.REMOTE: + # Remote mode relies on API capabilities + # Depending on remote configuration, these might differ + caps["can_execute_browser"] = False # Typically restricted in pure remote unless via API + caps["can_read_files"] = False # Security restriction + + self._capabilities = caps + + def get_capability(self, key: str) -> bool: + """Check if a specific capability is active.""" + return self._capabilities.get(key, False) + + def get_api_url(self) -> str: + """Get the correct API URL for the current mode.""" + if self.config is None: + return "https://dss.overbits.luz.uy" # Default fallback + return self.config.get_api_url(self.active_mode) + + def get_strategy(self, strategy_type: str) -> Any: + """ + Factory method to retrieve operational strategies. 
+ + Args: + strategy_type: One of 'browser', 'filesystem', 'screenshot' + + Returns: + An instance of the requested strategy. + """ + # Return cached strategy if available + if strategy_type in self._strategy_cache: + return self._strategy_cache[strategy_type] + + strategy_instance = None + + # NOTE: Strategy classes will be implemented in the next step. + # We use local imports here to avoid circular dependency issues + # if strategies define their own types using DSSContext. + + try: + if strategy_type == "browser": + # Will be implemented in Phase 2 & 3 + if self.active_mode == DSSMode.LOCAL: + from ..strategies.local.browser import LocalBrowserStrategy + strategy_instance = LocalBrowserStrategy(self) + else: + from ..strategies.remote.browser import RemoteBrowserStrategy + strategy_instance = RemoteBrowserStrategy(self) + + elif strategy_type == "filesystem": + # Will be implemented in Phase 2 + if self.active_mode == DSSMode.LOCAL: + from ..strategies.local.filesystem import LocalFilesystemStrategy + strategy_instance = LocalFilesystemStrategy(self) + else: + from ..strategies.remote.filesystem import RemoteFilesystemStrategy + strategy_instance = RemoteFilesystemStrategy(self) + + elif strategy_type == "screenshot": + # Screenshot is part of browser strategy + return self.get_strategy("browser") + + else: + raise ValueError(f"Unknown strategy type: {strategy_type}") + + except ImportError as e: + logger.error(f"Failed to import strategy {strategy_type}: {e}") + raise NotImplementedError(f"Strategy {strategy_type} not yet implemented") from e + + # Cache and return + self._strategy_cache[strategy_type] = strategy_instance + return strategy_instance diff --git a/dss-claude-plugin/core/mcp_extensions.py b/dss-claude-plugin/core/mcp_extensions.py new file mode 100644 index 0000000..5b36eca --- /dev/null +++ b/dss-claude-plugin/core/mcp_extensions.py @@ -0,0 +1,113 @@ +""" +MCP Extensions for Context Awareness +Implements the Factory Pattern to wrap existing tools with context +and defines 5 new tools for the Context Compiler. +""" + +from typing import Any, Dict, List, Callable +import functools +import json +import os +from .compiler import ContextCompiler + +# Singleton compiler instance +COMPILER = ContextCompiler(skins_dir=os.path.join(os.path.dirname(__file__), "skins")) + +# --- FACTORY PATTERN: Context Wrapper --- + +def with_context(default_manifest_path: str = None): + """ + Decorator that injects the compiled context into the tool's arguments. + Use this to upgrade existing 'token extractor' tools to be 'context aware'. + + The manifest path is extracted from kwargs['manifest_path'] if present, + otherwise falls back to the default_manifest_path provided at decoration time. + """ + def decorator(func: Callable): + @functools.wraps(func) + def wrapper(*args, **kwargs): + # 1. Get manifest path (runtime kwarg or decorator default) + manifest_path = kwargs.get('manifest_path', default_manifest_path) + if not manifest_path: + raise ValueError("No manifest_path provided to context-aware tool") + + # 2. Compile Context + context = COMPILER.compile(manifest_path) + + # 3. Inject into kwargs + kwargs['dss_context'] = context + + # 4. Execute Tool + return func(*args, **kwargs) + return wrapper + return decorator + + +# --- 5 NEW MCP TOOLS --- + +def get_active_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str: + """ + [Tool 1] Returns the fully resolved JSON context for the project. + Set debug=True to see provenance (which layer defined which token). 
+ Set force_refresh=True to bypass cache (for long-running servers). + """ + context = COMPILER.compile(manifest_path, debug=debug, force_refresh=force_refresh) + return json.dumps(context, indent=2) + +def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str: + """ + [Tool 2] Resolves a specific token value (e.g. 'colors.primary') + through the cascade. + Set force_refresh=True to bypass cache (for long-running servers). + """ + context = COMPILER.compile(manifest_path, force_refresh=force_refresh) + keys = token_path.split('.') + current = context.get("tokens", {}) + + for k in keys: + if isinstance(current, dict) and k in current: + current = current[k] + else: + return f"Token not found: {token_path}" + + return str(current) + +def validate_manifest(manifest_path: str) -> str: + """ + [Tool 3] Validates the ds.config.json against the schema. + """ + # In a full implementation, we would use 'jsonschema' library here. + # For now, we perform a basic structural check via the Compiler's loader. + try: + COMPILER.compile(manifest_path) + return "Valid: Project manifest builds successfully." + except Exception as e: + return f"Invalid: {str(e)}" + +def list_skins() -> str: + """ + [Tool 4] Lists all available skins in the registry. + """ + skins_path = COMPILER.skins_dir + if not skins_path.exists(): + return "No skins directory found." + + skins = [f.stem for f in skins_path.glob("*.json")] + return json.dumps(skins) + +def get_compiler_status() -> str: + """ + [Tool 5] Returns the health and configuration of the Context Compiler. + """ + status = { + "status": "active", + "skins_directory": str(COMPILER.skins_dir), + "cached_skins": list(COMPILER.cache.keys()), + "safe_boot_ready": True + } + return json.dumps(status, indent=2) + +# Instructions for Main Server File: +# 1. Import these tools +# 2. Register them with the MCP server instance +# 3. Apply @with_context wrapper to legacy tools if dynamic context is needed diff --git a/dss-claude-plugin/core/mcp_integration.py b/dss-claude-plugin/core/mcp_integration.py new file mode 100644 index 0000000..4975d44 --- /dev/null +++ b/dss-claude-plugin/core/mcp_integration.py @@ -0,0 +1,167 @@ +""" +MCP Integration Layer for DSS Context Compiler +Provides MCP-compliant tool wrappers for the 5 new context tools. +""" + +from typing import Dict, Any +import json +from . import ( + get_active_context, + resolve_token, + validate_manifest, + list_skins, + get_compiler_status +) + +# MCP Tool Definitions + +def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str: + """ + MCP Tool: Get Active Context + + Returns the fully resolved JSON context for a project. + Set debug=True to see provenance (which layer defined which token). + Set force_refresh=True to bypass cache (for long-running servers). + + Args: + manifest_path: Path to ds.config.json + debug: Enable debug provenance tracking + force_refresh: Bypass cache and recompile + + Returns: + JSON string with resolved context + """ + try: + return get_active_context(manifest_path, debug, force_refresh) + except Exception as e: + return json.dumps({"error": str(e), "status": "failed"}) + + +def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str: + """ + MCP Tool: Resolve Token + + Resolves a specific token value (e.g. 'colors.primary') through the cascade. + Set force_refresh=True to bypass cache (for long-running servers). 
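+
+    Example (illustrative path; the first return value is made up):
+
+        mcp_resolve_token("./ds.config.json", "colors.primary")  # -> "#0066cc"
+        mcp_resolve_token("./ds.config.json", "colors.missing")  # -> "Token not found: colors.missing"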
+ + Args: + manifest_path: Path to ds.config.json + token_path: Dot-notation path to token (e.g. 'colors.primary') + force_refresh: Bypass cache and recompile + + Returns: + Resolved token value or error message + """ + try: + return resolve_token(manifest_path, token_path, force_refresh) + except Exception as e: + return f"Error resolving token: {str(e)}" + + +def mcp_validate_manifest(manifest_path: str) -> str: + """ + MCP Tool: Validate Manifest + + Validates the ds.config.json against the schema. + + Args: + manifest_path: Path to ds.config.json + + Returns: + Validation result message + """ + try: + return validate_manifest(manifest_path) + except Exception as e: + return f"Validation error: {str(e)}" + + +def mcp_list_skins() -> str: + """ + MCP Tool: List Skins + + Lists all available skins in the registry. + + Returns: + JSON array of skin IDs + """ + try: + return list_skins() + except Exception as e: + return json.dumps({"error": str(e), "skins": []}) + + +def mcp_get_compiler_status() -> str: + """ + MCP Tool: Get Compiler Status + + Returns the health and configuration of the Context Compiler. + + Returns: + JSON object with compiler status + """ + try: + return get_compiler_status() + except Exception as e: + return json.dumps({"error": str(e), "status": "error"}) + + +# MCP Tool Registry +# This can be imported by dss-mcp-server.py to register the tools + +MCP_TOOLS = { + "dss_get_resolved_context": { + "function": mcp_get_resolved_context, + "description": "Get fully resolved design system context for a project", + "parameters": { + "manifest_path": { + "type": "string", + "description": "Path to ds.config.json", + "required": True + }, + "debug": { + "type": "boolean", + "description": "Enable debug provenance tracking", + "required": False, + "default": False + } + } + }, + "dss_resolve_token": { + "function": mcp_resolve_token, + "description": "Resolve a specific design token through the cascade", + "parameters": { + "manifest_path": { + "type": "string", + "description": "Path to ds.config.json", + "required": True + }, + "token_path": { + "type": "string", + "description": "Dot-notation path to token (e.g. 'colors.primary')", + "required": True + } + } + }, + "dss_validate_manifest": { + "function": mcp_validate_manifest, + "description": "Validate project manifest against schema", + "parameters": { + "manifest_path": { + "type": "string", + "description": "Path to ds.config.json", + "required": True + } + } + }, + "dss_list_skins": { + "function": mcp_list_skins, + "description": "List all available design system skins", + "parameters": {} + }, + "dss_get_compiler_status": { + "function": mcp_get_compiler_status, + "description": "Get Context Compiler health and configuration", + "parameters": {} + } +} diff --git a/dss-claude-plugin/core/runtime.py b/dss-claude-plugin/core/runtime.py new file mode 100644 index 0000000..0471254 --- /dev/null +++ b/dss-claude-plugin/core/runtime.py @@ -0,0 +1,308 @@ +""" +DSS Runtime - Dependency Injection & Boundary Enforcement + +This module provides a bounded runtime environment for DSS MCP tools. +All external API access (Figma, Browser, HTTP) MUST go through this runtime. 
+ +Key Features: +- Dependency Injection pattern prevents direct external imports +- Capability Provider pattern controls what operations are allowed +- All access is validated against .dss-boundaries.yaml +- All violations are logged for audit + +Usage: + runtime = DSSRuntime(config_path=".dss-boundaries.yaml") + figma_client = runtime.get_figma_client() # Validated & wrapped + browser = runtime.get_browser() # Sandboxed +""" + +import logging +import json +from pathlib import Path +from typing import Optional, Dict, Any, List +from datetime import datetime +import yaml + +# Setup logging +logger = logging.getLogger("dss.runtime") + +class BoundaryViolationError(Exception): + """Raised when an operation violates DSS boundaries""" + pass + +class DSSRuntime: + """ + Bounded runtime environment for DSS operations. + + Enforces architectural boundaries by: + 1. Controlling all external API access + 2. Validating operations against boundary configuration + 3. Logging all access for audit trail + 4. Providing sandboxed clients instead of raw access + """ + + def __init__(self, config_path: str = ".dss-boundaries.yaml"): + """ + Initialize DSS Runtime with boundary configuration. + + Args: + config_path: Path to boundary configuration file + """ + self.config_path = Path(config_path) + self.config = self._load_config() + self.enforcement_mode = self.config.get("enforcement", {}).get("mode", "strict") + self.log_violations = self.config.get("enforcement", {}).get("log_violations", True) + self.violation_log_path = Path(self.config.get("enforcement", {}).get("violation_log", ".dss/logs/boundary-violations.jsonl")) + + # Client caches (lazy initialization) + self._figma_client = None + self._browser_strategy = None + self._http_client = None + + logger.info(f"DSSRuntime initialized with enforcement mode: {self.enforcement_mode}") + + def _load_config(self) -> Dict[str, Any]: + """Load boundary configuration from YAML""" + if not self.config_path.exists(): + logger.warning(f"Boundary config not found: {self.config_path}, using defaults") + return self._default_config() + + try: + with open(self.config_path) as f: + return yaml.safe_load(f) + except Exception as e: + logger.error(f"Failed to load boundary config: {e}") + return self._default_config() + + def _default_config(self) -> Dict[str, Any]: + """Default boundary configuration (strict)""" + return { + "version": "1.0", + "blocked_external_apis": ["api.figma.com"], + "blocked_imports": ["requests", "playwright", "httpx"], + "enforcement": { + "mode": "strict", + "log_violations": True, + "violation_log": ".dss/logs/boundary-violations.jsonl" + } + } + + def _log_violation(self, operation: str, details: Dict[str, Any]): + """Log boundary violation to audit trail""" + if not self.log_violations: + return + + self.violation_log_path.parent.mkdir(parents=True, exist_ok=True) + + log_entry = { + "timestamp": datetime.utcnow().isoformat(), + "type": "boundary_violation", + "operation": operation, + "enforcement_mode": self.enforcement_mode, + "details": details + } + + with open(self.violation_log_path, "a") as f: + f.write(json.dumps(log_entry) + "\n") + + logger.warning(f"Boundary violation: {operation} - {details}") + + def _log_access(self, operation: str, allowed: bool, details: Dict[str, Any]): + """Log successful access for audit trail""" + access_log_path = Path(".dss/logs/runtime-access.jsonl") + access_log_path.parent.mkdir(parents=True, exist_ok=True) + + log_entry = { + "timestamp": datetime.utcnow().isoformat(), + "type": 
"runtime_access", + "operation": operation, + "allowed": allowed, + "details": details + } + + with open(access_log_path, "a") as f: + f.write(json.dumps(log_entry) + "\n") + + def validate_operation(self, operation: str, context: Dict[str, Any]) -> bool: + """ + Validate if an operation is allowed by DSS boundaries. + + Args: + operation: Operation name (e.g., "figma_api_call", "browser_launch") + context: Operation context for validation + + Returns: + True if allowed, raises BoundaryViolationError if not (in strict mode) + """ + required_tools = self.config.get("required_dss_tools", {}) + + # Check if operation requires going through DSS tools + for category, tools in required_tools.items(): + if operation in category: + details = { + "operation": operation, + "context": context, + "required_tools": tools + } + + self._log_violation(operation, details) + + if self.enforcement_mode == "strict": + raise BoundaryViolationError( + f"Direct {operation} blocked. Use DSS tools: {', '.join(tools)}" + ) + elif self.enforcement_mode == "warn": + logger.warning(f"Boundary warning: {operation} should use DSS tools") + return True + + self._log_access(operation, True, context) + return True + + def get_figma_client(self, token: Optional[str] = None): + """ + Get a wrapped Figma API client with boundary enforcement. + + Args: + token: Optional Figma token (uses env var if not provided) + + Returns: + SafeFigmaClient instance (read-only by default) + """ + if self._figma_client is None: + from core.safe_figma_client import SafeFigmaClient + + self._figma_client = SafeFigmaClient( + token=token, + allow_write=False, # Read-only by default + runtime=self + ) + + logger.info("Figma client initialized (read-only mode)") + + return self._figma_client + + def get_browser(self, strategy: str = "local"): + """ + Get a sandboxed browser automation instance. + + Args: + strategy: Browser strategy ("local" or "remote") + + Returns: + BrowserStrategy instance with sandbox enforcement + """ + if self._browser_strategy is None: + if strategy == "local": + try: + from strategies.local.browser import LocalBrowserStrategy + self._browser_strategy = LocalBrowserStrategy(runtime=self) + logger.info("Local browser strategy initialized") + except ImportError: + raise BoundaryViolationError( + "LocalBrowserStrategy not available. Use dss_browser_* tools." + ) + elif strategy == "remote": + try: + from strategies.remote.browser import RemoteBrowserStrategy + self._browser_strategy = RemoteBrowserStrategy(runtime=self) + logger.info("Remote browser strategy initialized") + except ImportError: + raise BoundaryViolationError( + "RemoteBrowserStrategy not available. Use dss_browser_* tools." + ) + + return self._browser_strategy + + def get_http_client(self): + """ + Get a wrapped HTTP client with URL validation. + + Returns: + SafeHTTPClient instance that validates URLs against allowed domains + """ + if self._http_client is None: + from core.safe_http_client import SafeHTTPClient + + self._http_client = SafeHTTPClient( + blocked_domains=self.config.get("blocked_external_apis", []), + runtime=self + ) + + logger.info("HTTP client initialized with URL validation") + + return self._http_client + + def check_import(self, module_name: str) -> bool: + """ + Check if a direct import is allowed. 
+ + Args: + module_name: Module being imported + + Returns: + True if allowed, raises BoundaryViolationError if blocked + """ + blocked = self.config.get("blocked_imports", []) + + if module_name in blocked: + details = { + "module": module_name, + "blocked_imports": blocked + } + + self._log_violation(f"direct_import:{module_name}", details) + + if self.enforcement_mode == "strict": + raise BoundaryViolationError( + f"Direct import of '{module_name}' blocked. " + f"Use DSS runtime clients instead." + ) + + return True + + def get_temp_dir(self, session_id: Optional[str] = None) -> Path: + """ + Get session-specific temporary directory. + + Args: + session_id: Optional session identifier (auto-generated if not provided) + + Returns: + Path to session temp directory + """ + if session_id is None: + session_id = f"session-{int(datetime.utcnow().timestamp())}" + + temp_dir = Path(".dss/temp") / session_id + temp_dir.mkdir(parents=True, exist_ok=True) + + return temp_dir + + def get_stats(self) -> Dict[str, Any]: + """ + Get runtime statistics. + + Returns: + Dictionary with access counts, violations, etc. + """ + return { + "enforcement_mode": self.enforcement_mode, + "clients_initialized": { + "figma": self._figma_client is not None, + "browser": self._browser_strategy is not None, + "http": self._http_client is not None, + }, + "config_version": self.config.get("version", "unknown") + } + +# Global runtime instance (singleton pattern) +_runtime_instance: Optional[DSSRuntime] = None + +def get_runtime() -> DSSRuntime: + """Get the global DSSRuntime instance (singleton)""" + global _runtime_instance + + if _runtime_instance is None: + _runtime_instance = DSSRuntime() + + return _runtime_instance diff --git a/dss-claude-plugin/core/schemas/ds.config.schema.json b/dss-claude-plugin/core/schemas/ds.config.schema.json new file mode 100644 index 0000000..5688040 --- /dev/null +++ b/dss-claude-plugin/core/schemas/ds.config.schema.json @@ -0,0 +1,52 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "DSS Project Manifest", + "type": "object", + "required": ["version", "project", "extends", "stack"], + "properties": { + "version": {"type": "string", "pattern": "^2\\.0\\.0$"}, + "project": { + "type": "object", + "required": ["id", "name", "type"], + "properties": { + "id": {"type": "string", "pattern": "^[a-z0-9-]+$"}, + "name": {"type": "string"}, + "type": {"enum": ["web", "mobile", "desktop"]} + } + }, + "extends": { + "type": "object", + "required": ["skin", "version"], + "properties": { + "skin": {"type": "string"}, + "version": {"type": "string"} + } + }, + "stack": { + "type": "object", + "required": ["framework", "styling"], + "properties": { + "framework": {"enum": ["react", "vue", "angular", "ios", "android", "flutter", "vanilla"]}, + "styling": {"enum": ["tailwind", "css-modules", "styled-components", "emotion", "css-vars"]}, + "icons": {"enum": ["lucide", "heroicons", "material", "custom"]}, + "typescript": {"type": "boolean"} + } + }, + "compiler": { + "type": "object", + "properties": { + "strict_mode": {"type": "boolean"}, + "validation_level": {"enum": ["error", "warning", "info"]}, + "output_format": {"enum": ["css-vars", "tailwind-config", "js-tokens"]}, + "cache_strategy": {"enum": ["aggressive", "moderate", "disabled"]} + } + }, + "overrides": { + "type": "object", + "properties": { + "tokens": {"type": "object"}, + "files": {"type": "array", "items": {"type": "string"}} + } + } + } +} diff --git a/dss-claude-plugin/core/skins/base.json 
b/dss-claude-plugin/core/skins/base.json new file mode 100644 index 0000000..6ec64f5 --- /dev/null +++ b/dss-claude-plugin/core/skins/base.json @@ -0,0 +1,28 @@ +{ + "meta": { + "id": "base", + "version": "1.0.0", + "description": "Foundation tokens shared across all skins" + }, + "tokens": { + "colors": { + "transparent": "transparent", + "current": "currentColor", + "white": "#ffffff", + "black": "#000000" + }, + "spacing": { + "0": "0px", + "1": "4px", + "2": "8px", + "4": "16px", + "8": "32px" + }, + "typography": { + "fontFamily": { + "sans": ["system-ui", "sans-serif"], + "mono": ["monospace"] + } + } + } +} diff --git a/dss-claude-plugin/core/skins/classic.json b/dss-claude-plugin/core/skins/classic.json new file mode 100644 index 0000000..a208f5f --- /dev/null +++ b/dss-claude-plugin/core/skins/classic.json @@ -0,0 +1,21 @@ +{ + "meta": { + "id": "classic", + "version": "2.0.0", + "parent": "base" + }, + "tokens": { + "colors": { + "primary": "#3B82F6", + "secondary": "#10B981", + "danger": "#EF4444", + "background": "#F3F4F6", + "surface": "#FFFFFF", + "text": "#1F2937" + }, + "borderRadius": { + "default": "0.25rem", + "lg": "0.5rem" + } + } +} diff --git a/dss-claude-plugin/core/skins/workbench.json b/dss-claude-plugin/core/skins/workbench.json new file mode 100644 index 0000000..39b9609 --- /dev/null +++ b/dss-claude-plugin/core/skins/workbench.json @@ -0,0 +1,33 @@ +{ + "meta": { + "id": "workbench", + "version": "2.0.0", + "parent": "base", + "description": "High density technical interface skin" + }, + "tokens": { + "colors": { + "primary": "#2563EB", + "secondary": "#475569", + "danger": "#DC2626", + "background": "#0F172A", + "surface": "#1E293B", + "text": "#E2E8F0" + }, + "spacing": { + "1": "2px", + "2": "4px", + "4": "8px", + "8": "16px" + }, + "borderRadius": { + "default": "0px", + "lg": "2px" + }, + "typography": { + "fontFamily": { + "sans": ["Inter", "system-ui", "sans-serif"] + } + } + } +} diff --git a/dss-claude-plugin/core/structured_logger.py b/dss-claude-plugin/core/structured_logger.py new file mode 100644 index 0000000..dc95837 --- /dev/null +++ b/dss-claude-plugin/core/structured_logger.py @@ -0,0 +1,362 @@ +""" +DSS Structured Logger - JSON-based logging for AI-consumable audit trails + +Provides structured, machine-readable logging in JSONL format (one JSON object per line). +All DSS operations are logged with consistent fields for analysis, debugging, and compliance. + +Features: +- JSONL format (newline-delimited JSON) for easy parsing +- Structured log entries with standardized fields +- Context tracking (session_id, tool_name, operation) +- Performance metrics (duration, timestamps) +- Log rotation and cleanup +- Integration with DSSRuntime + +Usage: + from core.structured_logger import get_logger, LogContext + + logger = get_logger("dss.tool.sync_figma") + + with LogContext(session_id="abc123", tool="dss_sync_figma"): + logger.info("Starting Figma sync", extra={"file_key": "xyz"}) + # ... operation ... + logger.info("Figma sync complete", extra={"tokens_extracted": 42}) +""" + +import json +import logging +import os +import sys +from datetime import datetime, timezone +from pathlib import Path +from typing import Any, Dict, Optional +from contextlib import contextmanager +import threading + +# Thread-local storage for context +_context = threading.local() + + +class DSSJSONFormatter(logging.Formatter): + """ + Custom JSON formatter for structured logging. 
+ + Outputs each log record as a single-line JSON object with standardized fields: + - timestamp: ISO 8601 UTC timestamp + - level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) + - logger: Logger name (e.g., "dss.tool.sync_figma") + - message: Human-readable log message + - context: Additional contextual data (session_id, tool_name, etc.) + - extra: Tool-specific extra data + """ + + def format(self, record: logging.LogRecord) -> str: + """Format log record as single-line JSON""" + + # Build base log entry + log_entry = { + "timestamp": datetime.now(timezone.utc).isoformat(), + "level": record.levelname, + "logger": record.name, + "message": record.getMessage(), + } + + # Add context from thread-local storage + if hasattr(_context, "session_id"): + log_entry["session_id"] = _context.session_id + if hasattr(_context, "tool_name"): + log_entry["tool"] = _context.tool_name + if hasattr(_context, "operation"): + log_entry["operation"] = _context.operation + + # Add extra fields from record + if hasattr(record, "extra_data"): + log_entry["extra"] = record.extra_data + + # Add exception info if present + if record.exc_info: + log_entry["exception"] = { + "type": record.exc_info[0].__name__ if record.exc_info[0] else None, + "message": str(record.exc_info[1]) if record.exc_info[1] else None, + "traceback": self.formatException(record.exc_info) if record.exc_info else None, + } + + # Add location info for ERROR and above + if record.levelno >= logging.ERROR: + log_entry["location"] = { + "file": record.pathname, + "line": record.lineno, + "function": record.funcName, + } + + return json.dumps(log_entry, default=str) + + +class DSSLogger(logging.Logger): + """ + Extended logger with structured logging support. + + Wraps standard Python logger with methods that accept extra data + as keyword arguments for structured logging. + """ + + def _log_with_extra(self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs): + """Internal method to log with extra structured data""" + if extra: + # Store extra data in a custom attribute + extra_record = {"extra_data": extra} + super()._log(level, msg, (), extra=extra_record, **kwargs) + else: + super()._log(level, msg, (), **kwargs) + + def debug(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs): + """Log DEBUG message with optional extra data""" + self._log_with_extra(logging.DEBUG, msg, extra, **kwargs) + + def info(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs): + """Log INFO message with optional extra data""" + self._log_with_extra(logging.INFO, msg, extra, **kwargs) + + def warning(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs): + """Log WARNING message with optional extra data""" + self._log_with_extra(logging.WARNING, msg, extra, **kwargs) + + def error(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs): + """Log ERROR message with optional extra data""" + self._log_with_extra(logging.ERROR, msg, extra, **kwargs) + + def critical(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs): + """Log CRITICAL message with optional extra data""" + self._log_with_extra(logging.CRITICAL, msg, extra, **kwargs) + + +# Configure custom logger class +logging.setLoggerClass(DSSLogger) + + +def get_logger(name: str, log_file: Optional[str] = None) -> DSSLogger: + """ + Get or create a structured logger instance. 
+ + Args: + name: Logger name (e.g., "dss.tool.sync_figma") + log_file: Optional custom log file path (defaults to .dss/logs/dss-operations.jsonl) + + Returns: + DSSLogger instance configured for structured logging + + Example: + logger = get_logger("dss.tool.extract_tokens") + logger.info("Starting token extraction", extra={"source": "css"}) + """ + logger = logging.getLogger(name) + + # Only configure if not already configured + if not logger.handlers: + # Determine log file path + if log_file is None: + dss_home = os.environ.get("DSS_HOME", ".dss") + log_dir = Path(dss_home) / "logs" + log_dir.mkdir(parents=True, exist_ok=True) + log_file = str(log_dir / "dss-operations.jsonl") + + # Create file handler with JSON formatter + file_handler = logging.FileHandler(log_file, mode="a", encoding="utf-8") + file_handler.setFormatter(DSSJSONFormatter()) + logger.addHandler(file_handler) + + # Also add console handler for development (can be disabled in production) + if os.environ.get("DSS_LOG_CONSOLE", "false").lower() == "true": + console_handler = logging.StreamHandler(sys.stderr) + console_handler.setFormatter(DSSJSONFormatter()) + logger.addHandler(console_handler) + + # Set log level from environment or default to INFO + log_level = os.environ.get("DSS_LOG_LEVEL", "INFO").upper() + logger.setLevel(getattr(logging, log_level, logging.INFO)) + + # Prevent propagation to root logger + logger.propagate = False + + return logger + + +@contextmanager +def LogContext(session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None): + """ + Context manager for adding structured context to log entries. + + All log entries within this context will include the provided fields + (session_id, tool_name, operation). + + Args: + session_id: Unique session identifier + tool: Tool name (e.g., "dss_sync_figma") + operation: Operation being performed (e.g., "token_extraction") + + Example: + with LogContext(session_id="abc123", tool="dss_sync_figma"): + logger.info("Starting sync") + # This log will include session_id and tool fields + """ + # Store previous context + prev_session_id = getattr(_context, "session_id", None) + prev_tool_name = getattr(_context, "tool_name", None) + prev_operation = getattr(_context, "operation", None) + + # Set new context + if session_id: + _context.session_id = session_id + if tool: + _context.tool_name = tool + if operation: + _context.operation = operation + + try: + yield + finally: + # Restore previous context + if prev_session_id: + _context.session_id = prev_session_id + elif hasattr(_context, "session_id"): + delattr(_context, "session_id") + + if prev_tool_name: + _context.tool_name = prev_tool_name + elif hasattr(_context, "tool_name"): + delattr(_context, "tool_name") + + if prev_operation: + _context.operation = prev_operation + elif hasattr(_context, "operation"): + delattr(_context, "operation") + + +class PerformanceLogger: + """ + Helper for logging operation performance metrics. + + Automatically measures duration and logs performance data. + + Example: + perf = PerformanceLogger("token_extraction") + perf.start() + # ... operation ... + perf.end(extra={"tokens_found": 42}) + """ + + def __init__(self, operation: str, logger: Optional[DSSLogger] = None): + """ + Initialize performance logger. 
+ + Args: + operation: Operation name + logger: Optional logger (defaults to root DSS logger) + """ + self.operation = operation + self.logger = logger or get_logger("dss.performance") + self.start_time = None + self.end_time = None + + def start(self): + """Mark operation start time""" + self.start_time = datetime.now(timezone.utc) + self.logger.debug(f"Started: {self.operation}", extra={ + "operation": self.operation, + "start_time": self.start_time.isoformat(), + }) + + def end(self, extra: Optional[Dict[str, Any]] = None): + """ + Mark operation end time and log performance. + + Args: + extra: Additional metrics to log + """ + self.end_time = datetime.now(timezone.utc) + + if self.start_time is None: + self.logger.warning(f"Performance logger end() called without start() for: {self.operation}") + return + + duration_ms = (self.end_time - self.start_time).total_seconds() * 1000 + + perf_data = { + "operation": self.operation, + "duration_ms": round(duration_ms, 2), + "start_time": self.start_time.isoformat(), + "end_time": self.end_time.isoformat(), + } + + if extra: + perf_data.update(extra) + + self.logger.info(f"Completed: {self.operation}", extra=perf_data) + + +def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5): + """ + Configure log rotation for DSS log files. + + Args: + log_dir: Log directory (defaults to .dss/logs/) + max_bytes: Max size per log file (default: 10MB) + backup_count: Number of backup files to keep (default: 5) + + Note: This uses RotatingFileHandler. For production, consider + using a log rotation service like logrotate. + """ + from logging.handlers import RotatingFileHandler + + if log_dir is None: + dss_home = os.environ.get("DSS_HOME", ".dss") + log_dir = Path(dss_home) / "logs" + + log_dir.mkdir(parents=True, exist_ok=True) + log_file = log_dir / "dss-operations.jsonl" + + # Get root DSS logger + logger = logging.getLogger("dss") + + # Remove existing file handlers + for handler in logger.handlers[:]: + if isinstance(handler, logging.FileHandler): + logger.removeHandler(handler) + + # Add rotating file handler + rotating_handler = RotatingFileHandler( + str(log_file), + maxBytes=max_bytes, + backupCount=backup_count, + encoding="utf-8" + ) + rotating_handler.setFormatter(DSSJSONFormatter()) + logger.addHandler(rotating_handler) + + logger.info("Log rotation configured", extra={ + "max_bytes": max_bytes, + "backup_count": backup_count, + "log_file": str(log_file), + }) + + +# Example usage (can be removed in production) +if __name__ == "__main__": + # Example 1: Basic logging + logger = get_logger("dss.example") + logger.info("DSS operation started", extra={"user": "admin"}) + + # Example 2: Context-based logging + with LogContext(session_id="session-123", tool="dss_sync_figma"): + logger.info("Syncing Figma file", extra={"file_key": "abc123"}) + logger.info("Sync complete", extra={"tokens_extracted": 42}) + + # Example 3: Performance logging + perf = PerformanceLogger("token_extraction", logger) + perf.start() + # Simulate work + import time + time.sleep(0.1) + perf.end(extra={"tokens_found": 100}) + + print(f"\nLogs written to: {Path('.dss/logs/dss-operations.jsonl').absolute()}") diff --git a/dss-claude-plugin/hooks/.state/.git-backup.lock b/dss-claude-plugin/hooks/.state/.git-backup.lock new file mode 100644 index 0000000..45eeca1 --- /dev/null +++ b/dss-claude-plugin/hooks/.state/.git-backup.lock @@ -0,0 +1 @@ +1765316404612 \ No newline at end of file diff --git 
a/dss-claude-plugin/hooks/dss-hooks-config.json b/dss-claude-plugin/hooks/dss-hooks-config.json new file mode 100644 index 0000000..57f7814 --- /dev/null +++ b/dss-claude-plugin/hooks/dss-hooks-config.json @@ -0,0 +1,58 @@ +{ + "description": "DSS Hooks Configuration - Customize hook behavior", + "version": "1.0.0", + + "security_check": { + "enabled": true, + "block_on_critical": false, + "warn_only": true, + "ignored_patterns": [] + }, + + "token_validator": { + "enabled": true, + "strict_mode": false, + "warn_only": true, + "categories": ["color", "spacing", "typography", "border", "effects", "layout"] + }, + + "component_checker": { + "enabled": true, + "categories": ["accessibility", "react", "typescript", "structure"], + "min_severity": "low" + }, + + "complexity_monitor": { + "enabled": true, + "max_function_lines": 50, + "max_component_lines": 200, + "max_props": 10, + "max_nesting_depth": 4, + "warn_only": true + }, + + "storybook_reminder": { + "enabled": true, + "component_patterns": ["**/components/**/*.tsx", "**/ui/**/*.tsx"], + "story_extensions": [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"], + "remind_on_new": true, + "remind_on_props_change": true + }, + + "session_summary": { + "enabled": true, + "output_file": ".dss-session-summary.md", + "include_git_diff": true, + "include_file_list": true, + "max_diff_lines": 100 + }, + + "git_backup": { + "enabled": true, + "require_git_repo": true, + "commit_only_if_changes": true, + "include_timestamp": true, + "commit_prefix": "dss-auto-backup", + "show_logs": false + } +} diff --git a/dss-claude-plugin/hooks/hooks.json b/dss-claude-plugin/hooks/hooks.json new file mode 100644 index 0000000..18df84c --- /dev/null +++ b/dss-claude-plugin/hooks/hooks.json @@ -0,0 +1,111 @@ +{ + "description": "DSS Developer Hooks - React/UI Development & QA Tools", + "version": "1.0.0", + "author": "DSS Team", + "hooks": { + "PreToolUse": [ + { + "description": "Security pattern validation for file edits", + "matcher": "Edit|Write", + "priority": 100, + "enabled": true, + "hooks": [ + { + "type": "command", + "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/security-check.py", + "timeout": 5000, + "continueOnError": true + } + ] + }, + { + "description": "Design token compliance validation", + "matcher": "Edit|Write", + "priority": 90, + "enabled": true, + "hooks": [ + { + "type": "command", + "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/token-validator.py", + "timeout": 5000, + "continueOnError": true + } + ] + } + ], + "PostToolUse": [ + { + "description": "React component best practices and accessibility", + "matcher": "Edit|Write", + "priority": 80, + "enabled": true, + "hooks": [ + { + "type": "command", + "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/component-checker.py", + "timeout": 5000, + "continueOnError": true + } + ] + }, + { + "description": "Code complexity tracking", + "matcher": "Edit|Write", + "priority": 70, + "enabled": true, + "hooks": [ + { + "type": "command", + "command": "node ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/complexity-monitor.js", + "timeout": 5000, + "continueOnError": true + } + ] + }, + { + "description": "Storybook coverage reminder for components", + "matcher": "Edit|Write", + "priority": 60, + "enabled": true, + "hooks": [ + { + "type": "command", + "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/storybook-reminder.py", + "timeout": 3000, + "continueOnError": true + } + ] + } + ], + "SessionEnd": [ + { + "description": "Generate session summary report", + 
"priority": 100, + "enabled": true, + "hooks": [ + { + "type": "command", + "command": "node ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/session-summary.js", + "timeout": 10000, + "continueOnError": true + } + ] + } + ], + "Stop": [ + { + "description": "Auto-backup changes to git", + "priority": 100, + "enabled": true, + "hooks": [ + { + "type": "command", + "command": "node ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/git-backup.js", + "timeout": 10000, + "continueOnError": false + } + ] + } + ] + } +} diff --git a/dss-claude-plugin/hooks/scripts/complexity-monitor.js b/dss-claude-plugin/hooks/scripts/complexity-monitor.js new file mode 100755 index 0000000..81bb3cb --- /dev/null +++ b/dss-claude-plugin/hooks/scripts/complexity-monitor.js @@ -0,0 +1,215 @@ +#!/usr/bin/env node +/** + * DSS Complexity Monitor Hook + * Tracks code complexity metrics and warns on high-complexity code. + * Written from scratch for DSS. + */ + +const fs = require('fs'); +const path = require('path'); + +// Configuration +const DEFAULT_CONFIG = { + complexity_monitor: { + enabled: true, + max_function_lines: 50, + max_component_lines: 200, + max_props: 10, + max_nesting_depth: 4, + warn_only: true + } +}; + +function loadConfig() { + const configPath = path.join(process.env.HOME || '', '.dss', 'hooks-config.json'); + try { + if (fs.existsSync(configPath)) { + const userConfig = JSON.parse(fs.readFileSync(configPath, 'utf8')); + return { ...DEFAULT_CONFIG, ...userConfig }; + } + } catch (e) { + // Use defaults + } + return DEFAULT_CONFIG; +} + +function countLines(content) { + return content.split('\n').length; +} + +function countProps(content) { + // Count props in interface/type definition + const propsMatch = content.match(/(?:interface|type)\s+\w*Props[^{]*\{([^}]+)\}/); + if (propsMatch) { + const propsContent = propsMatch[1]; + // Count semicolons or newlines with property definitions + const props = propsContent.split(/[;\n]/).filter(line => { + const trimmed = line.trim(); + return trimmed && !trimmed.startsWith('//') && trimmed.includes(':'); + }); + return props.length; + } + return 0; +} + +function countNestingDepth(content) { + let maxDepth = 0; + let currentDepth = 0; + + for (const char of content) { + if (char === '{' || char === '(') { + currentDepth++; + maxDepth = Math.max(maxDepth, currentDepth); + } else if (char === '}' || char === ')') { + currentDepth = Math.max(0, currentDepth - 1); + } + } + + return maxDepth; +} + +function countFunctions(content) { + const patterns = [ + /function\s+\w+\s*\([^)]*\)\s*\{/g, + /const\s+\w+\s*=\s*(?:async\s*)?\([^)]*\)\s*=>/g, + /const\s+\w+\s*=\s*(?:async\s*)?function/g + ]; + + let count = 0; + for (const pattern of patterns) { + const matches = content.match(pattern); + if (matches) count += matches.length; + } + return count; +} + +function analyzeComplexity(content, filePath, config) { + const issues = []; + const monitorConfig = config.complexity_monitor || {}; + const ext = path.extname(filePath).toLowerCase(); + + // Only analyze JS/TS files + if (!['.js', '.jsx', '.ts', '.tsx'].includes(ext)) { + return issues; + } + + const lines = countLines(content); + const props = countProps(content); + const nesting = countNestingDepth(content); + const functions = countFunctions(content); + + // Check component size (for tsx/jsx files) + if (['.tsx', '.jsx'].includes(ext)) { + if (lines > monitorConfig.max_component_lines) { + issues.push({ + type: 'component_size', + severity: 'medium', + message: `Component has ${lines} lines (max: 
${monitorConfig.max_component_lines})`, + suggestion: 'Consider breaking into smaller components' + }); + } + + if (props > monitorConfig.max_props) { + issues.push({ + type: 'prop_count', + severity: 'medium', + message: `Component has ${props} props (max: ${monitorConfig.max_props})`, + suggestion: 'Consider grouping related props or using composition' + }); + } + } + + // Check nesting depth + if (nesting > monitorConfig.max_nesting_depth) { + issues.push({ + type: 'nesting_depth', + severity: 'high', + message: `Nesting depth of ${nesting} (max: ${monitorConfig.max_nesting_depth})`, + suggestion: 'Extract nested logic into separate functions' + }); + } + + // Check function count (indicator of file doing too much) + if (functions > 10) { + issues.push({ + type: 'function_count', + severity: 'low', + message: `File contains ${functions} functions`, + suggestion: 'Consider splitting into multiple modules' + }); + } + + return issues; +} + +function formatOutput(issues, filePath) { + if (issues.length === 0) return ''; + + const severityIcons = { + high: '[HIGH]', + medium: '[MED]', + low: '[LOW]' + }; + + const lines = [`\n=== DSS Complexity Monitor: ${filePath} ===\n`]; + + for (const issue of issues) { + const icon = severityIcons[issue.severity] || '[?]'; + lines.push(`${icon} ${issue.message}`); + lines.push(` Suggestion: ${issue.suggestion}\n`); + } + + lines.push('='.repeat(50)); + return lines.join('\n'); +} + +async function main() { + const config = loadConfig(); + + if (!config.complexity_monitor?.enabled) { + process.exit(0); + } + + // Read input from stdin + let inputData; + try { + const chunks = []; + for await (const chunk of process.stdin) { + chunks.push(chunk); + } + inputData = JSON.parse(Buffer.concat(chunks).toString()); + } catch (e) { + process.exit(0); + } + + const toolName = inputData.tool_name || ''; + const toolInput = inputData.tool_input || {}; + + if (!['Edit', 'Write'].includes(toolName)) { + process.exit(0); + } + + const filePath = toolInput.file_path || ''; + let content = ''; + + if (toolName === 'Write') { + content = toolInput.content || ''; + } else if (toolName === 'Edit') { + content = toolInput.new_string || ''; + } + + if (!content || !filePath) { + process.exit(0); + } + + const issues = analyzeComplexity(content, filePath, config); + + if (issues.length > 0) { + const output = formatOutput(issues, filePath); + console.error(output); + } + + process.exit(0); +} + +main().catch(() => process.exit(0)); diff --git a/dss-claude-plugin/hooks/scripts/component-checker.py b/dss-claude-plugin/hooks/scripts/component-checker.py new file mode 100755 index 0000000..873674f --- /dev/null +++ b/dss-claude-plugin/hooks/scripts/component-checker.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python3 +""" +DSS Component Checker Hook +Validates React components for best practices and accessibility. +Written from scratch for DSS. +""" + +import json +import re +import sys +from pathlib import Path + +# React component patterns to check +COMPONENT_PATTERNS = [ + # Accessibility checks + { + "id": "a11y-img-alt", + "regex": r"<img\s+(?![^>]*alt=)[^>]*>", + "category": "accessibility", + "severity": "high", + "message": "Missing alt attribute on <img>. Add alt text for accessibility.", + "file_types": [".jsx", ".tsx"] + }, + { + "id": "a11y-button-type", + "regex": r"<button\s+(?![^>]*type=)[^>]*>", + "category": "accessibility", + "severity": "medium", + "message": "Button missing type attribute. 
Add type='button' or type='submit'.", + "file_types": [".jsx", ".tsx"] + }, + { + "id": "a11y-anchor-href", + "regex": r"<a\s+(?![^>]*href=)[^>]*>", + "category": "accessibility", + "severity": "high", + "message": "Anchor tag missing href. Use button for actions without navigation.", + "file_types": [".jsx", ".tsx"] + }, + { + "id": "a11y-click-handler", + "regex": r"<(?:div|span)\s+[^>]*onClick", + "category": "accessibility", + "severity": "medium", + "message": "Click handler on non-interactive element. Use