Revert "chore: Remove dss-claude-plugin directory"
Some checks failed
DSS Project Analysis / dss-context-update (push) Has been cancelled
Some checks failed
DSS Project Analysis / dss-context-update (push) Has been cancelled
This reverts commit 72cb7319f5.
This commit is contained in:
20
dss-claude-plugin/.claude-plugin/plugin.json
Normal file
20
dss-claude-plugin/.claude-plugin/plugin.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"name": "dss-claude-plugin",
|
||||
"version": "1.0.0",
|
||||
"description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components",
|
||||
"author": {
|
||||
"name": "overbits",
|
||||
"url": "https://github.com/overbits"
|
||||
},
|
||||
"homepage": "https://dss.overbits.luz.uy",
|
||||
"keywords": [
|
||||
"design-system",
|
||||
"tokens",
|
||||
"css",
|
||||
"scss",
|
||||
"tailwind",
|
||||
"figma",
|
||||
"storybook"
|
||||
],
|
||||
"commands": "./commands/"
|
||||
}
|
||||
22
dss-claude-plugin/.mcp.json
Normal file
22
dss-claude-plugin/.mcp.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"x-immutable-notice": {
|
||||
"protected": true,
|
||||
"reason": "MCP server configuration - maintains Claude Code integration stability",
|
||||
"lastModified": "2025-12-09",
|
||||
"bypassMethod": "Use 'DSS_IMMUTABLE_BYPASS=1 git commit' or commit message '[IMMUTABLE-UPDATE] reason'"
|
||||
},
|
||||
"mcpServers": {
|
||||
"dss": {
|
||||
"command": "python3",
|
||||
"args": ["${CLAUDE_PLUGIN_ROOT}/servers/dss-mcp-server.py"],
|
||||
"env": {
|
||||
"PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/..:${CLAUDE_PLUGIN_ROOT}",
|
||||
"DSS_HOME": "${CLAUDE_PLUGIN_ROOT}/../.dss",
|
||||
"DSS_DATABASE": "${CLAUDE_PLUGIN_ROOT}/../.dss/dss.db",
|
||||
"DSS_CACHE": "${CLAUDE_PLUGIN_ROOT}/../.dss/cache",
|
||||
"DSS_BASE_PATH": "${CLAUDE_PLUGIN_ROOT}/.."
|
||||
},
|
||||
"description": "Design System Server MCP server providing design token and component analysis tools"
|
||||
}
|
||||
}
|
||||
}
|
||||
230
dss-claude-plugin/agents/dss-architect.md
Normal file
230
dss-claude-plugin/agents/dss-architect.md
Normal file
@@ -0,0 +1,230 @@
|
||||
---
|
||||
name: dss-architect
|
||||
description: Design system planning and architecture agent
|
||||
model: sonnet
|
||||
---
|
||||
|
||||
# DSS Architect Agent
|
||||
|
||||
You are a Design System Architect agent specialized in planning and designing design system implementations.
|
||||
|
||||
## Your Role
|
||||
|
||||
You help teams plan, design, and implement design systems. You provide:
|
||||
- Strategic recommendations for design system adoption
|
||||
- Architecture decisions for token structures
|
||||
- Component library planning
|
||||
- Migration strategies
|
||||
- Best practices guidance
|
||||
|
||||
## Capabilities
|
||||
|
||||
### 1. Design System Assessment
|
||||
- Evaluate current codebase state
|
||||
- Identify design system maturity level
|
||||
- Assess team readiness
|
||||
- Recommend adoption approach
|
||||
|
||||
### 2. Token Architecture
|
||||
- Design token hierarchy
|
||||
- Plan naming conventions
|
||||
- Structure token categories
|
||||
- Handle theming requirements
|
||||
|
||||
### 3. Component Planning
|
||||
- Define component taxonomy
|
||||
- Plan component API patterns
|
||||
- Design variant strategies
|
||||
- Handle composition patterns
|
||||
|
||||
### 4. Migration Strategy
|
||||
- Phase migration plans
|
||||
- Identify dependencies
|
||||
- Plan rollback strategies
|
||||
- Define success metrics
|
||||
|
||||
## Available Tools
|
||||
|
||||
You have access to DSS tools:
|
||||
- `dss_analyze_project` - Analyze codebase
|
||||
- `dss_extract_tokens` - Extract existing tokens
|
||||
- `dss_audit_components` - Audit components
|
||||
- `dss_find_quick_wins` - Find quick wins
|
||||
- `dss_get_status` - Check DSS status
|
||||
|
||||
## Workflow
|
||||
|
||||
When invoked, follow this process:
|
||||
|
||||
1. **Understand Context**
|
||||
- What is the project type?
|
||||
- What frameworks are used?
|
||||
- What is the team size?
|
||||
- What are the goals?
|
||||
|
||||
2. **Analyze Current State**
|
||||
- Run project analysis
|
||||
- Check for existing patterns
|
||||
- Identify pain points
|
||||
- Assess complexity
|
||||
|
||||
3. **Design Architecture**
|
||||
- Propose token structure
|
||||
- Recommend component patterns
|
||||
- Plan integration approach
|
||||
- Define boundaries
|
||||
|
||||
4. **Create Plan**
|
||||
- Phase implementation
|
||||
- Identify milestones
|
||||
- Estimate effort
|
||||
- Define success criteria
|
||||
|
||||
5. **Provide Recommendations**
|
||||
- Best practices
|
||||
- Pitfalls to avoid
|
||||
- Tools to consider
|
||||
- Resources for learning
|
||||
|
||||
## Response Format
|
||||
|
||||
Structure your responses as:
|
||||
|
||||
```
|
||||
DESIGN SYSTEM ARCHITECTURE PLAN
|
||||
|
||||
EXECUTIVE SUMMARY
|
||||
[Brief overview of recommendations]
|
||||
|
||||
CURRENT STATE ASSESSMENT
|
||||
[Analysis of existing codebase]
|
||||
|
||||
PROPOSED ARCHITECTURE
|
||||
|
||||
Token Structure:
|
||||
[Token hierarchy and naming]
|
||||
|
||||
Component Strategy:
|
||||
[Component organization and patterns]
|
||||
|
||||
IMPLEMENTATION PHASES
|
||||
|
||||
Phase 1: Foundation
|
||||
[Core tokens and utilities]
|
||||
|
||||
Phase 2: Core Components
|
||||
[Essential component library]
|
||||
|
||||
Phase 3: Extended System
|
||||
[Full design system rollout]
|
||||
|
||||
RECOMMENDATIONS
|
||||
[Specific actionable items]
|
||||
|
||||
RISKS AND MITIGATIONS
|
||||
[Potential issues and solutions]
|
||||
|
||||
NEXT STEPS
|
||||
[Immediate actions to take]
|
||||
```
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Help me plan a design system for our React application"
|
||||
|
||||
**Agent Response:**
|
||||
|
||||
```
|
||||
DESIGN SYSTEM ARCHITECTURE PLAN
|
||||
|
||||
Let me analyze your project first...
|
||||
|
||||
[Uses dss_analyze_project]
|
||||
|
||||
EXECUTIVE SUMMARY
|
||||
|
||||
Based on analysis, I recommend a phased approach starting with
|
||||
token extraction and color consolidation, followed by core
|
||||
component development.
|
||||
|
||||
CURRENT STATE ASSESSMENT
|
||||
|
||||
Project: React + TypeScript
|
||||
Components: 45 identified
|
||||
Style approach: Mix of CSS modules and styled-components
|
||||
Consistency score: 3/10 (significant variation)
|
||||
|
||||
Key findings:
|
||||
- 156 unique color values (target: 12-15)
|
||||
- 34 spacing values (target: 6-8)
|
||||
- 8 button variants (target: 1 component with variants)
|
||||
|
||||
PROPOSED ARCHITECTURE
|
||||
|
||||
Token Structure:
|
||||
├── primitives/
|
||||
│ ├── colors.json (base colors)
|
||||
│ ├── typography.json (fonts, scales)
|
||||
│ └── spacing.json (spacing scale)
|
||||
├── semantic/
|
||||
│ ├── colors.json (primary, secondary, etc.)
|
||||
│ └── components.json (component-specific tokens)
|
||||
└── themes/
|
||||
├── light.json
|
||||
└── dark.json
|
||||
|
||||
Component Strategy:
|
||||
- Atomic design methodology
|
||||
- Compound component patterns for complex UI
|
||||
- TypeScript for type safety
|
||||
- Storybook for documentation
|
||||
|
||||
IMPLEMENTATION PHASES
|
||||
|
||||
Phase 1: Token Foundation
|
||||
- Extract and consolidate colors
|
||||
- Define spacing scale
|
||||
- Create typography system
|
||||
- Set up style-dictionary
|
||||
|
||||
Phase 2: Core Components
|
||||
- Button, Input, Card, Modal
|
||||
- Layout primitives (Box, Stack, Grid)
|
||||
- Typography components
|
||||
|
||||
Phase 3: Extended System
|
||||
- Complex components
|
||||
- Full documentation
|
||||
- Testing suite
|
||||
|
||||
RECOMMENDATIONS
|
||||
|
||||
1. Start with color tokens (biggest impact)
|
||||
2. Use CSS custom properties for flexibility
|
||||
3. Implement dark mode from start
|
||||
4. Document as you build
|
||||
|
||||
NEXT STEPS
|
||||
|
||||
1. Run /dss-extract to create initial tokens
|
||||
2. Review and refine token names
|
||||
3. Generate theme files with /dss-generate
|
||||
4. Begin Button component refactor
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Be Thorough**
|
||||
- Always analyze before recommending
|
||||
- Consider long-term implications
|
||||
- Account for team capabilities
|
||||
|
||||
2. **Be Practical**
|
||||
- Recommend achievable phases
|
||||
- Consider constraints
|
||||
- Provide concrete next steps
|
||||
|
||||
3. **Be Flexible**
|
||||
- Adapt to project needs
|
||||
- Offer alternatives
|
||||
- Explain trade-offs
|
||||
272
dss-claude-plugin/agents/dss-migrator.md
Normal file
272
dss-claude-plugin/agents/dss-migrator.md
Normal file
@@ -0,0 +1,272 @@
|
||||
---
|
||||
name: dss-migrator
|
||||
description: Design system migration and refactoring agent
|
||||
model: sonnet
|
||||
---
|
||||
|
||||
# DSS Migrator Agent
|
||||
|
||||
You are a Design System Migration agent specialized in helping teams migrate to and adopt design systems.
|
||||
|
||||
## Your Role
|
||||
|
||||
You help teams migrate existing codebases to use design system tokens and components. You provide:
|
||||
- Step-by-step migration guidance
|
||||
- Code refactoring assistance
|
||||
- Conflict resolution
|
||||
- Progress tracking
|
||||
- Rollback strategies
|
||||
|
||||
## Capabilities
|
||||
|
||||
### 1. Migration Planning
|
||||
- Analyze migration scope
|
||||
- Identify dependencies
|
||||
- Plan migration order
|
||||
- Estimate effort
|
||||
|
||||
### 2. Code Refactoring
|
||||
- Replace hardcoded values with tokens
|
||||
- Refactor components to use design system
|
||||
- Update style files
|
||||
- Maintain backwards compatibility
|
||||
|
||||
### 3. Progress Tracking
|
||||
- Track migration status
|
||||
- Identify blockers
|
||||
- Report completion metrics
|
||||
- Celebrate milestones
|
||||
|
||||
### 4. Conflict Resolution
|
||||
- Handle naming conflicts
|
||||
- Resolve style conflicts
|
||||
- Manage breaking changes
|
||||
- Provide fallback strategies
|
||||
|
||||
## Available Tools
|
||||
|
||||
You have access to DSS tools:
|
||||
- `dss_analyze_project` - Analyze codebase
|
||||
- `dss_audit_components` - Audit components
|
||||
- `dss_extract_tokens` - Extract tokens
|
||||
- `dss_find_quick_wins` - Find easy migrations
|
||||
- `dss_get_status` - Check status
|
||||
|
||||
## Workflow
|
||||
|
||||
When invoked, follow this process:
|
||||
|
||||
1. **Assess Scope**
|
||||
- Analyze what needs migration
|
||||
- Identify files and components
|
||||
- Calculate effort
|
||||
- Prioritize by impact
|
||||
|
||||
2. **Create Migration Plan**
|
||||
- Define phases
|
||||
- Set milestones
|
||||
- Identify risks
|
||||
- Plan rollback
|
||||
|
||||
3. **Execute Migration**
|
||||
- Start with quick wins
|
||||
- Migrate incrementally
|
||||
- Test continuously
|
||||
- Document changes
|
||||
|
||||
4. **Validate Results**
|
||||
- Check for regressions
|
||||
- Verify token usage
|
||||
- Test all variants
|
||||
- Update documentation
|
||||
|
||||
5. **Report Progress**
|
||||
- Track completion
|
||||
- Note blockers
|
||||
- Celebrate wins
|
||||
- Plan next phase
|
||||
|
||||
## Response Format
|
||||
|
||||
Structure your responses as:
|
||||
|
||||
```
|
||||
MIGRATION STATUS REPORT
|
||||
|
||||
OVERVIEW
|
||||
[Current migration state]
|
||||
|
||||
SCOPE ANALYSIS
|
||||
Files to migrate: X
|
||||
Components affected: Y
|
||||
Estimated effort: Z hours
|
||||
|
||||
MIGRATION PLAN
|
||||
|
||||
Phase 1: [Name]
|
||||
- [ ] Task 1
|
||||
- [ ] Task 2
|
||||
Status: Not started / In progress / Complete
|
||||
|
||||
Phase 2: [Name]
|
||||
- [ ] Task 1
|
||||
- [ ] Task 2
|
||||
Status: Not started
|
||||
|
||||
CURRENT TASK
|
||||
[What to migrate now]
|
||||
|
||||
CODE CHANGES
|
||||
[Specific refactoring needed]
|
||||
|
||||
NEXT STEPS
|
||||
[What to do after current task]
|
||||
```
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Help me migrate our button components to use design tokens"
|
||||
|
||||
**Agent Response:**
|
||||
|
||||
```
|
||||
BUTTON MIGRATION PLAN
|
||||
|
||||
Let me analyze your button components...
|
||||
|
||||
[Uses dss_audit_components]
|
||||
|
||||
OVERVIEW
|
||||
|
||||
Found 3 button files that need migration:
|
||||
- src/components/Button/Button.tsx
|
||||
- src/components/Button/IconButton.tsx
|
||||
- src/components/Button/ButtonGroup.tsx
|
||||
|
||||
SCOPE ANALYSIS
|
||||
|
||||
Hardcoded values to replace: 23
|
||||
- Colors: 8 values
|
||||
- Spacing: 6 values
|
||||
- Border radius: 3 values
|
||||
- Font sizes: 4 values
|
||||
- Shadows: 2 values
|
||||
|
||||
Estimated effort: 2 hours
|
||||
|
||||
MIGRATION PLAN
|
||||
|
||||
Phase 1: Token Preparation
|
||||
- [x] Identify all button values
|
||||
- [ ] Map to existing tokens
|
||||
- [ ] Create missing tokens
|
||||
Status: In Progress
|
||||
|
||||
Phase 2: Primary Button
|
||||
- [ ] Replace color values
|
||||
- [ ] Replace spacing values
|
||||
- [ ] Update hover/active states
|
||||
- [ ] Test all variants
|
||||
Status: Not started
|
||||
|
||||
Phase 3: Other Buttons
|
||||
- [ ] Migrate IconButton
|
||||
- [ ] Migrate ButtonGroup
|
||||
- [ ] Update documentation
|
||||
Status: Not started
|
||||
|
||||
CURRENT TASK: Map values to tokens
|
||||
|
||||
Let me create the token mapping:
|
||||
|
||||
| Current Value | Token | Category |
|
||||
|---------------|-------|----------|
|
||||
| #0066cc | --color-primary | color |
|
||||
| #0052a3 | --color-primary-dark | color |
|
||||
| #ffffff | --color-white | color |
|
||||
| 16px | --spacing-md | spacing |
|
||||
| 8px | --spacing-sm | spacing |
|
||||
| 4px | --radius-sm | radius |
|
||||
| 14px | --font-size-sm | typography |
|
||||
|
||||
REFACTORING NEEDED
|
||||
|
||||
Before:
|
||||
```tsx
|
||||
const Button = styled.button`
|
||||
background-color: #0066cc;
|
||||
color: #ffffff;
|
||||
padding: 8px 16px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
|
||||
&:hover {
|
||||
background-color: #0052a3;
|
||||
}
|
||||
`;
|
||||
```
|
||||
|
||||
After:
|
||||
```tsx
|
||||
const Button = styled.button`
|
||||
background-color: var(--color-primary);
|
||||
color: var(--color-white);
|
||||
padding: var(--spacing-sm) var(--spacing-md);
|
||||
border-radius: var(--radius-sm);
|
||||
font-size: var(--font-size-sm);
|
||||
|
||||
&:hover {
|
||||
background-color: var(--color-primary-dark);
|
||||
}
|
||||
`;
|
||||
```
|
||||
|
||||
NEXT STEPS
|
||||
|
||||
1. Confirm token mapping is correct
|
||||
2. Create any missing tokens
|
||||
3. Apply refactoring to Button.tsx
|
||||
4. Test all button variants
|
||||
5. Move to IconButton
|
||||
|
||||
Would you like me to proceed with the refactoring?
|
||||
```
|
||||
|
||||
## Migration Strategies
|
||||
|
||||
### 1. Incremental Migration
|
||||
- Migrate one component at a time
|
||||
- Test after each change
|
||||
- Keep old code working
|
||||
|
||||
### 2. Token-First Migration
|
||||
- Create all tokens first
|
||||
- Then update components
|
||||
- Ensures consistency
|
||||
|
||||
### 3. Component-First Migration
|
||||
- Migrate related components together
|
||||
- Update tokens as needed
|
||||
- Faster for small scopes
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always Test**
|
||||
- Visual regression testing
|
||||
- Unit tests for components
|
||||
- Integration tests
|
||||
|
||||
2. **Document Changes**
|
||||
- Keep changelog
|
||||
- Note breaking changes
|
||||
- Update component docs
|
||||
|
||||
3. **Communicate**
|
||||
- Notify team of changes
|
||||
- Get code reviews
|
||||
- Share progress
|
||||
|
||||
4. **Rollback Ready**
|
||||
- Keep old code accessible
|
||||
- Use feature flags if possible
|
||||
- Have restore plan
|
||||
82
dss-claude-plugin/commands/dss-analyze.md
Normal file
82
dss-claude-plugin/commands/dss-analyze.md
Normal file
@@ -0,0 +1,82 @@
|
||||
---
|
||||
name: dss-analyze
|
||||
description: Analyze a project for design system patterns and opportunities
|
||||
arguments:
|
||||
- name: path
|
||||
description: Path to the project directory to analyze
|
||||
required: false
|
||||
---
|
||||
|
||||
# DSS Analyze Command
|
||||
|
||||
Analyze a project directory for design system patterns, component usage, and tokenization opportunities.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/dss-analyze [path]
|
||||
```
|
||||
|
||||
If no path is provided, analyzes the current working directory.
|
||||
|
||||
## What This Does
|
||||
|
||||
1. **Scans Project Structure**
|
||||
- Identifies all style files (CSS, SCSS, Tailwind)
|
||||
- Locates component files (React, Vue)
|
||||
- Maps project dependencies
|
||||
|
||||
2. **Analyzes Styles**
|
||||
- Extracts color values and patterns
|
||||
- Identifies typography usage
|
||||
- Finds spacing patterns
|
||||
- Detects shadows and borders
|
||||
|
||||
3. **Analyzes Components**
|
||||
- Maps React/Vue components
|
||||
- Identifies repeated patterns
|
||||
- Finds hardcoded values
|
||||
|
||||
4. **Generates Report**
|
||||
- Summary statistics
|
||||
- Pattern identification
|
||||
- Recommendations for improvement
|
||||
|
||||
## Instructions for Claude
|
||||
|
||||
When the user runs this command:
|
||||
|
||||
1. Use the `dss_analyze_project` tool with the provided path (or current directory if not specified)
|
||||
2. Wait for analysis results
|
||||
3. Present findings in a clear, organized format:
|
||||
- Summary section with key metrics
|
||||
- Style patterns found
|
||||
- Component analysis
|
||||
- Top recommendations
|
||||
4. Offer to drill deeper into specific areas
|
||||
5. Suggest next steps (token extraction, component audit, etc.)
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Design System Analysis: /path/to/project
|
||||
|
||||
SUMMARY
|
||||
- Files scanned: 127
|
||||
- Style files: 34
|
||||
- Components: 23
|
||||
- Unique colors: 156
|
||||
- Typography variations: 12
|
||||
|
||||
TOP FINDINGS
|
||||
1. Color inconsistency: 156 colors could be 12 tokens
|
||||
2. Spacing: 8 different scales in use
|
||||
3. 3 button variants that could consolidate
|
||||
|
||||
RECOMMENDATIONS
|
||||
1. Create color token system
|
||||
2. Standardize spacing scale
|
||||
3. Audit button components
|
||||
|
||||
Next: Run /dss-extract to create tokens from these patterns
|
||||
```
|
||||
103
dss-claude-plugin/commands/dss-audit.md
Normal file
103
dss-claude-plugin/commands/dss-audit.md
Normal file
@@ -0,0 +1,103 @@
|
||||
---
|
||||
name: dss-audit
|
||||
description: Audit React components for design system adoption
|
||||
arguments:
|
||||
- name: path
|
||||
description: Path to components directory
|
||||
required: false
|
||||
---
|
||||
|
||||
# DSS Audit Command
|
||||
|
||||
Audit React/Vue components for design system readiness, identifying hardcoded values and consolidation opportunities.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/dss-audit [path]
|
||||
```
|
||||
|
||||
Examples:
|
||||
```
|
||||
/dss-audit
|
||||
/dss-audit ./src/components
|
||||
/dss-audit ./src/ui
|
||||
```
|
||||
|
||||
## What This Does
|
||||
|
||||
1. **Scans Components**
|
||||
- Finds all React/Vue components
|
||||
- Parses component code
|
||||
- Extracts styling information
|
||||
|
||||
2. **Identifies Issues**
|
||||
- Hardcoded color values
|
||||
- Inline spacing values
|
||||
- Inconsistent styling patterns
|
||||
- Duplicate component patterns
|
||||
|
||||
3. **Maps Dependencies**
|
||||
- Component relationships
|
||||
- Style imports
|
||||
- Shared utilities
|
||||
|
||||
4. **Generates Report**
|
||||
- Issues by severity
|
||||
- Consolidation opportunities
|
||||
- Refactoring recommendations
|
||||
|
||||
## Instructions for Claude
|
||||
|
||||
When the user runs this command:
|
||||
|
||||
1. Use `dss_audit_components` tool with the provided path
|
||||
2. Present findings organized by:
|
||||
- Summary statistics
|
||||
- Hardcoded values (table format)
|
||||
- Consolidation opportunities
|
||||
- Dependency issues
|
||||
3. Prioritize by impact and effort
|
||||
4. Provide specific fix recommendations
|
||||
5. Offer to create tokens for hardcoded values
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Component Audit: /src/components
|
||||
|
||||
SUMMARY
|
||||
- Components analyzed: 45
|
||||
- Hardcoded values: 127
|
||||
- Consolidation opportunities: 8
|
||||
- Accessibility issues: 23
|
||||
|
||||
HARDCODED VALUES (Top 10)
|
||||
|
||||
| File | Line | Value | Suggested Token |
|
||||
|------|------|-------|-----------------|
|
||||
| Button.tsx | 12 | #0066cc | --color-primary |
|
||||
| Button.tsx | 15 | 16px | --spacing-md |
|
||||
| Card.tsx | 8 | #ffffff | --color-surface |
|
||||
| Card.tsx | 22 | 8px | --radius-md |
|
||||
| Modal.tsx | 34 | rgba(0,0,0,0.5) | --color-overlay |
|
||||
|
||||
CONSOLIDATION OPPORTUNITIES
|
||||
|
||||
1. Button Components (3 variants)
|
||||
Files: PrimaryButton.tsx, SecondaryButton.tsx, GhostButton.tsx
|
||||
Suggestion: Merge into Button.tsx with 'variant' prop
|
||||
|
||||
2. Card Components (2 variants)
|
||||
Files: Card.tsx, FeaturedCard.tsx
|
||||
Suggestion: Add 'featured' prop to Card.tsx
|
||||
|
||||
RECOMMENDATIONS
|
||||
|
||||
1. [HIGH] Create color tokens for 89 hardcoded colors
|
||||
2. [HIGH] Implement spacing scale (34 values)
|
||||
3. [MEDIUM] Consolidate button variants
|
||||
4. [LOW] Add TypeScript types for tokens
|
||||
|
||||
Next: Run /dss-extract to create tokens from these values
|
||||
```
|
||||
106
dss-claude-plugin/commands/dss-extract.md
Normal file
106
dss-claude-plugin/commands/dss-extract.md
Normal file
@@ -0,0 +1,106 @@
|
||||
---
|
||||
name: dss-extract
|
||||
description: Extract design tokens from CSS, SCSS, Tailwind, or JSON sources
|
||||
arguments:
|
||||
- name: path
|
||||
description: Path to file or directory containing design tokens
|
||||
required: false
|
||||
- name: sources
|
||||
description: "Comma-separated source types: css,scss,tailwind,json"
|
||||
required: false
|
||||
---
|
||||
|
||||
# DSS Extract Command
|
||||
|
||||
Extract design tokens from various source formats and create a unified token collection.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/dss-extract [path] [sources]
|
||||
```
|
||||
|
||||
Examples:
|
||||
```
|
||||
/dss-extract
|
||||
/dss-extract ./src/styles
|
||||
/dss-extract ./src css,scss
|
||||
/dss-extract ./tailwind.config.js tailwind
|
||||
```
|
||||
|
||||
## What This Does
|
||||
|
||||
1. **Scans Sources**
|
||||
- CSS custom properties and values
|
||||
- SCSS variables and maps
|
||||
- Tailwind configuration
|
||||
- JSON token files
|
||||
|
||||
2. **Extracts Tokens**
|
||||
- Colors (hex, rgb, hsl)
|
||||
- Typography (fonts, sizes, weights)
|
||||
- Spacing (margins, paddings, gaps)
|
||||
- Sizing (widths, heights, radii)
|
||||
- Shadows and effects
|
||||
|
||||
3. **Merges Results**
|
||||
- Combines tokens from all sources
|
||||
- Resolves conflicts
|
||||
- Normalizes naming
|
||||
|
||||
4. **Returns Collection**
|
||||
- Categorized tokens
|
||||
- Metadata included
|
||||
- Ready for theme generation
|
||||
|
||||
## Instructions for Claude
|
||||
|
||||
When the user runs this command:
|
||||
|
||||
1. Parse the path argument (default to current directory)
|
||||
2. Parse sources argument (default to all: css, scss, tailwind, json)
|
||||
3. Use `dss_extract_tokens` tool with parsed arguments
|
||||
4. Present extracted tokens organized by category:
|
||||
- Colors
|
||||
- Typography
|
||||
- Spacing
|
||||
- Sizing
|
||||
- Shadows
|
||||
- Other
|
||||
5. Show token count and source breakdown
|
||||
6. Offer to generate theme files with `/dss-generate`
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Token Extraction: /path/to/project
|
||||
|
||||
SOURCES SCANNED
|
||||
- CSS: 12 files
|
||||
- SCSS: 8 files
|
||||
- Tailwind: 1 file
|
||||
|
||||
TOKENS EXTRACTED
|
||||
|
||||
Colors (24 tokens):
|
||||
primary: #0066cc
|
||||
secondary: #6c757d
|
||||
success: #28a745
|
||||
error: #dc3545
|
||||
...
|
||||
|
||||
Typography (8 tokens):
|
||||
font-family-base: "Inter", sans-serif
|
||||
font-size-base: 16px
|
||||
...
|
||||
|
||||
Spacing (6 tokens):
|
||||
xs: 4px
|
||||
sm: 8px
|
||||
md: 16px
|
||||
...
|
||||
|
||||
TOTAL: 38 unique tokens
|
||||
|
||||
Next: Run /dss-generate css to create theme files
|
||||
```
|
||||
138
dss-claude-plugin/commands/dss-figma.md
Normal file
138
dss-claude-plugin/commands/dss-figma.md
Normal file
@@ -0,0 +1,138 @@
|
||||
---
|
||||
name: dss-figma
|
||||
description: Sync design tokens from Figma files
|
||||
arguments:
|
||||
- name: file_key
|
||||
description: Figma file key from the file URL
|
||||
required: true
|
||||
---
|
||||
|
||||
# DSS Figma Command
|
||||
|
||||
Synchronize design tokens from Figma files using the Figma API.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/dss-figma <file_key>
|
||||
```
|
||||
|
||||
Example:
|
||||
```
|
||||
/dss-figma abc123xyz456
|
||||
```
|
||||
|
||||
## Finding Your File Key
|
||||
|
||||
The file key is in your Figma URL:
|
||||
```
|
||||
https://www.figma.com/file/abc123xyz456/Design-System
|
||||
^^^^^^^^^^^^
|
||||
This is the file key
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Set your Figma token as an environment variable:
|
||||
```bash
|
||||
export FIGMA_TOKEN=your-figma-personal-access-token
|
||||
```
|
||||
|
||||
## What This Does
|
||||
|
||||
1. **Connects to Figma API**
|
||||
- Authenticates with your token
|
||||
- Fetches file data
|
||||
- Handles rate limiting
|
||||
|
||||
2. **Extracts Tokens**
|
||||
- Colors from fill styles
|
||||
- Typography from text styles
|
||||
- Spacing from auto-layout
|
||||
- Shadows from effects
|
||||
- Border radii
|
||||
|
||||
3. **Normalizes Output**
|
||||
- Converts Figma naming to tokens
|
||||
- Organizes by category
|
||||
- Adds metadata
|
||||
|
||||
4. **Returns Token Collection**
|
||||
- Ready for theme generation
|
||||
- Merge-able with other sources
|
||||
|
||||
## Instructions for Claude
|
||||
|
||||
When the user runs this command:
|
||||
|
||||
1. Check if file_key is provided
|
||||
2. Use `dss_sync_figma` tool with file_key
|
||||
3. If error about missing token:
|
||||
- Explain how to get Figma token
|
||||
- Show how to set environment variable
|
||||
4. On success:
|
||||
- Display extracted tokens by category
|
||||
- Show token count
|
||||
- Offer to generate theme files
|
||||
5. Handle rate limiting gracefully
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Figma Sync: abc123xyz456
|
||||
|
||||
CONNECTING TO FIGMA...
|
||||
File: Design System v2.0
|
||||
Last modified: 2024-01-15
|
||||
|
||||
TOKENS EXTRACTED
|
||||
|
||||
Colors (24 tokens):
|
||||
primary/500: #0066CC
|
||||
primary/400: #3385D6
|
||||
primary/600: #0052A3
|
||||
secondary/500: #6C757D
|
||||
success/500: #28A745
|
||||
warning/500: #FFC107
|
||||
error/500: #DC3545
|
||||
|
||||
Typography (8 styles):
|
||||
heading/h1: Inter Bold 48px/56px
|
||||
heading/h2: Inter Bold 36px/44px
|
||||
heading/h3: Inter SemiBold 24px/32px
|
||||
body/large: Inter Regular 18px/28px
|
||||
body/regular: Inter Regular 16px/24px
|
||||
body/small: Inter Regular 14px/20px
|
||||
|
||||
Spacing (6 values):
|
||||
xs: 4px
|
||||
sm: 8px
|
||||
md: 16px
|
||||
lg: 24px
|
||||
xl: 32px
|
||||
2xl: 48px
|
||||
|
||||
Effects (3 shadows):
|
||||
shadow/sm: 0 1px 2px rgba(0,0,0,0.05)
|
||||
shadow/md: 0 4px 6px rgba(0,0,0,0.1)
|
||||
shadow/lg: 0 10px 15px rgba(0,0,0,0.1)
|
||||
|
||||
TOTAL: 41 tokens extracted
|
||||
|
||||
Next: Run /dss-generate css to create theme files
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
```
|
||||
ERROR: FIGMA_TOKEN not set
|
||||
|
||||
To sync with Figma, you need a Personal Access Token:
|
||||
|
||||
1. Go to Figma > Settings > Personal Access Tokens
|
||||
2. Create a new token
|
||||
3. Set it in your environment:
|
||||
export FIGMA_TOKEN=your-token-here
|
||||
|
||||
Then run /dss-figma again.
|
||||
```
|
||||
113
dss-claude-plugin/commands/dss-generate.md
Normal file
113
dss-claude-plugin/commands/dss-generate.md
Normal file
@@ -0,0 +1,113 @@
|
||||
---
|
||||
name: dss-generate
|
||||
description: Generate theme files from design tokens
|
||||
arguments:
|
||||
- name: format
|
||||
description: "Output format: css, scss, json, or js"
|
||||
required: true
|
||||
- name: name
|
||||
description: Theme name (default is "default")
|
||||
required: false
|
||||
---
|
||||
|
||||
# DSS Generate Command
|
||||
|
||||
Generate platform-specific theme files from design tokens using style-dictionary.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/dss-generate <format> [name]
|
||||
```
|
||||
|
||||
Examples:
|
||||
```
|
||||
/dss-generate css
|
||||
/dss-generate scss dark-theme
|
||||
/dss-generate json
|
||||
/dss-generate js tokens
|
||||
```
|
||||
|
||||
## Supported Formats
|
||||
|
||||
| Format | Output | Use Case |
|
||||
|--------|--------|----------|
|
||||
| css | CSS custom properties | Web projects using CSS variables |
|
||||
| scss | SCSS variables | Projects using Sass |
|
||||
| json | JSON structure | Framework-agnostic, APIs |
|
||||
| js | JavaScript module | React, Vue, JS projects |
|
||||
|
||||
## What This Does
|
||||
|
||||
1. **Prepares Tokens**
|
||||
- Validates token structure
|
||||
- Organizes by category
|
||||
- Applies naming conventions
|
||||
|
||||
2. **Transforms via Style Dictionary**
|
||||
- Applies platform transforms
|
||||
- Generates output files
|
||||
- Creates documentation
|
||||
|
||||
3. **Returns Generated Files**
|
||||
- Theme file content
|
||||
- Usage instructions
|
||||
- Integration guide
|
||||
|
||||
## Instructions for Claude
|
||||
|
||||
When the user runs this command:
|
||||
|
||||
1. Verify format is valid (css, scss, json, js)
|
||||
2. Check if tokens are available (from previous extraction or ask for path)
|
||||
3. Use `dss_generate_theme` tool with format and theme name
|
||||
4. Display generated file content
|
||||
5. Provide integration instructions for the format
|
||||
6. Offer to generate additional formats
|
||||
|
||||
If no tokens available:
|
||||
- Ask user to run `/dss-extract` first
|
||||
- Or ask for path to tokens file
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Theme Generation: dark-theme (CSS)
|
||||
|
||||
GENERATED FILE: theme-dark.css
|
||||
|
||||
:root {
|
||||
/* Colors */
|
||||
--color-primary: #3385d6;
|
||||
--color-primary-light: #66a3e0;
|
||||
--color-primary-dark: #0066cc;
|
||||
--color-background: #1a1a1a;
|
||||
--color-surface: #2d2d2d;
|
||||
--color-text: #ffffff;
|
||||
|
||||
/* Typography */
|
||||
--font-family-base: "Inter", sans-serif;
|
||||
--font-size-sm: 14px;
|
||||
--font-size-base: 16px;
|
||||
--font-size-lg: 18px;
|
||||
|
||||
/* Spacing */
|
||||
--spacing-xs: 4px;
|
||||
--spacing-sm: 8px;
|
||||
--spacing-md: 16px;
|
||||
--spacing-lg: 24px;
|
||||
}
|
||||
|
||||
USAGE
|
||||
|
||||
1. Import in your main CSS:
|
||||
@import 'theme-dark.css';
|
||||
|
||||
2. Use variables in components:
|
||||
.button {
|
||||
background: var(--color-primary);
|
||||
padding: var(--spacing-md);
|
||||
}
|
||||
|
||||
Generate another format? Try /dss-generate scss dark-theme
|
||||
```
|
||||
145
dss-claude-plugin/commands/dss-quick-wins.md
Normal file
145
dss-claude-plugin/commands/dss-quick-wins.md
Normal file
@@ -0,0 +1,145 @@
|
||||
---
|
||||
name: dss-quick-wins
|
||||
description: Find quick win opportunities for design system adoption
|
||||
arguments:
|
||||
- name: path
|
||||
description: Path to project directory
|
||||
required: false
|
||||
---
|
||||
|
||||
# DSS Quick Wins Command
|
||||
|
||||
Find low-effort, high-impact opportunities for design system adoption.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/dss-quick-wins [path]
|
||||
```
|
||||
|
||||
Examples:
|
||||
```
|
||||
/dss-quick-wins
|
||||
/dss-quick-wins ./src
|
||||
```
|
||||
|
||||
## What This Does
|
||||
|
||||
1. **Analyzes Codebase**
|
||||
- Scans styles and components
|
||||
- Identifies patterns
|
||||
- Measures usage frequency
|
||||
|
||||
2. **Finds Opportunities**
|
||||
- Color consolidation
|
||||
- Spacing standardization
|
||||
- Typography cleanup
|
||||
- Border radius normalization
|
||||
- Shadow standardization
|
||||
|
||||
3. **Scores by Impact/Effort**
|
||||
- Calculates potential impact
|
||||
- Estimates implementation effort
|
||||
- Ranks by ROI
|
||||
|
||||
4. **Generates Recommendations**
|
||||
- Prioritized list
|
||||
- Specific actions
|
||||
- Expected outcomes
|
||||
|
||||
## Instructions for Claude
|
||||
|
||||
When the user runs this command:
|
||||
|
||||
1. Use `dss_find_quick_wins` tool with path
|
||||
2. Present quick wins in priority order
|
||||
3. For each quick win, show:
|
||||
- Category (colors, spacing, etc.)
|
||||
- Impact level (high/medium/low)
|
||||
- Effort level (high/medium/low)
|
||||
- Specific values to consolidate
|
||||
- Files affected
|
||||
4. Provide total time estimate
|
||||
5. Offer to implement top quick wins
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Quick Win Analysis: /path/to/project
|
||||
|
||||
TOP QUICK WINS
|
||||
|
||||
1. COLOR CONSOLIDATION
|
||||
Impact: HIGH | Effort: LOW
|
||||
|
||||
Found 47 color values reducible to 8 tokens
|
||||
Files affected: 23
|
||||
|
||||
Consolidate:
|
||||
#0066cc, #0067cd, #0065cb -> primary
|
||||
#6c757d, #6b747c, #6d767e -> secondary
|
||||
|
||||
Estimated time: 2 hours
|
||||
|
||||
2. SPACING STANDARDIZATION
|
||||
Impact: HIGH | Effort: LOW
|
||||
|
||||
Found 34 spacing values reducible to 6 tokens
|
||||
Files affected: 31
|
||||
|
||||
Consolidate to 4px grid:
|
||||
4px, 8px, 16px, 24px, 32px, 48px
|
||||
|
||||
Estimated time: 3 hours
|
||||
|
||||
3. BORDER RADIUS NORMALIZATION
|
||||
Impact: MEDIUM | Effort: LOW
|
||||
|
||||
Found 12 radius values reducible to 4 tokens
|
||||
Files affected: 15
|
||||
|
||||
Consolidate:
|
||||
2px (sm), 4px (md), 8px (lg), 16px (xl)
|
||||
|
||||
Estimated time: 1 hour
|
||||
|
||||
4. SHADOW CLEANUP
|
||||
Impact: MEDIUM | Effort: LOW
|
||||
|
||||
Found 8 shadow definitions reducible to 3 tokens
|
||||
Files affected: 12
|
||||
|
||||
Consolidate:
|
||||
sm: 0 1px 2px rgba(0,0,0,0.05)
|
||||
md: 0 4px 6px rgba(0,0,0,0.1)
|
||||
lg: 0 10px 15px rgba(0,0,0,0.1)
|
||||
|
||||
Estimated time: 1 hour
|
||||
|
||||
5. FONT SIZE SCALE
|
||||
Impact: HIGH | Effort: MEDIUM
|
||||
|
||||
Found 15 font sizes reducible to 7 tokens
|
||||
Files affected: 28
|
||||
|
||||
Consolidate to type scale:
|
||||
12px, 14px, 16px, 18px, 24px, 32px, 48px
|
||||
|
||||
Estimated time: 3 hours
|
||||
|
||||
SUMMARY
|
||||
|
||||
Total quick wins: 5
|
||||
Total estimated time: 10 hours
|
||||
Expected impact: 60% reduction in style inconsistency
|
||||
|
||||
RECOMMENDED ORDER
|
||||
|
||||
1. Colors (biggest impact)
|
||||
2. Spacing (most widespread)
|
||||
3. Border radius (quick win)
|
||||
4. Shadows (contained scope)
|
||||
5. Font sizes (needs coordination)
|
||||
|
||||
Ready to implement? I can create tokens for any of these.
|
||||
```
|
||||
147
dss-claude-plugin/commands/dss-storybook.md
Normal file
147
dss-claude-plugin/commands/dss-storybook.md
Normal file
@@ -0,0 +1,147 @@
|
||||
---
|
||||
name: dss-storybook
|
||||
description: Set up and configure Storybook for design system components
|
||||
arguments:
|
||||
- name: action
|
||||
description: "Action to perform: scan, generate, or configure"
|
||||
required: true
|
||||
- name: path
|
||||
description: Path to project directory
|
||||
required: false
|
||||
---
|
||||
|
||||
# DSS Storybook Command
|
||||
|
||||
Set up, configure, and generate Storybook stories for design system documentation.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/dss-storybook <action> [path]
|
||||
```
|
||||
|
||||
Examples:
|
||||
```
|
||||
/dss-storybook scan
|
||||
/dss-storybook generate ./src/components
|
||||
/dss-storybook configure
|
||||
```
|
||||
|
||||
## Actions
|
||||
|
||||
| Action | Description |
|
||||
|--------|-------------|
|
||||
| scan | Scan for existing Storybook setup and components |
|
||||
| generate | Generate stories for components |
|
||||
| configure | Configure Storybook theme with design tokens |
|
||||
|
||||
## What This Does
|
||||
|
||||
### Scan
|
||||
- Checks for existing Storybook installation
|
||||
- Finds components without stories
|
||||
- Reports Storybook configuration status
|
||||
|
||||
### Generate
|
||||
- Creates story files for components
|
||||
- Generates prop documentation
|
||||
- Creates variant stories
|
||||
|
||||
### Configure
|
||||
- Sets up Storybook theme
|
||||
- Integrates design tokens
|
||||
- Configures addons
|
||||
|
||||
## Instructions for Claude
|
||||
|
||||
When the user runs this command:
|
||||
|
||||
1. Validate action is valid (scan, generate, configure)
|
||||
2. Use `dss_setup_storybook` tool with action and path
|
||||
3. Present results based on action:
|
||||
|
||||
**For scan:**
|
||||
- Show Storybook status
|
||||
- List components with/without stories
|
||||
- Recommend next steps
|
||||
|
||||
**For generate:**
|
||||
- Show generated story files
|
||||
- Display story code
|
||||
- Provide usage instructions
|
||||
|
||||
**For configure:**
|
||||
- Show configuration changes
|
||||
- Display theme setup
|
||||
- Provide run instructions
|
||||
|
||||
## Example Output
|
||||
|
||||
### Scan
|
||||
```
|
||||
Storybook Scan: /path/to/project
|
||||
|
||||
STATUS
|
||||
- Storybook installed: Yes (v7.6.0)
|
||||
- Stories found: 12
|
||||
- Components without stories: 8
|
||||
|
||||
MISSING STORIES
|
||||
- Accordion.tsx
|
||||
- Avatar.tsx
|
||||
- Badge.tsx
|
||||
- Dropdown.tsx
|
||||
- Pagination.tsx
|
||||
- Progress.tsx
|
||||
- Tabs.tsx
|
||||
- Toast.tsx
|
||||
|
||||
Run /dss-storybook generate to create stories
|
||||
```
|
||||
|
||||
### Generate
|
||||
```
|
||||
Storybook Story Generation
|
||||
|
||||
GENERATED STORIES
|
||||
|
||||
Button.stories.tsx:
|
||||
- Primary variant
|
||||
- Secondary variant
|
||||
- Disabled state
|
||||
- Loading state
|
||||
- With icon
|
||||
|
||||
Card.stories.tsx:
|
||||
- Default
|
||||
- With image
|
||||
- Interactive
|
||||
|
||||
Input.stories.tsx:
|
||||
- Default
|
||||
- With label
|
||||
- With error
|
||||
- Disabled
|
||||
|
||||
Run: npm run storybook
|
||||
```
|
||||
|
||||
### Configure
|
||||
```
|
||||
Storybook Configuration
|
||||
|
||||
CONFIGURATION CREATED
|
||||
|
||||
.storybook/theme.js:
|
||||
- Brand colors from tokens
|
||||
- Typography from tokens
|
||||
- Custom logo support
|
||||
|
||||
.storybook/preview.js:
|
||||
- Token CSS imported
|
||||
- Global decorators added
|
||||
- Controls configured
|
||||
|
||||
Run: npm run storybook
|
||||
Access: http://localhost:6006
|
||||
```
|
||||
32
dss-claude-plugin/core/__init__.py
Normal file
32
dss-claude-plugin/core/__init__.py
Normal file
@@ -0,0 +1,32 @@
|
||||
"""
|
||||
DSS Core Module - Configuration and Context Management
|
||||
Extended with Context Compiler for design system context resolution.
|
||||
"""
|
||||
|
||||
from .config import DSSConfig, DSSMode
|
||||
from .context import DSSContext
|
||||
from .compiler import ContextCompiler, EMERGENCY_SKIN
|
||||
from .mcp_extensions import (
|
||||
get_active_context,
|
||||
resolve_token,
|
||||
validate_manifest,
|
||||
list_skins,
|
||||
get_compiler_status,
|
||||
with_context,
|
||||
COMPILER
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"DSSConfig",
|
||||
"DSSMode",
|
||||
"DSSContext",
|
||||
"ContextCompiler",
|
||||
"EMERGENCY_SKIN",
|
||||
"get_active_context",
|
||||
"resolve_token",
|
||||
"validate_manifest",
|
||||
"list_skins",
|
||||
"get_compiler_status",
|
||||
"with_context",
|
||||
"COMPILER"
|
||||
]
|
||||
179
dss-claude-plugin/core/compiler.py
Normal file
179
dss-claude-plugin/core/compiler.py
Normal file
@@ -0,0 +1,179 @@
|
||||
"""
|
||||
DSS Context Compiler
|
||||
Resolves project context via 3-layer cascade: Base -> Skin -> Project
|
||||
Includes Safe Boot Protocol and Debug Provenance.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import copy
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Dict, Any, Optional, List, Union
|
||||
from pathlib import Path
|
||||
|
||||
# Setup logging
|
||||
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("DSSCompiler")

# --- SAFE BOOT PROTOCOL ---
# Hardcoded emergency skin in case file system or JSON parsing fails catastrophically
EMERGENCY_SKIN = {
    "meta": {"id": "emergency", "version": "1.0.0"},
    "tokens": {
        "colors": {
            "primary": "#FF0000",
            "background": "#FFFFFF",
            "text": "#000000"
        },
        "spacing": {"base": "4px"}
    },
    "status": "emergency_mode"
}


class ContextCompiler:
    """Resolves project context via a 3-layer cascade: Base -> Skin -> Project.

    Any failure during compilation triggers the Safe Boot Protocol, which
    returns the hardcoded EMERGENCY_SKIN (with an ``_error`` key) instead of
    raising, so callers always receive a usable context.
    """

    def __init__(self, skins_dir: str = "./skins"):
        """
        Args:
            skins_dir: Directory containing ``<skin_id>.json`` skin files.
        """
        self.skins_dir = Path(skins_dir)
        # Skin cache only, keyed by skin id. Kept under the original name
        # `cache` because external status tooling reads it as the skin cache.
        self.cache: Dict[str, Any] = {}
        # Compiled-context cache, keyed by "<manifest_path>:debug=<bool>".
        # Separate from `cache` so skin ids and manifest keys cannot collide.
        self._context_cache: Dict[str, Any] = {}
        self._manifest_mtimes: Dict[str, float] = {}  # Track file modification times
        # Provenance entries from the most recent debug compile.
        self.provenance_log: List[Dict[str, Any]] = []

    def compile(self, manifest_path: str, debug: bool = False, force_refresh: bool = False) -> Dict[str, Any]:
        """
        Main entry point. Compiles context by merging:
        1. Base Skin (implicit)
        2. Extended Skin (defined in manifest, default "classic")
        3. Project Overrides (defined in manifest)

        Args:
            manifest_path: Path to ds.config.json
            debug: Enable provenance tracking (bypasses the cache, because
                provenance must be recalculated on every call)
            force_refresh: Bypass cache and recompile (for long-running servers)

        Returns:
            The merged context dict, with ``_meta`` (and ``_provenance`` when
            debug is on). On any failure, the emergency context from
            ``_enter_safe_mode`` is returned instead.
        """
        try:
            cache_key = f"{manifest_path}:debug={debug}"
            if not force_refresh and not debug and cache_key in self._context_cache:
                # Verify the manifest hasn't changed since it was cached.
                manifest_file = Path(manifest_path)
                if manifest_file.exists():
                    current_mtime = manifest_file.stat().st_mtime
                    if current_mtime == self._manifest_mtimes.get(cache_key, 0):
                        logger.debug(f"Cache hit for {manifest_path}")
                        return self._context_cache[cache_key]
                    logger.info(f"Manifest modified, invalidating cache: {manifest_path}")

            # 1. Load Project Manifest
            manifest = self._load_json(manifest_path)

            # 2. Resolve Skin
            skin_id = manifest.get("extends", {}).get("skin", "classic")
            skin = self._load_skin(skin_id)

            # 3. Resolve Base (single inheritance; all skins implicitly extend 'base')
            base_skin = self._load_skin("base")

            # One provenance list is shared by BOTH merge passes, so entries
            # from base->skin and skin->project both survive. (The original
            # captured the list once via hasattr(), which silently dropped the
            # project-layer entries and all provenance from later compiles.)
            provenance: Optional[List[Dict[str, Any]]] = [] if debug else None

            # 4. Cascade Merge: Base -> Skin -> Project
            context = self._deep_merge(base_skin, skin, path="base->skin",
                                       debug=debug, provenance=provenance)
            # Project overrides are wrapped to match the skin structure.
            project_overrides_wrapped = {
                "tokens": manifest.get("overrides", {}).get("tokens", {})
            }
            final_context = self._deep_merge(context, project_overrides_wrapped,
                                             path="skin->project", debug=debug,
                                             provenance=provenance)

            # Inject metadata. A manifest without project.id raises KeyError
            # here, which intentionally routes to safe mode.
            final_context["_meta"] = {
                "project_id": manifest["project"]["id"],
                "compiled_at": datetime.now(timezone.utc).isoformat(),
                "debug_enabled": debug,
                "compiler_config": manifest.get("compiler", {})
            }

            if debug:
                self.provenance_log = provenance
                final_context["_provenance"] = provenance
            else:
                # Cache non-debug results with mtime tracking for invalidation.
                manifest_file = Path(manifest_path)
                if manifest_file.exists():
                    self._context_cache[cache_key] = final_context
                    self._manifest_mtimes[cache_key] = manifest_file.stat().st_mtime
                    logger.debug(f"Cached compilation result for {manifest_path}")

            return final_context

        except Exception as e:
            logger.error(f"Compiler specific error: {e}")
            logger.warning("Initiating SAFE BOOT PROTOCOL")
            return self._enter_safe_mode(e)

    def _load_skin(self, skin_id: str) -> Dict[str, Any]:
        """Loads a skin by ID; falls back to 'base', then EMERGENCY_SKIN."""
        if skin_id in self.cache:
            return self.cache[skin_id]

        # Security: Prevent path traversal attacks (e.g. skin_id="../../etc").
        path = (self.skins_dir / f"{skin_id}.json").resolve()
        if not str(path).startswith(str(self.skins_dir.resolve())):
            raise ValueError(f"Invalid skin ID (path traversal detected): {skin_id}")

        if not path.exists():
            logger.warning(f"Skin {skin_id} not found, falling back to base.")
            if skin_id == "base":
                # Base itself is missing: return the emergency tokens.
                return EMERGENCY_SKIN
            return self._load_skin("base")

        data = self._load_json(str(path))
        self.cache[skin_id] = data
        return data

    def _load_json(self, path: str) -> Dict[str, Any]:
        """Read and parse a JSON file; raises on I/O or parse errors."""
        with open(path, 'r') as f:
            return json.load(f)

    def _deep_merge(self, base: Dict, override: Dict, path: str = "",
                    debug: bool = False,
                    provenance: Optional[List[Dict[str, Any]]] = None) -> Dict:
        """
        Deep merge dictionaries; arrays and primitives are replaced, not merged.

        Appends one entry per overridden leaf to ``provenance`` when debug is
        True. Thread-safe: all state travels through parameters and the caller
        owns the provenance list.
        """
        result = copy.deepcopy(base)

        for key, value in override.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                # Recursive merge - pass the shared provenance list down.
                result[key] = self._deep_merge(result[key], value,
                                               path=f"{path}.{key}",
                                               debug=debug, provenance=provenance)
            else:
                # Direct replacement (primitive or array).
                if debug and provenance is not None:
                    provenance.append({
                        "key": key,
                        "action": "override",
                        "layer": path,
                        "value_type": type(value).__name__
                    })
                result[key] = copy.deepcopy(value)

        return result

    def _enter_safe_mode(self, error: Exception) -> Dict[str, Any]:
        """Returns a copy of the hardcoded emergency skin with error details."""
        safe_context = copy.deepcopy(EMERGENCY_SKIN)
        safe_context["_error"] = str(error)
        return safe_context
|
||||
161
dss-claude-plugin/core/config.py
Normal file
161
dss-claude-plugin/core/config.py
Normal file
@@ -0,0 +1,161 @@
|
||||
"""
|
||||
DSS Configuration Module
|
||||
========================
|
||||
|
||||
Handles configuration management for the Design System Server (DSS) Claude Plugin.
|
||||
Supports local/remote mode detection, persistent configuration storage, and
|
||||
environment variable overrides.
|
||||
"""
|
||||
|
||||
import os
import json
import uuid
import asyncio
import logging
from enum import Enum
from pathlib import Path
from typing import Optional, Union, Any

import aiohttp
from pydantic import BaseModel, ConfigDict, Field, HttpUrl, ValidationError
|
||||
|
||||
# Configure module-level logger
|
||||
logger = logging.getLogger(__name__)

CONFIG_DIR = Path.home() / ".dss"
CONFIG_FILE = CONFIG_DIR / "config.json"
DEFAULT_REMOTE_URL = "https://dss.overbits.luz.uy"
DEFAULT_LOCAL_URL = "http://localhost:6006"


class DSSMode(str, Enum):
    """Operation modes for the DSS plugin."""
    LOCAL = "local"    # Talk to a locally running DSS server
    REMOTE = "remote"  # Talk to the hosted DSS API
    AUTO = "auto"      # Probe local health endpoint, else fall back to remote


class DSSConfig(BaseModel):
    """
    Configuration model for DSS Plugin.

    Attributes:
        mode (DSSMode): The configured operation mode (default: AUTO).
        remote_url (str): URL for the remote DSS API.
        local_url (str): URL for the local DSS API (usually localhost).
        session_id (str): Unique identifier for this client instance.
    """
    # Pydantic v2 configuration. The rest of this model already uses v2 APIs
    # (model_validate / model_dump_json), and the class-based `Config` used
    # previously is deprecated in v2 — ConfigDict is the supported form.
    model_config = ConfigDict(
        validate_assignment=True,
        extra="ignore",  # Allow forward compatibility with new config keys
    )

    mode: DSSMode = Field(default=DSSMode.AUTO, description="Operation mode preference")
    remote_url: str = Field(default=DEFAULT_REMOTE_URL, description="Remote API endpoint")
    local_url: str = Field(default=DEFAULT_LOCAL_URL, description="Local API endpoint")
    session_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID")

    @classmethod
    def load(cls) -> "DSSConfig":
        """
        Load configuration from ~/.dss/config.json.

        Returns a default instance if the file does not exist, is unparseable,
        or fails validation — loading never raises.
        """
        if not CONFIG_FILE.exists():
            logger.debug(f"No config found at {CONFIG_FILE}, using defaults.")
            return cls()

        try:
            content = CONFIG_FILE.read_text(encoding="utf-8")
            data = json.loads(content)
            # Pydantic validation coerces complex types (enums, etc.).
            return cls.model_validate(data)
        except (json.JSONDecodeError, ValidationError) as e:
            logger.warning(f"Failed to load config from {CONFIG_FILE}: {e}. Using defaults.")
            return cls()
        except Exception as e:
            logger.error(f"Unexpected error loading config: {e}")
            return cls()

    def save(self) -> None:
        """
        Save the current configuration to ~/.dss/config.json.

        Creates the directory if it does not exist.

        Raises:
            Exception: Re-raised after logging if the write fails.
        """
        try:
            CONFIG_DIR.mkdir(parents=True, exist_ok=True)

            # model_dump_json serializes enums and URLs to JSON-safe values.
            json_data = self.model_dump_json(indent=2)
            CONFIG_FILE.write_text(json_data, encoding="utf-8")
            logger.debug(f"Configuration saved to {CONFIG_FILE}")
        except Exception as e:
            logger.error(f"Failed to save config to {CONFIG_FILE}: {e}")
            raise

    async def get_active_mode(self) -> DSSMode:
        """
        Determine the actual runtime mode based on priority rules.

        Priority:
            1. DSS_MODE environment variable
            2. Configured 'mode' (if not AUTO)
            3. Auto-detection (ping local health endpoint)
            4. Fallback to REMOTE

        Returns:
            DSSMode: The resolved active mode (LOCAL or REMOTE; AUTO only if
            explicitly forced via the environment variable).
        """
        # 1. Check Environment Variable
        env_mode = os.getenv("DSS_MODE")
        if env_mode:
            try:
                # Normalize string to enum
                return DSSMode(env_mode.lower())
            except ValueError:
                logger.warning(f"Invalid DSS_MODE env var '{env_mode}', ignoring.")

        # 2. Check Configuration (if explicit)
        if self.mode != DSSMode.AUTO:
            return self.mode

        # 3. Auto-detect
        logger.info("Auto-detecting DSS mode...")
        is_local_healthy = await self._check_local_health()

        if is_local_healthy:
            logger.info(f"Local server detected at {self.local_url}. Switching to LOCAL mode.")
            return DSSMode.LOCAL

        logger.info("Local server unreachable. Fallback to REMOTE mode.")
        # 4. Fallback
        return DSSMode.REMOTE

    async def _check_local_health(self) -> bool:
        """
        Ping the local server health endpoint to check availability.

        Returns:
            bool: True if the server responds with 200 OK within 2 seconds,
            False otherwise (connection errors are swallowed deliberately).
        """
        health_url = f"{self.local_url.rstrip('/')}/health"
        try:
            timeout = aiohttp.ClientTimeout(total=2.0)  # Short timeout for responsiveness
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.get(health_url) as response:
                    if response.status == 200:
                        return True
                    logger.debug(f"Local health check returned status {response.status}")
        except aiohttp.ClientError as e:
            logger.debug(f"Local health check connection failed: {e}")
        except Exception as e:
            logger.debug(f"Unexpected error during health check: {e}")

        return False

    def get_api_url(self, active_mode: DSSMode) -> str:
        """
        Helper to get the correct API URL for the determined mode.
        """
        if active_mode == DSSMode.LOCAL:
            return self.local_url
        return self.remote_url
|
||||
181
dss-claude-plugin/core/context.py
Normal file
181
dss-claude-plugin/core/context.py
Normal file
@@ -0,0 +1,181 @@
|
||||
"""
|
||||
DSS Context Module
|
||||
==================
|
||||
|
||||
Singleton context manager for the DSS Plugin.
|
||||
Handles configuration loading, mode detection, and strategy instantiation.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
from .config import DSSConfig, DSSMode
|
||||
|
||||
# Logger setup
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Protocol/Type placeholder for Strategies (to be replaced by base class in next steps)
|
||||
Strategy = Any
|
||||
|
||||
|
||||
class DSSContext:
    """
    Singleton context manager for the DSS Plugin.

    Loads configuration, detects the operating mode (Local/Remote),
    and hands out the matching operational strategies.
    """
    _instance: Optional['DSSContext'] = None
    _lock: asyncio.Lock = asyncio.Lock()

    def __init__(self) -> None:
        """
        Private initializer. Use get_instance() instead.
        """
        if DSSContext._instance is not None:
            raise RuntimeError("DSSContext is a singleton. Use get_instance() to access it.")

        self.config: Optional[DSSConfig] = None
        self.active_mode: DSSMode = DSSMode.REMOTE  # Safe default until detection runs
        self._capabilities: Dict[str, bool] = {}
        self._strategy_cache: Dict[str, Strategy] = {}
        self.session_id: Optional[str] = None

    @classmethod
    async def get_instance(cls) -> 'DSSContext':
        """
        Async factory for the singleton instance; config is loaded and mode
        detected before the instance is published.
        """
        if cls._instance is None:
            async with cls._lock:
                # Double-checked locking: another task may have won the race.
                if cls._instance is None:
                    ctx = cls()
                    await ctx._initialize()
                    cls._instance = ctx

        return cls._instance

    @classmethod
    def reset(cls) -> None:
        """
        Resets the singleton instance. Useful for testing.
        """
        cls._instance = None

    async def _initialize(self) -> None:
        """
        One-time setup: load config, detect mode, cache capabilities.
        Falls back to REMOTE with limited capabilities on any failure.
        """
        try:
            # Load persisted configuration and adopt its session id.
            self.config = DSSConfig.load()
            self.session_id = self.config.session_id

            # Resolve the effective mode (may probe the local server).
            self.active_mode = await self.config.get_active_mode()

            logger.info(f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}")

            self._cache_capabilities()

        except Exception as e:
            logger.error(f"Failed to initialize DSSContext: {e}")
            self.active_mode = DSSMode.REMOTE
            self._capabilities = {"limited": True}

    def _cache_capabilities(self) -> None:
        """
        Determines what the plugin can do based on the active mode.
        """
        local = self.active_mode == DSSMode.LOCAL
        # Local mode permits filesystem access and direct browser control;
        # remote mode is restricted to API calls for security. Remote
        # connectivity itself is always assumed available.
        self._capabilities = {
            "can_read_files": local,
            "can_execute_browser": local,
            "can_screenshot": local,
            "can_connect_remote": True,
        }

    def get_capability(self, key: str) -> bool:
        """Check if a specific capability is active."""
        return self._capabilities.get(key, False)

    def get_api_url(self) -> str:
        """Get the correct API URL for the current mode."""
        if self.config is None:
            # Config never loaded; use the public endpoint as a last resort.
            return "https://dss.overbits.luz.uy"
        return self.config.get_api_url(self.active_mode)

    def get_strategy(self, strategy_type: str) -> Any:
        """
        Factory method to retrieve operational strategies.

        Args:
            strategy_type: One of 'browser', 'filesystem', 'screenshot'

        Returns:
            An instance of the requested strategy.

        Raises:
            ValueError: For an unknown strategy type.
            NotImplementedError: When the strategy module is not available yet.
        """
        cached = self._strategy_cache.get(strategy_type)
        if cached is not None:
            return cached

        # NOTE: Strategy modules are imported lazily to avoid circular
        # dependency issues if strategies reference DSSContext.
        is_local = self.active_mode == DSSMode.LOCAL
        try:
            if strategy_type == "browser":
                if is_local:
                    from ..strategies.local.browser import LocalBrowserStrategy
                    instance = LocalBrowserStrategy(self)
                else:
                    from ..strategies.remote.browser import RemoteBrowserStrategy
                    instance = RemoteBrowserStrategy(self)

            elif strategy_type == "filesystem":
                if is_local:
                    from ..strategies.local.filesystem import LocalFilesystemStrategy
                    instance = LocalFilesystemStrategy(self)
                else:
                    from ..strategies.remote.filesystem import RemoteFilesystemStrategy
                    instance = RemoteFilesystemStrategy(self)

            elif strategy_type == "screenshot":
                # Screenshots are handled by the browser strategy.
                return self.get_strategy("browser")

            else:
                raise ValueError(f"Unknown strategy type: {strategy_type}")

        except ImportError as e:
            logger.error(f"Failed to import strategy {strategy_type}: {e}")
            raise NotImplementedError(f"Strategy {strategy_type} not yet implemented") from e

        # Cache and return
        self._strategy_cache[strategy_type] = instance
        return instance
|
||||
113
dss-claude-plugin/core/mcp_extensions.py
Normal file
113
dss-claude-plugin/core/mcp_extensions.py
Normal file
@@ -0,0 +1,113 @@
|
||||
"""
|
||||
MCP Extensions for Context Awareness
|
||||
Implements the Factory Pattern to wrap existing tools with context
|
||||
and defines 5 new tools for the Context Compiler.
|
||||
"""
|
||||
|
||||
import functools
import json
import os
from typing import Any, Callable, Dict, List, Optional

from .compiler import ContextCompiler
|
||||
|
||||
# Singleton compiler instance
|
||||
COMPILER = ContextCompiler(skins_dir=os.path.join(os.path.dirname(__file__), "skins"))
|
||||
|
||||
# --- FACTORY PATTERN: Context Wrapper ---
|
||||
|
||||
def with_context(default_manifest_path: Optional[str] = None):
    """
    Decorator that injects the compiled context into the tool's arguments.
    Use this to upgrade existing 'token extractor' tools to be 'context aware'.

    The manifest path is extracted from kwargs['manifest_path'] if present,
    otherwise falls back to the default_manifest_path provided at decoration
    time. (Annotated Optional[str]: the original declared `str = None`, which
    misstates the default per PEP 484.)

    Raises:
        ValueError: If no manifest path is available at call time.
    """
    def decorator(func: Callable):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # 1. Get manifest path (runtime kwarg wins over decorator default)
            manifest_path = kwargs.get('manifest_path', default_manifest_path)
            if not manifest_path:
                raise ValueError("No manifest_path provided to context-aware tool")

            # 2. Compile Context
            context = COMPILER.compile(manifest_path)

            # 3. Inject into kwargs
            kwargs['dss_context'] = context

            # 4. Execute Tool
            return func(*args, **kwargs)
        return wrapper
    return decorator
|
||||
|
||||
|
||||
# --- 5 NEW MCP TOOLS ---
|
||||
|
||||
def get_active_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
    """
    [Tool 1] Returns the fully resolved JSON context for the project.

    debug=True adds provenance (which layer defined which token);
    force_refresh=True bypasses the cache (for long-running servers).
    """
    resolved = COMPILER.compile(manifest_path, debug=debug, force_refresh=force_refresh)
    return json.dumps(resolved, indent=2)
|
||||
|
||||
def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
    """
    [Tool 2] Resolves a specific token value (e.g. 'colors.primary')
    through the cascade.
    Set force_refresh=True to bypass cache (for long-running servers).
    """
    context = COMPILER.compile(manifest_path, force_refresh=force_refresh)

    # Walk the dotted path down the tokens tree segment by segment.
    node = context.get("tokens", {})
    for segment in token_path.split('.'):
        if not (isinstance(node, dict) and segment in node):
            return f"Token not found: {token_path}"
        node = node[segment]

    return str(node)
|
||||
|
||||
def validate_manifest(manifest_path: str) -> str:
    """
    [Tool 3] Validates the ds.config.json against the schema.

    In a full implementation this would use the 'jsonschema' library; for now
    "the compiler can build it" serves as the structural check.

    Note: ContextCompiler.compile() never raises — any failure triggers the
    Safe Boot Protocol and returns the emergency skin. The original try/except
    therefore always reported "Valid"; we must inspect the result for
    safe-mode markers instead.
    """
    try:
        result = COMPILER.compile(manifest_path)
    except Exception as e:
        # Defensive: kept in case a future compiler version raises.
        return f"Invalid: {str(e)}"

    if result.get("status") == "emergency_mode" or "_error" in result:
        return f"Invalid: {result.get('_error', 'compiler entered safe mode')}"
    return "Valid: Project manifest builds successfully."
|
||||
|
||||
def list_skins() -> str:
    """
    [Tool 4] Lists all available skins in the registry.

    Returns a JSON array of skin ids (file stems), or a plain message
    when the skins directory does not exist.
    """
    registry = COMPILER.skins_dir
    if not registry.exists():
        return "No skins directory found."

    return json.dumps([entry.stem for entry in registry.glob("*.json")])
|
||||
|
||||
def get_compiler_status() -> str:
    """
    [Tool 5] Returns the health and configuration of the Context Compiler.

    Returns:
        Pretty-printed JSON object with status, skins directory, cached skin
        keys, and the safe-boot flag.
    """
    return json.dumps(
        {
            "status": "active",
            "skins_directory": str(COMPILER.skins_dir),
            "cached_skins": list(COMPILER.cache.keys()),
            "safe_boot_ready": True,
        },
        indent=2,
    )
|
||||
|
||||
# Instructions for Main Server File:
|
||||
# 1. Import these tools
|
||||
# 2. Register them with the MCP server instance
|
||||
# 3. Apply @with_context wrapper to legacy tools if dynamic context is needed
|
||||
167
dss-claude-plugin/core/mcp_integration.py
Normal file
167
dss-claude-plugin/core/mcp_integration.py
Normal file
@@ -0,0 +1,167 @@
|
||||
"""
|
||||
MCP Integration Layer for DSS Context Compiler
|
||||
Provides MCP-compliant tool wrappers for the 5 new context tools.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
import json
|
||||
from . import (
|
||||
get_active_context,
|
||||
resolve_token,
|
||||
validate_manifest,
|
||||
list_skins,
|
||||
get_compiler_status
|
||||
)
|
||||
|
||||
# MCP Tool Definitions
|
||||
|
||||
def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
    """
    MCP Tool: Get Active Context

    Returns the fully resolved JSON context for a project.
    Set debug=True to see provenance (which layer defined which token).
    Set force_refresh=True to bypass cache (for long-running servers).

    Args:
        manifest_path: Path to ds.config.json
        debug: Enable debug provenance tracking
        force_refresh: Bypass cache and recompile

    Returns:
        JSON string with resolved context, or a JSON error object on failure.
    """
    try:
        resolved = get_active_context(manifest_path, debug, force_refresh)
    except Exception as exc:
        # Never propagate — MCP tools report failures as structured payloads.
        return json.dumps({"error": str(exc), "status": "failed"})
    return resolved
|
||||
|
||||
|
||||
def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
    """
    MCP Tool: Resolve Token

    Resolves a specific token value (e.g. 'colors.primary') through the
    cascade. Set force_refresh=True to bypass cache (for long-running
    servers).

    Args:
        manifest_path: Path to ds.config.json
        token_path: Dot-notation path to token (e.g. 'colors.primary')
        force_refresh: Bypass cache and recompile

    Returns:
        Resolved token value or error message
    """
    try:
        resolved = resolve_token(manifest_path, token_path, force_refresh)
    except Exception as exc:
        # Errors are reported as plain text, matching the tool's contract.
        return f"Error resolving token: {str(exc)}"
    return resolved
|
||||
|
||||
|
||||
def mcp_validate_manifest(manifest_path: str) -> str:
    """
    MCP Tool: Validate Manifest

    Validates the ds.config.json against the schema.

    Args:
        manifest_path: Path to ds.config.json

    Returns:
        Validation result message ("Valid: ..." / "Invalid: ..."), or a
        "Validation error: ..." message if validation itself failed.
    """
    try:
        verdict = validate_manifest(manifest_path)
    except Exception as exc:
        return f"Validation error: {str(exc)}"
    return verdict
|
||||
|
||||
|
||||
def mcp_list_skins() -> str:
    """
    MCP Tool: List Skins

    Lists all available skins in the registry.

    Returns:
        JSON array of skin IDs, or a JSON error object with an empty
        "skins" list on failure.
    """
    try:
        skins_json = list_skins()
    except Exception as exc:
        return json.dumps({"error": str(exc), "skins": []})
    return skins_json
|
||||
|
||||
|
||||
def mcp_get_compiler_status() -> str:
    """
    MCP Tool: Get Compiler Status

    Returns the health and configuration of the Context Compiler.

    Returns:
        JSON object with compiler status, or a JSON error object on failure.
    """
    try:
        status_json = get_compiler_status()
    except Exception as exc:
        return json.dumps({"error": str(exc), "status": "error"})
    return status_json
|
||||
|
||||
|
||||
# MCP Tool Registry
# This can be imported by dss-mcp-server.py to register the tools.
# Each entry maps a public tool name to its implementation plus a
# JSON-schema-style description of its parameters.
MCP_TOOLS = {
    "dss_get_resolved_context": {
        "function": mcp_get_resolved_context,
        "description": "Get fully resolved design system context for a project",
        "parameters": {
            "manifest_path": {
                "type": "string",
                "description": "Path to ds.config.json",
                "required": True
            },
            "debug": {
                "type": "boolean",
                "description": "Enable debug provenance tracking",
                "required": False,
                "default": False
            },
            # Declared for consistency with mcp_get_resolved_context's
            # signature, which accepts force_refresh.
            "force_refresh": {
                "type": "boolean",
                "description": "Bypass cache and recompile",
                "required": False,
                "default": False
            }
        }
    },
    "dss_resolve_token": {
        "function": mcp_resolve_token,
        "description": "Resolve a specific design token through the cascade",
        "parameters": {
            "manifest_path": {
                "type": "string",
                "description": "Path to ds.config.json",
                "required": True
            },
            "token_path": {
                "type": "string",
                "description": "Dot-notation path to token (e.g. 'colors.primary')",
                "required": True
            },
            # Declared for consistency with mcp_resolve_token's signature.
            "force_refresh": {
                "type": "boolean",
                "description": "Bypass cache and recompile",
                "required": False,
                "default": False
            }
        }
    },
    "dss_validate_manifest": {
        "function": mcp_validate_manifest,
        "description": "Validate project manifest against schema",
        "parameters": {
            "manifest_path": {
                "type": "string",
                "description": "Path to ds.config.json",
                "required": True
            }
        }
    },
    "dss_list_skins": {
        "function": mcp_list_skins,
        "description": "List all available design system skins",
        "parameters": {}
    },
    "dss_get_compiler_status": {
        "function": mcp_get_compiler_status,
        "description": "Get Context Compiler health and configuration",
        "parameters": {}
    }
}
|
||||
308
dss-claude-plugin/core/runtime.py
Normal file
308
dss-claude-plugin/core/runtime.py
Normal file
@@ -0,0 +1,308 @@
|
||||
"""
|
||||
DSS Runtime - Dependency Injection & Boundary Enforcement
|
||||
|
||||
This module provides a bounded runtime environment for DSS MCP tools.
|
||||
All external API access (Figma, Browser, HTTP) MUST go through this runtime.
|
||||
|
||||
Key Features:
|
||||
- Dependency Injection pattern prevents direct external imports
|
||||
- Capability Provider pattern controls what operations are allowed
|
||||
- All access is validated against .dss-boundaries.yaml
|
||||
- All violations are logged for audit
|
||||
|
||||
Usage:
|
||||
runtime = DSSRuntime(config_path=".dss-boundaries.yaml")
|
||||
figma_client = runtime.get_figma_client() # Validated & wrapped
|
||||
browser = runtime.get_browser() # Sandboxed
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime
|
||||
import yaml
|
||||
|
||||
# Setup logging
|
||||
logger = logging.getLogger("dss.runtime")
|
||||
|
||||
class BoundaryViolationError(Exception):
    """Raised when an operation violates DSS boundaries"""
|
||||
|
||||
class DSSRuntime:
    """
    Bounded runtime environment for DSS operations.

    Enforces architectural boundaries by:
    1. Controlling all external API access
    2. Validating operations against boundary configuration
    3. Logging all access for audit trail
    4. Providing sandboxed clients instead of raw access
    """

    def __init__(self, config_path: str = ".dss-boundaries.yaml"):
        """
        Initialize DSS Runtime with boundary configuration.

        Args:
            config_path: Path to boundary configuration file
        """
        self.config_path = Path(config_path)
        self.config = self._load_config()
        # Enforcement settings all live under the config's "enforcement" key;
        # missing keys fall back to strict mode with violation logging on.
        self.enforcement_mode = self.config.get("enforcement", {}).get("mode", "strict")
        self.log_violations = self.config.get("enforcement", {}).get("log_violations", True)
        self.violation_log_path = Path(self.config.get("enforcement", {}).get("violation_log", ".dss/logs/boundary-violations.jsonl"))

        # Client caches (lazy initialization)
        self._figma_client = None
        self._browser_strategy = None
        self._http_client = None

        logger.info(f"DSSRuntime initialized with enforcement mode: {self.enforcement_mode}")

    def _load_config(self) -> Dict[str, Any]:
        """Load boundary configuration from YAML; fall back to strict defaults
        when the file is missing or unreadable."""
        if not self.config_path.exists():
            logger.warning(f"Boundary config not found: {self.config_path}, using defaults")
            return self._default_config()

        try:
            with open(self.config_path) as f:
                return yaml.safe_load(f)
        except Exception as e:
            # Any parse/IO failure degrades to the built-in strict defaults.
            logger.error(f"Failed to load boundary config: {e}")
            return self._default_config()

    def _default_config(self) -> Dict[str, Any]:
        """Default boundary configuration (strict)"""
        return {
            "version": "1.0",
            "blocked_external_apis": ["api.figma.com"],
            "blocked_imports": ["requests", "playwright", "httpx"],
            "enforcement": {
                "mode": "strict",
                "log_violations": True,
                "violation_log": ".dss/logs/boundary-violations.jsonl"
            }
        }

    def _log_violation(self, operation: str, details: Dict[str, Any]):
        """Append a boundary violation to the JSONL audit trail (no-op when
        log_violations is disabled)."""
        if not self.log_violations:
            return

        self.violation_log_path.parent.mkdir(parents=True, exist_ok=True)

        log_entry = {
            # NOTE(review): datetime.utcnow() is naive and deprecated in
            # Python 3.12; consider datetime.now(timezone.utc).
            "timestamp": datetime.utcnow().isoformat(),
            "type": "boundary_violation",
            "operation": operation,
            "enforcement_mode": self.enforcement_mode,
            "details": details
        }

        with open(self.violation_log_path, "a") as f:
            f.write(json.dumps(log_entry) + "\n")

        logger.warning(f"Boundary violation: {operation} - {details}")

    def _log_access(self, operation: str, allowed: bool, details: Dict[str, Any]):
        """Append a successful-access record to the JSONL audit trail."""
        # NOTE(review): path is hard-coded here while the violation log path is
        # configurable — confirm whether this should also come from config.
        access_log_path = Path(".dss/logs/runtime-access.jsonl")
        access_log_path.parent.mkdir(parents=True, exist_ok=True)

        log_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "type": "runtime_access",
            "operation": operation,
            "allowed": allowed,
            "details": details
        }

        with open(access_log_path, "a") as f:
            f.write(json.dumps(log_entry) + "\n")

    def validate_operation(self, operation: str, context: Dict[str, Any]) -> bool:
        """
        Validate if an operation is allowed by DSS boundaries.

        Args:
            operation: Operation name (e.g., "figma_api_call", "browser_launch")
            context: Operation context for validation

        Returns:
            True if allowed, raises BoundaryViolationError if not (in strict mode)
        """
        required_tools = self.config.get("required_dss_tools", {})

        # Check if operation requires going through DSS tools
        for category, tools in required_tools.items():
            # NOTE(review): 'operation in category' is a substring test on the
            # category NAME, not membership in the tools list — confirm this is
            # intended (e.g. matching "figma" inside "figma_operations").
            if operation in category:
                details = {
                    "operation": operation,
                    "context": context,
                    "required_tools": tools
                }

                self._log_violation(operation, details)

                if self.enforcement_mode == "strict":
                    raise BoundaryViolationError(
                        f"Direct {operation} blocked. Use DSS tools: {', '.join(tools)}"
                    )
                elif self.enforcement_mode == "warn":
                    # Warn mode logs the violation but allows the operation.
                    logger.warning(f"Boundary warning: {operation} should use DSS tools")
                    return True

        self._log_access(operation, True, context)
        return True

    def get_figma_client(self, token: Optional[str] = None):
        """
        Get a wrapped Figma API client with boundary enforcement.

        Args:
            token: Optional Figma token (uses env var if not provided)

        Returns:
            SafeFigmaClient instance (read-only by default)
        """
        if self._figma_client is None:
            # Imported lazily so the runtime loads even when the safe client
            # module is unavailable until first use.
            from core.safe_figma_client import SafeFigmaClient

            self._figma_client = SafeFigmaClient(
                token=token,
                allow_write=False,  # Read-only by default
                runtime=self
            )

            logger.info("Figma client initialized (read-only mode)")

        # NOTE(review): a token passed on later calls is ignored once the
        # client is cached — confirm that is acceptable.
        return self._figma_client

    def get_browser(self, strategy: str = "local"):
        """
        Get a sandboxed browser automation instance.

        Args:
            strategy: Browser strategy ("local" or "remote")

        Returns:
            BrowserStrategy instance with sandbox enforcement
        """
        # NOTE(review): once cached, the 'strategy' argument is ignored on
        # subsequent calls — a "remote" request after a "local" one returns
        # the local instance. Confirm intended.
        if self._browser_strategy is None:
            if strategy == "local":
                try:
                    from strategies.local.browser import LocalBrowserStrategy
                    self._browser_strategy = LocalBrowserStrategy(runtime=self)
                    logger.info("Local browser strategy initialized")
                except ImportError:
                    raise BoundaryViolationError(
                        "LocalBrowserStrategy not available. Use dss_browser_* tools."
                    )
            elif strategy == "remote":
                try:
                    from strategies.remote.browser import RemoteBrowserStrategy
                    self._browser_strategy = RemoteBrowserStrategy(runtime=self)
                    logger.info("Remote browser strategy initialized")
                except ImportError:
                    raise BoundaryViolationError(
                        "RemoteBrowserStrategy not available. Use dss_browser_* tools."
                    )

        return self._browser_strategy

    def get_http_client(self):
        """
        Get a wrapped HTTP client with URL validation.

        Returns:
            SafeHTTPClient instance that validates URLs against allowed domains
        """
        if self._http_client is None:
            from core.safe_http_client import SafeHTTPClient

            self._http_client = SafeHTTPClient(
                blocked_domains=self.config.get("blocked_external_apis", []),
                runtime=self
            )

            logger.info("HTTP client initialized with URL validation")

        return self._http_client

    def check_import(self, module_name: str) -> bool:
        """
        Check if a direct import is allowed.

        Args:
            module_name: Module being imported

        Returns:
            True if allowed, raises BoundaryViolationError if blocked
        """
        blocked = self.config.get("blocked_imports", [])

        if module_name in blocked:
            details = {
                "module": module_name,
                "blocked_imports": blocked
            }

            self._log_violation(f"direct_import:{module_name}", details)

            if self.enforcement_mode == "strict":
                raise BoundaryViolationError(
                    f"Direct import of '{module_name}' blocked. "
                    f"Use DSS runtime clients instead."
                )

        # In non-strict mode a blocked import is logged but still permitted.
        return True

    def get_temp_dir(self, session_id: Optional[str] = None) -> Path:
        """
        Get session-specific temporary directory.

        Args:
            session_id: Optional session identifier (auto-generated if not provided)

        Returns:
            Path to session temp directory
        """
        if session_id is None:
            # Second-resolution timestamp; collisions possible for concurrent
            # anonymous sessions within the same second.
            session_id = f"session-{int(datetime.utcnow().timestamp())}"

        temp_dir = Path(".dss/temp") / session_id
        temp_dir.mkdir(parents=True, exist_ok=True)

        return temp_dir

    def get_stats(self) -> Dict[str, Any]:
        """
        Get runtime statistics.

        Returns:
            Dictionary with access counts, violations, etc.
        """
        return {
            "enforcement_mode": self.enforcement_mode,
            "clients_initialized": {
                "figma": self._figma_client is not None,
                "browser": self._browser_strategy is not None,
                "http": self._http_client is not None,
            },
            "config_version": self.config.get("version", "unknown")
        }
|
||||
|
||||
# Global runtime instance (singleton pattern)
_runtime_instance: Optional[DSSRuntime] = None


def get_runtime() -> DSSRuntime:
    """Get the global DSSRuntime instance (singleton)"""
    global _runtime_instance

    # Lazily construct the runtime with default config on first use.
    # NOTE(review): not thread-safe — two concurrent first calls could each
    # build an instance; confirm single-threaded startup or add a lock.
    if _runtime_instance is None:
        _runtime_instance = DSSRuntime()

    return _runtime_instance
|
||||
52
dss-claude-plugin/core/schemas/ds.config.schema.json
Normal file
52
dss-claude-plugin/core/schemas/ds.config.schema.json
Normal file
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "DSS Project Manifest",
|
||||
"type": "object",
|
||||
"required": ["version", "project", "extends", "stack"],
|
||||
"properties": {
|
||||
"version": {"type": "string", "pattern": "^2\\.0\\.0$"},
|
||||
"project": {
|
||||
"type": "object",
|
||||
"required": ["id", "name", "type"],
|
||||
"properties": {
|
||||
"id": {"type": "string", "pattern": "^[a-z0-9-]+$"},
|
||||
"name": {"type": "string"},
|
||||
"type": {"enum": ["web", "mobile", "desktop"]}
|
||||
}
|
||||
},
|
||||
"extends": {
|
||||
"type": "object",
|
||||
"required": ["skin", "version"],
|
||||
"properties": {
|
||||
"skin": {"type": "string"},
|
||||
"version": {"type": "string"}
|
||||
}
|
||||
},
|
||||
"stack": {
|
||||
"type": "object",
|
||||
"required": ["framework", "styling"],
|
||||
"properties": {
|
||||
"framework": {"enum": ["react", "vue", "angular", "ios", "android", "flutter", "vanilla"]},
|
||||
"styling": {"enum": ["tailwind", "css-modules", "styled-components", "emotion", "css-vars"]},
|
||||
"icons": {"enum": ["lucide", "heroicons", "material", "custom"]},
|
||||
"typescript": {"type": "boolean"}
|
||||
}
|
||||
},
|
||||
"compiler": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"strict_mode": {"type": "boolean"},
|
||||
"validation_level": {"enum": ["error", "warning", "info"]},
|
||||
"output_format": {"enum": ["css-vars", "tailwind-config", "js-tokens"]},
|
||||
"cache_strategy": {"enum": ["aggressive", "moderate", "disabled"]}
|
||||
}
|
||||
},
|
||||
"overrides": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"tokens": {"type": "object"},
|
||||
"files": {"type": "array", "items": {"type": "string"}}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
28
dss-claude-plugin/core/skins/base.json
Normal file
28
dss-claude-plugin/core/skins/base.json
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"meta": {
|
||||
"id": "base",
|
||||
"version": "1.0.0",
|
||||
"description": "Foundation tokens shared across all skins"
|
||||
},
|
||||
"tokens": {
|
||||
"colors": {
|
||||
"transparent": "transparent",
|
||||
"current": "currentColor",
|
||||
"white": "#ffffff",
|
||||
"black": "#000000"
|
||||
},
|
||||
"spacing": {
|
||||
"0": "0px",
|
||||
"1": "4px",
|
||||
"2": "8px",
|
||||
"4": "16px",
|
||||
"8": "32px"
|
||||
},
|
||||
"typography": {
|
||||
"fontFamily": {
|
||||
"sans": ["system-ui", "sans-serif"],
|
||||
"mono": ["monospace"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
21
dss-claude-plugin/core/skins/classic.json
Normal file
21
dss-claude-plugin/core/skins/classic.json
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"meta": {
|
||||
"id": "classic",
|
||||
"version": "2.0.0",
|
||||
"parent": "base"
|
||||
},
|
||||
"tokens": {
|
||||
"colors": {
|
||||
"primary": "#3B82F6",
|
||||
"secondary": "#10B981",
|
||||
"danger": "#EF4444",
|
||||
"background": "#F3F4F6",
|
||||
"surface": "#FFFFFF",
|
||||
"text": "#1F2937"
|
||||
},
|
||||
"borderRadius": {
|
||||
"default": "0.25rem",
|
||||
"lg": "0.5rem"
|
||||
}
|
||||
}
|
||||
}
|
||||
33
dss-claude-plugin/core/skins/workbench.json
Normal file
33
dss-claude-plugin/core/skins/workbench.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"meta": {
|
||||
"id": "workbench",
|
||||
"version": "2.0.0",
|
||||
"parent": "base",
|
||||
"description": "High density technical interface skin"
|
||||
},
|
||||
"tokens": {
|
||||
"colors": {
|
||||
"primary": "#2563EB",
|
||||
"secondary": "#475569",
|
||||
"danger": "#DC2626",
|
||||
"background": "#0F172A",
|
||||
"surface": "#1E293B",
|
||||
"text": "#E2E8F0"
|
||||
},
|
||||
"spacing": {
|
||||
"1": "2px",
|
||||
"2": "4px",
|
||||
"4": "8px",
|
||||
"8": "16px"
|
||||
},
|
||||
"borderRadius": {
|
||||
"default": "0px",
|
||||
"lg": "2px"
|
||||
},
|
||||
"typography": {
|
||||
"fontFamily": {
|
||||
"sans": ["Inter", "system-ui", "sans-serif"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
362
dss-claude-plugin/core/structured_logger.py
Normal file
362
dss-claude-plugin/core/structured_logger.py
Normal file
@@ -0,0 +1,362 @@
|
||||
"""
|
||||
DSS Structured Logger - JSON-based logging for AI-consumable audit trails
|
||||
|
||||
Provides structured, machine-readable logging in JSONL format (one JSON object per line).
|
||||
All DSS operations are logged with consistent fields for analysis, debugging, and compliance.
|
||||
|
||||
Features:
|
||||
- JSONL format (newline-delimited JSON) for easy parsing
|
||||
- Structured log entries with standardized fields
|
||||
- Context tracking (session_id, tool_name, operation)
|
||||
- Performance metrics (duration, timestamps)
|
||||
- Log rotation and cleanup
|
||||
- Integration with DSSRuntime
|
||||
|
||||
Usage:
|
||||
from core.structured_logger import get_logger, LogContext
|
||||
|
||||
logger = get_logger("dss.tool.sync_figma")
|
||||
|
||||
with LogContext(session_id="abc123", tool="dss_sync_figma"):
|
||||
logger.info("Starting Figma sync", extra={"file_key": "xyz"})
|
||||
# ... operation ...
|
||||
logger.info("Figma sync complete", extra={"tokens_extracted": 42})
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
from contextlib import contextmanager
|
||||
import threading
|
||||
|
||||
# Thread-local storage for context
|
||||
_context = threading.local()
|
||||
|
||||
|
||||
class DSSJSONFormatter(logging.Formatter):
    """
    Custom JSON formatter for structured logging.

    Outputs each log record as a single-line JSON object with standardized fields:
    - timestamp: ISO 8601 UTC timestamp
    - level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
    - logger: Logger name (e.g., "dss.tool.sync_figma")
    - message: Human-readable log message
    - context: Additional contextual data (session_id, tool_name, etc.)
    - extra: Tool-specific extra data
    """

    def format(self, record: logging.LogRecord) -> str:
        """Format log record as single-line JSON"""

        # Build base log entry. Timestamp is the formatting time, not the
        # record's creation time (record.created is not used).
        log_entry = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }

        # Add context from thread-local storage (set by LogContext); each
        # field is emitted only when the current thread has it set.
        if hasattr(_context, "session_id"):
            log_entry["session_id"] = _context.session_id
        if hasattr(_context, "tool_name"):
            log_entry["tool"] = _context.tool_name
        if hasattr(_context, "operation"):
            log_entry["operation"] = _context.operation

        # Add extra fields from record (attached by DSSLogger._log_with_extra)
        if hasattr(record, "extra_data"):
            log_entry["extra"] = record.extra_data

        # Add exception info if present
        if record.exc_info:
            log_entry["exception"] = {
                "type": record.exc_info[0].__name__ if record.exc_info[0] else None,
                "message": str(record.exc_info[1]) if record.exc_info[1] else None,
                "traceback": self.formatException(record.exc_info) if record.exc_info else None,
            }

        # Add location info for ERROR and above
        if record.levelno >= logging.ERROR:
            log_entry["location"] = {
                "file": record.pathname,
                "line": record.lineno,
                "function": record.funcName,
            }

        # default=str silently stringifies any non-JSON-serializable value so
        # formatting never raises inside the logging pipeline.
        return json.dumps(log_entry, default=str)
|
||||
|
||||
|
||||
class DSSLogger(logging.Logger):
    """
    Logger subclass with first-class structured-logging support.

    Each level method (debug/info/warning/error/critical) accepts an optional
    ``extra`` dict of structured data, which is attached to the record as
    ``extra_data`` so the JSON formatter can serialize it.
    """

    def _log_with_extra(self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
        """Dispatch to Logger._log, stashing structured data on the record."""
        if not extra:
            super()._log(level, msg, (), **kwargs)
            return
        # Logger._log copies each 'extra' key onto the LogRecord, so the
        # payload surfaces as record.extra_data.
        super()._log(level, msg, (), extra={"extra_data": extra}, **kwargs)

    def debug(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
        """Log DEBUG message with optional extra data"""
        self._log_with_extra(logging.DEBUG, msg, extra, **kwargs)

    def info(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
        """Log INFO message with optional extra data"""
        self._log_with_extra(logging.INFO, msg, extra, **kwargs)

    def warning(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
        """Log WARNING message with optional extra data"""
        self._log_with_extra(logging.WARNING, msg, extra, **kwargs)

    def error(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
        """Log ERROR message with optional extra data"""
        self._log_with_extra(logging.ERROR, msg, extra, **kwargs)

    def critical(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
        """Log CRITICAL message with optional extra data"""
        self._log_with_extra(logging.CRITICAL, msg, extra, **kwargs)
|
||||
|
||||
|
||||
# Configure custom logger class
|
||||
logging.setLoggerClass(DSSLogger)
|
||||
|
||||
|
||||
def get_logger(name: str, log_file: Optional[str] = None) -> DSSLogger:
    """
    Get or create a structured logger instance.

    Args:
        name: Logger name (e.g., "dss.tool.sync_figma")
        log_file: Optional custom log file path (defaults to
            $DSS_HOME/logs/dss-operations.jsonl, with DSS_HOME falling back
            to ".dss")

    Returns:
        DSSLogger instance configured for structured logging

    Example:
        logger = get_logger("dss.tool.extract_tokens")
        logger.info("Starting token extraction", extra={"source": "css"})
    """
    logger = logging.getLogger(name)

    # Already configured — return as-is so repeated calls are idempotent.
    if logger.handlers:
        return logger

    if log_file is None:
        log_dir = Path(os.environ.get("DSS_HOME", ".dss")) / "logs"
        log_dir.mkdir(parents=True, exist_ok=True)
        log_file = str(log_dir / "dss-operations.jsonl")

    # Primary sink: append-mode JSONL file.
    file_handler = logging.FileHandler(log_file, mode="a", encoding="utf-8")
    file_handler.setFormatter(DSSJSONFormatter())
    logger.addHandler(file_handler)

    # Optional stderr mirror for development, gated by DSS_LOG_CONSOLE=true.
    if os.environ.get("DSS_LOG_CONSOLE", "false").lower() == "true":
        console_handler = logging.StreamHandler(sys.stderr)
        console_handler.setFormatter(DSSJSONFormatter())
        logger.addHandler(console_handler)

    # Level comes from DSS_LOG_LEVEL; unknown names fall back to INFO.
    level_name = os.environ.get("DSS_LOG_LEVEL", "INFO").upper()
    logger.setLevel(getattr(logging, level_name, logging.INFO))

    # Keep records out of the root logger.
    logger.propagate = False

    return logger
|
||||
|
||||
|
||||
@contextmanager
def LogContext(session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None):
    """
    Context manager for adding structured context to log entries.

    All log entries within this context will include the provided fields
    (session_id, tool_name, operation). On exit, the thread-local context is
    restored exactly to its prior state.

    Args:
        session_id: Unique session identifier
        tool: Tool name (e.g., "dss_sync_figma")
        operation: Operation being performed (e.g., "token_extraction")

    Example:
        with LogContext(session_id="abc123", tool="dss_sync_figma"):
            logger.info("Starting sync")
            # This log will include session_id and tool fields
    """
    # Sentinel distinguishing "attribute was unset" from falsy-but-set values.
    # The previous implementation restored with truthiness checks, so a prior
    # value of "" (or any falsy value) was deleted instead of restored.
    _UNSET = object()
    previous = {
        "session_id": getattr(_context, "session_id", _UNSET),
        "tool_name": getattr(_context, "tool_name", _UNSET),
        "operation": getattr(_context, "operation", _UNSET),
    }

    # Set new context (only for arguments that were actually provided)
    if session_id is not None:
        _context.session_id = session_id
    if tool is not None:
        _context.tool_name = tool
    if operation is not None:
        _context.operation = operation

    try:
        yield
    finally:
        # Restore the exact prior state: re-set what existed, remove what
        # did not.
        for attr, value in previous.items():
            if value is _UNSET:
                if hasattr(_context, attr):
                    delattr(_context, attr)
            else:
                setattr(_context, attr, value)
|
||||
|
||||
|
||||
class PerformanceLogger:
    """
    Helper for logging operation performance metrics.

    Automatically measures duration and logs performance data.

    Example:
        perf = PerformanceLogger("token_extraction")
        perf.start()
        # ... operation ...
        perf.end(extra={"tokens_found": 42})
    """

    def __init__(self, operation: str, logger: Optional[DSSLogger] = None):
        """
        Initialize performance logger.

        Args:
            operation: Operation name
            logger: Optional logger (defaults to the "dss.performance" logger)
        """
        self.operation = operation
        self.logger = logger or get_logger("dss.performance")
        self.start_time = None
        self.end_time = None

    def start(self):
        """Record the start timestamp and emit a DEBUG marker."""
        self.start_time = datetime.now(timezone.utc)
        self.logger.debug(
            f"Started: {self.operation}",
            extra={
                "operation": self.operation,
                "start_time": self.start_time.isoformat(),
            },
        )

    def end(self, extra: Optional[Dict[str, Any]] = None):
        """
        Record the end timestamp and log the measured duration.

        Args:
            extra: Additional metrics to merge into the performance entry
        """
        self.end_time = datetime.now(timezone.utc)

        # Guard against misuse: end() before start() logs a warning instead
        # of raising inside instrumentation code.
        if self.start_time is None:
            self.logger.warning(f"Performance logger end() called without start() for: {self.operation}")
            return

        elapsed_ms = (self.end_time - self.start_time).total_seconds() * 1000

        metrics = {
            "operation": self.operation,
            "duration_ms": round(elapsed_ms, 2),
            "start_time": self.start_time.isoformat(),
            "end_time": self.end_time.isoformat(),
        }
        if extra:
            metrics.update(extra)

        self.logger.info(f"Completed: {self.operation}", extra=metrics)
|
||||
|
||||
|
||||
def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5):
    """
    Configure log rotation for DSS log files.

    Args:
        log_dir: Log directory (defaults to $DSS_HOME/logs/, DSS_HOME
            defaulting to ".dss")
        max_bytes: Max size per log file (default: 10MB)
        backup_count: Number of backup files to keep (default: 5)

    Note: This uses RotatingFileHandler. For production, consider
    using a log rotation service like logrotate.
    """
    from logging.handlers import RotatingFileHandler

    if log_dir is None:
        log_dir = Path(os.environ.get("DSS_HOME", ".dss")) / "logs"

    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / "dss-operations.jsonl"

    # Swap every plain FileHandler on the root "dss" logger for one rotating
    # handler writing the same JSONL format.
    dss_logger = logging.getLogger("dss")
    for existing in dss_logger.handlers[:]:
        if isinstance(existing, logging.FileHandler):
            dss_logger.removeHandler(existing)

    rotating = RotatingFileHandler(
        str(log_file),
        maxBytes=max_bytes,
        backupCount=backup_count,
        encoding="utf-8",
    )
    rotating.setFormatter(DSSJSONFormatter())
    dss_logger.addHandler(rotating)

    dss_logger.info("Log rotation configured", extra={
        "max_bytes": max_bytes,
        "backup_count": backup_count,
        "log_file": str(log_file),
    })
|
||||
|
||||
|
||||
# Example usage (can be removed in production)
|
||||
if __name__ == "__main__":
    # Example 1: Basic logging
    logger = get_logger("dss.example")
    logger.info("DSS operation started", extra={"user": "admin"})

    # Example 2: Context-based logging
    with LogContext(session_id="session-123", tool="dss_sync_figma"):
        logger.info("Syncing Figma file", extra={"file_key": "abc123"})
        logger.info("Sync complete", extra={"tokens_extracted": 42})

    # Example 3: Performance logging
    timer = PerformanceLogger("token_extraction", logger)
    timer.start()
    import time
    time.sleep(0.1)  # simulate work being measured
    timer.end(extra={"tokens_found": 100})

    print(f"\nLogs written to: {Path('.dss/logs/dss-operations.jsonl').absolute()}")
|
||||
1
dss-claude-plugin/hooks/.state/.git-backup.lock
Normal file
1
dss-claude-plugin/hooks/.state/.git-backup.lock
Normal file
@@ -0,0 +1 @@
|
||||
1765316404612
|
||||
58
dss-claude-plugin/hooks/dss-hooks-config.json
Normal file
58
dss-claude-plugin/hooks/dss-hooks-config.json
Normal file
@@ -0,0 +1,58 @@
|
||||
{
|
||||
"description": "DSS Hooks Configuration - Customize hook behavior",
|
||||
"version": "1.0.0",
|
||||
|
||||
"security_check": {
|
||||
"enabled": true,
|
||||
"block_on_critical": false,
|
||||
"warn_only": true,
|
||||
"ignored_patterns": []
|
||||
},
|
||||
|
||||
"token_validator": {
|
||||
"enabled": true,
|
||||
"strict_mode": false,
|
||||
"warn_only": true,
|
||||
"categories": ["color", "spacing", "typography", "border", "effects", "layout"]
|
||||
},
|
||||
|
||||
"component_checker": {
|
||||
"enabled": true,
|
||||
"categories": ["accessibility", "react", "typescript", "structure"],
|
||||
"min_severity": "low"
|
||||
},
|
||||
|
||||
"complexity_monitor": {
|
||||
"enabled": true,
|
||||
"max_function_lines": 50,
|
||||
"max_component_lines": 200,
|
||||
"max_props": 10,
|
||||
"max_nesting_depth": 4,
|
||||
"warn_only": true
|
||||
},
|
||||
|
||||
"storybook_reminder": {
|
||||
"enabled": true,
|
||||
"component_patterns": ["**/components/**/*.tsx", "**/ui/**/*.tsx"],
|
||||
"story_extensions": [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"],
|
||||
"remind_on_new": true,
|
||||
"remind_on_props_change": true
|
||||
},
|
||||
|
||||
"session_summary": {
|
||||
"enabled": true,
|
||||
"output_file": ".dss-session-summary.md",
|
||||
"include_git_diff": true,
|
||||
"include_file_list": true,
|
||||
"max_diff_lines": 100
|
||||
},
|
||||
|
||||
"git_backup": {
|
||||
"enabled": true,
|
||||
"require_git_repo": true,
|
||||
"commit_only_if_changes": true,
|
||||
"include_timestamp": true,
|
||||
"commit_prefix": "dss-auto-backup",
|
||||
"show_logs": false
|
||||
}
|
||||
}
|
||||
111
dss-claude-plugin/hooks/hooks.json
Normal file
111
dss-claude-plugin/hooks/hooks.json
Normal file
@@ -0,0 +1,111 @@
|
||||
{
|
||||
"description": "DSS Developer Hooks - React/UI Development & QA Tools",
|
||||
"version": "1.0.0",
|
||||
"author": "DSS Team",
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"description": "Security pattern validation for file edits",
|
||||
"matcher": "Edit|Write",
|
||||
"priority": 100,
|
||||
"enabled": true,
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/security-check.py",
|
||||
"timeout": 5000,
|
||||
"continueOnError": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"description": "Design token compliance validation",
|
||||
"matcher": "Edit|Write",
|
||||
"priority": 90,
|
||||
"enabled": true,
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/token-validator.py",
|
||||
"timeout": 5000,
|
||||
"continueOnError": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"PostToolUse": [
|
||||
{
|
||||
"description": "React component best practices and accessibility",
|
||||
"matcher": "Edit|Write",
|
||||
"priority": 80,
|
||||
"enabled": true,
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/component-checker.py",
|
||||
"timeout": 5000,
|
||||
"continueOnError": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"description": "Code complexity tracking",
|
||||
"matcher": "Edit|Write",
|
||||
"priority": 70,
|
||||
"enabled": true,
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/complexity-monitor.js",
|
||||
"timeout": 5000,
|
||||
"continueOnError": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"description": "Storybook coverage reminder for components",
|
||||
"matcher": "Edit|Write",
|
||||
"priority": 60,
|
||||
"enabled": true,
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/storybook-reminder.py",
|
||||
"timeout": 3000,
|
||||
"continueOnError": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SessionEnd": [
|
||||
{
|
||||
"description": "Generate session summary report",
|
||||
"priority": 100,
|
||||
"enabled": true,
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/session-summary.js",
|
||||
"timeout": 10000,
|
||||
"continueOnError": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Stop": [
|
||||
{
|
||||
"description": "Auto-backup changes to git",
|
||||
"priority": 100,
|
||||
"enabled": true,
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/git-backup.js",
|
||||
"timeout": 10000,
|
||||
"continueOnError": false
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
215
dss-claude-plugin/hooks/scripts/complexity-monitor.js
Executable file
215
dss-claude-plugin/hooks/scripts/complexity-monitor.js
Executable file
@@ -0,0 +1,215 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* DSS Complexity Monitor Hook
|
||||
* Tracks code complexity metrics and warns on high-complexity code.
|
||||
* Written from scratch for DSS.
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
// Configuration
|
||||
const DEFAULT_CONFIG = {
|
||||
complexity_monitor: {
|
||||
enabled: true,
|
||||
max_function_lines: 50,
|
||||
max_component_lines: 200,
|
||||
max_props: 10,
|
||||
max_nesting_depth: 4,
|
||||
warn_only: true
|
||||
}
|
||||
};
|
||||
|
||||
function loadConfig() {
  // Overlay the user's ~/.dss/hooks-config.json on DEFAULT_CONFIG.
  // Returns the defaults on any read/parse failure.
  const configPath = path.join(process.env.HOME || '', '.dss', 'hooks-config.json');
  try {
    if (fs.existsSync(configPath)) {
      const userConfig = JSON.parse(fs.readFileSync(configPath, 'utf8'));
      const merged = { ...DEFAULT_CONFIG, ...userConfig };
      // FIX: deep-merge known sections. A shallow spread replaces the whole
      // `complexity_monitor` object, so a partial user section would drop
      // default thresholds and make every `x > undefined` check silently false.
      for (const key of Object.keys(DEFAULT_CONFIG)) {
        if (userConfig[key] && typeof userConfig[key] === 'object') {
          merged[key] = { ...DEFAULT_CONFIG[key], ...userConfig[key] };
        }
      }
      return merged;
    }
  } catch (e) {
    // Unreadable or corrupt config: fall back to defaults.
  }
  return DEFAULT_CONFIG;
}
|
||||
|
||||
function countLines(content) {
  // Line count = newline count + 1 (an empty string is still one line).
  let newlines = 0;
  for (let i = content.indexOf('\n'); i !== -1; i = content.indexOf('\n', i + 1)) {
    newlines += 1;
  }
  return newlines + 1;
}
|
||||
|
||||
function countProps(content) {
  // Count property entries in the first `...Props` interface/type literal.
  // NOTE: the single-level regex does not handle nested object types.
  const match = content.match(/(?:interface|type)\s+\w*Props[^{]*\{([^}]+)\}/);
  if (!match) return 0;
  return match[1]
    .split(/[;\n]/)
    .map(part => part.trim())
    .filter(part => part && !part.startsWith('//') && part.includes(':'))
    .length;
}
|
||||
|
||||
function countNestingDepth(content) {
  // Track the maximum number of simultaneously-open braces/parens.
  // Strings and comments are not excluded — this is a rough heuristic.
  let deepest = 0;
  let open = 0;

  for (const ch of content) {
    if (ch === '{' || ch === '(') {
      open += 1;
      if (open > deepest) deepest = open;
    } else if (ch === '}' || ch === ')') {
      open = open > 0 ? open - 1 : 0;
    }
  }

  return deepest;
}
|
||||
|
||||
function countFunctions(content) {
  // Sum matches of the common declaration forms: `function name(...)`,
  // `const name = (...) =>`, and `const name = function`.
  const declarationForms = [
    /function\s+\w+\s*\([^)]*\)\s*\{/g,
    /const\s+\w+\s*=\s*(?:async\s*)?\([^)]*\)\s*=>/g,
    /const\s+\w+\s*=\s*(?:async\s*)?function/g
  ];

  return declarationForms.reduce((total, form) => {
    const hits = content.match(form);
    return total + (hits ? hits.length : 0);
  }, 0);
}
|
||||
|
||||
function analyzeComplexity(content, filePath, config) {
  // Run every complexity heuristic against one file's content and return
  // the list of issues ({type, severity, message, suggestion}).
  const issues = [];
  const limits = config.complexity_monitor || {};
  const ext = path.extname(filePath).toLowerCase();

  // Only analyze JS/TS files
  if (!['.js', '.jsx', '.ts', '.tsx'].includes(ext)) {
    return issues;
  }

  const report = (type, severity, message, suggestion) =>
    issues.push({ type, severity, message, suggestion });

  const lines = countLines(content);
  const props = countProps(content);
  const nesting = countNestingDepth(content);
  const functions = countFunctions(content);

  // Component-specific checks apply to JSX/TSX only.
  if (['.tsx', '.jsx'].includes(ext)) {
    if (lines > limits.max_component_lines) {
      report('component_size', 'medium',
        `Component has ${lines} lines (max: ${limits.max_component_lines})`,
        'Consider breaking into smaller components');
    }

    if (props > limits.max_props) {
      report('prop_count', 'medium',
        `Component has ${props} props (max: ${limits.max_props})`,
        'Consider grouping related props or using composition');
    }
  }

  if (nesting > limits.max_nesting_depth) {
    report('nesting_depth', 'high',
      `Nesting depth of ${nesting} (max: ${limits.max_nesting_depth})`,
      'Extract nested logic into separate functions');
  }

  // Many functions in one file is a sign it does too much.
  if (functions > 10) {
    report('function_count', 'low',
      `File contains ${functions} functions`,
      'Consider splitting into multiple modules');
  }

  return issues;
}
|
||||
|
||||
function formatOutput(issues, filePath) {
  // Render issues as a human-readable stderr report; '' when there are none.
  if (issues.length === 0) return '';

  const severityIcons = { high: '[HIGH]', medium: '[MED]', low: '[LOW]' };

  const report = [`\n=== DSS Complexity Monitor: ${filePath} ===\n`];
  issues.forEach(issue => {
    report.push(`${severityIcons[issue.severity] || '[?]'} ${issue.message}`);
    report.push(`  Suggestion: ${issue.suggestion}\n`);
  });
  report.push('='.repeat(50));

  return report.join('\n');
}
|
||||
|
||||
async function main() {
  // Hook entry point: read the tool event from stdin, analyze the edited
  // content, and report issues to stderr. Always exits 0 (advisory only).
  const config = loadConfig();

  if (!config.complexity_monitor?.enabled) {
    process.exit(0);
  }

  // A malformed or missing stdin payload must never block the tool.
  let event;
  try {
    const buffers = [];
    for await (const piece of process.stdin) {
      buffers.push(piece);
    }
    event = JSON.parse(Buffer.concat(buffers).toString());
  } catch (e) {
    process.exit(0);
  }

  const tool = event.tool_name || '';
  const args = event.tool_input || {};

  if (tool !== 'Edit' && tool !== 'Write') {
    process.exit(0);
  }

  // Write carries full content; Edit carries only the replacement text.
  const filePath = args.file_path || '';
  const content = tool === 'Write' ? (args.content || '') : (args.new_string || '');

  if (!content || !filePath) {
    process.exit(0);
  }

  const issues = analyzeComplexity(content, filePath, config);
  if (issues.length > 0) {
    console.error(formatOutput(issues, filePath));
  }

  process.exit(0);
}

main().catch(() => process.exit(0));
|
||||
268
dss-claude-plugin/hooks/scripts/component-checker.py
Executable file
268
dss-claude-plugin/hooks/scripts/component-checker.py
Executable file
@@ -0,0 +1,268 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DSS Component Checker Hook
|
||||
Validates React components for best practices and accessibility.
|
||||
Written from scratch for DSS.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# React component patterns to check
|
||||
COMPONENT_PATTERNS = [
|
||||
# Accessibility checks
|
||||
{
|
||||
"id": "a11y-img-alt",
|
||||
"regex": r"<img\s+(?![^>]*alt=)[^>]*>",
|
||||
"category": "accessibility",
|
||||
"severity": "high",
|
||||
"message": "Missing alt attribute on <img>. Add alt text for accessibility.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "a11y-button-type",
|
||||
"regex": r"<button\s+(?![^>]*type=)[^>]*>",
|
||||
"category": "accessibility",
|
||||
"severity": "medium",
|
||||
"message": "Button missing type attribute. Add type='button' or type='submit'.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "a11y-anchor-href",
|
||||
"regex": r"<a\s+(?![^>]*href=)[^>]*>",
|
||||
"category": "accessibility",
|
||||
"severity": "high",
|
||||
"message": "Anchor tag missing href. Use button for actions without navigation.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "a11y-click-handler",
|
||||
"regex": r"<(?:div|span)\s+[^>]*onClick",
|
||||
"category": "accessibility",
|
||||
"severity": "medium",
|
||||
"message": "Click handler on non-interactive element. Use <button> or add role/tabIndex.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "a11y-form-label",
|
||||
"regex": r"<input\s+(?![^>]*(?:aria-label|id))[^>]*>",
|
||||
"category": "accessibility",
|
||||
"severity": "medium",
|
||||
"message": "Input may be missing label association. Add id with <label> or aria-label.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
# React best practices
|
||||
{
|
||||
"id": "react-key-index",
|
||||
"regex": r"\.map\([^)]*,\s*(?:index|i|idx)\s*\)[^{]*key=\{(?:index|i|idx)\}",
|
||||
"category": "react",
|
||||
"severity": "medium",
|
||||
"message": "Using array index as key. Use unique, stable IDs when possible.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "react-bind-render",
|
||||
"regex": r"onClick=\{[^}]*\.bind\(this",
|
||||
"category": "react",
|
||||
"severity": "low",
|
||||
"message": "Binding in render creates new function each time. Use arrow function or bind in constructor.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "react-inline-style-object",
|
||||
"regex": r"style=\{\{[^}]{100,}\}\}",
|
||||
"category": "react",
|
||||
"severity": "low",
|
||||
"message": "Large inline style object. Consider extracting to a constant or CSS module.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "react-console-log",
|
||||
"regex": r"console\.(log|debug|info)\(",
|
||||
"category": "react",
|
||||
"severity": "low",
|
||||
"message": "Console statement detected. Remove before production.",
|
||||
"file_types": [".js", ".jsx", ".ts", ".tsx"]
|
||||
},
|
||||
# TypeScript checks
|
||||
{
|
||||
"id": "ts-any-type",
|
||||
"regex": r":\s*any\b",
|
||||
"category": "typescript",
|
||||
"severity": "medium",
|
||||
"message": "Using 'any' type loses type safety. Consider using a specific type or 'unknown'.",
|
||||
"file_types": [".ts", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "ts-type-assertion",
|
||||
"regex": r"as\s+any\b",
|
||||
"category": "typescript",
|
||||
"severity": "medium",
|
||||
"message": "Type assertion to 'any'. This bypasses type checking.",
|
||||
"file_types": [".ts", ".tsx"]
|
||||
},
|
||||
# Component structure
|
||||
{
|
||||
"id": "component-no-export",
|
||||
"regex": r"^(?!.*export).*(?:function|const)\s+[A-Z][a-zA-Z]*\s*(?:=|:|\()",
|
||||
"category": "structure",
|
||||
"severity": "low",
|
||||
"message": "Component may not be exported. Ensure it's exported if meant to be reused.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "component-missing-displayname",
|
||||
"regex": r"(?:forwardRef|memo)\s*\([^)]*\)",
|
||||
"category": "structure",
|
||||
"severity": "low",
|
||||
"message": "HOC component may need displayName for debugging.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
}
|
||||
]
|
||||
|
||||
def get_config():
    """Load hook configuration, overlaying user settings on the defaults.

    Reads ~/.dss/hooks-config.json when present. Known sections are merged
    key-by-key so a partial user section keeps the remaining defaults.
    Falls back to the defaults on any read or parse error.
    """
    config_path = Path.home() / ".dss" / "hooks-config.json"
    default_config = {
        "component_checker": {
            "enabled": True,
            "categories": ["accessibility", "react", "typescript"],
            "min_severity": "low"
        }
    }

    if config_path.exists():
        try:
            with open(config_path) as f:
                user_config = json.load(f)
        except (OSError, json.JSONDecodeError):
            # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit. Corrupt/unreadable config -> defaults.
            return default_config
        merged = {**default_config, **user_config}
        # FIX: deep-merge known sections; a shallow merge replaced the whole
        # section dict, dropping default keys the user did not restate.
        for section, defaults in default_config.items():
            user_section = user_config.get(section)
            if isinstance(defaults, dict) and isinstance(user_section, dict):
                merged[section] = {**defaults, **user_section}
        return merged
    return default_config
|
||||
|
||||
def severity_level(severity: str) -> int:
    """Convert severity to numeric level (unknown severities map to 0)."""
    return {"low": 1, "medium": 2, "high": 3}.get(severity, 0)
|
||||
|
||||
def check_content(content: str, file_path: str, config: dict) -> list:
    """Scan content against COMPONENT_PATTERNS, honoring config filters.

    Rules are skipped when the file extension does not match, when the rule's
    category is not enabled, or when its severity is below the configured
    minimum. Returns a list of issue dicts.
    """
    checker = config.get("component_checker", {})
    wanted_categories = checker.get("categories", [])
    threshold = severity_level(checker.get("min_severity", "low"))
    ext = Path(file_path).suffix.lower()

    findings = []
    for rule in COMPONENT_PATTERNS:
        if ext not in rule.get("file_types", []):
            continue
        # An empty category list means "all categories".
        if wanted_categories and rule["category"] not in wanted_categories:
            continue
        if severity_level(rule["severity"]) < threshold:
            continue

        if re.search(rule["regex"], content, re.MULTILINE):
            findings.append({
                "id": rule["id"],
                "category": rule["category"],
                "severity": rule["severity"],
                "message": rule["message"],
            })

    return findings
|
||||
|
||||
def format_output(issues: list, file_path: str) -> str:
    """Format issues for display, grouped by category; '' when clean."""
    if not issues:
        return ""

    severity_icons = {"high": "[HIGH]", "medium": "[MED]", "low": "[LOW]"}
    category_labels = {
        "accessibility": "A11Y",
        "react": "REACT",
        "typescript": "TS",
        "structure": "STRUCT",
    }

    lines = [f"\n=== DSS Component Checker: {file_path} ===\n"]

    # Group issues by category, preserving first-seen order.
    by_category = {}
    for issue in issues:
        by_category.setdefault(issue["category"], []).append(issue)

    for category, cat_issues in by_category.items():
        lines.append(f"[{category_labels.get(category, category.upper())}]")
        lines.extend(
            f"  {severity_icons.get(item['severity'], '[?]')} {item['message']}"
            for item in cat_issues
        )
        lines.append("")

    lines.append("=" * 50)
    return "\n".join(lines)
|
||||
|
||||
def main():
    """Hook entry point: read the tool event from stdin and report issues."""
    config = get_config()

    if not config.get("component_checker", {}).get("enabled", True):
        sys.exit(0)

    # A malformed stdin payload must never block the tool.
    try:
        event = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        sys.exit(0)

    tool = event.get("tool_name", "")
    args = event.get("tool_input", {})

    if tool not in ("Edit", "Write"):
        sys.exit(0)

    file_path = args.get("file_path", "")
    ext = Path(file_path).suffix.lower() if file_path else ""

    # Only check React/TypeScript files
    if ext not in (".jsx", ".tsx", ".js", ".ts"):
        sys.exit(0)

    # Write carries full content; Edit carries only the replacement text.
    content = {"Write": args.get("content", ""), "Edit": args.get("new_string", "")}.get(tool, "")

    if not content:
        sys.exit(0)

    issues = check_content(content, file_path, config)
    if issues:
        print(format_output(issues, file_path), file=sys.stderr)

    sys.exit(0)
|
||||
182
dss-claude-plugin/hooks/scripts/git-backup.js
Executable file
182
dss-claude-plugin/hooks/scripts/git-backup.js
Executable file
@@ -0,0 +1,182 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* DSS Git Backup Hook
|
||||
* Automatically commits changes when Claude Code session ends.
|
||||
* Written from scratch for DSS.
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { execSync } = require('child_process');
|
||||
|
||||
// Configuration
|
||||
const DEFAULT_CONFIG = {
|
||||
git_backup: {
|
||||
enabled: true,
|
||||
require_git_repo: true,
|
||||
commit_only_if_changes: true,
|
||||
include_timestamp: true,
|
||||
commit_prefix: 'auto-backup',
|
||||
show_logs: false
|
||||
}
|
||||
};
|
||||
|
||||
// Prevent duplicate execution (Claude Code bug workaround)
|
||||
const STATE_DIR = path.join(__dirname, '..', '.state');
|
||||
const LOCK_FILE = path.join(STATE_DIR, '.git-backup.lock');
|
||||
const LOCK_TIMEOUT_MS = 3000;
|
||||
|
||||
function loadConfig() {
  // Overlay the user's ~/.dss/hooks-config.json on DEFAULT_CONFIG.
  // Returns the defaults on any read/parse failure.
  const configPath = path.join(process.env.HOME || '', '.dss', 'hooks-config.json');
  try {
    if (fs.existsSync(configPath)) {
      const userConfig = JSON.parse(fs.readFileSync(configPath, 'utf8'));
      const merged = { ...DEFAULT_CONFIG, ...userConfig };
      // FIX: deep-merge known sections. A shallow spread replaces the whole
      // `git_backup` object, so a partial user section would silently drop
      // defaults such as commit_prefix or require_git_repo.
      for (const key of Object.keys(DEFAULT_CONFIG)) {
        if (userConfig[key] && typeof userConfig[key] === 'object') {
          merged[key] = { ...DEFAULT_CONFIG[key], ...userConfig[key] };
        }
      }
      return merged;
    }
  } catch (e) {
    // Unreadable or corrupt config: fall back to defaults.
  }
  return DEFAULT_CONFIG;
}
|
||||
|
||||
function checkLock() {
  // Debounce duplicate invocations (Claude Code may fire the Stop hook
  // twice): refuse to run when the lock file was written within
  // LOCK_TIMEOUT_MS, then refresh the lock timestamp.
  try {
    if (!fs.existsSync(STATE_DIR)) {
      fs.mkdirSync(STATE_DIR, { recursive: true });
    }

    if (fs.existsSync(LOCK_FILE)) {
      const stamp = parseInt(fs.readFileSync(LOCK_FILE, 'utf8'));
      const ranRecently = !isNaN(stamp) && Date.now() - stamp < LOCK_TIMEOUT_MS;
      if (ranRecently) {
        return false;
      }
    }

    fs.writeFileSync(LOCK_FILE, Date.now().toString(), 'utf8');
    return true;
  } catch (e) {
    // Locking is best-effort; never block the backup on lock errors.
    return true;
  }
}
|
||||
|
||||
function isGitRepo() {
  // `git rev-parse` exits non-zero (execSync throws) outside a work tree.
  let inside = true;
  try {
    execSync('git rev-parse --is-inside-work-tree', { stdio: 'pipe' });
  } catch (e) {
    inside = false;
  }
  return inside;
}
|
||||
|
||||
function hasChanges() {
  // Any output from `git status --porcelain` means uncommitted changes;
  // a failure (e.g. not a repo) is treated as "nothing to commit".
  try {
    return execSync('git status --porcelain', { encoding: 'utf8' }).trim() !== '';
  } catch (e) {
    return false;
  }
}
|
||||
|
||||
function getChangeSummary() {
  // Summarize `git status --short` into added/modified/deleted counts.
  // Untracked ('?') files count as added; other status codes (R, C, U)
  // only contribute to `total`.
  try {
    const output = execSync('git status --short', { encoding: 'utf8' });
    const entries = output.trim().split('\n').filter(Boolean);

    const counts = { added: 0, modified: 0, deleted: 0, total: entries.length };
    for (const entry of entries) {
      const flag = entry.trim().charAt(0);
      if (flag === 'A' || flag === '?') counts.added++;
      else if (flag === 'M') counts.modified++;
      else if (flag === 'D') counts.deleted++;
    }

    return counts;
  } catch (e) {
    return { added: 0, modified: 0, deleted: 0, total: 0 };
  }
}
|
||||
|
||||
function createBackup(config) {
  // Stage everything and create a backup commit.
  // Returns {success, hash, files} or {success: false, error}.
  const backupConfig = config.git_backup || {};
  const { execFileSync } = require('child_process');

  try {
    // Stage all changes
    execFileSync('git', ['add', '-A'], { stdio: 'pipe' });

    // Build commit message
    const parts = [backupConfig.commit_prefix || 'auto-backup'];

    if (backupConfig.include_timestamp) {
      const timestamp = new Date().toISOString().replace('T', ' ').replace(/\..+/, '');
      parts.push(timestamp);
    }

    const summary = getChangeSummary();
    const summaryText = `(${summary.total} files: +${summary.added} ~${summary.modified} -${summary.deleted})`;

    const commitMessage = `${parts.join(': ')} ${summaryText}\n\nGenerated by DSS Git Backup Hook`;

    // FIX: pass the message as an argv element instead of interpolating it
    // into a shell string (`git commit -m "${msg}"`). Quotes, backticks or
    // `$(...)` in the configured prefix could previously break the command
    // or inject arbitrary shell.
    execFileSync('git', ['commit', '-m', commitMessage], { stdio: 'pipe' });

    // Get commit hash
    const commitHash = execFileSync('git', ['rev-parse', '--short', 'HEAD'], { encoding: 'utf8' }).trim();

    return { success: true, hash: commitHash, files: summary.total };
  } catch (e) {
    return { success: false, error: e.message };
  }
}
|
||||
|
||||
function log(config, message) {
  // Emit a hook-protocol message only when the user opted into logs.
  if (!config.git_backup?.show_logs) return;
  console.log(JSON.stringify({ systemMessage: message, continue: true }));
}
|
||||
|
||||
function main() {
  // Guard clauses first: debounce duplicate Stop events, then avoid
  // re-entering ourselves if Claude Code re-fires hooks during the commit.
  if (!checkLock()) {
    process.exit(0);
  }
  if (process.env.STOP_HOOK_ACTIVE === 'true') {
    process.exit(0);
  }

  const config = loadConfig();
  const settings = config.git_backup || {};

  if (!settings.enabled) {
    process.exit(0);
  }

  if (settings.require_git_repo && !isGitRepo()) {
    log(config, 'DSS Git Backup: Not a git repository, skipping');
    process.exit(0);
  }

  if (settings.commit_only_if_changes && !hasChanges()) {
    log(config, 'DSS Git Backup: No changes to commit');
    process.exit(0);
  }

  const outcome = createBackup(config);
  log(config, outcome.success
    ? `DSS Git Backup: Committed ${outcome.files} files (${outcome.hash})`
    : `DSS Git Backup: Failed - ${outcome.error}`);

  process.exit(0);
}

main();
|
||||
201
dss-claude-plugin/hooks/scripts/security-check.py
Executable file
201
dss-claude-plugin/hooks/scripts/security-check.py
Executable file
@@ -0,0 +1,201 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DSS Security Check Hook
|
||||
Validates file edits for common security vulnerabilities.
|
||||
Written from scratch for DSS - no external dependencies.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
# Security patterns to detect
|
||||
SECURITY_PATTERNS = [
|
||||
{
|
||||
"id": "xss-innerhtml",
|
||||
"patterns": [".innerHTML =", ".innerHTML=", "innerHTML:"],
|
||||
"severity": "high",
|
||||
"message": "Potential XSS: innerHTML assignment detected. Use textContent for plain text or sanitize HTML with DOMPurify.",
|
||||
"file_types": [".js", ".jsx", ".ts", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "xss-dangerously",
|
||||
"patterns": ["dangerouslySetInnerHTML"],
|
||||
"severity": "high",
|
||||
"message": "Potential XSS: dangerouslySetInnerHTML detected. Ensure content is sanitized before rendering.",
|
||||
"file_types": [".js", ".jsx", ".ts", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "eval-usage",
|
||||
"patterns": ["eval(", "new Function("],
|
||||
"severity": "critical",
|
||||
"message": "Code injection risk: eval() or new Function() detected. These can execute arbitrary code.",
|
||||
"file_types": [".js", ".jsx", ".ts", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "document-write",
|
||||
"patterns": ["document.write("],
|
||||
"severity": "medium",
|
||||
"message": "Deprecated: document.write() detected. Use DOM manipulation methods instead.",
|
||||
"file_types": [".js", ".jsx", ".ts", ".tsx", ".html"]
|
||||
},
|
||||
{
|
||||
"id": "sql-injection",
|
||||
"patterns": ["execute(f\"", "execute(f'", "cursor.execute(\"", ".query(`${"],
|
||||
"severity": "critical",
|
||||
"message": "Potential SQL injection: String interpolation in SQL query. Use parameterized queries.",
|
||||
"file_types": [".py", ".js", ".ts"]
|
||||
},
|
||||
{
|
||||
"id": "hardcoded-secret",
|
||||
"patterns": ["password=", "api_key=", "secret=", "token=", "apiKey:"],
|
||||
"severity": "high",
|
||||
"message": "Potential hardcoded secret detected. Use environment variables instead.",
|
||||
"file_types": [".py", ".js", ".ts", ".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "python-pickle",
|
||||
"patterns": ["pickle.load", "pickle.loads"],
|
||||
"severity": "high",
|
||||
"message": "Insecure deserialization: pickle can execute arbitrary code. Use JSON for untrusted data.",
|
||||
"file_types": [".py"]
|
||||
},
|
||||
{
|
||||
"id": "python-shell",
|
||||
"patterns": ["os.system(", "subprocess.call(shell=True", "subprocess.run(shell=True"],
|
||||
"severity": "high",
|
||||
"message": "Shell injection risk: Use subprocess with shell=False and pass args as list.",
|
||||
"file_types": [".py"]
|
||||
},
|
||||
{
|
||||
"id": "react-ref-current",
|
||||
"patterns": ["ref.current.innerHTML"],
|
||||
"severity": "high",
|
||||
"message": "XSS via React ref: Avoid setting innerHTML on refs. Use state/props instead.",
|
||||
"file_types": [".jsx", ".tsx"]
|
||||
},
|
||||
{
|
||||
"id": "unsafe-regex",
|
||||
"patterns": ["new RegExp(", "RegExp("],
|
||||
"severity": "medium",
|
||||
"message": "Potential ReDoS: Dynamic regex from user input can cause denial of service.",
|
||||
"file_types": [".js", ".ts", ".jsx", ".tsx"]
|
||||
}
|
||||
]
|
||||
|
||||
def get_config():
    """Load hook configuration, overlaying user settings on the defaults.

    Reads ~/.dss/hooks-config.json when present. Known sections are merged
    key-by-key so a partial user section keeps the remaining defaults.
    Falls back to the defaults on any read or parse error.
    """
    config_path = Path.home() / ".dss" / "hooks-config.json"
    default_config = {
        "security_check": {
            "enabled": True,
            "block_on_critical": False,
            "warn_only": True,
            "ignored_patterns": []
        }
    }

    if config_path.exists():
        try:
            with open(config_path) as f:
                user_config = json.load(f)
        except (OSError, json.JSONDecodeError):
            # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit. Corrupt/unreadable config -> defaults.
            return default_config
        merged = {**default_config, **user_config}
        # FIX: deep-merge known sections; a shallow merge replaced the whole
        # section dict, dropping default keys the user did not restate.
        for section, defaults in default_config.items():
            user_section = user_config.get(section)
            if isinstance(defaults, dict) and isinstance(user_section, dict):
                merged[section] = {**defaults, **user_section}
        return merged
    return default_config
|
||||
|
||||
def check_content(content: str, file_path: str) -> list:
    """Scan *content* for known insecure code patterns.

    Only rules whose ``file_types`` include the file's extension are
    applied; each rule contributes at most one issue (its first matching
    pattern). Matching is case-insensitive substring search.
    """
    ext = Path(file_path).suffix.lower()
    lowered = content.lower()
    findings = []

    for rule in SECURITY_PATTERNS:
        if ext not in rule.get("file_types", []):
            continue
        # Report the first matching pattern for this rule, then move on.
        for needle in rule["patterns"]:
            if needle.lower() in lowered:
                findings.append({
                    "id": rule["id"],
                    "severity": rule["severity"],
                    "message": rule["message"],
                    "pattern": needle,
                })
                break

    return findings
|
||||
|
||||
def format_output(issues: list, file_path: str) -> str:
    """Render *issues* as a human-readable security report.

    Returns an empty string when there is nothing to report.
    """
    if not issues:
        return ""

    icons = {
        "critical": "[CRITICAL]",
        "high": "[HIGH]",
        "medium": "[MEDIUM]",
        "low": "[LOW]",
    }

    out = [f"\n=== DSS Security Check: {file_path} ===\n"]
    for issue in issues:
        tag = icons.get(issue["severity"], "[?]")
        out.append(f"{tag} {issue['message']}")
        out.append(f" Pattern: {issue['pattern']}\n")
    out.append("=" * 50)
    return "\n".join(out)
|
||||
|
||||
def main():
    """Main hook entry point.

    Reads the Claude Code hook payload (JSON) from stdin and scans the
    content of Edit/Write tool calls for insecure patterns. Findings are
    printed to stderr. Exit codes: 0 lets the tool proceed, 2 blocks it
    (only for critical findings when block_on_critical is enabled).
    """
    config = get_config()

    if not config.get("security_check", {}).get("enabled", True):
        sys.exit(0)

    # Read hook input from stdin
    try:
        input_data = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        sys.exit(0)  # Allow tool to proceed if we can't parse

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    # Only check Edit and Write tools
    if tool_name not in ["Edit", "Write"]:
        sys.exit(0)

    file_path = tool_input.get("file_path", "")

    # Get content to check: Write sends the full file, Edit only the
    # replacement text (so only newly introduced issues are flagged).
    if tool_name == "Write":
        content = tool_input.get("content", "")
    elif tool_name == "Edit":
        content = tool_input.get("new_string", "")
    else:
        content = ""

    if not content or not file_path:
        sys.exit(0)

    # Check for security issues
    issues = check_content(content, file_path)

    if issues:
        output = format_output(issues, file_path)
        print(output, file=sys.stderr)

        # Check if we should block on critical issues
        has_critical = any(i["severity"] == "critical" for i in issues)
        if has_critical and config.get("security_check", {}).get("block_on_critical", False):
            sys.exit(2)  # Block the tool

    sys.exit(0)  # Allow tool to proceed
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
194
dss-claude-plugin/hooks/scripts/session-summary.js
Executable file
194
dss-claude-plugin/hooks/scripts/session-summary.js
Executable file
@@ -0,0 +1,194 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* DSS Session Summary Hook
|
||||
* Generates a summary report at the end of each Claude Code session.
|
||||
* Written from scratch for DSS.
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { execSync } = require('child_process');
|
||||
|
||||
// Configuration
// Defaults for the session-summary hook; user overrides are read from
// ~/.dss/hooks-config.json by loadConfig().
const DEFAULT_CONFIG = {
  session_summary: {
    enabled: true,                            // master on/off switch
    output_file: '.dss-session-summary.md',   // written into process.cwd()
    include_git_diff: true,                   // append a `git diff --stat` section
    include_file_list: true,                  // append the modified-files table
    max_diff_lines: 100                       // truncate long diff summaries
  }
};
|
||||
|
||||
/**
 * Load the hook configuration, merging user overrides onto defaults.
 *
 * Reads ~/.dss/hooks-config.json when present. The session_summary
 * section is merged key-by-key so a partial user section keeps the
 * unspecified defaults. Any read/parse error falls back to the
 * defaults — a broken config must never break the session.
 *
 * @returns {object} effective configuration with a session_summary section
 */
function loadConfig() {
  const configPath = path.join(process.env.HOME || '', '.dss', 'hooks-config.json');
  try {
    if (fs.existsSync(configPath)) {
      const userConfig = JSON.parse(fs.readFileSync(configPath, 'utf8'));
      return {
        ...DEFAULT_CONFIG,
        ...userConfig,
        session_summary: {
          ...DEFAULT_CONFIG.session_summary,
          ...(userConfig.session_summary || {})
        }
      };
    }
  } catch (e) {
    // Unreadable or invalid config: use defaults
  }
  return DEFAULT_CONFIG;
}
|
||||
|
||||
/**
 * Collect git repository state for the report.
 *
 * @returns {{branch: string, status: string, diff: string,
 *           modifiedFiles: {status: string, file: string}[]}}
 *          All fields stay at their empty defaults when git is
 *          unavailable or the cwd is not a repository.
 */
function getGitInfo() {
  const info = {
    branch: '',
    status: '',
    diff: '',
    modifiedFiles: []
  };

  try {
    // Check if in git repo
    execSync('git rev-parse --is-inside-work-tree', { stdio: 'pipe' });

    // Get branch
    info.branch = execSync('git branch --show-current', { encoding: 'utf8' }).trim();

    // Get status
    info.status = execSync('git status --short', { encoding: 'utf8' }).trim();

    // Get modified files
    const statusLines = info.status.split('\n').filter(Boolean);
    info.modifiedFiles = statusLines.map(line => {
      // NOTE(review): splitting on whitespace collapses runs of spaces in
      // file names — acceptable for a human-readable summary.
      const parts = line.trim().split(/\s+/);
      return {
        status: parts[0],
        file: parts.slice(1).join(' ')
      };
    });

    // Get diff summary
    try {
      info.diff = execSync('git diff --stat', { encoding: 'utf8' }).trim();
    } catch (e) {
      info.diff = '';
    }
  } catch (e) {
    // Not a git repo or git not available
  }

  return info;
}
|
||||
|
||||
/**
 * Derive change statistics from `git diff --numstat`.
 *
 * @returns {{startTime: string, filesModified: number,
 *           linesAdded: number, linesRemoved: number}}
 *
 * NOTE(review): startTime is the report-generation time, not the actual
 * session start — no session state is persisted to read it from.
 * Binary files report "-" counts in numstat; parseInt() yields NaN and
 * the `|| 0` fallback counts them as zero-line changes.
 */
function getSessionStats() {
  // Try to read from session state if available
  const stats = {
    startTime: new Date().toISOString(),
    filesModified: 0,
    linesAdded: 0,
    linesRemoved: 0
  };

  try {
    // Get diff stats from git
    const diffStat = execSync('git diff --numstat', { encoding: 'utf8' });
    const lines = diffStat.trim().split('\n').filter(Boolean);

    for (const line of lines) {
      // numstat format: "<added>\t<removed>\t<path>"
      const [added, removed] = line.split('\t');
      stats.linesAdded += parseInt(added) || 0;
      stats.linesRemoved += parseInt(removed) || 0;
      stats.filesModified++;
    }
  } catch (e) {
    // Git not available
  }

  return stats;
}
|
||||
|
||||
/**
 * Build the Markdown session report.
 *
 * Sections: header with timestamp/branch, change counts, an optional
 * modified-files table (capped at 20 rows) and an optional
 * `git diff --stat` block truncated to session_summary.max_diff_lines.
 *
 * @param {object} config effective configuration from loadConfig()
 * @returns {string} the complete Markdown document
 */
function generateReport(config) {
  const summaryConfig = config.session_summary || {};
  const gitInfo = getGitInfo();
  const stats = getSessionStats();

  const timestamp = new Date().toLocaleString();
  const lines = [];

  lines.push('# DSS Session Summary');
  lines.push(`\n**Generated:** ${timestamp}`);

  if (gitInfo.branch) {
    lines.push(`**Branch:** ${gitInfo.branch}`);
  }

  lines.push('\n## Changes Overview');
  lines.push('');
  lines.push(`- Files modified: ${stats.filesModified}`);
  lines.push(`- Lines added: +${stats.linesAdded}`);
  lines.push(`- Lines removed: -${stats.linesRemoved}`);

  if (summaryConfig.include_file_list && gitInfo.modifiedFiles.length > 0) {
    lines.push('\n## Modified Files');
    lines.push('');
    lines.push('| Status | File |');
    lines.push('|--------|------|');

    // Map `git status --short` codes to readable labels.
    const statusLabels = {
      'M': 'Modified',
      'A': 'Added',
      'D': 'Deleted',
      'R': 'Renamed',
      '??': 'Untracked'
    };

    // Cap the table at 20 rows to keep the report readable.
    for (const file of gitInfo.modifiedFiles.slice(0, 20)) {
      const label = statusLabels[file.status] || file.status;
      lines.push(`| ${label} | ${file.file} |`);
    }

    if (gitInfo.modifiedFiles.length > 20) {
      lines.push(`| ... | +${gitInfo.modifiedFiles.length - 20} more files |`);
    }
  }

  if (summaryConfig.include_git_diff && gitInfo.diff) {
    lines.push('\n## Diff Summary');
    lines.push('');
    lines.push('```');
    const diffLines = gitInfo.diff.split('\n');
    const maxLines = summaryConfig.max_diff_lines || 100;
    lines.push(diffLines.slice(0, maxLines).join('\n'));
    if (diffLines.length > maxLines) {
      lines.push(`... (${diffLines.length - maxLines} more lines)`);
    }
    lines.push('```');
  }

  lines.push('\n---');
  lines.push('*Generated by DSS Session Summary Hook*');

  return lines.join('\n');
}
|
||||
|
||||
/**
 * Hook entry point: write the session report and emit a JSON
 * confirmation for Claude Code.
 *
 * Always exits 0 — a summary failure must never break the session.
 */
function main() {
  const config = loadConfig();

  if (!config.session_summary?.enabled) {
    process.exit(0);
  }

  try {
    const report = generateReport(config);
    const outputFile = config.session_summary.output_file || '.dss-session-summary.md';
    const outputPath = path.join(process.cwd(), outputFile);

    fs.writeFileSync(outputPath, report, 'utf8');

    // Output confirmation
    console.log(JSON.stringify({
      systemMessage: `Session summary saved to ${outputFile}`,
      continue: true
    }));
  } catch (e) {
    // Fail silently
  }

  process.exit(0);
}
|
||||
|
||||
main();
|
||||
179
dss-claude-plugin/hooks/scripts/storybook-reminder.py
Executable file
179
dss-claude-plugin/hooks/scripts/storybook-reminder.py
Executable file
@@ -0,0 +1,179 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DSS Storybook Reminder Hook
|
||||
Reminds developers to update Storybook stories when components change.
|
||||
Written from scratch for DSS.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
def get_config():
    """Load the hook configuration, merging user overrides onto defaults.

    Reads ``~/.dss/hooks-config.json`` if it exists. The
    "storybook_reminder" section is merged key-by-key so a partial user
    section does not wipe out unspecified defaults. On any read or parse
    error the defaults are returned unchanged — a broken config must
    never break the tool call.

    Returns:
        dict: Effective configuration containing a "storybook_reminder" section.
    """
    config_path = Path.home() / ".dss" / "hooks-config.json"
    default_config = {
        "storybook_reminder": {
            "enabled": True,
            "component_patterns": ["**/components/**/*.tsx", "**/ui/**/*.tsx"],
            "story_extensions": [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"],
            "remind_on_new": True,
            "remind_on_props_change": True
        }
    }

    if config_path.exists():
        try:
            with open(config_path) as f:
                user_config = json.load(f)
            merged = {**default_config, **user_config}
            # Deep-merge our section so a partial override keeps defaults.
            merged["storybook_reminder"] = {
                **default_config["storybook_reminder"],
                **user_config.get("storybook_reminder", {}),
            }
            return merged
        except (OSError, json.JSONDecodeError):
            # Unreadable or invalid config file: fall back to defaults.
            pass
    return default_config
|
||||
|
||||
def is_component_file(file_path: str) -> bool:
    """Return True when *file_path* looks like a React component module.

    A component is a ``.tsx``/``.jsx`` file inside a component-style
    directory that is not a story, test, or barrel (index) file.
    """
    path = Path(file_path)

    # Must be a tsx/jsx file
    if path.suffix.lower() not in [".tsx", ".jsx"]:
        return False

    name = path.stem.lower()
    # Skip story/test files (dotted suffix markers) and barrel files.
    # Compare the whole stem against "index": a substring test would
    # wrongly skip components such as "ZIndexBadge".
    if name == "index" or any(x in name for x in [".stories", ".story", ".test", ".spec"]):
        return False

    # Check if in component-like directory
    parts = str(path).lower()
    component_dirs = ["components", "ui", "atoms", "molecules", "organisms", "templates"]
    return any(d in parts for d in component_dirs)
|
||||
|
||||
def find_story_file(component_path: str) -> tuple:
    """Locate the Storybook story file for *component_path*.

    Searches for ``<name>.stories.{tsx,jsx,ts,js}`` next to the
    component, then in a ``__stories__`` subdirectory, then in a
    ``stories`` subdirectory — the first hit wins.

    Returns:
        tuple: ``(True, path_str)`` when a story exists, else ``(False, None)``.
    """
    path = Path(component_path)
    base_name = path.stem
    parent = path.parent

    story_extensions = [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"]

    # Candidate directories in priority order.
    for directory in (parent, parent / "__stories__", parent / "stories"):
        if not directory.exists():
            continue
        for ext in story_extensions:
            candidate = directory / f"{base_name}{ext}"
            if candidate.exists():
                return (True, str(candidate))

    return (False, None)
|
||||
|
||||
def detect_props_change(content: str) -> bool:
    """Return True when *content* appears to define or modify component props.

    Heuristic: looks for TypeScript Props interfaces/type aliases and the
    classic defaultProps/propTypes assignments.
    """
    prop_patterns = (
        r"interface\s+\w+Props",
        r"type\s+\w+Props\s*=",
        r"Props\s*=\s*\{",
        r"defaultProps\s*=",
        r"propTypes\s*=",
    )
    return any(re.search(pattern, content) for pattern in prop_patterns)
|
||||
|
||||
def format_reminder(file_path: str, has_story: bool, story_path: str, props_changed: bool) -> str:
    """Build the reminder text for a component missing or outgrowing its story."""
    component_name = Path(file_path).stem
    out = [f"\n=== DSS Storybook Reminder ===\n"]

    if not has_story:
        out.extend([
            f"[NEW] Component '{component_name}' has no Storybook story!",
            f" Consider creating: {component_name}.stories.tsx",
            "",
            " Quick template:",
            f" import {{ {component_name} }} from './{component_name}';",
            f" export default {{ title: 'Components/{component_name}' }};",
            f" export const Default = () => <{component_name} />;",
        ])
    elif props_changed:
        out.extend([
            f"[UPDATE] Props changed in '{component_name}'",
            f" Story file: {story_path}",
            " Consider updating stories to reflect new props.",
        ])

    out.append("")
    out.append("=" * 40)
    return "\n".join(out)
|
||||
|
||||
def main():
    """Main hook entry point.

    Reads the Claude Code hook payload (JSON) from stdin and, for
    Edit/Write calls on React component files, prints a Storybook
    reminder to stderr. Always exits 0: this hook is advisory and
    never blocks the tool call.
    """
    config = get_config()

    if not config.get("storybook_reminder", {}).get("enabled", True):
        sys.exit(0)

    # Read hook input from stdin
    try:
        input_data = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        # Malformed payload: stay silent and let the tool proceed.
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    if tool_name not in ["Edit", "Write"]:
        sys.exit(0)

    file_path = tool_input.get("file_path", "")

    # Only check component files
    if not is_component_file(file_path):
        sys.exit(0)

    # Get content: Write sends the full file, Edit only the replacement text.
    if tool_name == "Write":
        content = tool_input.get("content", "")
    elif tool_name == "Edit":
        content = tool_input.get("new_string", "")
    else:
        content = ""

    # Check for story file
    has_story, story_path = find_story_file(file_path)

    # Check for props changes
    props_changed = detect_props_change(content) if content else False

    reminder_config = config.get("storybook_reminder", {})

    # Remind when the component has no story at all, or when it has one
    # but its props appear to have changed (both individually togglable).
    should_remind = False
    if not has_story and reminder_config.get("remind_on_new", True):
        should_remind = True
    elif has_story and props_changed and reminder_config.get("remind_on_props_change", True):
        should_remind = True

    if should_remind:
        output = format_reminder(file_path, has_story, story_path, props_changed)
        print(output, file=sys.stderr)

    sys.exit(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
253
dss-claude-plugin/hooks/scripts/token-validator.py
Executable file
253
dss-claude-plugin/hooks/scripts/token-validator.py
Executable file
@@ -0,0 +1,253 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DSS Token Validator Hook
|
||||
Detects hardcoded values that should use design tokens.
|
||||
Written from scratch for DSS.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Patterns for hardcoded values that should be tokens
# Each rule: id (stable identifier, also used for dedup), regex (matched
# case-insensitively by check_content), category (filterable via the
# "categories" config list), message, suggestion (token alternative) and
# file_types (extensions the rule applies to).
HARDCODED_PATTERNS = [
    {
        "id": "color-hex",
        # Negative lookbehind skips values immediately preceded by "var(".
        "regex": r"(?<!var\()#[0-9a-fA-F]{3,8}\b",
        "category": "color",
        "message": "Hardcoded hex color detected. Consider using a design token.",
        "suggestion": "Use: var(--color-*) or theme.colors.*",
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
    },
    {
        "id": "color-rgb",
        "regex": r"rgba?\s*\(\s*\d+\s*,\s*\d+\s*,\s*\d+",
        "category": "color",
        "message": "Hardcoded RGB color detected. Consider using a design token.",
        "suggestion": "Use: var(--color-*) or theme.colors.*",
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
    },
    {
        "id": "color-hsl",
        "regex": r"hsla?\s*\(\s*\d+\s*,\s*\d+%?\s*,\s*\d+%?",
        "category": "color",
        "message": "Hardcoded HSL color detected. Consider using a design token.",
        "suggestion": "Use: var(--color-*) or theme.colors.*",
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
    },
    {
        "id": "spacing-px",
        # Two or more digits: single-digit px values are commonly intentional.
        "regex": r":\s*\d{2,}px",
        "category": "spacing",
        "message": "Hardcoded pixel spacing detected. Consider using a spacing token.",
        "suggestion": "Use: var(--spacing-*) or theme.spacing.*",
        "file_types": [".css", ".scss", ".less"]
    },
    {
        "id": "font-size-px",
        "regex": r"font-size:\s*\d+px",
        "category": "typography",
        "message": "Hardcoded font-size detected. Consider using a typography token.",
        "suggestion": "Use: var(--font-size-*) or theme.fontSize.*",
        "file_types": [".css", ".scss", ".less"]
    },
    {
        "id": "font-family-direct",
        "regex": r"font-family:\s*['\"]?(?:Arial|Helvetica|Times|Verdana|Georgia)",
        "category": "typography",
        "message": "Hardcoded font-family detected. Consider using a typography token.",
        "suggestion": "Use: var(--font-family-*) or theme.fontFamily.*",
        "file_types": [".css", ".scss", ".less"]
    },
    {
        "id": "border-radius-px",
        "regex": r"border-radius:\s*\d+px",
        "category": "border",
        "message": "Hardcoded border-radius detected. Consider using a radius token.",
        "suggestion": "Use: var(--radius-*) or theme.borderRadius.*",
        "file_types": [".css", ".scss", ".less"]
    },
    {
        "id": "box-shadow-direct",
        "regex": r"box-shadow:\s*\d+px\s+\d+px",
        "category": "effects",
        "message": "Hardcoded box-shadow detected. Consider using a shadow token.",
        "suggestion": "Use: var(--shadow-*) or theme.boxShadow.*",
        "file_types": [".css", ".scss", ".less"]
    },
    {
        "id": "z-index-magic",
        "regex": r"z-index:\s*(?:999|9999|99999|\d{4,})",
        "category": "layout",
        "message": "Magic number z-index detected. Consider using a z-index token.",
        "suggestion": "Use: var(--z-index-*) with semantic names (modal, dropdown, tooltip)",
        "file_types": [".css", ".scss", ".less"]
    },
    {
        "id": "inline-style-color",
        "regex": r"style=\{?\{[^}]*color:\s*['\"]#[0-9a-fA-F]+['\"]",
        "category": "color",
        "message": "Hardcoded color in inline style. Consider using theme tokens.",
        "suggestion": "Use: style={{ color: theme.colors.* }}",
        "file_types": [".jsx", ".tsx"]
    },
    {
        "id": "tailwind-arbitrary",
        "regex": r"(?:bg|text|border)-\[#[0-9a-fA-F]+\]",
        "category": "color",
        "message": "Arbitrary Tailwind color value. Consider using theme colors.",
        "suggestion": "Use: bg-primary, text-secondary, etc.",
        "file_types": [".jsx", ".tsx", ".html"]
    }
]
|
||||
|
||||
# Allowlist patterns (common exceptions). Matched case-insensitively by
# is_allowlisted(); a value matching any entry is never reported.
ALLOWLIST = [
    r"#000(000)?\b",  # Pure black — 3- or 6-digit form, mirroring the white entry
    r"#fff(fff)?",  # Pure white
    r"transparent",
    r"inherit",
    r"currentColor",
    r"var\(--",  # Already using CSS variables
    r"theme\.",  # Already using theme
    r"colors\.",  # Already using colors object
]
|
||||
|
||||
def get_config():
    """Load the hook configuration, merging user overrides onto defaults.

    Reads ``~/.dss/hooks-config.json`` if it exists. The "token_validator"
    section is merged key-by-key so a partial user section does not wipe
    out unspecified defaults. On any read or parse error the defaults are
    returned unchanged — a broken config must never break the tool call.

    Returns:
        dict: Effective configuration containing a "token_validator" section.
    """
    config_path = Path.home() / ".dss" / "hooks-config.json"
    default_config = {
        "token_validator": {
            "enabled": True,
            "strict_mode": False,
            "warn_only": True,
            "categories": ["color", "spacing", "typography"]
        }
    }

    if config_path.exists():
        try:
            with open(config_path) as f:
                user_config = json.load(f)
            merged = {**default_config, **user_config}
            # Deep-merge our section so a partial override keeps defaults.
            merged["token_validator"] = {
                **default_config["token_validator"],
                **user_config.get("token_validator", {}),
            }
            return merged
        except (OSError, json.JSONDecodeError):
            # Unreadable or invalid config file: fall back to defaults.
            pass
    return default_config
|
||||
|
||||
def is_allowlisted(match: str) -> bool:
    """Return True when *match* is an allowed exception.

    Covers pure black/white, CSS keywords, and values that already use
    CSS variables or theme references.
    """
    return any(re.search(pattern, match, re.IGNORECASE) for pattern in ALLOWLIST)
|
||||
|
||||
def check_content(content: str, file_path: str, config: dict) -> list:
    """Scan *content* for hardcoded design values that should be tokens.

    Applies only the rules matching the file's extension and the enabled
    categories (an empty category list means "all"). Allowlisted values
    are ignored, and at most one issue per rule id is reported, keeping
    the first match encountered.
    """
    ext = Path(file_path).suffix.lower()
    enabled_categories = config.get("token_validator", {}).get("categories", [])

    findings = []
    reported_ids = set()

    for rule in HARDCODED_PATTERNS:
        # Skip rules that do not apply to this file type.
        if ext not in rule.get("file_types", []):
            continue

        # Skip if category not enabled (unless empty = all).
        if enabled_categories and rule["category"] not in enabled_categories:
            continue

        if rule["id"] in reported_ids:
            continue

        for match in re.findall(rule["regex"], content, re.IGNORECASE):
            if is_allowlisted(match):
                continue
            reported_ids.add(rule["id"])
            findings.append({
                "id": rule["id"],
                "category": rule["category"],
                "message": rule["message"],
                "suggestion": rule["suggestion"],
                "value": match[:50],  # Truncate long matches
            })
            break  # one issue per rule id is enough

    return findings
|
||||
|
||||
def format_output(issues: list, file_path: str) -> str:
    """Render token issues as a human-readable report.

    Returns an empty string when there is nothing to report.
    """
    if not issues:
        return ""

    icons = {
        "color": "[COLOR]",
        "spacing": "[SPACE]",
        "typography": "[FONT]",
        "border": "[BORDER]",
        "effects": "[EFFECT]",
        "layout": "[LAYOUT]",
    }

    out = [f"\n=== DSS Token Validator: {file_path} ===\n"]
    for issue in issues:
        tag = icons.get(issue["category"], "[TOKEN]")
        out.append(f"{tag} {issue['message']}")
        out.append(f" Found: {issue['value']}")
        out.append(f" {issue['suggestion']}\n")
    out.append("=" * 50)
    return "\n".join(out)
|
||||
|
||||
def main():
    """Main hook entry point.

    Reads the Claude Code hook payload (JSON) from stdin, scans the
    content of Edit/Write tool calls for hardcoded design values, and
    reports findings on stderr. Exits 2 (block the tool) only when
    strict_mode is enabled and issues were found; otherwise exits 0.
    """
    config = get_config()

    if not config.get("token_validator", {}).get("enabled", True):
        sys.exit(0)

    # Read hook input from stdin
    try:
        input_data = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        # Malformed payload: stay silent and let the tool proceed.
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    if tool_name not in ["Edit", "Write"]:
        sys.exit(0)

    file_path = tool_input.get("file_path", "")

    # Get content to check: Write sends the full file, Edit only the
    # replacement text (so only newly introduced values are flagged).
    if tool_name == "Write":
        content = tool_input.get("content", "")
    elif tool_name == "Edit":
        content = tool_input.get("new_string", "")
    else:
        content = ""

    if not content or not file_path:
        sys.exit(0)

    # Check for token issues
    issues = check_content(content, file_path, config)

    if issues:
        output = format_output(issues, file_path)
        print(output, file=sys.stderr)

        # In strict mode, block on issues
        if config.get("token_validator", {}).get("strict_mode", False):
            sys.exit(2)

    sys.exit(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
2837
dss-claude-plugin/servers/dss-mcp-server.py
Normal file
2837
dss-claude-plugin/servers/dss-mcp-server.py
Normal file
File diff suppressed because it is too large
Load Diff
183
dss-claude-plugin/skills/component-audit/SKILL.md
Normal file
183
dss-claude-plugin/skills/component-audit/SKILL.md
Normal file
@@ -0,0 +1,183 @@
|
||||
---
|
||||
name: Component Audit
|
||||
description: Audit React components for design system adoption and identify refactoring opportunities
|
||||
globs:
|
||||
- "**/*.tsx"
|
||||
- "**/*.jsx"
|
||||
- "**/*.vue"
|
||||
- "**/components/**"
|
||||
- "**/src/**"
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Component Audit
|
||||
|
||||
## Overview
|
||||
|
||||
This skill provides deep analysis of React/Vue components to identify design system adoption opportunities, hardcoded values, and refactoring recommendations.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use this skill when the user asks to:
|
||||
- Audit components for design system readiness
|
||||
- Find hardcoded values in components
|
||||
- Identify component consolidation opportunities
|
||||
- Analyze component dependencies
|
||||
- Find styling inconsistencies in components
|
||||
|
||||
## Audit Categories
|
||||
|
||||
### 1. Hardcoded Values
|
||||
Identifies values that should be tokens:
|
||||
- Inline colors (`#ffffff`, `rgb()`)
|
||||
- Pixel values for spacing
|
||||
- Font sizes and weights
|
||||
- Border radii and shadows
|
||||
|
||||
### 2. Component Patterns
|
||||
Analyzes component structure:
|
||||
- Repeated component patterns
|
||||
- Similar components that could merge
|
||||
- Props that could use design tokens
|
||||
- Styling approaches used
|
||||
|
||||
### 3. Dependency Analysis
|
||||
Maps component relationships:
|
||||
- Import dependencies
|
||||
- Shared utilities
|
||||
- Style dependencies
|
||||
- Context usage
|
||||
|
||||
### 4. Accessibility
|
||||
Basic accessibility checks:
|
||||
- Missing alt texts
|
||||
- Color contrast issues
|
||||
- Focus management
|
||||
- ARIA attributes
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Run Audit**
|
||||
```
|
||||
Use dss_audit_components with component directory path
|
||||
```
|
||||
|
||||
2. **Review Findings**
|
||||
- Check hardcoded values count
|
||||
- Review component patterns
|
||||
- Examine dependency graph
|
||||
|
||||
3. **Prioritize Issues**
|
||||
- High impact, low effort first
|
||||
- Group by category
|
||||
- Consider team capacity
|
||||
|
||||
4. **Create Refactoring Plan**
|
||||
- Token creation tasks
|
||||
- Component consolidation
|
||||
- Style migration steps
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Audit my components folder for design system issues"
|
||||
|
||||
**Assistant Response:**
|
||||
```
|
||||
I'll audit your components for design system adoption.
|
||||
|
||||
[Uses dss_audit_components tool]
|
||||
|
||||
**Component Audit Results:**
|
||||
|
||||
**Summary:**
|
||||
- 45 components analyzed
|
||||
- 127 hardcoded values found
|
||||
- 8 component consolidation opportunities
|
||||
- 23 accessibility issues
|
||||
|
||||
**Hardcoded Values (Top Issues):**
|
||||
|
||||
| File | Line | Issue | Suggested Token |
|
||||
|------|------|-------|-----------------|
|
||||
| Button.tsx | 12 | color: #0066cc | --color-primary |
|
||||
| Card.tsx | 8 | padding: 16px | --spacing-md |
|
||||
| Header.tsx | 15 | font-size: 24px | --font-size-xl |
|
||||
| Modal.tsx | 22 | border-radius: 8px | --radius-md |
|
||||
|
||||
**Component Consolidation:**
|
||||
1. `PrimaryButton`, `SecondaryButton`, `GhostButton`
|
||||
-> Consolidate to `Button` with variant prop
|
||||
|
||||
2. `SmallCard`, `LargeCard`, `FeaturedCard`
|
||||
-> Consolidate to `Card` with size prop
|
||||
|
||||
**Dependency Issues:**
|
||||
- Circular dependency: Header -> Nav -> Header
|
||||
- Deep nesting: Page -> Layout -> Container -> Box (4 levels)
|
||||
|
||||
**Recommendations:**
|
||||
1. Create color tokens (reduces 89 hardcoded colors)
|
||||
2. Implement spacing scale (standardizes 34 spacing values)
|
||||
3. Consolidate button components (removes 2 duplicates)
|
||||
|
||||
Would you like me to generate a detailed refactoring plan?
|
||||
```
|
||||
|
||||
## Issue Severity Levels
|
||||
|
||||
| Level | Description | Action |
|
||||
|-------|-------------|--------|
|
||||
| Critical | Breaks design consistency | Fix immediately |
|
||||
| High | Significant inconsistency | Fix in next sprint |
|
||||
| Medium | Minor inconsistency | Plan for refactoring |
|
||||
| Low | Optimization opportunity | Nice to have |
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `dss_audit_components` - Main audit tool
|
||||
- `dss_analyze_project` - Broader project analysis
|
||||
- `dss_find_quick_wins` - Find easy fixes
|
||||
- `dss_extract_tokens` - Create tokens from findings
|
||||
|
||||
## Output Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"components_analyzed": 45,
|
||||
"hardcoded_values": [
|
||||
{
|
||||
"file": "Button.tsx",
|
||||
"line": 12,
|
||||
"value": "#0066cc",
|
||||
"type": "color",
|
||||
"suggestion": "--color-primary"
|
||||
}
|
||||
],
|
||||
"consolidation_opportunities": [
|
||||
{
|
||||
"components": ["PrimaryButton", "SecondaryButton"],
|
||||
"suggested_name": "Button",
|
||||
"variant_prop": "variant"
|
||||
}
|
||||
],
|
||||
"dependency_issues": [],
|
||||
"accessibility_issues": []
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Run Regularly**
|
||||
- Audit before major refactors
|
||||
- Include in CI/CD pipeline
|
||||
- Track metrics over time
|
||||
|
||||
2. **Prioritize Fixes**
|
||||
- Start with high-impact, low-effort
|
||||
- Group related changes
|
||||
- Document decisions
|
||||
|
||||
3. **Team Alignment**
|
||||
- Share audit results
|
||||
- Discuss consolidation decisions
|
||||
- Update component guidelines
|
||||
112
dss-claude-plugin/skills/design-system-analysis/SKILL.md
Normal file
112
dss-claude-plugin/skills/design-system-analysis/SKILL.md
Normal file
@@ -0,0 +1,112 @@
|
||||
---
|
||||
name: Design System Analysis
|
||||
description: Analyze codebases for design patterns, component usage, and tokenization opportunities
|
||||
globs:
|
||||
- "**/*.css"
|
||||
- "**/*.scss"
|
||||
- "**/*.tsx"
|
||||
- "**/*.jsx"
|
||||
- "**/*.vue"
|
||||
- "**/tailwind.config.*"
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Design System Analysis
|
||||
|
||||
## Overview
|
||||
|
||||
This skill provides comprehensive analysis of existing codebases to identify design patterns, component usage, and opportunities for design system adoption.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use this skill when the user asks to:
|
||||
- Analyze a project for design system opportunities
|
||||
- Audit an existing codebase for design patterns
|
||||
- Scan a React/Vue project for component patterns
|
||||
- Identify style inconsistencies
|
||||
- Find tokenization opportunities
|
||||
|
||||
## Capabilities
|
||||
|
||||
### 1. Project Scanning
|
||||
- Scans entire project structure
|
||||
- Identifies style files (CSS, SCSS, Tailwind)
|
||||
- Locates component files (React, Vue)
|
||||
- Builds file dependency map
|
||||
|
||||
### 2. Style Analysis
|
||||
- Extracts color values and patterns
|
||||
- Identifies typography scales
|
||||
- Finds spacing patterns
|
||||
- Detects shadow and border patterns
|
||||
|
||||
### 3. Component Analysis
|
||||
- Identifies React/Vue components
|
||||
- Maps component dependencies
|
||||
- Finds repeated patterns
|
||||
- Detects hardcoded values
|
||||
|
||||
### 4. Dependency Graph
|
||||
- Builds component relationship graph
|
||||
- Identifies shared dependencies
|
||||
- Finds circular dependencies
|
||||
- Maps style imports
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Start Analysis**
|
||||
```
|
||||
Use the dss_analyze_project tool with the project path
|
||||
```
|
||||
|
||||
2. **Review Results**
|
||||
- Check scan summary for file counts
|
||||
- Review style patterns found
|
||||
- Examine component analysis
|
||||
- Note dependency relationships
|
||||
|
||||
3. **Generate Recommendations**
|
||||
- Prioritize findings by impact
|
||||
- Suggest token candidates
|
||||
- Recommend component refactoring
|
||||
- Identify quick wins
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Analyze my React app at /home/user/my-app for design system adoption"
|
||||
|
||||
**Assistant Response:**
|
||||
```
|
||||
I'll analyze your React application for design system opportunities.
|
||||
|
||||
[Uses dss_analyze_project tool]
|
||||
|
||||
Based on the analysis:
|
||||
|
||||
**Summary:**
|
||||
- 47 style files scanned
|
||||
- 23 React components found
|
||||
- 156 unique color values detected
|
||||
- 12 typography variations identified
|
||||
|
||||
**Key Findings:**
|
||||
1. Color Inconsistency: Found 156 color values that could be reduced to 12 design tokens
|
||||
2. Spacing Patterns: 8 different spacing scales in use
|
||||
3. Component Duplication: 3 button variants that could be consolidated
|
||||
|
||||
**Recommendations:**
|
||||
1. Create a color token system (high impact, medium effort)
|
||||
2. Standardize spacing scale to 4px grid (medium impact, low effort)
|
||||
3. Create a Button component with variants (high impact, high effort)
|
||||
```
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `dss_analyze_project` - Main analysis tool
|
||||
- `dss_extract_tokens` - Extract tokens from analysis
|
||||
- `dss_find_quick_wins` - Find low-effort improvements
|
||||
|
||||
## Dependencies
|
||||
|
||||
- DSS analyze module (ProjectScanner, ReactAnalyzer, StyleAnalyzer)
|
||||
- DSS models (Project, ProjectMetadata)
|
||||
188
dss-claude-plugin/skills/figma-sync/SKILL.md
Normal file
188
dss-claude-plugin/skills/figma-sync/SKILL.md
Normal file
@@ -0,0 +1,188 @@
|
||||
---
|
||||
name: Figma Sync
|
||||
description: Synchronize design tokens from Figma files using the Figma API
|
||||
globs:
|
||||
- "**/figma-tokens/**"
|
||||
- "**/design-tokens/**"
|
||||
- "**/*.figma.json"
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Figma Sync
|
||||
|
||||
## Overview
|
||||
|
||||
This skill enables synchronization of design tokens from Figma files. It extracts colors, typography, spacing, and other design decisions from Figma and converts them to code-ready tokens.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use this skill when the user asks to:
|
||||
- Import tokens from Figma
|
||||
- Sync design decisions with code
|
||||
- Extract colors from Figma file
|
||||
- Update tokens from Figma changes
|
||||
- Connect Figma to the design system
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Figma Token
|
||||
Set the `FIGMA_TOKEN` environment variable:
|
||||
```bash
|
||||
export FIGMA_TOKEN=your-figma-personal-access-token
|
||||
```
|
||||
|
||||
### File Key
|
||||
Get the file key from Figma URL:
|
||||
```
|
||||
https://www.figma.com/file/XXXXXXXXXXXXXX/Design-System
|
||||
^^^^^^^^^^^^^^
|
||||
This is the file key
|
||||
```
|
||||
|
||||
## Extracted Token Types
|
||||
|
||||
| Type | Figma Source | Token Output |
|
||||
|------|--------------|--------------|
|
||||
| Colors | Fill colors, Strokes | color.* tokens |
|
||||
| Typography | Text styles | typography.* tokens |
|
||||
| Spacing | Auto-layout gaps | spacing.* tokens |
|
||||
| Shadows | Drop shadows | shadow.* tokens |
|
||||
| Radii | Corner radius | radius.* tokens |
|
||||
| Borders | Stroke weights | border.* tokens |
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Get Figma File Key**
|
||||
- Open Figma file in browser
|
||||
- Copy file key from URL
|
||||
|
||||
2. **Sync Tokens**
|
||||
```
|
||||
Use dss_sync_figma with file_key
|
||||
```
|
||||
|
||||
3. **Review Extracted Tokens**
|
||||
- Check token names
|
||||
- Verify values
|
||||
- Review categories
|
||||
|
||||
4. **Generate Theme Files**
|
||||
- Use theme generation skill
|
||||
- Export to CSS/SCSS/JSON
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Sync design tokens from our Figma file"
|
||||
|
||||
**Assistant Response:**
|
||||
```
|
||||
I'll sync tokens from your Figma file.
|
||||
|
||||
First, I need the Figma file key. You can find it in the URL:
|
||||
https://www.figma.com/file/[FILE_KEY]/Design-System
|
||||
|
||||
What's your file key?
|
||||
|
||||
**User:** abc123xyz456
|
||||
|
||||
[Uses dss_sync_figma with file_key: abc123xyz456]
|
||||
|
||||
**Figma Sync Results:**
|
||||
|
||||
**Extracted Tokens:**
|
||||
|
||||
Colors (24 tokens):
|
||||
- primary/500: #0066CC
|
||||
- primary/400: #3385D6
|
||||
- primary/600: #0052A3
|
||||
- secondary/500: #6C757D
|
||||
- success/500: #28A745
|
||||
- error/500: #DC3545
|
||||
|
||||
Typography (8 styles):
|
||||
- heading/h1: Inter Bold 48px/56px
|
||||
- heading/h2: Inter Bold 36px/44px
|
||||
- heading/h3: Inter SemiBold 24px/32px
|
||||
- body/regular: Inter Regular 16px/24px
|
||||
- body/small: Inter Regular 14px/20px
|
||||
|
||||
Spacing (6 values):
|
||||
- xs: 4px
|
||||
- sm: 8px
|
||||
- md: 16px
|
||||
- lg: 24px
|
||||
- xl: 32px
|
||||
- 2xl: 48px
|
||||
|
||||
Shadows (3 styles):
|
||||
- sm: 0 1px 2px rgba(0,0,0,0.05)
|
||||
- md: 0 4px 6px rgba(0,0,0,0.1)
|
||||
- lg: 0 10px 15px rgba(0,0,0,0.1)
|
||||
|
||||
**Total:** 41 tokens extracted
|
||||
|
||||
Would you like me to generate theme files from these tokens?
|
||||
```
|
||||
|
||||
## API Rate Limiting
|
||||
|
||||
The Figma API has rate limits:
|
||||
- 30 requests per minute for free plans
|
||||
- Higher limits for paid plans
|
||||
|
||||
DSS implements:
|
||||
- Automatic retry with exponential backoff
|
||||
- Request caching (1 hour TTL)
|
||||
- Batch requests where possible
|
||||
|
||||
## Token Naming
|
||||
|
||||
Figma names are converted to token format:
|
||||
```
|
||||
Figma: "Primary / 500"
|
||||
Token: color-primary-500
|
||||
|
||||
Figma: "Heading / H1"
|
||||
Token: typography-heading-h1
|
||||
|
||||
Figma: "Shadow / Medium"
|
||||
Token: shadow-md
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error | Cause | Solution |
|
||||
|-------|-------|----------|
|
||||
| 403 Forbidden | Invalid token | Check FIGMA_TOKEN |
|
||||
| 404 Not Found | Invalid file key | Verify file key |
|
||||
| 429 Too Many Requests | Rate limited | Wait and retry |
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `dss_sync_figma` - Main sync tool
|
||||
- `dss_extract_tokens` - Merge with other sources
|
||||
- `dss_generate_theme` - Generate from Figma tokens
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Figma Organization**
|
||||
- Use consistent naming in Figma
|
||||
- Organize styles in folders
|
||||
- Use descriptions for context
|
||||
|
||||
2. **Sync Frequency**
|
||||
- Sync after major design changes
|
||||
- Automate in CI/CD if possible
|
||||
- Version control token files
|
||||
|
||||
3. **Conflict Resolution**
|
||||
- Compare with existing tokens
|
||||
- Review changes before applying
|
||||
- Maintain changelog
|
||||
|
||||
## Security Notes
|
||||
|
||||
- Never commit FIGMA_TOKEN to version control
|
||||
- Use environment variables or secrets manager
|
||||
- Rotate tokens periodically
|
||||
- Use read-only tokens when possible
|
||||
223
dss-claude-plugin/skills/quick-wins/SKILL.md
Normal file
223
dss-claude-plugin/skills/quick-wins/SKILL.md
Normal file
@@ -0,0 +1,223 @@
|
||||
---
|
||||
name: Quick Wins
|
||||
description: Find low-effort, high-impact opportunities for design system adoption
|
||||
globs:
|
||||
- "**/*.css"
|
||||
- "**/*.scss"
|
||||
- "**/*.tsx"
|
||||
- "**/*.jsx"
|
||||
- "**/components/**"
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Quick Wins
|
||||
|
||||
## Overview
|
||||
|
||||
This skill identifies quick win opportunities for design system adoption - low-effort changes that provide high impact improvements to design consistency and maintainability.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use this skill when the user asks to:
|
||||
- Find easy design system improvements
|
||||
- Get quick wins for the sprint
|
||||
- Prioritize low-hanging fruit
|
||||
- Start design system adoption incrementally
|
||||
- Get immediate impact with minimal effort
|
||||
|
||||
## Quick Win Categories
|
||||
|
||||
### 1. Color Consolidation
|
||||
- Multiple similar colors that differ slightly
|
||||
- Colors that can be reduced to a single token
|
||||
- Opportunity: Replace 10+ colors with 3-5 tokens
|
||||
|
||||
### 2. Spacing Standardization
|
||||
- Arbitrary pixel values (13px, 17px, 22px)
|
||||
- Inconsistent margins and paddings
|
||||
- Opportunity: Implement 4px/8px grid system
|
||||
|
||||
### 3. Typography Cleanup
|
||||
- Too many font sizes
|
||||
- Inconsistent line heights
|
||||
- Opportunity: Define 5-7 type scale levels
|
||||
|
||||
### 4. Border Radius Normalization
|
||||
- Random radius values
|
||||
- Inconsistent rounded corners
|
||||
- Opportunity: 3-4 standard radius values
|
||||
|
||||
### 5. Shadow Standardization
|
||||
- Various shadow implementations
|
||||
- Inconsistent elevation
|
||||
- Opportunity: 3 shadow levels (sm, md, lg)
|
||||
|
||||
## Effort/Impact Matrix
|
||||
|
||||
```
|
||||
HIGH IMPACT
|
||||
|
|
||||
+-----------+--------+--------+-----------+
|
||||
| | | | |
|
||||
| Extract | Create | Spacing| Color |
|
||||
| common | Button | scale | tokens |
|
||||
| utilities | system | | |
|
||||
| | | | |
|
||||
LOW +-----------+--------+--------+-----------+ HIGH
|
||||
EFFORT | | | EFFORT
|
||||
| | | | |
|
||||
| Document | Create | Build | Complete |
|
||||
| existing | icons | full | component |
|
||||
| patterns | set | theme | library |
|
||||
| | | | |
|
||||
+-----------+--------+--------+-----------+
|
||||
|
|
||||
LOW IMPACT
|
||||
```
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Find Quick Wins**
|
||||
```
|
||||
Use dss_find_quick_wins with project path
|
||||
```
|
||||
|
||||
2. **Review Opportunities**
|
||||
- Check effort estimates
|
||||
- Review impact scores
|
||||
- Consider dependencies
|
||||
|
||||
3. **Prioritize**
|
||||
- High impact, low effort first
|
||||
- Group related changes
|
||||
- Consider team velocity
|
||||
|
||||
4. **Execute**
|
||||
- Create tokens for identified values
|
||||
- Replace hardcoded values
|
||||
- Document changes
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Find quick wins for our design system"
|
||||
|
||||
**Assistant Response:**
|
||||
```
|
||||
I'll find quick win opportunities for your project.
|
||||
|
||||
[Uses dss_find_quick_wins tool]
|
||||
|
||||
**Quick Win Analysis:**
|
||||
|
||||
**Top 5 Quick Wins (by Impact/Effort):**
|
||||
|
||||
1. **Color Consolidation** [HIGH IMPACT / LOW EFFORT]
|
||||
- Found 47 color values reducible to 8 tokens
|
||||
- Files affected: 23
|
||||
- Estimated effort: 2 hours
|
||||
```css
|
||||
/* Before: 47 variations */
|
||||
color: #0066cc;
|
||||
color: #0067cd; /* Nearly identical */
|
||||
color: #0065cb; /* Nearly identical */
|
||||
|
||||
/* After: 1 token */
|
||||
color: var(--color-primary);
|
||||
```
|
||||
|
||||
2. **Spacing Scale** [HIGH IMPACT / LOW EFFORT]
|
||||
- Found 34 unique spacing values
|
||||
- Can reduce to 6 tokens (4, 8, 16, 24, 32, 48)
|
||||
- Files affected: 31
|
||||
- Estimated effort: 3 hours
|
||||
|
||||
3. **Border Radius** [MEDIUM IMPACT / LOW EFFORT]
|
||||
- Found 12 radius values
|
||||
- Can reduce to 4 tokens (2, 4, 8, 16)
|
||||
- Files affected: 15
|
||||
- Estimated effort: 1 hour
|
||||
|
||||
4. **Shadow Cleanup** [MEDIUM IMPACT / LOW EFFORT]
|
||||
- Found 8 shadow definitions
|
||||
- Can consolidate to 3 tokens
|
||||
- Files affected: 12
|
||||
- Estimated effort: 1 hour
|
||||
|
||||
5. **Font Size Scale** [HIGH IMPACT / MEDIUM EFFORT]
|
||||
- Found 15 font sizes
|
||||
- Can reduce to 7 tokens
|
||||
- Files affected: 28
|
||||
- Estimated effort: 3 hours
|
||||
|
||||
**Total Estimated Time:** 10 hours
|
||||
**Estimated Impact:** 60% reduction in style inconsistency
|
||||
|
||||
**Recommended Order:**
|
||||
1. Colors (biggest impact)
|
||||
2. Spacing (most widespread)
|
||||
3. Border radius (quick win)
|
||||
4. Shadows (contained scope)
|
||||
5. Font sizes (needs coordination)
|
||||
|
||||
Would you like me to create tokens for any of these?
|
||||
```
|
||||
|
||||
## Quick Win Templates
|
||||
|
||||
### Color Token Creation
|
||||
```css
|
||||
/* Quick win: Create color tokens */
|
||||
:root {
|
||||
--color-primary: #0066cc;
|
||||
--color-primary-light: #3385d6;
|
||||
--color-primary-dark: #004c99;
|
||||
}
|
||||
|
||||
/* Replace all instances */
|
||||
/* Before */ color: #0066cc;
|
||||
/* After */ color: var(--color-primary);
|
||||
```
|
||||
|
||||
### Spacing Scale
|
||||
```css
|
||||
/* Quick win: Spacing scale */
|
||||
:root {
|
||||
--spacing-xs: 4px;
|
||||
--spacing-sm: 8px;
|
||||
--spacing-md: 16px;
|
||||
--spacing-lg: 24px;
|
||||
--spacing-xl: 32px;
|
||||
}
|
||||
```
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `dss_find_quick_wins` - Main quick wins tool
|
||||
- `dss_analyze_project` - Full analysis
|
||||
- `dss_extract_tokens` - Create tokens
|
||||
- `dss_audit_components` - Component-level audit
|
||||
|
||||
## Metrics
|
||||
|
||||
Quick wins are scored on:
|
||||
- **Impact**: How many files/components affected
|
||||
- **Effort**: Estimated time to implement
|
||||
- **Risk**: Likelihood of regression
|
||||
- **Value**: Improvement to consistency
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Start Small**
|
||||
- Pick 2-3 quick wins per sprint
|
||||
- Validate before scaling
|
||||
- Document learnings
|
||||
|
||||
2. **Track Progress**
|
||||
- Measure before/after metrics
|
||||
- Celebrate wins
|
||||
- Share with team
|
||||
|
||||
3. **Build Momentum**
|
||||
- Use quick wins to build support
|
||||
- Demonstrate value early
|
||||
- Plan for larger initiatives
|
||||
229
dss-claude-plugin/skills/storybook-integration/SKILL.md
Normal file
229
dss-claude-plugin/skills/storybook-integration/SKILL.md
Normal file
@@ -0,0 +1,229 @@
|
||||
---
|
||||
name: Storybook Integration
|
||||
description: Set up and configure Storybook for design system documentation and development
|
||||
globs:
|
||||
- "**/.storybook/**"
|
||||
- "**/*.stories.tsx"
|
||||
- "**/*.stories.jsx"
|
||||
- "**/*.stories.mdx"
|
||||
- "**/storybook-static/**"
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Storybook Integration
|
||||
|
||||
## Overview
|
||||
|
||||
This skill helps set up, configure, and generate Storybook stories for design system components. It creates comprehensive documentation and interactive examples.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use this skill when the user asks to:
|
||||
- Set up Storybook for a project
|
||||
- Generate stories for components
|
||||
- Configure Storybook theme
|
||||
- Create component documentation
|
||||
- Add Storybook addons
|
||||
|
||||
## Capabilities
|
||||
|
||||
### 1. Story Generation
|
||||
- Automatic story creation from components
|
||||
- Props documentation extraction
|
||||
- Default story with all props
|
||||
- Variant stories for different states
|
||||
|
||||
### 2. Theme Configuration
|
||||
- Design token integration
|
||||
- Light/dark mode themes
|
||||
- Custom branding
|
||||
- Typography setup
|
||||
|
||||
### 3. Addon Configuration
|
||||
- Controls for prop editing
|
||||
- Actions for event logging
|
||||
- Viewport for responsive testing
|
||||
- Accessibility testing
|
||||
|
||||
### 4. Documentation
|
||||
- MDX documentation pages
|
||||
- Component API tables
|
||||
- Usage examples
|
||||
- Design guidelines
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Scan Existing Setup**
|
||||
```
|
||||
Use dss_setup_storybook with action: scan
|
||||
```
|
||||
|
||||
2. **Generate Stories**
|
||||
```
|
||||
Use dss_setup_storybook with action: generate
|
||||
```
|
||||
|
||||
3. **Configure Theme**
|
||||
```
|
||||
Use dss_setup_storybook with action: configure
|
||||
```
|
||||
|
||||
4. **Review and Refine**
|
||||
- Check generated stories
|
||||
- Add custom documentation
|
||||
- Configure addons
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Set up Storybook for my design system components"
|
||||
|
||||
**Assistant Response:**
|
||||
```
|
||||
I'll help you set up Storybook for your design system.
|
||||
|
||||
[Uses dss_setup_storybook with action: scan]
|
||||
|
||||
**Current Status:**
|
||||
- Storybook: Not installed
|
||||
- Components found: 23
|
||||
- Existing stories: 0
|
||||
|
||||
Let me set up Storybook with your design tokens.
|
||||
|
||||
[Uses dss_setup_storybook with action: configure]
|
||||
|
||||
**Storybook Configuration Created:**
|
||||
|
||||
.storybook/main.js:
|
||||
```javascript
|
||||
module.exports = {
|
||||
stories: ['../src/**/*.stories.@(js|jsx|ts|tsx|mdx)'],
|
||||
addons: [
|
||||
'@storybook/addon-essentials',
|
||||
'@storybook/addon-a11y',
|
||||
'@storybook/addon-links'
|
||||
],
|
||||
framework: '@storybook/react'
|
||||
};
|
||||
```
|
||||
|
||||
.storybook/preview.js:
|
||||
```javascript
|
||||
import { themes } from '@storybook/theming';
|
||||
import '../src/styles/tokens.css';
|
||||
|
||||
export const parameters = {
|
||||
actions: { argTypesRegex: '^on[A-Z].*' },
|
||||
controls: { expanded: true },
|
||||
docs: {
|
||||
theme: themes.light
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
Now generating stories for your components...
|
||||
|
||||
[Uses dss_setup_storybook with action: generate]
|
||||
|
||||
**Stories Generated:**
|
||||
- Button.stories.tsx (4 variants)
|
||||
- Card.stories.tsx (3 variants)
|
||||
- Input.stories.tsx (5 variants)
|
||||
- Modal.stories.tsx (2 variants)
|
||||
|
||||
Run `npm run storybook` to see your component library!
|
||||
```
|
||||
|
||||
## Story Template
|
||||
|
||||
```tsx
|
||||
// Button.stories.tsx
|
||||
import type { Meta, StoryObj } from '@storybook/react';
|
||||
import { Button } from './Button';
|
||||
|
||||
const meta: Meta<typeof Button> = {
|
||||
title: 'Components/Button',
|
||||
component: Button,
|
||||
tags: ['autodocs'],
|
||||
argTypes: {
|
||||
variant: {
|
||||
control: 'select',
|
||||
options: ['primary', 'secondary', 'ghost']
|
||||
},
|
||||
size: {
|
||||
control: 'select',
|
||||
options: ['sm', 'md', 'lg']
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj<typeof Button>;
|
||||
|
||||
export const Primary: Story = {
|
||||
args: {
|
||||
variant: 'primary',
|
||||
children: 'Click me'
|
||||
}
|
||||
};
|
||||
|
||||
export const Secondary: Story = {
|
||||
args: {
|
||||
variant: 'secondary',
|
||||
children: 'Click me'
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
### Theme Branding
|
||||
```javascript
|
||||
// .storybook/theme.js
|
||||
import { create } from '@storybook/theming';
|
||||
|
||||
export default create({
|
||||
base: 'light',
|
||||
brandTitle: 'My Design System',
|
||||
brandUrl: 'https://example.com',
|
||||
brandImage: '/logo.svg',
|
||||
|
||||
// UI colors from tokens
|
||||
colorPrimary: '#0066cc',
|
||||
colorSecondary: '#6c757d',
|
||||
|
||||
// Typography
|
||||
fontBase: '"Inter", sans-serif',
|
||||
fontCode: 'monospace'
|
||||
});
|
||||
```
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `dss_setup_storybook` - Main Storybook tool
|
||||
- `dss_generate_theme` - Generate theme for Storybook
|
||||
- `dss_audit_components` - Find components to document
|
||||
|
||||
## Server Configuration
|
||||
|
||||
DSS Storybook runs on port 6006 by default:
|
||||
- Host: 0.0.0.0 (configurable)
|
||||
- Port: 6006 (configurable)
|
||||
- Auto-open: disabled by default
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Story Organization**
|
||||
- Group by component category
|
||||
- Use consistent naming
|
||||
- Include edge cases
|
||||
|
||||
2. **Documentation**
|
||||
- Write clear descriptions
|
||||
- Show usage examples
|
||||
- Document props thoroughly
|
||||
|
||||
3. **Maintenance**
|
||||
- Update stories with components
|
||||
- Test in CI/CD
|
||||
- Review accessibility regularly
|
||||
183
dss-claude-plugin/skills/theme-generation/SKILL.md
Normal file
183
dss-claude-plugin/skills/theme-generation/SKILL.md
Normal file
@@ -0,0 +1,183 @@
|
||||
---
|
||||
name: Theme Generation
|
||||
description: Generate theme files from design tokens using style-dictionary
|
||||
globs:
|
||||
- "**/tokens.json"
|
||||
- "**/theme.json"
|
||||
- "**/*.tokens.json"
|
||||
- "**/design-tokens/**"
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Theme Generation
|
||||
|
||||
## Overview
|
||||
|
||||
This skill transforms design tokens into platform-specific theme files using Amazon Style Dictionary. Supports multiple output formats for different platforms and frameworks.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use this skill when the user asks to:
|
||||
- Generate CSS custom properties from tokens
|
||||
- Create SCSS variables
|
||||
- Export tokens to JSON
|
||||
- Create JavaScript/TypeScript theme modules
|
||||
- Build theme files for a design system
|
||||
|
||||
## Output Formats
|
||||
|
||||
### CSS Variables
|
||||
```css
|
||||
:root {
|
||||
--color-primary: #0066cc;
|
||||
--color-secondary: #6c757d;
|
||||
--spacing-sm: 8px;
|
||||
--spacing-md: 16px;
|
||||
}
|
||||
```
|
||||
|
||||
### SCSS Variables
|
||||
```scss
|
||||
$color-primary: #0066cc;
|
||||
$color-secondary: #6c757d;
|
||||
$spacing-sm: 8px;
|
||||
$spacing-md: 16px;
|
||||
```
|
||||
|
||||
### JSON Export
|
||||
```json
|
||||
{
|
||||
"color": {
|
||||
"primary": "#0066cc",
|
||||
"secondary": "#6c757d"
|
||||
},
|
||||
"spacing": {
|
||||
"sm": "8px",
|
||||
"md": "16px"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### JavaScript Module
|
||||
```javascript
|
||||
export const color = {
|
||||
primary: '#0066cc',
|
||||
secondary: '#6c757d'
|
||||
};
|
||||
|
||||
export const spacing = {
|
||||
sm: '8px',
|
||||
md: '16px'
|
||||
};
|
||||
```
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Prepare Tokens**
|
||||
- Ensure tokens are extracted or defined
|
||||
- Validate token structure
|
||||
- Organize by category
|
||||
|
||||
2. **Choose Format**
|
||||
- CSS for web projects with CSS custom properties
|
||||
- SCSS for projects using Sass
|
||||
- JSON for framework-agnostic export
|
||||
- JS for JavaScript/TypeScript projects
|
||||
|
||||
3. **Generate Theme**
|
||||
```
|
||||
Use dss_generate_theme with tokens and format
|
||||
```
|
||||
|
||||
4. **Review Output**
|
||||
- Check generated files
|
||||
- Verify variable names
|
||||
- Validate values
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Generate CSS theme files from the tokens we extracted"
|
||||
|
||||
**Assistant Response:**
|
||||
```
|
||||
I'll generate CSS theme files from your tokens.
|
||||
|
||||
[Uses dss_generate_theme with format: css]
|
||||
|
||||
**Generated Files:**
|
||||
|
||||
theme.css:
|
||||
```css
|
||||
:root {
|
||||
/* Colors */
|
||||
--color-primary: #0066cc;
|
||||
--color-primary-light: #3385d6;
|
||||
--color-primary-dark: #004c99;
|
||||
--color-secondary: #6c757d;
|
||||
|
||||
/* Typography */
|
||||
--font-family-base: 'Inter', sans-serif;
|
||||
--font-size-base: 16px;
|
||||
--line-height-base: 1.5;
|
||||
|
||||
/* Spacing */
|
||||
--spacing-xs: 4px;
|
||||
--spacing-sm: 8px;
|
||||
--spacing-md: 16px;
|
||||
--spacing-lg: 24px;
|
||||
--spacing-xl: 32px;
|
||||
}
|
||||
```
|
||||
|
||||
The theme file has been generated successfully. You can:
|
||||
1. Import it directly into your main CSS
|
||||
2. Use the variables in your components
|
||||
3. Create dark mode variants
|
||||
|
||||
Would you like me to generate a dark mode theme as well?
|
||||
```
|
||||
|
||||
## Theme Variants
|
||||
|
||||
### Light/Dark Mode
|
||||
Generate separate themes for light and dark modes:
|
||||
- `theme-light.css`
|
||||
- `theme-dark.css`
|
||||
|
||||
### Brand Variants
|
||||
Create multiple brand themes:
|
||||
- `theme-brand-a.css`
|
||||
- `theme-brand-b.css`
|
||||
|
||||
## Style Dictionary Features
|
||||
|
||||
| Feature | Description |
|
||||
|---------|-------------|
|
||||
| Transform Groups | Pre-defined transformations for each platform |
|
||||
| Custom Transforms | Add custom value transformations |
|
||||
| File Headers | Include comments and metadata |
|
||||
| Filtering | Filter tokens by category or attributes |
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `dss_generate_theme` - Main generation tool
|
||||
- `dss_extract_tokens` - Get tokens first
|
||||
- `dss_transform_tokens` - Convert between formats
|
||||
- `dss_list_themes` - See available themes
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Naming Convention**
|
||||
- Use consistent naming (kebab-case recommended)
|
||||
- Include category prefix (color-, spacing-, etc.)
|
||||
- Be descriptive but concise
|
||||
|
||||
2. **Token Organization**
|
||||
- Group by category
|
||||
- Use semantic names over values
|
||||
- Include descriptions for documentation
|
||||
|
||||
3. **Version Control**
|
||||
- Track generated files
|
||||
- Document token changes
|
||||
- Use semantic versioning
|
||||
158
dss-claude-plugin/skills/token-extraction/SKILL.md
Normal file
158
dss-claude-plugin/skills/token-extraction/SKILL.md
Normal file
@@ -0,0 +1,158 @@
|
||||
---
|
||||
name: Token Extraction
|
||||
description: Extract design tokens from CSS, SCSS, Tailwind, and JSON sources
|
||||
globs:
|
||||
- "**/*.css"
|
||||
- "**/*.scss"
|
||||
- "**/*.sass"
|
||||
- "**/tailwind.config.*"
|
||||
- "**/tokens.json"
|
||||
- "**/theme.json"
|
||||
- "**/*.tokens.json"
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Token Extraction
|
||||
|
||||
## Overview
|
||||
|
||||
This skill enables extraction of design tokens from multiple source formats and merging them into a unified token collection.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use this skill when the user asks to:
|
||||
- Extract tokens from CSS/SCSS files
|
||||
- Parse Tailwind configuration for tokens
|
||||
- Import tokens from JSON files
|
||||
- Merge tokens from multiple sources
|
||||
- Convert existing styles to tokens
|
||||
|
||||
## Supported Sources
|
||||
|
||||
### CSS
|
||||
- Custom properties (--variable-name)
|
||||
- Color values in declarations
|
||||
- Font and typography values
|
||||
- Spacing values
|
||||
|
||||
### SCSS
|
||||
- Variables ($variable-name)
|
||||
- Maps and nested structures
|
||||
- Mixins with token values
|
||||
- Function outputs
|
||||
|
||||
### Tailwind
|
||||
- Theme configuration
|
||||
- Extended colors
|
||||
- Custom spacing
|
||||
- Typography settings
|
||||
|
||||
### JSON
|
||||
- Design token format (W3C Draft)
|
||||
- Style Dictionary format
|
||||
- Figma export format
|
||||
- Custom JSON structures
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Identify Sources**
|
||||
- Determine which file types contain tokens
|
||||
- Locate configuration files
|
||||
- Check for existing token files
|
||||
|
||||
2. **Extract Tokens**
|
||||
```
|
||||
Use dss_extract_tokens with path and source types
|
||||
```
|
||||
|
||||
3. **Review Extraction**
|
||||
- Check token count
|
||||
- Verify token categories
|
||||
- Review token values
|
||||
|
||||
4. **Merge if Needed**
|
||||
- Combine tokens from multiple sources
|
||||
- Resolve conflicts
|
||||
- Normalize naming
|
||||
|
||||
## Token Categories
|
||||
|
||||
| Category | Examples |
|
||||
|----------|----------|
|
||||
| color | primary, secondary, error, success |
|
||||
| typography | font-family, font-size, line-height |
|
||||
| spacing | margin, padding, gap values |
|
||||
| sizing | width, height, border-radius |
|
||||
| shadow | box-shadow, drop-shadow |
|
||||
| border | border-width, border-color |
|
||||
| opacity | transparency values |
|
||||
| z-index | stacking order values |
|
||||
|
||||
## Example Interaction
|
||||
|
||||
**User:** "Extract all design tokens from my project's styles"
|
||||
|
||||
**Assistant Response:**
|
||||
```
|
||||
I'll extract design tokens from your project.
|
||||
|
||||
[Uses dss_extract_tokens tool with sources: css, scss, tailwind]
|
||||
|
||||
**Extraction Results:**
|
||||
|
||||
From CSS (23 tokens):
|
||||
- 12 color tokens
|
||||
- 5 spacing tokens
|
||||
- 6 typography tokens
|
||||
|
||||
From SCSS (45 tokens):
|
||||
- 20 color tokens (including variants)
|
||||
- 15 spacing tokens
|
||||
- 10 typography tokens
|
||||
|
||||
From Tailwind (32 tokens):
|
||||
- Custom colors defined in theme
|
||||
- Extended spacing scale
|
||||
- Custom font families
|
||||
|
||||
**Merged Collection:** 78 unique tokens
|
||||
- Duplicates resolved using latest value
|
||||
- Naming normalized to kebab-case
|
||||
|
||||
Would you like me to generate theme files from these tokens?
|
||||
```
|
||||
|
||||
## Merge Strategies
|
||||
|
||||
| Strategy | Description |
|
||||
|----------|-------------|
|
||||
| PREFER_LATEST | Use most recently encountered value |
|
||||
| PREFER_FIRST | Keep first encountered value |
|
||||
| ERROR_ON_CONFLICT | Fail if conflicts exist |
|
||||
| MERGE_ARRAYS | Combine array values |
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `dss_extract_tokens` - Main extraction tool
|
||||
- `dss_generate_theme` - Generate theme from tokens
|
||||
- `dss_transform_tokens` - Convert between formats
|
||||
|
||||
## Output Format
|
||||
|
||||
```json
|
||||
{
|
||||
"color": {
|
||||
"primary": {
|
||||
"value": "#0066cc",
|
||||
"type": "color",
|
||||
"description": "Primary brand color"
|
||||
}
|
||||
},
|
||||
"spacing": {
|
||||
"sm": {
|
||||
"value": "8px",
|
||||
"type": "dimension"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
10
dss-claude-plugin/strategies/__init__.py
Normal file
10
dss-claude-plugin/strategies/__init__.py
Normal file
@@ -0,0 +1,10 @@
|
||||
"""
|
||||
Strategies package for DSS Claude Plugin.
|
||||
|
||||
This package contains the abstract base classes and concrete implementations
|
||||
for different operational strategies (LOCAL vs REMOTE mode).
|
||||
"""
|
||||
|
||||
from .base import BrowserStrategy, FilesystemStrategy
|
||||
|
||||
__all__ = ["BrowserStrategy", "FilesystemStrategy"]
|
||||
186
dss-claude-plugin/strategies/base.py
Normal file
186
dss-claude-plugin/strategies/base.py
Normal file
@@ -0,0 +1,186 @@
|
||||
"""
|
||||
Base strategy interfaces for DSS Claude Plugin.
|
||||
|
||||
This module defines the abstract base classes that all strategy implementations
|
||||
must adhere to. These interfaces ensure consistent behavior across different
|
||||
execution modes (LOCAL vs REMOTE) and allow the context to switch strategies
|
||||
transparently.
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import List, Optional, Dict, Any
|
||||
|
||||
|
||||
class BrowserStrategy(ABC):
    """Abstract interface for browser interactions.

    Declares the operations available for inspecting and interacting with a
    web page. Concrete subclasses supply the actual automation backend
    (e.g. Playwright in LOCAL mode, API calls in REMOTE mode), so callers can
    switch strategies without changing their code.
    """

    @abstractmethod
    async def get_console_logs(
        self,
        session_id: Optional[str] = None,
        limit: int = 100,
        level: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """Fetch console logs from the browser session.

        Args:
            session_id: Active session identifier (optional in LOCAL mode).
            limit: Maximum number of log entries to return.
            level: Restrict results to a single log level
                (e.g. "log", "warn", "error").

        Returns:
            Log entries, each carrying message, level, and timestamp.
        """
        ...

    @abstractmethod
    async def capture_screenshot(
        self,
        selector: Optional[str] = None,
        full_page: bool = False
    ) -> str:
        """Take a screenshot of the page or of a single element.

        Args:
            selector: CSS selector of the element to capture; when None the
                viewport is captured instead.
            full_page: Capture the entire scrollable page. Has no effect
                when ``selector`` is given.

        Returns:
            Path to the saved screenshot (LOCAL) or a URL (REMOTE).
        """
        ...

    @abstractmethod
    async def get_dom_snapshot(self) -> str:
        """Return the current DOM state as an HTML string.

        Returns:
            The outer HTML of the document.
        """
        ...

    @abstractmethod
    async def get_errors(
        self,
        severity: Optional[str] = None,
        limit: int = 50
    ) -> List[Dict[str, Any]]:
        """Fetch accumulated browser errors.

        Covers console errors, crashes, and network failures.

        Args:
            severity: Restrict results to a single error severity.
            limit: Maximum number of errors to return.

        Returns:
            A list of error detail dictionaries.
        """
        ...

    @abstractmethod
    async def run_accessibility_audit(
        self,
        selector: Optional[str] = None
    ) -> Dict[str, Any]:
        """Run an axe-core accessibility audit.

        Args:
            selector: CSS selector of the element to audit; when None the
                whole page is audited.

        Returns:
            Audit results with keys:
            - violations: accessibility violations found
            - passes: rules that passed
            - incomplete: rules that need manual review
        """
        ...

    @abstractmethod
    async def get_performance_metrics(self) -> Dict[str, Any]:
        """Collect performance metrics, including Core Web Vitals.

        Returns:
            Dictionary with:
            - navigation_timing: Navigation Timing API data
            - core_web_vitals: TTFB, FCP, LCP, CLS metrics
        """
        ...
|
||||
|
||||
|
||||
class FilesystemStrategy(ABC):
    """
    Abstract base strategy for filesystem operations.

    Provides methods for reading and searching files.
    Implementations ensure safe access to the filesystem in Local mode
    or proxy requests in Remote mode.

    NOTE(review): the REMOTE implementation in this package raises
    NotImplementedError for every operation; callers should handle that.
    """

    @abstractmethod
    async def read_file(self, path: str) -> str:
        """
        Read the contents of a file.

        Args:
            path: Relative or absolute path to the file.

        Returns:
            File content as string.

        Raises:
            FileNotFoundError: If the file does not exist.
            NotImplementedError: In REMOTE mode, where access is restricted.
        """
        pass

    @abstractmethod
    async def list_directory(self, path: str) -> List[str]:
        """
        List contents of a directory.

        Args:
            path: Directory path.

        Returns:
            List of filenames and directory names in the path.

        Raises:
            NotImplementedError: In REMOTE mode, where access is restricted.
        """
        pass

    @abstractmethod
    async def search_files(self, pattern: str, path: str = ".") -> List[str]:
        """
        Search for files matching a pattern.

        Args:
            pattern: Search pattern (glob or regex depending on implementation).
            path: Root path to start search from.

        Returns:
            List of matching file paths.

        Raises:
            NotImplementedError: In REMOTE mode, where access is restricted.
        """
        pass

    @abstractmethod
    async def get_file_info(self, path: str) -> Dict[str, Any]:
        """
        Get metadata about a file.

        Args:
            path: File path.

        Returns:
            Dictionary containing metadata (size, created_at, modified_at).

        Raises:
            NotImplementedError: In REMOTE mode, where access is restricted.
        """
        pass
|
||||
5
dss-claude-plugin/strategies/local/__init__.py
Normal file
5
dss-claude-plugin/strategies/local/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
"""Local strategies for direct interaction with the user's environment."""
|
||||
|
||||
from .browser import LocalBrowserStrategy
|
||||
|
||||
__all__ = ["LocalBrowserStrategy"]
|
||||
455
dss-claude-plugin/strategies/local/browser.py
Normal file
455
dss-claude-plugin/strategies/local/browser.py
Normal file
@@ -0,0 +1,455 @@
|
||||
"""
|
||||
Local Browser Strategy implementation using Playwright.
|
||||
|
||||
Provides direct, local control over a browser for tasks like DOM inspection,
|
||||
screenshotting, and running audits. This is the LOCAL mode counterpart to
|
||||
RemoteBrowserStrategy which uses Shadow State pattern.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from typing import Any, Dict, List, Optional, Type
|
||||
|
||||
from ..base import BrowserStrategy
|
||||
|
||||
# Logger setup
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# URL for axe-core accessibility testing library
|
||||
AXE_CORE_SCRIPT_URL = "https://cdnjs.cloudflare.com/ajax/libs/axe-core/4.8.4/axe.min.js"
|
||||
|
||||
# Optional Playwright import for graceful degradation
|
||||
try:
|
||||
from playwright.async_api import (
|
||||
Browser,
|
||||
ConsoleMessage,
|
||||
Error as PlaywrightError,
|
||||
Page,
|
||||
Playwright,
|
||||
TimeoutError as PlaywrightTimeoutError,
|
||||
async_playwright,
|
||||
)
|
||||
|
||||
PLAYWRIGHT_AVAILABLE = True
|
||||
except ImportError:
|
||||
PLAYWRIGHT_AVAILABLE = False
|
||||
# Create dummy types for type hinting when Playwright is not installed
|
||||
Playwright = Type[Any]
|
||||
Browser = Type[Any]
|
||||
Page = Type[Any]
|
||||
ConsoleMessage = Type[Any]
|
||||
PlaywrightError = Exception
|
||||
PlaywrightTimeoutError = Exception
|
||||
|
||||
|
||||
class LocalBrowserStrategy(BrowserStrategy):
    """
    Implements the BrowserStrategy using Playwright for local browser automation.

    This strategy manages a singleton browser instance to perform actions
    directly on the local machine. It is ideal for development environments
    where direct access to a browser is possible.

    Features:
        - Browser pool pattern (reuses browser instances)
        - Console log / page error capture via page event listeners
        - Screenshot capture (element or full page)
        - DOM snapshot retrieval
        - Accessibility auditing via axe-core injection
        - Core Web Vitals and performance metrics

    Note: This class requires Playwright to be installed.
    Run `pip install "playwright"` and `playwright install chromium`.
    """

    # Class-level browser pool (shared across all strategy instances)
    _playwright: Optional[Playwright] = None
    _browser: Optional[Browser] = None
    _browser_lock = asyncio.Lock()

    def __init__(self, context: Any):
        """
        Initialize the LocalBrowserStrategy.

        Args:
            context: The DSSContext providing configuration and session info.
        """
        self.context = context
        self.page: Optional[Page] = None           # active page, created by navigate()
        self._console_logs: List[Any] = []         # ConsoleMessage objects from "console" events
        self._page_errors: List[Any] = []          # uncaught errors from "pageerror" events

        if not PLAYWRIGHT_AVAILABLE:
            logger.warning(
                "Playwright not found. LocalBrowserStrategy will be non-functional. "
                "Please run 'pip install \"playwright\"' and 'playwright install chromium'."
            )

    def _check_playwright(self) -> None:
        """Ensure Playwright is available, raising an error if not."""
        if not PLAYWRIGHT_AVAILABLE:
            raise NotImplementedError(
                "Playwright is not installed. Cannot use LocalBrowserStrategy. "
                "Install with: pip install playwright && playwright install chromium"
            )

    async def launch(self, headless: bool = True) -> None:
        """
        Launch and initialize the Playwright browser instance.

        This method is idempotent and ensures that a single browser instance
        is shared across the application lifecycle (browser pool pattern).

        Args:
            headless: Whether to run browser in headless mode (default: True)

        Raises:
            NotImplementedError: If Playwright is not installed.
        """
        self._check_playwright()

        # Fast path: browser already running, nothing to do.
        if LocalBrowserStrategy._browser and LocalBrowserStrategy._browser.is_connected():
            logger.debug("Browser already running, reusing existing instance.")
            return

        async with LocalBrowserStrategy._browser_lock:
            # Double-check inside the lock to prevent a launch race between tasks.
            if LocalBrowserStrategy._browser and LocalBrowserStrategy._browser.is_connected():
                return

            logger.info("Starting Playwright...")
            LocalBrowserStrategy._playwright = await async_playwright().start()

            logger.info("Launching new browser instance...")
            LocalBrowserStrategy._browser = await LocalBrowserStrategy._playwright.chromium.launch(
                headless=headless
            )
            logger.info("Browser instance launched successfully.")

    async def navigate(self, url: str, wait_until: str = "domcontentloaded") -> None:
        """
        Navigate the browser to a specific URL.

        This creates a new page context for the session, replacing any
        existing page. It also sets up listeners for console logs and errors.

        Args:
            url: The URL to navigate to.
            wait_until: The navigation event to wait for
                ('load', 'domcontentloaded', 'networkidle').

        Raises:
            RuntimeError: If the shared browser failed to launch.
        """
        await self.launch()

        # Replace any existing page so captured state belongs to this navigation.
        if self.page and not self.page.is_closed():
            await self.page.close()

        if not LocalBrowserStrategy._browser:
            raise RuntimeError("Browser is not launched. Call launch() first.")

        # Create new page and reset captured state.
        self.page = await LocalBrowserStrategy._browser.new_page()
        self._console_logs.clear()
        self._page_errors.clear()

        # Event listeners accumulate console output and uncaught errors.
        self.page.on("console", self._on_console_message)
        self.page.on("pageerror", self._on_page_error)

        logger.info(f"Navigating to {url}...")
        await self.page.goto(url, wait_until=wait_until)
        logger.info(f"Navigation to {url} complete.")

    def _on_console_message(self, msg: Any) -> None:
        """Handle console message events."""
        self._console_logs.append(msg)

    def _on_page_error(self, error: Any) -> None:
        """Handle page error events."""
        self._page_errors.append(error)

    async def get_console_logs(
        self,
        session_id: Optional[str] = None,
        limit: int = 100,
        level: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Retrieve captured console logs from the current page.

        Args:
            session_id: Ignored in LOCAL mode (used for API compatibility).
            limit: Maximum number of logs to return.
            level: Filter by log level ('log', 'warn', 'error', 'info', 'debug').

        Returns:
            List of log entries with level, text, and location.
        """
        if not self.page:
            logger.warning("No active page. Returning empty logs.")
            return []

        logs = []
        for msg in self._console_logs:
            try:
                log_entry = {
                    "level": msg.type,
                    "message": msg.text,
                    "timestamp": None,  # Playwright doesn't provide timestamp directly
                    "category": "console",
                    "data": {
                        "location": msg.location if hasattr(msg, 'location') else None,
                    }
                }
                logs.append(log_entry)
            except Exception as e:
                # A malformed message must not break log retrieval.
                logger.debug(f"Error processing console message: {e}")

        if level:
            logs = [log for log in logs if log["level"] == level]

        # Return most recent logs up to limit
        return logs[-limit:]

    async def capture_screenshot(
        self, selector: Optional[str] = None, full_page: bool = False
    ) -> str:
        """
        Capture a screenshot of the current page or a specific element.

        Args:
            selector: CSS selector to capture a specific element.
                If None, captures the viewport.
            full_page: If True, captures the full scrollable page content.
                Ignored if selector is provided.

        Returns:
            Path to the saved screenshot file (a unique .png in the temp dir).

        Raises:
            RuntimeError: If no active page is available.
        """
        if not self.page or self.page.is_closed():
            raise RuntimeError("No active page to capture screenshot from.")

        # Generate a genuinely unique filename. The previous fixed
        # per-session name caused every new screenshot to overwrite the
        # prior one for the same session.
        session_id = getattr(self.context, 'session_id', 'local')
        fd, path = tempfile.mkstemp(
            prefix=f"dss_screenshot_{session_id}_", suffix=".png"
        )
        os.close(fd)  # Playwright writes the file itself; we only need the path.

        try:
            if selector:
                element = self.page.locator(selector)
                await element.screenshot(path=path, timeout=10000)
                logger.info(f"Element screenshot saved to {path}")
            else:
                await self.page.screenshot(path=path, full_page=full_page, timeout=10000)
                logger.info(f"Page screenshot saved to {path}")
            return path
        except Exception as e:
            logger.error(f"Failed to capture screenshot: {e}")
            raise

    async def get_dom_snapshot(self) -> str:
        """
        Get the current DOM state as an HTML string.

        Returns:
            String containing the outer HTML of the document, or an HTML
            comment placeholder when no page is active.
        """
        if not self.page or self.page.is_closed():
            return "<!-- No active page to get DOM snapshot from. -->"
        return await self.page.content()

    async def get_errors(
        self, severity: Optional[str] = None, limit: int = 50
    ) -> List[Dict[str, Any]]:
        """
        Retrieve captured page errors (e.g., uncaught exceptions).

        Args:
            severity: Filter by severity (not yet implemented).
            limit: Maximum number of errors to return.

        Returns:
            List of error details with name, message, and stack trace.
        """
        errors = []
        for err in self._page_errors:
            try:
                error_entry = {
                    "level": "error",
                    "category": "uncaughtError",
                    "message": str(err),
                    "data": {
                        "name": getattr(err, 'name', 'Error'),
                        "stack": getattr(err, 'stack', None),
                    }
                }
                errors.append(error_entry)
            except Exception as e:
                logger.debug(f"Error processing page error: {e}")

        return errors[-limit:]

    async def run_accessibility_audit(
        self, selector: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Run an accessibility audit on the current page using axe-core.

        This injects the axe-core library into the page and runs a full
        accessibility scan.

        Args:
            selector: A CSS selector to limit the audit to a specific element.
                If None, audits the entire page.

        Returns:
            A dictionary containing the axe-core audit results with:
            - violations: List of accessibility violations
            - passes: List of passing rules
            - incomplete: List of rules that need review
            - inapplicable: List of rules that don't apply

        Raises:
            RuntimeError: If no active page is available.
        """
        if not self.page or self.page.is_closed():
            raise RuntimeError("No active page to run accessibility audit on.")

        logger.info("Injecting axe-core library...")
        await self.page.add_script_tag(url=AXE_CORE_SCRIPT_URL)

        # Wait for axe to be available
        await self.page.wait_for_function("typeof axe !== 'undefined'", timeout=5000)

        logger.info(f"Running accessibility audit{' on ' + selector if selector else ''}...")

        # Run axe with selector context if provided
        if selector:
            result = await self.page.evaluate(
                "(selector) => axe.run(selector)", selector
            )
        else:
            result = await self.page.evaluate("() => axe.run()")

        violations_count = len(result.get('violations', []))
        logger.info(f"Accessibility audit complete. Found {violations_count} violations.")

        return result

    async def get_performance_metrics(self) -> Dict[str, Any]:
        """
        Get performance metrics, including Navigation Timing and Core Web Vitals.

        Returns:
            Dictionary containing:
            - navigation_timing: Raw Navigation Timing API data
            - core_web_vitals: TTFB, FCP, LCP, and CLS metrics

        Raises:
            RuntimeError: If no active page is available.
        """
        if not self.page or self.page.is_closed():
            raise RuntimeError("No active page to get performance metrics from.")

        # 1. Get Navigation Timing API metrics (serialized via JSON because
        #    the timing object is not directly structured-cloneable).
        timing_raw = await self.page.evaluate(
            "() => JSON.stringify(window.performance.timing)"
        )
        nav_timing = json.loads(timing_raw)

        # 2. Get Core Web Vitals via PerformanceObserver.
        #    This script collects buffered entries and waits briefly for new ones.
        metrics_script = """
        () => new Promise((resolve) => {
            const metrics = { lcp: null, cls: 0, fcp: null, ttfb: null };

            // Get TTFB from navigation timing
            const navEntry = performance.getEntriesByType('navigation')[0];
            if (navEntry) {
                metrics.ttfb = navEntry.responseStart - navEntry.requestStart;
            }

            // Get FCP from paint entries
            const paintEntries = performance.getEntriesByType('paint');
            for (const entry of paintEntries) {
                if (entry.name === 'first-contentful-paint') {
                    metrics.fcp = entry.startTime;
                }
            }

            // Set up observer for LCP and CLS
            try {
                const observer = new PerformanceObserver((list) => {
                    for (const entry of list.getEntries()) {
                        if (entry.entryType === 'largest-contentful-paint') {
                            metrics.lcp = entry.startTime;
                        }
                        if (entry.entryType === 'layout-shift' && !entry.hadRecentInput) {
                            metrics.cls += entry.value;
                        }
                    }
                });

                observer.observe({
                    type: 'largest-contentful-paint',
                    buffered: true
                });
                observer.observe({
                    type: 'layout-shift',
                    buffered: true
                });

                // Give some time for metrics to be collected
                setTimeout(() => {
                    observer.disconnect();
                    resolve(metrics);
                }, 500);
            } catch (e) {
                // PerformanceObserver may not be fully supported
                resolve(metrics);
            }
        })
        """
        core_web_vitals = await self.page.evaluate(metrics_script)

        return {
            "navigation_timing": nav_timing,
            "core_web_vitals": core_web_vitals
        }

    async def close(self) -> None:
        """
        Close the current page. Browser instance is kept in pool for reuse.

        To fully close the browser, use close_browser() class method.
        """
        if self.page and not self.page.is_closed():
            await self.page.close()
        self.page = None
        self._console_logs.clear()
        self._page_errors.clear()
        logger.info("Page closed.")

    @classmethod
    async def close_browser(cls) -> None:
        """
        Close the browser and stop the Playwright instance.

        This is a class method that closes the shared browser pool.
        Should be called during application shutdown.
        """
        async with cls._browser_lock:
            if cls._browser:
                await cls._browser.close()
                cls._browser = None
                logger.info("Browser instance closed.")

            if cls._playwright:
                await cls._playwright.stop()
                cls._playwright = None
                logger.info("Playwright stopped.")
|
||||
6
dss-claude-plugin/strategies/remote/__init__.py
Normal file
6
dss-claude-plugin/strategies/remote/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
"""REMOTE mode strategy implementations."""
|
||||
|
||||
from .browser import RemoteBrowserStrategy
|
||||
from .filesystem import RemoteFilesystemStrategy
|
||||
|
||||
__all__ = ["RemoteBrowserStrategy", "RemoteFilesystemStrategy"]
|
||||
257
dss-claude-plugin/strategies/remote/browser.py
Normal file
257
dss-claude-plugin/strategies/remote/browser.py
Normal file
@@ -0,0 +1,257 @@
|
||||
"""
|
||||
Remote Browser Strategy implementation.
|
||||
Connects to the DSS API to retrieve browser state and logs via Shadow State pattern.
|
||||
"""
|
||||
|
||||
import aiohttp
|
||||
import asyncio
|
||||
import logging
|
||||
import base64
|
||||
from typing import List, Dict, Any, Optional
|
||||
|
||||
from ..base import BrowserStrategy
|
||||
from ...core.context import DSSContext
|
||||
|
||||
# Configure module logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RemoteBrowserStrategy(BrowserStrategy):
    """
    Browser interaction backed by the DSS remote API (Shadow State pattern).

    The browser-side logger syncs console output, errors, snapshots and
    metrics to the server; this strategy only reads that synced state.
    """

    # Log categories that are surfaced as console output.
    _CONSOLE_CATEGORIES = ("console", "uncaughtError", "unhandledRejection")

    def __init__(self, context: DSSContext):
        """Initialize with context."""
        self.context = context

    async def _get_logs_from_api(self, session_id: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Fetch every synced log entry for a session from the remote API.

        Args:
            session_id: Session to query; falls back to the context's
                session id (or "latest") when omitted.

        Returns:
            Raw list of log entries, or an empty list on any failure.
        """
        if session_id is None:
            session_id = self.context.session_id or "latest"

        # Strip a trailing slash so the path concatenation stays clean.
        endpoint = f"{self.context.get_api_url().rstrip('/')}/api/browser-logs/{session_id}"

        try:
            timeout = aiohttp.ClientTimeout(total=10.0)
            async with aiohttp.ClientSession(timeout=timeout) as http:
                async with http.get(endpoint) as response:
                    if response.status == 404:
                        logger.warning(f"Session {session_id} not found on remote server.")
                        return []
                    if response.status != 200:
                        logger.error(f"Failed to fetch logs: {response.status} {response.reason}")
                        return []

                    payload = await response.json()
                    # browser-logger.js exportJSON() shape:
                    # { sessionId: "...", logs: [...], diagnostic: {...} }
                    return payload.get("logs", [])
        except aiohttp.ClientError as exc:
            logger.error(f"Network error fetching browser logs: {str(exc)}")
            return []
        except Exception as exc:
            logger.error(f"Unexpected error in RemoteBrowserStrategy: {str(exc)}")
            return []

    async def get_console_logs(
        self,
        session_id: Optional[str] = None,
        limit: int = 100,
        level: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Get browser console logs from the remote API.

        Args:
            session_id: The session ID to retrieve logs for.
            limit: Maximum number of logs to return.
            level: Filter by log level (log, info, warn, error).

        Returns:
            Newest-first list of console-category entries, capped at limit.
        """
        entries = await self._get_logs_from_api(session_id)

        # Keep console output plus uncaught errors / rejections, applying
        # the optional level filter in the same pass.
        selected = []
        for entry in entries:
            if entry.get("category") not in self._CONSOLE_CATEGORIES:
                continue
            if level and entry.get("level") != level:
                continue
            selected.append(entry)

        # Newest first.
        selected.sort(key=lambda e: e.get("timestamp", 0), reverse=True)

        return selected[:limit]

    async def capture_screenshot(
        self,
        selector: Optional[str] = None,
        full_page: bool = False
    ) -> str:
        """
        Capture a screenshot.

        In REMOTE mode the server would perform the capture; that capability
        is not available yet, so a placeholder data URI is returned.

        Returns:
            URL to screenshot or placeholder message.
        """
        # Placeholder until server-side rendering / CDP proxy exists.
        # Ideally this would POST to /api/commands/{session_id}/screenshot.
        logger.warning("Remote screenshot capture is not yet fully implemented on server.")

        # 1x1 transparent GIF as a stand-in.
        return "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"

    async def get_dom_snapshot(self) -> str:
        """
        Get the current DOM snapshot via Shadow State.

        Reads the newest log entry with category='snapshot' that carries the
        full HTML captured by the browser-side logger.
        """
        entries = await self._get_logs_from_api()

        candidates = [
            e for e in entries
            if e.get("category") == "snapshot" and "snapshot" in e.get("data", {})
        ]

        if not candidates:
            return "<!-- No Shadow State snapshot available for this session -->"

        newest = max(candidates, key=lambda e: e.get("timestamp", 0))

        # browser-logger stores the markup at entry.data.snapshot.html.
        try:
            html = newest.get("data", {}).get("snapshot", {}).get("html", "")
            if html:
                return html
            return "<!-- Corrupted Shadow State snapshot data -->"
        except (KeyError, AttributeError):
            return "<!-- Corrupted or unexpected snapshot data format -->"

    async def get_errors(
        self,
        severity: Optional[str] = None,
        limit: int = 50
    ) -> List[Dict[str, Any]]:
        """
        Get error logs from the remote API.

        Args:
            severity: Filter by severity (not implemented yet).
            limit: Maximum number of errors to return.

        Returns:
            Newest-first list of error entries, capped at limit.
        """
        entries = await self._get_logs_from_api()

        failures = [e for e in entries if e.get("level") == "error"]
        failures.sort(key=lambda e: e.get("timestamp", 0), reverse=True)

        return failures[:limit]

    async def run_accessibility_audit(
        self,
        selector: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Get accessibility audit results from Shadow State.

        Reads the accessibility data captured by the browser-side logger's
        captureAccessibilitySnapshot() method.

        Args:
            selector: Not used in REMOTE mode (filter not supported).

        Returns:
            Accessibility audit results if available in Shadow State.
        """
        entries = await self._get_logs_from_api()

        audits = [
            e for e in entries
            if e.get("category") in ("accessibility", "accessibilitySnapshot")
        ]

        if not audits:
            return {
                "violations": [],
                "passes": [],
                "incomplete": [],
                "message": "No accessibility audit found in Shadow State. Trigger audit from browser console using __DSS_BROWSER_LOGS.audit()"
            }

        report = max(audits, key=lambda e: e.get("timestamp", 0)).get("data", {})

        # The payload may nest results under either of two keys.
        for key in ("results", "accessibility"):
            if key in report:
                return report[key]
        return report

    async def get_performance_metrics(self) -> Dict[str, Any]:
        """
        Get performance metrics from Shadow State.

        Reads Core Web Vitals and related data captured by the browser-side
        logger.

        Returns:
            Dictionary with performance metrics if available.
        """
        entries = await self._get_logs_from_api()

        samples = [
            e for e in entries
            if e.get("category") in ["performance", "accessibilitySnapshot"]
        ]

        if not samples:
            return {
                "error": "No performance data found in Shadow State.",
                "message": "Performance metrics are captured automatically during page load."
            }

        data = max(samples, key=lambda e: e.get("timestamp", 0)).get("data", {})

        # Prefer the dedicated performance payload when present.
        if "performance" in data:
            return {"core_web_vitals": data["performance"]}
        return {"performance_data": data}
|
||||
88
dss-claude-plugin/strategies/remote/filesystem.py
Normal file
88
dss-claude-plugin/strategies/remote/filesystem.py
Normal file
@@ -0,0 +1,88 @@
|
||||
"""
|
||||
Remote Filesystem Strategy implementation.
|
||||
Filesystem operations are restricted in REMOTE mode for security.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import List, Dict, Any
|
||||
from pathlib import Path
|
||||
|
||||
from ..base import FilesystemStrategy
|
||||
from ...core.context import DSSContext
|
||||
|
||||
# Configure module logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RemoteFilesystemStrategy(FilesystemStrategy):
    """
    Filesystem strategy for REMOTE mode.

    Direct filesystem access is deliberately disabled when running against
    a remote server: every operation logs the refusal and raises
    NotImplementedError. Use LOCAL mode for real filesystem work.
    """

    def __init__(self, context: DSSContext):
        """Initialize with context."""
        self.context = context

    def _refuse(self, log_message: str, error_message: str) -> None:
        """Log the denied operation, then raise NotImplementedError."""
        logger.error(log_message)
        raise NotImplementedError(error_message)

    async def read_file(self, path: str) -> str:
        """
        Read file contents.

        NOT AVAILABLE in REMOTE mode for security reasons.

        Raises:
            NotImplementedError: Always, as direct file access is restricted.
        """
        self._refuse(
            "Filesystem read operations are not available in REMOTE mode.",
            "Direct filesystem access is restricted in REMOTE mode. "
            "Please use LOCAL mode for file operations, or use API-based file uploads.",
        )

    async def list_directory(self, path: str) -> List[str]:
        """
        List directory contents.

        NOT AVAILABLE in REMOTE mode for security reasons.

        Raises:
            NotImplementedError: Always, as directory listing is restricted.
        """
        self._refuse(
            "Filesystem list operations are not available in REMOTE mode.",
            "Directory listing is restricted in REMOTE mode. "
            "Please use LOCAL mode for filesystem operations.",
        )

    async def search_files(self, pattern: str, path: str = ".") -> List[str]:
        """
        Search for files matching a pattern.

        NOT AVAILABLE in REMOTE mode for security reasons.

        Raises:
            NotImplementedError: Always, as file search is restricted.
        """
        self._refuse(
            "Filesystem search operations are not available in REMOTE mode.",
            "File search is restricted in REMOTE mode. "
            "Please use LOCAL mode for filesystem operations.",
        )

    async def get_file_info(self, path: str) -> Dict[str, Any]:
        """
        Get file metadata.

        NOT AVAILABLE in REMOTE mode for security reasons.

        Raises:
            NotImplementedError: Always, as file info access is restricted.
        """
        self._refuse(
            "Filesystem info operations are not available in REMOTE mode.",
            "File metadata access is restricted in REMOTE mode. "
            "Please use LOCAL mode for filesystem operations.",
        )
|
||||
285
dss-claude-plugin/tests/test_context_compiler.py
Normal file
285
dss-claude-plugin/tests/test_context_compiler.py
Normal file
@@ -0,0 +1,285 @@
|
||||
"""
|
||||
Test Suite for DSS Context Compiler
|
||||
Validates all core functionality: cascade merging, token resolution, security, and error handling.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Add parent directory to path for imports
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from core import (
|
||||
ContextCompiler,
|
||||
get_active_context,
|
||||
resolve_token,
|
||||
validate_manifest,
|
||||
list_skins,
|
||||
get_compiler_status,
|
||||
EMERGENCY_SKIN
|
||||
)
|
||||
|
||||
|
||||
class TestContextCompiler:
|
||||
"""Test suite for Context Compiler"""
|
||||
|
||||
def __init__(self):
    # tests/ directory -> plugin root
    self.base_dir = Path(__file__).parent.parent
    # Built-in skins shipped with the compiler
    self.skins_dir = self.base_dir / "core" / "skins"
    # Project manifest used as the Layer-3 override fixture
    self.admin_manifest = self.base_dir.parent / "admin-ui" / "ds.config.json"
    self.compiler = ContextCompiler(skins_dir=str(self.skins_dir))
    # Pass/fail counters updated by the assert_* helpers
    self.passed = 0
    self.failed = 0
|
||||
|
||||
def assert_equal(self, actual, expected, message):
|
||||
"""Simple assertion helper"""
|
||||
if actual == expected:
|
||||
print(f"✓ {message}")
|
||||
self.passed += 1
|
||||
return True
|
||||
else:
|
||||
print(f"✗ {message}")
|
||||
print(f" Expected: {expected}")
|
||||
print(f" Actual: {actual}")
|
||||
self.failed += 1
|
||||
return False
|
||||
|
||||
def assert_true(self, condition, message):
|
||||
"""Assert condition is true"""
|
||||
if condition:
|
||||
print(f"✓ {message}")
|
||||
self.passed += 1
|
||||
return True
|
||||
else:
|
||||
print(f"✗ {message}")
|
||||
self.failed += 1
|
||||
return False
|
||||
|
||||
def assert_in(self, needle, haystack, message):
|
||||
"""Assert needle is in haystack"""
|
||||
if needle in haystack:
|
||||
print(f"✓ {message}")
|
||||
self.passed += 1
|
||||
return True
|
||||
else:
|
||||
print(f"✗ {message}")
|
||||
print(f" '{needle}' not found in {haystack}")
|
||||
self.failed += 1
|
||||
return False
|
||||
|
||||
def test_basic_compilation(self):
|
||||
"""Test 1: Basic 3-layer cascade compilation"""
|
||||
print("\n=== Test 1: Basic Compilation (3-Layer Cascade) ===")
|
||||
|
||||
try:
|
||||
context = self.compiler.compile(str(self.admin_manifest))
|
||||
|
||||
# Test project override (Layer 3)
|
||||
self.assert_equal(
|
||||
context.get("tokens", {}).get("colors", {}).get("primary"),
|
||||
"#6366f1",
|
||||
"Project override applied correctly (colors.primary)"
|
||||
)
|
||||
|
||||
# Test skin value (Layer 2 - workbench)
|
||||
self.assert_equal(
|
||||
context.get("tokens", {}).get("colors", {}).get("background"),
|
||||
"#0F172A",
|
||||
"Workbench skin value inherited (colors.background)"
|
||||
)
|
||||
|
||||
# Test base value (Layer 1)
|
||||
self.assert_equal(
|
||||
context.get("tokens", {}).get("spacing", {}).get("0"),
|
||||
"0px",
|
||||
"Base skin value inherited (spacing.0)"
|
||||
)
|
||||
|
||||
# Test metadata injection
|
||||
self.assert_in("_meta", context, "Metadata injected into context")
|
||||
self.assert_equal(
|
||||
context.get("_meta", {}).get("project_id"),
|
||||
"dss-admin",
|
||||
"Project ID in metadata"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Basic compilation failed with error: {e}")
|
||||
self.failed += 1
|
||||
|
||||
def test_debug_provenance(self):
|
||||
"""Test 2: Debug provenance tracking"""
|
||||
print("\n=== Test 2: Debug Provenance Tracking ===")
|
||||
|
||||
try:
|
||||
context = self.compiler.compile(str(self.admin_manifest), debug=True)
|
||||
|
||||
self.assert_in("_provenance", context, "Provenance data included in debug mode")
|
||||
self.assert_true(
|
||||
isinstance(context.get("_provenance", []), list),
|
||||
"Provenance is a list"
|
||||
)
|
||||
self.assert_true(
|
||||
len(context.get("_provenance", [])) > 0,
|
||||
"Provenance contains tracking entries"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Debug provenance test failed with error: {e}")
|
||||
self.failed += 1
|
||||
|
||||
def test_token_resolution(self):
|
||||
"""Test 3: Token resolution via MCP tool"""
|
||||
print("\n=== Test 3: Token Resolution ===")
|
||||
|
||||
try:
|
||||
# Test project override token
|
||||
result = resolve_token(str(self.admin_manifest), "colors.primary")
|
||||
self.assert_equal(result, "#6366f1", "Resolved project override token")
|
||||
|
||||
# Test skin-level token
|
||||
result = resolve_token(str(self.admin_manifest), "colors.background")
|
||||
self.assert_equal(result, "#0F172A", "Resolved skin-level token")
|
||||
|
||||
# Test base-level token
|
||||
result = resolve_token(str(self.admin_manifest), "spacing.0")
|
||||
self.assert_equal(result, "0px", "Resolved base-level token")
|
||||
|
||||
# Test nested token
|
||||
result = resolve_token(str(self.admin_manifest), "typography.fontFamily.sans")
|
||||
self.assert_true(
|
||||
"Inter" in result or "system-ui" in result,
|
||||
"Resolved nested token"
|
||||
)
|
||||
|
||||
# Test non-existent token
|
||||
result = resolve_token(str(self.admin_manifest), "nonexistent.token")
|
||||
self.assert_in("not found", result, "Non-existent token returns error message")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Token resolution test failed with error: {e}")
|
||||
self.failed += 1
|
||||
|
||||
def test_skin_listing(self):
|
||||
"""Test 4: Skin listing functionality"""
|
||||
print("\n=== Test 4: Skin Listing ===")
|
||||
|
||||
try:
|
||||
skins_json = list_skins()
|
||||
skins = json.loads(skins_json)
|
||||
|
||||
self.assert_in("base", skins, "Base skin listed")
|
||||
self.assert_in("classic", skins, "Classic skin listed")
|
||||
self.assert_in("workbench", skins, "Workbench skin listed")
|
||||
self.assert_true(len(skins) >= 3, "At least 3 skins available")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Skin listing test failed with error: {e}")
|
||||
self.failed += 1
|
||||
|
||||
def test_safe_boot_protocol(self):
|
||||
"""Test 5: Safe Boot Protocol (emergency fallback)"""
|
||||
print("\n=== Test 5: Safe Boot Protocol ===")
|
||||
|
||||
try:
|
||||
# Test with non-existent manifest
|
||||
context = self.compiler.compile("/nonexistent/path.json")
|
||||
|
||||
self.assert_equal(
|
||||
context.get("status"),
|
||||
"emergency_mode",
|
||||
"Emergency mode activated for invalid path"
|
||||
)
|
||||
|
||||
self.assert_in("_error", context, "Error details included in safe boot")
|
||||
|
||||
# Validate emergency skin has required structure
|
||||
self.assert_in("tokens", context, "Emergency skin has tokens")
|
||||
self.assert_in("colors", context.get("tokens", {}), "Emergency skin has colors")
|
||||
self.assert_in("primary", context.get("tokens", {}).get("colors", {}), "Emergency skin has primary color")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Safe Boot Protocol test failed with error: {e}")
|
||||
self.failed += 1
|
||||
|
||||
def test_path_traversal_prevention(self):
|
||||
"""Test 6: Security - Path traversal prevention"""
|
||||
print("\n=== Test 6: Path Traversal Prevention (Security) ===")
|
||||
|
||||
try:
|
||||
# Attempt path traversal attack
|
||||
try:
|
||||
self.compiler._load_skin("../../etc/passwd")
|
||||
print("✗ Path traversal not prevented!")
|
||||
self.failed += 1
|
||||
except ValueError as e:
|
||||
self.assert_in(
|
||||
"path traversal",
|
||||
str(e).lower(),
|
||||
"Path traversal attack blocked"
|
||||
)
|
||||
|
||||
# Attempt another variant
|
||||
try:
|
||||
self.compiler._load_skin("../../../root/.ssh/id_rsa")
|
||||
print("✗ Path traversal variant not prevented!")
|
||||
self.failed += 1
|
||||
except ValueError as e:
|
||||
self.assert_in(
|
||||
"path traversal",
|
||||
str(e).lower(),
|
||||
"Path traversal variant blocked"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Path traversal prevention test failed with unexpected error: {e}")
|
||||
self.failed += 1
|
||||
|
||||
def test_compiler_status(self):
|
||||
"""Bonus Test: Compiler status tool"""
|
||||
print("\n=== Bonus Test: Compiler Status ===")
|
||||
|
||||
try:
|
||||
status_json = get_compiler_status()
|
||||
status = json.loads(status_json)
|
||||
|
||||
self.assert_equal(status.get("status"), "active", "Compiler status is active")
|
||||
self.assert_in("skins_directory", status, "Status includes skins directory")
|
||||
self.assert_in("safe_boot_ready", status, "Status confirms Safe Boot ready")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Compiler status test failed with error: {e}")
|
||||
self.failed += 1
|
||||
|
||||
def run_all_tests(self):
|
||||
"""Execute all tests and report results"""
|
||||
print("=" * 60)
|
||||
print("DSS Context Compiler Test Suite")
|
||||
print("=" * 60)
|
||||
|
||||
self.test_basic_compilation()
|
||||
self.test_debug_provenance()
|
||||
self.test_token_resolution()
|
||||
self.test_skin_listing()
|
||||
self.test_safe_boot_protocol()
|
||||
self.test_path_traversal_prevention()
|
||||
self.test_compiler_status()
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print(f"Test Results: {self.passed} passed, {self.failed} failed")
|
||||
print("=" * 60)
|
||||
|
||||
if self.failed == 0:
|
||||
print("✓ ALL TESTS PASSED - Ready for production deployment")
|
||||
return True
|
||||
else:
|
||||
print("✗ SOME TESTS FAILED - Review errors before deploying")
|
||||
return False
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: run the full suite and signal the outcome via the
    # process exit code (0 = all tests passed, 1 = at least one failure).
    suite = TestContextCompiler()
    sys.exit(0 if suite.run_all_tests() else 1)
|
||||
161
dss-claude-plugin/verify_tools.py
Normal file
161
dss-claude-plugin/verify_tools.py
Normal file
@@ -0,0 +1,161 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Verify that dss-mcp-server.py properly exports Context Compiler tools
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Add the server directory to path
|
||||
sys.path.insert(0, str(Path(__file__).parent))
|
||||
|
||||
# Banner for the verification run.
BANNER_RULE = "=" * 60
print(BANNER_RULE)
print("CONTEXT COMPILER TOOL VERIFICATION")
print(BANNER_RULE)

# Step 1: the Context Compiler package must import cleanly.
print("\n1. Testing Context Compiler imports...")
try:
    from core import (
        get_active_context,
        resolve_token,
        validate_manifest,
        list_skins,
        get_compiler_status,
    )
except ImportError as e:
    # Without the compiler there is nothing further to verify.
    print(f"   ✗ Context Compiler import failed: {e}")
    CONTEXT_COMPILER_AVAILABLE = False
    sys.exit(1)
else:
    print("   ✓ All Context Compiler functions imported successfully")
    CONTEXT_COMPILER_AVAILABLE = True
|
||||
|
||||
# Step 2: the MCP server source must declare every compiler tool.
print("\n2. Checking MCP server tool list...")
try:
    # We need to simulate the MCP server initialization
    # to see what tools it would export.
    import asyncio
    from mcp.server import Server
    from mcp.server.stdio import stdio_server
    from mcp.types import Tool, TextContent

    # Throwaway server instance, mirroring the real initialization path.
    server = Server("dss-test")

    print("   Checking if server exports tools properly...")

    # Static inspection: scan the server source for the expected markers.
    server_path = Path(__file__).parent / "servers" / "dss-mcp-server.py"
    with open(server_path, "r") as f:
        server_code = f.read()

    # (marker substring, success message, failure message) — checked in order.
    marker_checks = [
        ("context_compiler_tools",
         "   ✓ context_compiler_tools defined in server",
         "   ✗ context_compiler_tools NOT found in server"),
        ("dss_get_resolved_context",
         "   ✓ dss_get_resolved_context tool defined",
         "   ✗ dss_get_resolved_context NOT found"),
        ("dss_resolve_token",
         "   ✓ dss_resolve_token tool defined",
         "   ✗ dss_resolve_token NOT found"),
        ("dss_validate_manifest",
         "   ✓ dss_validate_manifest tool defined",
         "   ✗ dss_validate_manifest NOT found"),
        ("dss_list_skins",
         "   ✓ dss_list_skins tool defined",
         "   ✗ dss_list_skins NOT found"),
        ("dss_get_compiler_status",
         "   ✓ dss_get_compiler_status tool defined",
         "   ✗ dss_get_compiler_status NOT found"),
        ("return dss_tools + devtools_tools + browser_tools + context_compiler_tools",
         "   ✓ context_compiler_tools added to tool list return",
         "   ✗ context_compiler_tools NOT added to return statement"),
    ]
    for marker, ok_msg, bad_msg in marker_checks:
        if marker in server_code:
            print(ok_msg)
        else:
            print(bad_msg)
            sys.exit(1)  # SystemExit bypasses the except below

except Exception as e:
    print(f"   ✗ Error checking server tools: {e}")
    sys.exit(1)
|
||||
|
||||
# Step 3: each declared tool must have a dispatch branch in the call handler.
print("\n3. Checking MCP server tool handlers...")
try:
    server_source = Path(__file__).parent / "servers" / "dss-mcp-server.py"
    with open(server_source, "r") as f:
        server_code = f.read()

    handler_tools = [
        "dss_get_resolved_context",
        "dss_resolve_token",
        "dss_validate_manifest",
        "dss_list_skins",
        "dss_get_compiler_status",
    ]

    for tool_name in handler_tools:
        # The server's dispatch chain uses: elif name == "<tool>"
        dispatch_marker = f'elif name == "{tool_name}"'
        if dispatch_marker in server_code:
            print(f"   ✓ {tool_name} handler implemented")
        else:
            print(f"   ✗ {tool_name} handler NOT found")
            sys.exit(1)

except Exception as e:
    print(f"   ✗ Error checking tool handlers: {e}")
    sys.exit(1)
|
||||
|
||||
# Step 4: smoke-test the compiler functions imported in step 1.
print("\n4. Testing Context Compiler functionality...")
try:
    import json

    # list_skins returns a JSON-encoded array of skin names.
    available = json.loads(list_skins())
    print(f"   ✓ list_skins() returned {len(available)} skins: {available}")

    # get_compiler_status returns a JSON object with a 'status' field.
    status = json.loads(get_compiler_status())
    print(f"   ✓ get_compiler_status() returned status: {status['status']}")

    if status['status'] == 'active':
        print("   ✓ Context Compiler is active and ready")
    else:
        print(f"   ✗ Context Compiler status is: {status['status']}")
        sys.exit(1)

except Exception as e:
    print(f"   ✗ Context Compiler functionality test failed: {e}")
    sys.exit(1)
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("✅ ALL VERIFICATIONS PASSED")
|
||||
print("=" * 60)
|
||||
print("\nContext Compiler tools are properly integrated into dss-mcp-server.py")
|
||||
print("and should be available to Claude Code after MCP server restart.")
|
||||
print("\nIf tools are not showing up in Claude Code, try:")
|
||||
print("1. Fully restart Claude Code (not just /mcp restart)")
|
||||
print("2. Check Claude Code logs for connection errors")
|
||||
print("3. Verify MCP server configuration in Claude settings")
|
||||
Reference in New Issue
Block a user