auto-backup: 2025-12-11 20:35:05 (68 files: +19 ~23 -25)

Generated by DSS Git Backup Hook
This commit is contained in:
2025-12-11 17:35:05 -03:00
parent 09b234a07f
commit 1ff198c177
68 changed files with 3229 additions and 7102 deletions

View File

@@ -1,15 +1,14 @@
{
"enabledPlugins": {
"dss-claude-plugin@local": true
},
"localPlugins": {
"dss-claude-plugin": {
"path": "./dss-claude-plugin"
}
},
"permissions": {
"allow": [
"mcp__zen__listmodels"
]
},
"enableAllProjectMcpServers": true,
"enabledMcpjsonServers": [
"dss"
],
"enabledPlugins": {
"dss-claude-plugin@dss": true
}
}

View File

@@ -0,0 +1,20 @@
{"id": "e86bf101-40a", "timestamp": "2025-12-11T07:49:51.791244", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 4, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "dc9b68b8-9f3", "timestamp": "2025-12-11T07:50:07.035285", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 4, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "0011a5dc-1b0", "timestamp": "2025-12-11T08:01:26.823275", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 53, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "4a0fe126-f37", "timestamp": "2025-12-11T08:02:35.548806", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "4d1ace92-c05", "timestamp": "2025-12-11T08:03:42.738367", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "b66abbc7-431", "timestamp": "2025-12-11T08:07:10.245195", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "a7d4564d-02a", "timestamp": "2025-12-11T08:09:31.190998", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 6, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "783d7d77-2c0", "timestamp": "2025-12-11T08:13:57.991330", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "0ba399b9-b6d", "timestamp": "2025-12-11T08:17:19.961112", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "f97422d1-e4c", "timestamp": "2025-12-11T08:18:03.166336", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "f0af79d9-65f", "timestamp": "2025-12-11T08:22:07.368109", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "1d60f899-d77", "timestamp": "2025-12-11T08:24:04.038534", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "3ed0582a-99b", "timestamp": "2025-12-11T08:28:44.464249", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 6, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "fed5cda6-db0", "timestamp": "2025-12-11T08:31:30.259400", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 6, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "2a16a9c6-af3", "timestamp": "2025-12-11T08:42:59.542999", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "d33532a0-0e0", "timestamp": "2025-12-11T08:45:32.260772", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 25, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "a1d0a7aa-317", "timestamp": "2025-12-11T09:37:40.666691", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 4, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "72a8c3f9-b7e", "timestamp": "2025-12-11T10:53:47.155433", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 4, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "2c33055d-1c3", "timestamp": "2025-12-11T10:57:07.802585", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 4, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}
{"id": "d83ddb46-96d", "timestamp": "2025-12-11T13:07:54.640979", "action": "ai_chat", "entity_type": "chat", "entity_id": "claude", "entity_name": null, "project_id": null, "user_id": null, "user_name": null, "team_context": null, "category": "other", "severity": "info", "description": "Ai Chat chat", "details": {"message_length": 4, "tools_enabled": true, "model": "claude"}, "ip_address": null, "user_agent": null}

1
.dss/dss.pid Normal file
View File

@@ -0,0 +1 @@
61719

4
.gitignore vendored
View File

@@ -68,5 +68,5 @@ dist/
out/
storybook-static/
venv/
# Local MCP config (generated)
# .mcp.json
# Local MCP config (generated by ./scripts/setup-mcp.sh)
.claude/mcp.json

View File

@@ -1,17 +1,4 @@
{
"$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json",
"mcpServers": {
"dss": {
"command": "/home/overbits/dss/.venv/bin/python3",
"args": ["/home/overbits/dss/dss-claude-plugin/servers/dss-mcp-server.py"],
"env": {
"PYTHONPATH": "/home/overbits/dss:/home/overbits/dss/dss-claude-plugin",
"DSS_HOME": "/home/overbits/dss/.dss",
"DSS_DATABASE": "/home/overbits/dss/.dss/dss.db",
"DSS_CACHE": "/home/overbits/dss/.dss/cache",
"DSS_BASE_PATH": "/home/overbits/dss"
},
"description": "Design System Server MCP - local development"
}
}
"mcpServers": {}
}

124
CLAUDE.md Normal file
View File

@@ -0,0 +1,124 @@
# DSS - Design System Server
## FIRST: Enable DSS MCP Tools
**Before starting work, check if DSS tools are available.** If you don't see `dss_*` tools in your available MCP tools, run:
```bash
./scripts/setup-mcp.sh
```
Then tell the user: "I've configured the DSS MCP server. Please restart Claude Code to load the DSS tools, then continue this conversation."
The DSS MCP server provides 40+ design system tools for Figma sync, token management, component generation, and project analysis.
### Manual verification
Check if `.claude/mcp.json` exists and has valid paths:
- `command` should point to `.venv/bin/python3` (must exist)
- `args` should point to `dss-claude-plugin/servers/dss-mcp-server.py` (must exist)
## Project Structure
```
dss/
├── dss/ # Core Python library
│ ├── mcp_server/ # MCP server implementation
│ ├── analyze/ # Code analysis tools
│ ├── ingest/ # Token ingestion
│ ├── figma/ # Figma integration
│ ├── storybook/ # Storybook generation
│ └── storage/ # JSON-based storage
├── apps/
│ ├── api/ # FastAPI server (port 6220)
│ └── cli/ # TypeScript CLI
├── admin-ui/ # Admin dashboard (port 6221)
├── dss-claude-plugin/ # Claude Code MCP plugin
│ └── servers/ # MCP server scripts
└── scripts/ # Setup & utility scripts
```
## Standard Ports
| Service | Port |
|------------|------|
| API Server | 6220 |
| Admin UI | 6221 |
| MCP Server | 6222 |
| Storybook | 6226 |
## Development Setup
```bash
# Python environment
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
# Admin UI
cd admin-ui && npm install
# Generate MCP config
./scripts/setup-mcp.sh
```
## Starting Services
```bash
# API Server
source .venv/bin/activate
PYTHONPATH="/path/to/dss:/path/to/dss/apps/api" uvicorn apps.api.server:app --host 0.0.0.0 --port 6220
# Admin UI
cd admin-ui && npm run dev
```
## Key Files
- `dss/mcp_server/handler.py` - MCP tool execution handler
- `dss/storage/json_store.py` - JSON-based data storage
- `apps/api/server.py` - FastAPI server
- `.claude/mcp.json` - Local MCP configuration (generated)
## Troubleshooting MCP Connection Issues
### DSS MCP server fails to connect
If `/mcp` shows "Failed to reconnect to dss", check:
1. **Virtual environment exists**: The `.venv` directory must exist with Python installed
```bash
# If missing, create it:
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
```
2. **MCP config paths are valid**: Check `.claude/mcp.json` points to existing files:
- `.venv/bin/python3` must exist
- `dss-claude-plugin/servers/dss-mcp-server.py` must exist
3. **Restart Claude Code** after fixing any configuration issues
### Disabling unwanted MCP servers
MCP servers can be configured in multiple locations. Check all of these:
| Location | Used By |
|----------|---------|
| `~/.claude/mcp.json` | Claude Code (global) |
| `~/.config/claude/claude_desktop_config.json` | Claude Desktop app |
| `.claude/mcp.json` (project) | Claude Code (project-specific) |
| `../.mcp.json` | Parent directory inheritance |
To disable a server, remove its entry from the relevant config file and restart Claude Code.
### Common issue: Figma MCP errors
If you see repeated `MCP server "figma": No token data found` errors, the figma server is likely configured in `~/.config/claude/claude_desktop_config.json`. Remove the `"figma"` entry from that file.
## Notes
- DSS uses JSON-based storage, not a SQL database
- The `dss/mcp_server/` directory was renamed from `dss/mcp/` to avoid shadowing the pip `mcp` package
- Integration configs (Figma, Jira, etc.) are stored encrypted when database is configured

131
README.md
View File

@@ -5,11 +5,138 @@ Monolithic design system platform. Ingest tokens from Figma/CSS/SCSS/Tailwind, n
## Quick Start
```bash
# 1. Create Python virtual environment
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
python tools/api/server.py # REST API on :3456
python tools/api/mcp_server.py # MCP server on :3457
# 2. Generate MCP config for Claude Code
./scripts/setup-mcp.sh
# 3. Start services
PYTHONPATH="$PWD:$PWD/apps/api" uvicorn apps.api.server:app --host 0.0.0.0 --port 6220
```
## Claude Code Plugin Integration
DSS integrates with Claude Code as a **plugin** that provides MCP tools, slash commands, skills, and agents.
### Installation
**Step 1: Set up the Python environment**
```bash
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
```
**Step 2: Run the setup script**
```bash
./scripts/setup-mcp.sh
```
**Step 3: Add the DSS marketplace and install the plugin**
In Claude Code, run:
```
/plugin marketplace add /path/to/dss/dss-claude-plugin
```
Replace `/path/to/dss` with your actual DSS installation path.
Then install the plugin:
```
/plugin install dss-claude-plugin@dss
```
**Alternative: Manual configuration**
Add to your `~/.claude/settings.json`:
```json
{
"extraKnownMarketplaces": {
"dss": {
"source": {
"source": "directory",
"path": "/path/to/dss/dss-claude-plugin"
}
}
},
"enabledPlugins": {
"dss-claude-plugin@dss": true
}
}
```
**Step 4: Restart Claude Code** completely (quit and reopen)
### Verification
After restart, verify the plugin is loaded:
1. Run `/mcp` - DSS server should appear in the list
2. If DSS shows as disconnected, select it to enable
3. DSS tools will be available as `dss_*` functions
### Troubleshooting
**Plugin not found error in debug logs?**
The plugin must be discoverable. Ensure the path in `.claude/mcp.json` points to valid files:
```bash
# Verify paths exist
ls -la .venv/bin/python3
ls -la dss-claude-plugin/servers/dss-mcp-server.py
```
**DSS server not connecting?**
Add DSS to your global MCP config (`~/.claude/mcp.json`):
```json
{
"mcpServers": {
"dss": {
"command": "/path/to/dss/.venv/bin/python3",
"args": ["/path/to/dss/dss-claude-plugin/servers/dss-mcp-server.py"],
"env": {
"PYTHONPATH": "/path/to/dss:/path/to/dss/dss-claude-plugin",
"DSS_HOME": "/path/to/dss/.dss",
"DSS_BASE_PATH": "/path/to/dss"
}
}
}
}
```
**Test the MCP server manually:**
```bash
source .venv/bin/activate
PYTHONPATH="$PWD:$PWD/dss-claude-plugin" \
python3 dss-claude-plugin/servers/dss-mcp-server.py
```
**Check debug logs:**
```bash
cat ~/.claude/debug/latest | grep -i "dss\|plugin"
```
### Available Tools
Once connected, DSS provides tools prefixed with `dss_`:
- `dss_figma_*` - Figma integration and token extraction
- `dss_token_*` - Design token management
- `dss_component_*` - Component generation
- `dss_project_*` - Project analysis
## Structure
```

View File

@@ -27,9 +27,9 @@ admin-ui/src/
│ ├── client.ts # API client with all endpoints
│ └── types.ts # TypeScript interfaces
├── components/
│ ├── base/ # Button, Card, Input, Badge, Spinner
│ ├── base/ # Button, Card, Input, Badge, Spinner, Skeleton
│ ├── layout/ # Shell, Header, Sidebar, Panel, ChatSidebar
│ └── shared/ # CommandPalette, Toast
│ └── shared/ # CommandPalette, Toast, Modal, ErrorBoundary, DataTable
├── workdesks/ # Team-specific views
│ ├── UIWorkdesk.tsx # Figma extraction, code generation
│ ├── UXWorkdesk.tsx # Token list, Figma files
@@ -170,8 +170,10 @@ endpoints.mcp.status() // GET /api/mcp/status
**Tools**:
- `dashboard` - Figma connection status, file sync status
- `token-list` - View tokens from Figma
- `figma-files` - Manage connected Figma files
- `token-list` - View tokens from Figma
- `asset-list` - Gallery of design assets (icons, images, illustrations)
- `component-list` - Design system components
- `figma-plugin` - Plugin installation info
### QA Team (`QAWorkdesk.tsx`)
@@ -179,8 +181,10 @@ endpoints.mcp.status() // GET /api/mcp/status
**Tools**:
- `dashboard` - Health score, ESRE definitions count
- `figma-live-compare` - QA validation: Figma vs live implementation
- `esre-editor` - Create/edit/delete ESRE definitions
- `console-viewer` - Browser console log capture
- `network-monitor` - Track network requests in real-time
- `test-results` - View ESRE test results
### Admin (`AdminWorkdesk.tsx`)
@@ -190,6 +194,7 @@ endpoints.mcp.status() // GET /api/mcp/status
- `settings` - Server config, Figma token, Storybook URL
- `projects` - CRUD for projects
- `integrations` - External service connections
- `mcp-tools` - View and execute MCP tools for AI assistants
- `audit-log` - System activity with filtering/export
- `cache-management` - Clear/purge cache
- `health-monitor` - Service status with auto-refresh

View File

@@ -4,6 +4,7 @@ import { theme, initializeApp } from './state';
import { Shell } from './components/layout/Shell';
import { CommandPalette } from './components/shared/CommandPalette';
import { ToastContainer } from './components/shared/Toast';
import { ErrorBoundary } from './components/shared/ErrorBoundary';
import { useKeyboardShortcuts } from './hooks/useKeyboard';
export function App() {
@@ -43,10 +44,10 @@ export function App() {
}, []);
return (
<>
<ErrorBoundary>
<Shell />
<CommandPalette />
<ToastContainer />
</>
</ErrorBoundary>
);
}

View File

@@ -0,0 +1,141 @@
/* Skeleton Loader Styles */
.skeleton {
background-color: var(--color-muted);
display: block;
}
/* Variants */
.skeleton-text {
height: 1em;
border-radius: var(--radius-sm);
margin-bottom: var(--spacing-2);
}
.skeleton-text:last-child {
margin-bottom: 0;
}
.skeleton-circular {
border-radius: 50%;
}
.skeleton-rectangular {
border-radius: 0;
}
.skeleton-rounded {
border-radius: var(--radius-md);
}
/* Animations */
.skeleton-pulse {
animation: skeleton-pulse 1.5s ease-in-out infinite;
}
@keyframes skeleton-pulse {
0% {
opacity: 1;
}
50% {
opacity: 0.4;
}
100% {
opacity: 1;
}
}
.skeleton-wave {
position: relative;
overflow: hidden;
}
.skeleton-wave::after {
content: '';
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 0;
transform: translateX(-100%);
background: linear-gradient(
90deg,
transparent,
rgba(255, 255, 255, 0.3),
transparent
);
animation: skeleton-wave 1.5s linear infinite;
}
@keyframes skeleton-wave {
100% {
transform: translateX(100%);
}
}
[data-theme="dark"] .skeleton-wave::after {
background: linear-gradient(
90deg,
transparent,
rgba(255, 255, 255, 0.1),
transparent
);
}
/* Skeleton patterns */
.skeleton-text-block {
display: flex;
flex-direction: column;
gap: var(--spacing-2);
}
.skeleton-card {
background-color: var(--color-surface-0);
border-radius: var(--radius-lg);
overflow: hidden;
border: 1px solid var(--color-border);
}
.skeleton-card-content {
padding: var(--spacing-4);
display: flex;
flex-direction: column;
gap: var(--spacing-2);
}
.skeleton-list-item {
display: flex;
align-items: center;
gap: var(--spacing-3);
padding: var(--spacing-3) 0;
}
.skeleton-list-content {
flex: 1;
display: flex;
flex-direction: column;
gap: var(--spacing-1);
}
.skeleton-table {
display: flex;
flex-direction: column;
gap: var(--spacing-1);
}
.skeleton-table-row {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(100px, 1fr));
gap: var(--spacing-4);
padding: var(--spacing-3) 0;
border-bottom: 1px solid var(--color-border);
}
.skeleton-table-header {
padding-bottom: var(--spacing-3);
border-bottom: 2px solid var(--color-border);
}
.skeleton-table-row:last-child {
border-bottom: none;
}

View File

@@ -0,0 +1,106 @@
import { JSX } from 'preact';
import './Skeleton.css';
export interface SkeletonProps {
variant?: 'text' | 'circular' | 'rectangular' | 'rounded';
width?: string | number;
height?: string | number;
animation?: 'pulse' | 'wave' | 'none';
className?: string;
style?: JSX.CSSProperties;
}
export function Skeleton({
variant = 'text',
width,
height,
animation = 'pulse',
className = '',
style
}: SkeletonProps) {
const classes = [
'skeleton',
`skeleton-${variant}`,
animation !== 'none' && `skeleton-${animation}`,
className
].filter(Boolean).join(' ');
const combinedStyle: JSX.CSSProperties = {
width: typeof width === 'number' ? `${width}px` : width,
height: typeof height === 'number' ? `${height}px` : height,
...style
};
return <div className={classes} style={combinedStyle} />;
}
// Predefined skeleton patterns
export function SkeletonText({ lines = 3, className = '' }: { lines?: number; className?: string }) {
return (
<div className={`skeleton-text-block ${className}`}>
{Array.from({ length: lines }).map((_, i) => (
<Skeleton
key={i}
variant="text"
width={i === lines - 1 ? '60%' : '100%'}
/>
))}
</div>
);
}
/**
 * Card-shaped placeholder: a 140px media area above two text lines
 * (80% and 60% wide) inside a .skeleton-card-content wrapper.
 */
export function SkeletonCard({ className = '' }: { className?: string }) {
  return (
    <div className={`skeleton-card ${className}`}>
      <Skeleton variant="rectangular" height={140} />
      <div className="skeleton-card-content">
        <Skeleton variant="text" width="80%" />
        <Skeleton variant="text" width="60%" />
      </div>
    </div>
  );
}
/**
 * Circular avatar placeholder; `size` is both width and height in pixels.
 */
export function SkeletonAvatar({ size = 40, className = '' }: { size?: number; className?: string }) {
  return (
    <Skeleton
      variant="circular"
      width={size}
      height={size}
      className={className}
    />
  );
}
/**
 * List-row placeholder: a 40px avatar next to two text lines
 * (40% and 70% wide), matching the .skeleton-list-item layout.
 */
export function SkeletonListItem({ className = '' }: { className?: string }) {
  return (
    <div className={`skeleton-list-item ${className}`}>
      <SkeletonAvatar size={40} />
      <div className="skeleton-list-content">
        <Skeleton variant="text" width="40%" />
        <Skeleton variant="text" width="70%" />
      </div>
    </div>
  );
}
/**
 * Table placeholder: one header row plus `rows` body rows, each with
 * `columns` text skeletons. The first body column is wider (90%) than
 * the rest (60%) to suggest a leading label column.
 */
export function SkeletonTable({ rows = 5, columns = 4, className = '' }: { rows?: number; columns?: number; className?: string }) {
  return (
    <div className={`skeleton-table ${className}`}>
      {/* Header */}
      <div className="skeleton-table-row skeleton-table-header">
        {Array.from({ length: columns }).map((_, i) => (
          <Skeleton key={i} variant="text" width="80%" />
        ))}
      </div>
      {/* Body */}
      {Array.from({ length: rows }).map((_, rowIndex) => (
        <div key={rowIndex} className="skeleton-table-row">
          {Array.from({ length: columns }).map((_, colIndex) => (
            <Skeleton key={colIndex} variant="text" width={colIndex === 0 ? '90%' : '60%'} />
          ))}
        </div>
      ))}
    </div>
  );
}

View File

@@ -0,0 +1,164 @@
/* DataTable Styles */
.data-table-container {
display: flex;
flex-direction: column;
gap: var(--spacing-4);
}
.data-table-search {
max-width: 300px;
}
.data-table-loading {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: var(--spacing-3);
padding: var(--spacing-8);
color: var(--color-muted-foreground);
}
.data-table-wrapper {
overflow-x: auto;
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
}
.data-table {
width: 100%;
border-collapse: collapse;
font-size: var(--font-size-sm);
}
.data-table th,
.data-table td {
padding: var(--spacing-3) var(--spacing-4);
text-align: left;
border-bottom: 1px solid var(--color-border);
}
.data-table th {
background-color: var(--color-surface-1);
font-weight: var(--font-weight-medium);
color: var(--color-muted-foreground);
white-space: nowrap;
}
.data-table th.sortable {
cursor: pointer;
user-select: none;
}
.data-table th.sortable:hover {
background-color: var(--color-muted);
}
.th-content {
display: flex;
align-items: center;
gap: var(--spacing-1);
}
.sort-indicator {
font-size: var(--font-size-xs);
color: var(--color-primary);
}
.data-table tbody tr {
transition: background-color var(--duration-fast) var(--timing-out);
}
.data-table tbody tr:hover {
background-color: var(--color-surface-1);
}
.data-table tbody tr.clickable {
cursor: pointer;
}
.data-table tbody tr:last-child td {
border-bottom: none;
}
.data-table td {
color: var(--color-foreground);
}
.empty-message {
text-align: center;
color: var(--color-muted-foreground);
padding: var(--spacing-8) !important;
}
.actions-column {
width: 100px;
text-align: right !important;
}
.actions-cell {
text-align: right;
white-space: nowrap;
}
.actions-cell > * {
margin-left: var(--spacing-1);
}
/* Pagination */
.data-table-pagination {
display: flex;
align-items: center;
justify-content: space-between;
flex-wrap: wrap;
gap: var(--spacing-3);
}
.pagination-info {
font-size: var(--font-size-sm);
color: var(--color-muted-foreground);
}
.pagination-controls {
display: flex;
align-items: center;
gap: var(--spacing-1);
}
.page-indicator {
padding: 0 var(--spacing-3);
font-size: var(--font-size-sm);
color: var(--color-muted-foreground);
}
/* Code in cells */
.data-table td code {
padding: var(--spacing-1) var(--spacing-2);
background-color: var(--color-surface-1);
border-radius: var(--radius-sm);
font-size: var(--font-size-xs);
font-family: var(--font-mono);
}
/* Status badges in cells */
.data-table td .badge {
margin: 0;
}
/* Responsive */
@media (max-width: 640px) {
.data-table th,
.data-table td {
padding: var(--spacing-2) var(--spacing-3);
}
.data-table-pagination {
flex-direction: column;
align-items: stretch;
}
.pagination-controls {
justify-content: center;
}
}

View File

@@ -0,0 +1,249 @@
import { ComponentChildren } from 'preact';
import { useState, useMemo } from 'preact/hooks';
import { Spinner } from '../base/Spinner';
import { Input } from '../base/Input';
import { Button } from '../base/Button';
import './DataTable.css';
/** Definition of a single table column. */
export interface Column<T> {
  // Property name looked up on each row (also used as the sort key).
  key: string;
  // Text shown in the <th>.
  header: string;
  // Optional fixed CSS width for the column.
  width?: string;
  // Set false to opt this column out of sorting even when the table is sortable.
  sortable?: boolean;
  // Custom cell renderer; receives the raw value, the full row, and the row index.
  render?: (value: unknown, row: T, index: number) => ComponentChildren;
}
/** Props for the generic DataTable component. */
export interface DataTableProps<T extends Record<string, unknown>> {
  columns: Column<T>[];
  data: T[];
  // When true, a spinner replaces the table entirely.
  loading?: boolean;
  emptyMessage?: string;
  // Search only works when searchable is true AND searchKeys is non-empty.
  searchable?: boolean;
  searchKeys?: string[];
  sortable?: boolean;
  pagination?: boolean;
  pageSize?: number;
  onRowClick?: (row: T, index: number) => void;
  // Extra class(es) for a row, computed per row.
  rowClassName?: (row: T, index: number) => string;
  // Renders a trailing "Actions" cell per row when provided.
  actions?: (row: T, index: number) => ComponentChildren;
}
// null means "unsorted" (third click on a header clears the sort).
type SortDirection = 'asc' | 'desc' | null;
/**
 * Generic data table with optional search, column sorting, and pagination.
 *
 * Data flows through three memoized stages:
 *   data -> filteredData (search) -> sortedData (sort) -> paginatedData (page slice)
 *
 * Fix over the previous version: the current page is clamped to the available
 * page count. Before, only a search change reset the page, so if the `data`
 * prop shrank (or a sort/filter changed) while the user was on a late page,
 * the table rendered an empty page with stale pagination controls.
 */
export function DataTable<T extends Record<string, unknown>>({
  columns,
  data,
  loading = false,
  emptyMessage = 'No data available',
  searchable = false,
  searchKeys = [],
  sortable = true,
  pagination = true,
  pageSize = 10,
  onRowClick,
  rowClassName,
  actions
}: DataTableProps<T>) {
  const [searchTerm, setSearchTerm] = useState('');
  const [sortColumn, setSortColumn] = useState<string | null>(null);
  const [sortDirection, setSortDirection] = useState<SortDirection>(null);
  const [currentPage, setCurrentPage] = useState(1);

  // Stage 1: case-insensitive substring match across the configured searchKeys.
  const filteredData = useMemo(() => {
    if (!searchTerm || searchKeys.length === 0) return data;
    const term = searchTerm.toLowerCase();
    return data.filter(row =>
      searchKeys.some(key => {
        const value = row[key];
        return value && String(value).toLowerCase().includes(term);
      })
    );
  }, [data, searchTerm, searchKeys]);

  // Stage 2: string comparison with numeric awareness ("2" < "10");
  // null/undefined values always sort to the end.
  const sortedData = useMemo(() => {
    if (!sortColumn || !sortDirection) return filteredData;
    return [...filteredData].sort((a, b) => {
      const aVal = a[sortColumn];
      const bVal = b[sortColumn];
      if (aVal === bVal) return 0;
      if (aVal === null || aVal === undefined) return 1;
      if (bVal === null || bVal === undefined) return -1;
      const comparison = String(aVal).localeCompare(String(bVal), undefined, { numeric: true });
      return sortDirection === 'asc' ? comparison : -comparison;
    });
  }, [filteredData, sortColumn, sortDirection]);

  const totalPages = Math.ceil(sortedData.length / pageSize);

  // BUGFIX: clamp the page so shrinking data / new filters can't leave us
  // pointing past the last page (minimum of 1 so an empty set still renders).
  const safePage = Math.min(currentPage, Math.max(totalPages, 1));

  // Stage 3: slice out the visible page (or pass everything through).
  const paginatedData = useMemo(() => {
    if (!pagination) return sortedData;
    const start = (safePage - 1) * pageSize;
    return sortedData.slice(start, start + pageSize);
  }, [sortedData, safePage, pageSize, pagination]);

  // Clicking a header cycles asc -> desc -> unsorted; other headers start at asc.
  function handleSort(columnKey: string) {
    if (!sortable) return;
    const column = columns.find(c => c.key === columnKey);
    if (column?.sortable === false) return;
    if (sortColumn === columnKey) {
      if (sortDirection === 'asc') {
        setSortDirection('desc');
      } else if (sortDirection === 'desc') {
        setSortColumn(null);
        setSortDirection(null);
      }
    } else {
      setSortColumn(columnKey);
      setSortDirection('asc');
    }
  }

  // Reset to page 1 when the search term changes.
  const handleSearch = (value: string) => {
    setSearchTerm(value);
    setCurrentPage(1);
  };

  if (loading) {
    return (
      <div className="data-table-loading">
        <Spinner size="lg" />
        <span>Loading data...</span>
      </div>
    );
  }

  return (
    <div className="data-table-container">
      {searchable && (
        <div className="data-table-search">
          <Input
            placeholder="Search..."
            value={searchTerm}
            onChange={(e) => handleSearch((e.target as HTMLInputElement).value)}
            size="sm"
          />
        </div>
      )}
      <div className="data-table-wrapper">
        <table className="data-table">
          <thead>
            <tr>
              {columns.map(column => (
                <th
                  key={column.key}
                  style={column.width ? { width: column.width } : undefined}
                  className={sortable && column.sortable !== false ? 'sortable' : ''}
                  onClick={() => sortable && column.sortable !== false && handleSort(column.key)}
                >
                  <span className="th-content">
                    {column.header}
                    {sortColumn === column.key && (
                      <span className="sort-indicator">
                        {sortDirection === 'asc' ? ' \u2191' : ' \u2193'}
                      </span>
                    )}
                  </span>
                </th>
              ))}
              {actions && <th className="actions-column">Actions</th>}
            </tr>
          </thead>
          <tbody>
            {paginatedData.length === 0 ? (
              <tr>
                <td colSpan={columns.length + (actions ? 1 : 0)} className="empty-message">
                  {emptyMessage}
                </td>
              </tr>
            ) : (
              paginatedData.map((row, index) => {
                // Index into sortedData (pre-slice) so callbacks see a stable position.
                const actualIndex = (safePage - 1) * pageSize + index;
                return (
                  <tr
                    key={actualIndex}
                    className={[
                      onRowClick ? 'clickable' : '',
                      rowClassName ? rowClassName(row, actualIndex) : ''
                    ].filter(Boolean).join(' ')}
                    onClick={() => onRowClick?.(row, actualIndex)}
                  >
                    {columns.map(column => (
                      <td key={column.key}>
                        {column.render
                          ? column.render(row[column.key], row, actualIndex)
                          : String(row[column.key] ?? '-')}
                      </td>
                    ))}
                    {actions && (
                      <td className="actions-cell">
                        {actions(row, actualIndex)}
                      </td>
                    )}
                  </tr>
                );
              })
            )}
          </tbody>
        </table>
      </div>
      {pagination && totalPages > 1 && (
        <div className="data-table-pagination">
          <span className="pagination-info">
            Showing {(safePage - 1) * pageSize + 1} - {Math.min(safePage * pageSize, sortedData.length)} of {sortedData.length}
          </span>
          <div className="pagination-controls">
            <Button
              variant="ghost"
              size="sm"
              disabled={safePage === 1}
              onClick={() => setCurrentPage(1)}
            >
              First
            </Button>
            <Button
              variant="ghost"
              size="sm"
              disabled={safePage === 1}
              onClick={() => setCurrentPage(safePage - 1)}
            >
              Prev
            </Button>
            <span className="page-indicator">
              {safePage} / {totalPages}
            </span>
            <Button
              variant="ghost"
              size="sm"
              disabled={safePage === totalPages}
              onClick={() => setCurrentPage(safePage + 1)}
            >
              Next
            </Button>
            <Button
              variant="ghost"
              size="sm"
              disabled={safePage === totalPages}
              onClick={() => setCurrentPage(totalPages)}
            >
              Last
            </Button>
          </div>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,63 @@
/* Error Boundary Styles
   Fallback UI shown by ErrorBoundary.tsx when a render error is caught.
   Relies on the design-token custom properties defined elsewhere
   (--spacing-*, --color-*, --radius-*, --font-*). */

/* Center the error card within the failed region. */
.error-boundary {
  display: flex;
  align-items: center;
  justify-content: center;
  min-height: 300px;
  padding: var(--spacing-6);
}

/* Constrain the card so it never spans the full viewport. */
.error-boundary .card {
  max-width: 500px;
  width: 100%;
}

.error-details {
  display: flex;
  flex-direction: column;
  gap: var(--spacing-4);
}

/* The error message itself, rendered in monospace inside an error-tinted box. */
.error-message {
  padding: var(--spacing-3);
  background-color: var(--color-error-bg);
  border: 1px solid var(--color-error);
  border-radius: var(--radius-md);
  color: var(--color-error);
  font-family: var(--font-mono);
  font-size: var(--font-size-sm);
  word-break: break-word;
}

/* Collapsible <details> element holding the component stack trace. */
.error-stack {
  margin-top: var(--spacing-2);
}

.error-stack summary {
  cursor: pointer;
  font-size: var(--font-size-sm);
  color: var(--color-muted-foreground);
  padding: var(--spacing-2) 0;
}

.error-stack summary:hover {
  color: var(--color-foreground);
}

/* Stack trace body: scrolls both ways and wraps long frames. */
.error-stack pre {
  margin-top: var(--spacing-2);
  padding: var(--spacing-3);
  background-color: var(--color-surface-1);
  border-radius: var(--radius-md);
  font-size: var(--font-size-xs);
  overflow-x: auto;
  white-space: pre-wrap;
  word-break: break-word;
  max-height: 200px;
  overflow-y: auto;
}

/* Extra spacing between the "Try Again" / "Reload Page" buttons. */
.error-boundary .card-footer {
  gap: var(--spacing-3);
}

View File

@@ -0,0 +1,104 @@
import { Component, ComponentChildren } from 'preact';
import { Button } from '../base/Button';
import { Card, CardHeader, CardContent, CardFooter } from '../base/Card';
import './ErrorBoundary.css';
interface ErrorBoundaryProps {
  children: ComponentChildren;
  // Optional replacement UI rendered instead of the default error card.
  fallback?: ComponentChildren;
  // Invoked after an error is caught, with the error and component stack.
  onError?: (error: Error, errorInfo: { componentStack: string }) => void;
}

interface ErrorBoundaryState {
  hasError: boolean;
  error: Error | null;
  errorInfo: { componentStack: string } | null;
}

/**
 * Class-based error boundary (Preact requires a class component for
 * getDerivedStateFromError / componentDidCatch).
 *
 * Renders its children normally; when a descendant throws during render,
 * it shows either the `fallback` prop or a default error card with
 * "Try Again" (reset state, re-render children) and "Reload Page" actions.
 * The component stack trace is only shown when NODE_ENV === 'development'.
 */
export class ErrorBoundary extends Component<ErrorBoundaryProps, ErrorBoundaryState> {
  state: ErrorBoundaryState = {
    hasError: false,
    error: null,
    errorInfo: null
  };

  // Phase 1: synchronously flag the error so the next render shows the fallback.
  static getDerivedStateFromError(error: Error): Partial<ErrorBoundaryState> {
    return { hasError: true, error };
  }

  // Phase 2: capture the component stack and notify the optional handler.
  componentDidCatch(error: Error, errorInfo: { componentStack: string }) {
    this.setState({ errorInfo });
    // Log error to console
    console.error('ErrorBoundary caught an error:', error, errorInfo);
    // Call optional error handler
    this.props.onError?.(error, errorInfo);
  }

  // Clear the error and attempt to re-render the original children.
  handleReset = () => {
    this.setState({ hasError: false, error: null, errorInfo: null });
  };

  // Hard recovery: full page reload.
  handleReload = () => {
    window.location.reload();
  };

  render() {
    if (this.state.hasError) {
      // Custom fallback if provided
      if (this.props.fallback) {
        return this.props.fallback;
      }
      // Default error UI
      return (
        <div className="error-boundary">
          <Card variant="bordered" padding="lg">
            <CardHeader
              title="Something went wrong"
              subtitle="An unexpected error occurred"
            />
            <CardContent>
              <div className="error-details">
                <p className="error-message">
                  {this.state.error?.message || 'Unknown error'}
                </p>
                {/* Stack trace is development-only; relies on the bundler
                    substituting process.env.NODE_ENV at build time. */}
                {process.env.NODE_ENV === 'development' && this.state.errorInfo && (
                  <details className="error-stack">
                    <summary>Stack trace</summary>
                    <pre>{this.state.errorInfo.componentStack}</pre>
                  </details>
                )}
              </div>
            </CardContent>
            <CardFooter>
              <Button variant="primary" onClick={this.handleReset}>
                Try Again
              </Button>
              <Button variant="outline" onClick={this.handleReload}>
                Reload Page
              </Button>
            </CardFooter>
          </Card>
        </div>
      );
    }
    return this.props.children;
  }
}
// Hook-based wrapper for functional components
export function withErrorBoundary<P extends object>(
WrappedComponent: (props: P) => ComponentChildren,
fallback?: ComponentChildren
) {
return function WithErrorBoundary(props: P) {
return (
<ErrorBoundary fallback={fallback}>
<WrappedComponent {...props} />
</ErrorBoundary>
);
};
}

View File

@@ -0,0 +1,165 @@
/* Modal Styles
   Styles for Modal.tsx: a fixed overlay with a centered dialog.
   Uses the shared design-token custom properties (--spacing-*, --color-*,
   --radius-*, --shadow-*, --duration-*, --timing-*, --z-*). */

/* Full-viewport backdrop; fades in and blurs the page behind it. */
.modal-overlay {
  position: fixed;
  inset: 0;
  z-index: var(--z-50);
  display: flex;
  align-items: center;
  justify-content: center;
  padding: var(--spacing-4);
  background-color: rgba(0, 0, 0, 0.5);
  backdrop-filter: blur(2px);
  animation: fadeIn var(--duration-fast) var(--timing-out);
}

@keyframes fadeIn {
  from {
    opacity: 0;
  }
  to {
    opacity: 1;
  }
}

/* Dialog container: column layout so the content area can scroll while
   header/footer stay pinned; capped to the viewport height. */
.modal {
  display: flex;
  flex-direction: column;
  background-color: var(--color-surface-0);
  border-radius: var(--radius-lg);
  box-shadow: var(--shadow-xl);
  max-height: calc(100vh - var(--spacing-8));
  animation: slideUp var(--duration-normal) var(--timing-out);
}

@keyframes slideUp {
  from {
    opacity: 0;
    transform: translateY(10px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

/* Modal sizes — matched by the `modal-${size}` class in Modal.tsx. */
.modal-sm {
  width: 100%;
  max-width: 400px;
}
.modal-md {
  width: 100%;
  max-width: 560px;
}
.modal-lg {
  width: 100%;
  max-width: 720px;
}
.modal-xl {
  width: 100%;
  max-width: 960px;
}
/* "full" fills the viewport minus a uniform margin. */
.modal-full {
  width: calc(100vw - var(--spacing-8));
  max-width: none;
  height: calc(100vh - var(--spacing-8));
}

/* Modal header */
.modal-header {
  display: flex;
  align-items: center;
  justify-content: space-between;
  padding: var(--spacing-4) var(--spacing-6);
  border-bottom: 1px solid var(--color-border);
  flex-shrink: 0;
}

.modal-title {
  margin: 0;
  font-size: var(--font-size-lg);
  font-weight: var(--font-weight-semibold);
  color: var(--color-foreground);
}

/* Close (X) button; negative margin optically aligns it with the padding. */
.modal-close {
  display: flex;
  align-items: center;
  justify-content: center;
  width: 32px;
  height: 32px;
  padding: 0;
  margin: calc(var(--spacing-1) * -1);
  background: transparent;
  border: none;
  border-radius: var(--radius-md);
  color: var(--color-muted-foreground);
  cursor: pointer;
  transition: all var(--duration-fast) var(--timing-out);
}

.modal-close:hover {
  background-color: var(--color-muted);
  color: var(--color-foreground);
}

.modal-close:focus-visible {
  outline: 2px solid var(--color-primary);
  outline-offset: 2px;
}

/* Modal content — the only region that scrolls when content overflows. */
.modal-content {
  flex: 1;
  padding: var(--spacing-6);
  overflow-y: auto;
}

/* Modal footer */
.modal-footer {
  display: flex;
  align-items: center;
  justify-content: flex-end;
  gap: var(--spacing-3);
  padding: var(--spacing-4) var(--spacing-6);
  border-top: 1px solid var(--color-border);
  flex-shrink: 0;
}

/* Confirm dialog specific */
.confirm-message {
  margin: 0;
  color: var(--color-muted-foreground);
  line-height: var(--line-height-relaxed);
}

/* Dark mode adjustments — darker backdrop for contrast. */
[data-theme="dark"] .modal-overlay {
  background-color: rgba(0, 0, 0, 0.7);
}

/* Responsive: on small screens the modal becomes a bottom sheet
   (anchored to the bottom edge, square bottom corners, full width). */
@media (max-width: 640px) {
  .modal-overlay {
    padding: var(--spacing-2);
    align-items: flex-end;
  }
  .modal {
    max-height: calc(100vh - var(--spacing-4));
    border-bottom-left-radius: 0;
    border-bottom-right-radius: 0;
  }
  .modal-sm,
  .modal-md,
  .modal-lg,
  .modal-xl {
    max-width: none;
  }
}

View File

@@ -0,0 +1,152 @@
import { JSX, ComponentChildren } from 'preact';
import { useState, useEffect, useCallback } from 'preact/hooks';
import { signal } from '@preact/signals';
import { Button } from '../base/Button';
import './Modal.css';
// Global modal state
// NOTE(review): these module-level signals are exported but not referenced by
// the Modal component below in this file — confirm external consumers exist
// before relying on (or removing) them.
export const modalOpen = signal(false);
export const modalContent = signal<ComponentChildren | null>(null);
export const modalTitle = signal<string>('');

export interface ModalProps {
  isOpen: boolean;
  // Invoked on Escape, overlay click, or the close button.
  onClose: () => void;
  title?: string;
  children: ComponentChildren;
  size?: 'sm' | 'md' | 'lg' | 'xl' | 'full';
  closeOnOverlayClick?: boolean;
  closeOnEscape?: boolean;
  showCloseButton?: boolean;
  // Optional footer content (e.g. action buttons), rendered below the body.
  footer?: ComponentChildren;
}

/**
 * Controlled modal dialog. Renders nothing when `isOpen` is false.
 * While open it locks body scrolling and listens for Escape; both are
 * undone by the effect cleanup when the modal closes or unmounts.
 */
export function Modal({
  isOpen,
  onClose,
  title,
  children,
  size = 'md',
  closeOnOverlayClick = true,
  closeOnEscape = true,
  showCloseButton = true,
  footer
}: ModalProps) {
  // Handle escape key
  const handleKeyDown = useCallback((e: KeyboardEvent) => {
    if (e.key === 'Escape' && closeOnEscape) {
      onClose();
    }
  }, [closeOnEscape, onClose]);

  // Handle overlay click — only when the click target is the overlay itself,
  // not a child of the dialog (hence the currentTarget check).
  const handleOverlayClick = useCallback((e: MouseEvent) => {
    if (closeOnOverlayClick && e.target === e.currentTarget) {
      onClose();
    }
  }, [closeOnOverlayClick, onClose]);

  // Add/remove event listeners and body scroll lock.
  // NOTE(review): the cleanup unconditionally resets body overflow to '', so
  // it would clobber a scroll lock owned by another concurrently-open modal —
  // confirm modals are never nested before reusing this elsewhere.
  useEffect(() => {
    if (isOpen) {
      document.addEventListener('keydown', handleKeyDown);
      document.body.style.overflow = 'hidden';
    }
    return () => {
      document.removeEventListener('keydown', handleKeyDown);
      document.body.style.overflow = '';
    };
  }, [isOpen, handleKeyDown]);

  if (!isOpen) return null;

  return (
    <div className="modal-overlay" onClick={handleOverlayClick as unknown as JSX.MouseEventHandler<HTMLDivElement>}>
      <div className={`modal modal-${size}`} role="dialog" aria-modal="true" aria-labelledby={title ? 'modal-title' : undefined}>
        {(title || showCloseButton) && (
          <div className="modal-header">
            {title && <h2 id="modal-title" className="modal-title">{title}</h2>}
            {showCloseButton && (
              <button className="modal-close" onClick={onClose} aria-label="Close modal">
                <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                  <line x1="18" y1="6" x2="6" y2="18" />
                  <line x1="6" y1="6" x2="18" y2="18" />
                </svg>
              </button>
            )}
          </div>
        )}
        <div className="modal-content">
          {children}
        </div>
        {footer && (
          <div className="modal-footer">
            {footer}
          </div>
        )}
      </div>
    </div>
  );
}
// Confirm dialog helper
export interface ConfirmDialogProps {
isOpen: boolean;
onClose: () => void;
onConfirm: () => void;
title: string;
message: string;
confirmText?: string;
cancelText?: string;
variant?: 'danger' | 'warning' | 'default';
loading?: boolean;
}
export function ConfirmDialog({
isOpen,
onClose,
onConfirm,
title,
message,
confirmText = 'Confirm',
cancelText = 'Cancel',
variant = 'default',
loading = false
}: ConfirmDialogProps) {
return (
<Modal
isOpen={isOpen}
onClose={onClose}
title={title}
size="sm"
footer={
<>
<Button variant="ghost" onClick={onClose} disabled={loading}>
{cancelText}
</Button>
<Button
variant={variant === 'danger' ? 'danger' : 'primary'}
onClick={onConfirm}
loading={loading}
>
{confirmText}
</Button>
</>
}
>
<p className="confirm-message">{message}</p>
</Modal>
);
}
// Hook for managing modal state
export function useModal() {
const isOpen = signal(false);
const open = () => { isOpen.value = true; };
const close = () => { isOpen.value = false; };
const toggle = () => { isOpen.value = !isOpen.value; };
return { isOpen: isOpen.value, open, close, toggle };
}

View File

@@ -50,11 +50,12 @@ export const TEAM_CONFIGS: Record<TeamId, TeamConfig> = {
description: 'Design consistency & token validation',
tools: [
{ id: 'dashboard', name: 'Dashboard', description: 'Team metrics and quick actions' },
{ id: 'live-canvas', name: 'Live Canvas', description: 'AI-powered component generator - your own Figma Make' },
{ id: 'figma-files', name: 'Figma Files', description: 'Manage connected Figma files' },
{ id: 'figma-plugin', name: 'Figma Plugin', description: 'Export tokens/assets/components from Figma' },
{ id: 'token-list', name: 'Token List', description: 'View all design tokens' },
{ id: 'asset-list', name: 'Asset List', description: 'Gallery of design assets' },
{ id: 'component-list', name: 'Component List', description: 'Design system components' },
{ id: 'navigation-demos', name: 'Navigation Demos', description: 'Generate navigation flow demos' }
{ id: 'component-list', name: 'Component List', description: 'Design system components' }
],
panels: ['metrics', 'diff', 'accessibility', 'screenshots', 'chat'],
metrics: ['figmaFiles', 'syncedFiles', 'pendingSync', 'designTokens'],
@@ -69,8 +70,8 @@ export const TEAM_CONFIGS: Record<TeamId, TeamConfig> = {
{ id: 'figma-live-compare', name: 'Figma vs Live', description: 'QA validation: Figma design vs live implementation' },
{ id: 'esre-editor', name: 'ESRE Editor', description: 'Edit Explicit Style Requirements and Expectations' },
{ id: 'console-viewer', name: 'Console Viewer', description: 'Monitor browser console logs', mcpTool: 'browser_get_logs' },
{ id: 'network-monitor', name: 'Network Monitor', description: 'Track network requests', mcpTool: 'devtools_network_requests' },
{ id: 'error-tracker', name: 'Error Tracker', description: 'Track uncaught exceptions', mcpTool: 'browser_get_errors' }
{ id: 'network-monitor', name: 'Network Monitor', description: 'Track network requests in real-time' },
{ id: 'test-results', name: 'Test Results', description: 'View ESRE test results and history' }
],
panels: ['metrics', 'console', 'network', 'tests', 'chat'],
metrics: ['healthScore', 'esreDefinitions', 'testsRun', 'testsPassed'],
@@ -84,6 +85,7 @@ export const TEAM_CONFIGS: Record<TeamId, TeamConfig> = {
{ id: 'settings', name: 'System Settings', description: 'Configure DSS hostname, port, and setup type' },
{ id: 'projects', name: 'Projects', description: 'Create and manage design system projects' },
{ id: 'integrations', name: 'Integrations', description: 'Configure Figma, Jira, and other integrations' },
{ id: 'mcp-tools', name: 'MCP Tools', description: 'View and execute MCP tools for AI assistants' },
{ id: 'audit-log', name: 'Audit Log', description: 'View all system activity' },
{ id: 'cache-management', name: 'Cache Management', description: 'Clear and manage system cache' },
{ id: 'health-monitor', name: 'Health Monitor', description: 'System health dashboard' },

View File

@@ -6,7 +6,7 @@ import { Badge } from '../components/base/Badge';
import { Input, Select } from '../components/base/Input';
import { Spinner } from '../components/base/Spinner';
import { endpoints } from '../api/client';
import type { Project, RuntimeConfig, AuditEntry, SystemHealth, Service } from '../api/types';
import type { Project, RuntimeConfig, AuditEntry, SystemHealth, Service, MCPTool } from '../api/types';
import './Workdesk.css';
interface AdminWorkdeskProps {
@@ -21,6 +21,7 @@ export default function AdminWorkdesk({ activeTool }: AdminWorkdeskProps) {
const toolViews: Record<string, JSX.Element> = {
'projects': <ProjectsTool />,
'integrations': <IntegrationsTool />,
'mcp-tools': <MCPToolsTool />,
'audit-log': <AuditLogTool />,
'cache-management': <CacheManagementTool />,
'health-monitor': <HealthMonitorTool />,
@@ -448,6 +449,173 @@ function IntegrationsTool() {
);
}
/**
 * Admin workdesk tool: lists the MCP tools exposed by the backend, lets the
 * operator fill in a selected tool's input-schema parameters, execute it,
 * and inspect the JSON result.
 */
function MCPToolsTool() {
  const [tools, setTools] = useState<MCPTool[]>([]);
  const [loading, setLoading] = useState(true);
  const [selectedTool, setSelectedTool] = useState<MCPTool | null>(null);
  const [executing, setExecuting] = useState(false);
  // Parameter values keyed by schema property name; all captured as strings.
  // NOTE(review): values are sent as strings even for non-string schema
  // types — confirm the backend coerces them.
  const [params, setParams] = useState<Record<string, string>>({});
  const [result, setResult] = useState<unknown | null>(null);
  const [error, setError] = useState<string | null>(null);
  const [mcpStatus, setMcpStatus] = useState<{ connected: boolean; tools: number } | null>(null);

  useEffect(() => {
    loadTools();
  }, []);

  // Fetch the tool list and connection status in parallel; allSettled means
  // a failure of one call does not discard the other's result.
  async function loadTools() {
    setLoading(true);
    try {
      const [toolsResult, statusResult] = await Promise.allSettled([
        endpoints.mcp.tools(),
        endpoints.mcp.status()
      ]);
      if (toolsResult.status === 'fulfilled') {
        setTools(toolsResult.value);
      }
      if (statusResult.status === 'fulfilled') {
        setMcpStatus(statusResult.value);
      }
    } catch (err) {
      console.error('Failed to load MCP tools:', err);
    } finally {
      setLoading(false);
    }
  }

  // Execute the selected tool with the current parameter values.
  async function handleExecute() {
    if (!selectedTool) return;
    setExecuting(true);
    setError(null);
    setResult(null);
    try {
      const response = await endpoints.mcp.execute(selectedTool.name, params);
      setResult(response);
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Execution failed');
    } finally {
      setExecuting(false);
    }
  }

  // Selecting a tool resets parameters and any previous result/error.
  function handleSelectTool(tool: MCPTool) {
    setSelectedTool(tool);
    setParams({});
    setResult(null);
    setError(null);
  }

  if (loading) {
    return (
      <div className="workdesk">
        <div className="workdesk-loading">
          <Spinner size="lg" />
          <span>Loading MCP tools...</span>
        </div>
      </div>
    );
  }

  return (
    <div className="workdesk">
      <div className="workdesk-header">
        <h1 className="workdesk-title">MCP Tools</h1>
        <p className="workdesk-subtitle">Model Context Protocol tools for AI assistants</p>
      </div>
      {/* MCP Status */}
      {mcpStatus && (
        <div className={`connection-status ${mcpStatus.connected ? 'connected' : 'disconnected'}`}>
          <Badge variant={mcpStatus.connected ? 'success' : 'error'} size="sm">
            MCP: {mcpStatus.connected ? 'Connected' : 'Not Connected'}
          </Badge>
          <span className="tools-count">{mcpStatus.tools} tools available</span>
        </div>
      )}
      {/* Tools List */}
      <Card variant="bordered" padding="md">
        <CardHeader
          title="Available Tools"
          subtitle={`${tools.length} tools`}
          action={<Button variant="ghost" size="sm" onClick={loadTools}>Refresh</Button>}
        />
        <CardContent>
          {tools.length === 0 ? (
            <p className="text-muted">No MCP tools available</p>
          ) : (
            <div className="mcp-tools-list">
              {tools.map(tool => (
                <div
                  key={tool.name}
                  className={`mcp-tool-item ${selectedTool?.name === tool.name ? 'selected' : ''}`}
                  onClick={() => handleSelectTool(tool)}
                >
                  <div className="mcp-tool-info">
                    <span className="mcp-tool-name">{tool.name}</span>
                    <span className="mcp-tool-desc">{tool.description}</span>
                  </div>
                  {tool.category && <Badge size="sm">{tool.category}</Badge>}
                </div>
              ))}
            </div>
          )}
        </CardContent>
      </Card>
      {/* Tool Execution — one text input per property in the tool's input schema;
          required properties are marked with an asterisk. */}
      {selectedTool && (
        <Card variant="bordered" padding="md">
          <CardHeader
            title={selectedTool.name}
            subtitle={selectedTool.description}
          />
          <CardContent>
            <div className="mcp-tool-params">
              {Object.entries(selectedTool.input_schema.properties || {}).map(([key, prop]) => (
                <Input
                  key={key}
                  label={`${key}${selectedTool.input_schema.required?.includes(key) ? ' *' : ''}`}
                  value={params[key] || ''}
                  onChange={(e) => setParams(p => ({ ...p, [key]: (e.target as HTMLInputElement).value }))}
                  placeholder={prop.description || `Enter ${key}`}
                  hint={prop.type}
                  fullWidth
                />
              ))}
            </div>
            {error && (
              <div className="form-error">
                <Badge variant="error">{error}</Badge>
              </div>
            )}
          </CardContent>
          <CardFooter>
            <Button variant="primary" onClick={handleExecute} loading={executing}>
              Execute Tool
            </Button>
          </CardFooter>
        </Card>
      )}
      {/* Results */}
      {result && (
        <Card variant="bordered" padding="md">
          <CardHeader title="Execution Result" />
          <CardContent>
            <pre className="mcp-result">
              {JSON.stringify(result, null, 2)}
            </pre>
          </CardContent>
        </Card>
      )}
    </div>
  );
}
function AuditLogTool() {
const [entries, setEntries] = useState<AuditEntry[]>([]);
const [loading, setLoading] = useState(true);

View File

@@ -22,6 +22,7 @@ export default function QAWorkdesk({ activeTool }: QAWorkdeskProps) {
const toolViews: Record<string, JSX.Element> = {
'esre-editor': <ESREEditorTool />,
'console-viewer': <ConsoleViewerTool />,
'network-monitor': <NetworkMonitorTool />,
'figma-live-compare': <FigmaLiveCompareTool />,
'test-results': <TestResultsTool />,
};
@@ -532,6 +533,147 @@ function ConsoleViewerTool() {
);
}
/**
 * QA workdesk tool: monkey-patches window.fetch while capturing is enabled
 * and records method, URL, status, duration, and payload size for each
 * request. The patch is removed (original fetch restored) when capture is
 * stopped or the tool unmounts.
 *
 * BUG FIX: the original used `Date.now()` directly as the request id, so two
 * requests started within the same millisecond received the same id and the
 * later response update was applied to the wrong row (Array.prototype.map
 * matches by id). A clock-seeded monotonic counter keeps ids unique within a
 * capture session and, in practice, across sessions too.
 */
function NetworkMonitorTool() {
  // Captured requests, newest appended; capped at the last 100 entries.
  const [requests, setRequests] = useState<Array<{
    id: number;
    method: string;
    url: string;
    status: number | null;    // null = still in flight, 0 = network failure
    duration: number | null;  // milliseconds, filled when the request settles
    size: string | null;      // from Content-Length, rendered as "<n>KB"
    type: string;
    timestamp: string;
  }>>([]);
  const [filter, setFilter] = useState('all');
  const [isCapturing, setIsCapturing] = useState(false);

  useEffect(() => {
    if (!isCapturing) return;
    // Intercept fetch requests
    const originalFetch = window.fetch;
    // Monotonic id source (see BUG FIX note above): seeded with the wall
    // clock, incremented per request.
    let nextId = Date.now();
    window.fetch = async function(...args) {
      const startTime = performance.now();
      const url = typeof args[0] === 'string' ? args[0] : (args[0] as Request).url;
      const method = typeof args[0] === 'string' ? (args[1]?.method || 'GET') : (args[0] as Request).method || 'GET';
      const requestId = nextId++;
      // Add pending request, keeping at most the last 100 entries.
      setRequests(prev => [...prev.slice(-99), {
        id: requestId,
        method: method.toUpperCase(),
        url,
        status: null,
        duration: null,
        size: null,
        type: 'fetch',
        timestamp: new Date().toLocaleTimeString()
      }]);
      try {
        const response = await originalFetch.apply(this, args);
        const duration = Math.round(performance.now() - startTime);
        const contentLength = response.headers.get('content-length');
        // Update the matching row with response data.
        setRequests(prev => prev.map(r =>
          r.id === requestId
            ? { ...r, status: response.status, duration, size: contentLength ? `${Math.round(parseInt(contentLength) / 1024)}KB` : null }
            : r
        ));
        return response;
      } catch (error) {
        // Network-level failure: mark with status 0, then rethrow so callers
        // still observe the error.
        setRequests(prev => prev.map(r =>
          r.id === requestId
            ? { ...r, status: 0, duration: Math.round(performance.now() - startTime) }
            : r
        ));
        throw error;
      }
    };
    // Restore the untouched fetch when capture stops or the tool unmounts.
    return () => {
      window.fetch = originalFetch;
    };
  }, [isCapturing]);

  // 'success' = 2xx/3xx; 'error' = 4xx/5xx, network failure (0), or pending.
  const filteredRequests = filter === 'all'
    ? requests
    : requests.filter(r => {
        if (filter === 'success') return r.status !== null && r.status >= 200 && r.status < 400;
        if (filter === 'error') return r.status === null || r.status === 0 || r.status >= 400;
        return true;
      });

  return (
    <div className="workdesk">
      <div className="workdesk-header">
        <h1 className="workdesk-title">Network Monitor</h1>
        <p className="workdesk-subtitle">Track network requests in real-time</p>
      </div>
      <Card variant="bordered" padding="md">
        <CardHeader
          title="Network Requests"
          action={
            <div className="network-controls">
              <Select
                size="sm"
                value={filter}
                onChange={(e) => setFilter((e.target as HTMLSelectElement).value)}
                options={[
                  { value: 'all', label: 'All' },
                  { value: 'success', label: 'Success' },
                  { value: 'error', label: 'Errors' }
                ]}
              />
              <Button
                variant={isCapturing ? 'danger' : 'primary'}
                size="sm"
                onClick={() => setIsCapturing(!isCapturing)}
              >
                {isCapturing ? 'Stop Capture' : 'Start Capture'}
              </Button>
              <Button variant="ghost" size="sm" onClick={() => setRequests([])}>Clear</Button>
            </div>
          }
        />
        <CardContent>
          {!isCapturing && requests.length === 0 ? (
            <p className="text-muted">Click "Start Capture" to begin monitoring network requests</p>
          ) : (
            <div className="network-list">
              {filteredRequests.length === 0 ? (
                <p className="text-muted">No requests captured yet</p>
              ) : (
                filteredRequests.map(request => (
                  <div key={request.id} className={`network-item ${request.status === null ? 'pending' : request.status >= 400 || request.status === 0 ? 'error' : 'success'}`}>
                    <Badge
                      variant={request.status === null ? 'warning' : request.status >= 400 || request.status === 0 ? 'error' : 'success'}
                      size="sm"
                    >
                      {request.method}
                    </Badge>
                    <span className="network-url">{request.url}</span>
                    <span className="network-status">
                      {request.status === null ? 'Pending' : request.status === 0 ? 'Error' : request.status}
                    </span>
                    <span className="network-duration">{request.duration ? `${request.duration}ms` : '-'}</span>
                    <span className="network-size">{request.size || '-'}</span>
                    <span className="network-time">{request.timestamp}</span>
                  </div>
                ))
              )}
            </div>
          )}
        </CardContent>
      </Card>
    </div>
  );
}
function FigmaLiveCompareTool() {
const [figmaUrl, setFigmaUrl] = useState('');
const [liveUrl, setLiveUrl] = useState('');

View File

@@ -1,5 +1,5 @@
import { JSX } from 'preact';
import { useState, useEffect } from 'preact/hooks';
import { useState, useEffect, useRef } from 'preact/hooks';
import { Card, CardHeader, CardContent } from '../components/base/Card';
import { Button } from '../components/base/Button';
import { Badge } from '../components/base/Badge';
@@ -21,8 +21,11 @@ export default function UXWorkdesk({ activeTool }: UXWorkdeskProps) {
const toolViews: Record<string, JSX.Element> = {
'token-list': <TokenListTool />,
'asset-list': <AssetListTool />,
'component-list': <ComponentListTool />,
'figma-plugin': <FigmaPluginTool />,
'figma-files': <FigmaFilesTool />,
'live-canvas': <LiveCanvasTool />,
};
return toolViews[activeTool] || <ToolPlaceholder name={activeTool} />;
@@ -492,6 +495,228 @@ function FigmaFilesTool() {
);
}
/**
 * UX workdesk tool: searchable, type-filterable gallery of design assets.
 * Loads styles extracted from the current project's first Figma file; falls
 * back to hard-coded demo assets when loading fails.
 */
function AssetListTool() {
  const [assets, setAssets] = useState<Array<{
    id: string;
    name: string;
    type: 'icon' | 'image' | 'illustration';
    format: string;
    size: string;
    url?: string;
  }>>([]);
  const [loading, setLoading] = useState(true);
  const [searchTerm, setSearchTerm] = useState('');
  const [typeFilter, setTypeFilter] = useState('all');

  useEffect(() => {
    loadAssets();
  }, []);

  async function loadAssets() {
    setLoading(true);
    try {
      const projectId = currentProject.value?.id;
      if (projectId) {
        // Try to load assets from Figma extraction.
        // NOTE(review): only the first connected Figma file is queried —
        // confirm whether remaining files should contribute assets too.
        const files = await endpoints.projects.figmaFiles(projectId);
        if (files.length > 0) {
          const result = await endpoints.figma.extractStyles(files[0].file_key);
          // Transform styles into assets.
          // NOTE(review): 'effect'/'paint' styles are all presented as
          // type 'icon' with format 'svg' — placeholder mapping, verify.
          const extractedAssets = result.items
            .filter(item => item.type === 'effect' || item.type === 'paint')
            .map((item, idx) => ({
              id: String(idx),
              name: item.name,
              type: 'icon' as const,
              format: 'svg',
              size: '-'
            }));
          setAssets(extractedAssets);
        }
      }
    } catch (err) {
      console.error('Failed to load assets:', err);
      // Fallback demo data so the gallery is never empty after a failure.
      setAssets([
        { id: '1', name: 'icon-check', type: 'icon', format: 'svg', size: '24x24' },
        { id: '2', name: 'icon-close', type: 'icon', format: 'svg', size: '24x24' },
        { id: '3', name: 'icon-menu', type: 'icon', format: 'svg', size: '24x24' },
        { id: '4', name: 'logo-primary', type: 'image', format: 'png', size: '200x50' },
        { id: '5', name: 'hero-illustration', type: 'illustration', format: 'svg', size: '800x600' }
      ]);
    } finally {
      setLoading(false);
    }
  }

  // Case-insensitive name search combined with the type dropdown.
  const filteredAssets = assets.filter(asset => {
    const matchesSearch = !searchTerm || asset.name.toLowerCase().includes(searchTerm.toLowerCase());
    const matchesType = typeFilter === 'all' || asset.type === typeFilter;
    return matchesSearch && matchesType;
  });

  if (loading) {
    return (
      <div className="workdesk">
        <div className="workdesk-loading">
          <Spinner size="lg" />
          <span>Loading assets...</span>
        </div>
      </div>
    );
  }

  return (
    <div className="workdesk">
      <div className="workdesk-header">
        <h1 className="workdesk-title">Asset List</h1>
        <p className="workdesk-subtitle">Design assets and icons from Figma</p>
      </div>
      <Card variant="bordered" padding="md">
        <CardHeader
          title="Assets"
          subtitle={`${filteredAssets.length} assets`}
          action={
            <div className="asset-controls">
              <Input
                placeholder="Search assets..."
                size="sm"
                value={searchTerm}
                onChange={(e) => setSearchTerm((e.target as HTMLInputElement).value)}
              />
              <select
                className="select-sm"
                value={typeFilter}
                onChange={(e) => setTypeFilter((e.target as HTMLSelectElement).value)}
              >
                <option value="all">All Types</option>
                <option value="icon">Icons</option>
                <option value="image">Images</option>
                <option value="illustration">Illustrations</option>
              </select>
            </div>
          }
        />
        <CardContent>
          {filteredAssets.length === 0 ? (
            <p className="text-muted">No assets found</p>
          ) : (
            <div className="assets-grid">
              {filteredAssets.map(asset => (
                <div key={asset.id} className="asset-item">
                  {/* Placeholder glyphs per asset type (no thumbnails yet). */}
                  <div className="asset-preview">
                    <span className="asset-icon">{asset.type === 'icon' ? '\u25A1' : asset.type === 'image' ? '\u25A3' : '\u25A2'}</span>
                  </div>
                  <div className="asset-info">
                    <span className="asset-name">{asset.name}</span>
                    <span className="asset-meta">{asset.format} | {asset.size}</span>
                  </div>
                  <Badge size="sm">{asset.type}</Badge>
                </div>
              ))}
            </div>
          )}
        </CardContent>
      </Card>
    </div>
  );
}
/**
 * UX workdesk tool: lists the current project's design-system components
 * with variant counts and a ready / in-progress status badge.
 */
function ComponentListTool() {
  const [components, setComponents] = useState<Array<{
    id: string;
    name: string;
    description?: string;
    variants?: number;
    status: 'ready' | 'in-progress' | 'planned';
  }>>([]);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    loadComponents();
  }, []);

  async function loadComponents() {
    setLoading(true);
    try {
      const projectId = currentProject.value?.id;
      if (projectId) {
        const result = await endpoints.projects.components(projectId);
        setComponents(result.map(c => ({
          id: c.id,
          // Prefer the human-friendly display name when present.
          name: c.display_name || c.name,
          description: c.description,
          variants: c.variants?.length || 0,
          // The API type doesn't declare `status`, hence the cast.
          // NOTE(review): only 'active' maps to 'ready'; everything else
          // becomes 'in-progress' ('planned' is never produced) — confirm.
          status: (c as unknown as { status?: string }).status === 'active' ? 'ready' : 'in-progress'
        })));
      }
    } catch (err) {
      console.error('Failed to load components:', err);
      setComponents([]);
    } finally {
      setLoading(false);
    }
  }

  if (loading) {
    return (
      <div className="workdesk">
        <div className="workdesk-loading">
          <Spinner size="lg" />
          <span>Loading components...</span>
        </div>
      </div>
    );
  }

  return (
    <div className="workdesk">
      <div className="workdesk-header">
        <h1 className="workdesk-title">Component List</h1>
        <p className="workdesk-subtitle">Design system components</p>
      </div>
      <Card variant="bordered" padding="md">
        <CardHeader
          title="Components"
          subtitle={`${components.length} components`}
          action={<Button variant="ghost" size="sm" onClick={loadComponents}>Refresh</Button>}
        />
        <CardContent>
          {components.length === 0 ? (
            <p className="text-muted">No components found. Extract components from Figma first.</p>
          ) : (
            <div className="components-list">
              {components.map(component => (
                <div key={component.id} className="component-item">
                  <div className="component-info">
                    <span className="component-name">{component.name}</span>
                    {component.description && (
                      <span className="component-desc">{component.description}</span>
                    )}
                  </div>
                  <div className="component-meta">
                    {component.variants && component.variants > 0 && (
                      <span className="component-variants">{component.variants} variants</span>
                    )}
                    <Badge
                      variant={component.status === 'ready' ? 'success' : component.status === 'in-progress' ? 'warning' : 'default'}
                      size="sm"
                    >
                      {component.status}
                    </Badge>
                  </div>
                </div>
              ))}
            </div>
          )}
        </CardContent>
      </Card>
    </div>
  );
}
function FigmaPluginTool() {
return (
<div className="workdesk">
@@ -523,6 +748,317 @@ function FigmaPluginTool() {
);
}
// One AI-generated component kept in the Live Canvas history list.
interface GeneratedComponent {
  id: string;        // unique id of the form `gen-<timestamp>`
  prompt: string;    // the user prompt that produced the code
  code: string;      // raw JSX source extracted from the model response
  timestamp: number; // Date.now() at generation time
}
function LiveCanvasTool() {
const [prompt, setPrompt] = useState('');
const [isGenerating, setIsGenerating] = useState(false);
const [generatedCode, setGeneratedCode] = useState<string | null>(null);
const [error, setError] = useState<string | null>(null);
const [history, setHistory] = useState<GeneratedComponent[]>([]);
const [viewport, setViewport] = useState<'desktop' | 'tablet' | 'mobile'>('desktop');
const iframeRef = useRef<HTMLIFrameElement>(null);
const viewportSizes = {
desktop: { width: '100%', maxWidth: '1200px' },
tablet: { width: '768px', maxWidth: '768px' },
mobile: { width: '375px', maxWidth: '375px' }
};
async function handleGenerate() {
if (!prompt.trim()) return;
setIsGenerating(true);
setError(null);
try {
// Build the context for Claude
const context = {
team: 'ux',
request: 'generate_ui_component',
project: currentProject.value ? {
id: currentProject.value.id,
name: currentProject.value.name
} : null
};
// Create a detailed prompt for component generation
const componentPrompt = `Generate a React/Preact component for the following request. Return ONLY the JSX code that can be rendered directly in an iframe. Do not include imports or exports. Use inline styles or standard CSS class names. Make it visually complete and polished.
Request: ${prompt}
Requirements:
- Use modern, clean design principles
- Include proper spacing and typography
- Use a cohesive color scheme (prefer neutral/professional colors)
- Make it responsive
- Return ONLY the JSX code, nothing else`;
const response = await endpoints.claude.chat(componentPrompt, currentProject.value?.id, context);
if (response.message?.content) {
// Extract code from the response
let code = response.message.content;
// Try to extract code from markdown code blocks if present
const codeBlockMatch = code.match(/```(?:jsx?|tsx?|html)?\s*([\s\S]*?)```/);
if (codeBlockMatch) {
code = codeBlockMatch[1].trim();
}
setGeneratedCode(code);
// Add to history
const newComponent: GeneratedComponent = {
id: `gen-${Date.now()}`,
prompt: prompt,
code: code,
timestamp: Date.now()
};
setHistory(prev => [newComponent, ...prev.slice(0, 9)]);
// Render in iframe
renderInIframe(code);
} else {
setError('No response from Claude. Check your API key configuration.');
}
} catch (err) {
console.error('Generation failed:', err);
setError(err instanceof Error ? err.message : 'Failed to generate component');
} finally {
setIsGenerating(false);
}
}
function renderInIframe(code: string) {
  const iframe = iframeRef.current;
  if (!iframe) return;
  // Build the full HTML document for the preview. React, ReactDOM and Babel
  // standalone are loaded from unpkg so the generated JSX can be transpiled
  // directly in the browser.
  // NOTE(review): if the generated code itself contains a "</script>" tag it
  // will terminate the inline Babel script and break the preview — confirm
  // whether Claude output can ever contain one.
  const htmlContent = `
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <script src="https://unpkg.com/react@18/umd/react.development.js"></script>
  <script src="https://unpkg.com/react-dom@18/umd/react-dom.development.js"></script>
  <script src="https://unpkg.com/@babel/standalone/babel.min.js"></script>
  <style>
    * { box-sizing: border-box; margin: 0; padding: 0; }
    body {
      font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
      padding: 20px;
      background: #fff;
      min-height: 100vh;
    }
    #root { width: 100%; }
  </style>
</head>
<body>
  <div id="root"></div>
  <script type="text/babel">
    const { useState, useEffect, useRef } = React;
    function App() {
      return (
        <>
          ${code}
        </>
      );
    }
    ReactDOM.createRoot(document.getElementById('root')).render(<App />);
  </script>
</body>
</html>`;
  // BUG FIX: the preview iframe is rendered with sandbox="allow-scripts" and
  // WITHOUT "allow-same-origin", which puts its document in an opaque origin.
  // The parent page therefore cannot reach iframe.contentDocument, so the old
  // doc.open()/doc.write()/doc.close() path silently did nothing. Assigning
  // srcdoc is the supported way to inject a full document into a sandboxed
  // frame and works with allow-scripts.
  iframe.srcdoc = htmlContent;
}
function handleLoadFromHistory(item: GeneratedComponent) {
  // Restore a previous generation: repopulate the prompt box, swap the saved
  // code back in, and re-render it in the preview iframe.
  const { prompt: savedPrompt, code: savedCode } = item;
  setPrompt(savedPrompt);
  setGeneratedCode(savedCode);
  renderInIframe(savedCode);
}
function handleKeyDown(e: KeyboardEvent) {
  // Cmd+Enter (macOS) or Ctrl+Enter submits the prompt from the textarea.
  const isSubmitChord = (e.metaKey || e.ctrlKey) && e.key === 'Enter';
  if (!isSubmitChord) return;
  e.preventDefault();
  handleGenerate();
}
function handleRefresh() {
  // Re-render the current code in the iframe; no-op when nothing has been
  // generated yet.
  if (!generatedCode) return;
  renderInIframe(generatedCode);
}
function handleCopyCode() {
  // Copy the generated source to the clipboard.
  // Robustness fixes: the async Clipboard API only exists in secure contexts
  // (navigator.clipboard is undefined over plain http, so the old code threw
  // a TypeError there), and writeText() returns a promise whose rejection
  // (e.g. missing user activation / permission) was previously unhandled.
  if (!generatedCode) return;
  if (!navigator.clipboard?.writeText) {
    console.error('Clipboard API unavailable (insecure context?)');
    return;
  }
  navigator.clipboard.writeText(generatedCode).catch((err) => {
    console.error('Failed to copy code:', err);
  });
}
return (
<div className="workdesk live-canvas-workdesk">
<div className="workdesk-header">
<h1 className="workdesk-title">Live Canvas</h1>
<p className="workdesk-subtitle">Generate UI components with AI - your own Figma Make</p>
</div>
{/* Prompt Input Area */}
<Card variant="bordered" padding="md">
<CardHeader title="Build with AI" subtitle="Describe what you want to build" />
<CardContent>
<div className="canvas-prompt-area">
<textarea
className="canvas-prompt-input"
placeholder="Describe the component you want to build... e.g., 'A login form with email and password fields, a remember me checkbox, and a submit button with modern styling'"
value={prompt}
onInput={(e) => setPrompt((e.target as HTMLTextAreaElement).value)}
onKeyDown={handleKeyDown}
rows={3}
disabled={isGenerating}
/>
<div className="canvas-prompt-actions">
<span className="canvas-hint">Press Cmd+Enter to generate</span>
<Button
variant="primary"
onClick={handleGenerate}
loading={isGenerating}
disabled={!prompt.trim()}
>
{isGenerating ? 'Generating...' : 'Generate Component'}
</Button>
</div>
</div>
{error && (
<div className="canvas-error">
<Badge variant="error">{error}</Badge>
</div>
)}
</CardContent>
</Card>
{/* Canvas Preview Area */}
<Card variant="bordered" padding="md" className="canvas-preview-card">
<CardHeader
title="Preview"
subtitle={generatedCode ? 'Live component preview' : 'Generate a component to see preview'}
action={
<div className="canvas-toolbar">
<div className="viewport-switcher">
<button
className={`viewport-btn ${viewport === 'desktop' ? 'active' : ''}`}
onClick={() => setViewport('desktop')}
title="Desktop"
>
<span>\u25A1</span>
</button>
<button
className={`viewport-btn ${viewport === 'tablet' ? 'active' : ''}`}
onClick={() => setViewport('tablet')}
title="Tablet"
>
<span>\u25A3</span>
</button>
<button
className={`viewport-btn ${viewport === 'mobile' ? 'active' : ''}`}
onClick={() => setViewport('mobile')}
title="Mobile"
>
<span>\u25AF</span>
</button>
</div>
<Button variant="ghost" size="sm" onClick={handleRefresh} disabled={!generatedCode}>
Refresh
</Button>
<Button variant="ghost" size="sm" onClick={handleCopyCode} disabled={!generatedCode}>
Copy Code
</Button>
</div>
}
/>
<CardContent>
<div className="canvas-frame-container">
<div
className="canvas-frame"
style={{
width: viewportSizes[viewport].width,
maxWidth: viewportSizes[viewport].maxWidth
}}
>
{generatedCode ? (
<iframe
ref={iframeRef}
className="canvas-iframe"
title="Component Preview"
sandbox="allow-scripts"
/>
) : (
<div className="canvas-placeholder">
<div className="canvas-placeholder-icon">\u2728</div>
<p>Your generated component will appear here</p>
<p className="text-muted">Try: "A pricing card with three tiers"</p>
</div>
)}
</div>
</div>
</CardContent>
</Card>
{/* Code Panel */}
{generatedCode && (
<Card variant="bordered" padding="md">
<CardHeader
title="Generated Code"
action={<Button variant="ghost" size="sm" onClick={handleCopyCode}>Copy</Button>}
/>
<CardContent>
<pre className="canvas-code-preview">
<code>{generatedCode}</code>
</pre>
</CardContent>
</Card>
)}
{/* History */}
{history.length > 0 && (
<Card variant="bordered" padding="md">
<CardHeader title="Recent Generations" subtitle={`${history.length} items`} />
<CardContent>
<div className="canvas-history">
{history.map(item => (
<div
key={item.id}
className="canvas-history-item"
onClick={() => handleLoadFromHistory(item)}
>
<span className="history-prompt">{item.prompt.slice(0, 60)}...</span>
<span className="history-time">{formatTimeAgo(new Date(item.timestamp).toISOString())}</span>
</div>
))}
</div>
</CardContent>
</Card>
)}
</div>
);
}
function ToolPlaceholder({ name }: { name: string }) {
return (
<div className="workdesk">

View File

@@ -621,3 +621,701 @@
.preview-details strong {
color: var(--color-foreground);
}
/* ============ Network Monitor ============ */
.network-controls {
display: flex;
gap: var(--spacing-2);
align-items: center;
}
.network-list {
display: flex;
flex-direction: column;
gap: var(--spacing-2);
max-height: 400px;
overflow-y: auto;
}
.network-item {
display: flex;
align-items: center;
gap: var(--spacing-3);
padding: var(--spacing-2) var(--spacing-3);
background-color: var(--color-surface-0);
border-radius: var(--radius-md);
border-left: 3px solid var(--color-border);
font-size: var(--font-size-sm);
}
.network-item.pending { border-left-color: var(--color-warning); }
.network-item.success { border-left-color: var(--color-success); }
.network-item.error { border-left-color: var(--color-error); }
.network-url {
flex: 1;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
color: var(--color-foreground);
font-family: var(--font-family-mono);
font-size: var(--font-size-xs);
}
.network-status,
.network-duration,
.network-size,
.network-time {
font-size: var(--font-size-xs);
color: var(--color-muted-foreground);
white-space: nowrap;
}
.network-status { min-width: 50px; }
.network-duration { min-width: 60px; }
.network-size { min-width: 50px; }
.network-time { min-width: 70px; }
/* ============ MCP Tools ============ */
.mcp-tools-list {
display: flex;
flex-direction: column;
gap: var(--spacing-2);
max-height: 300px;
overflow-y: auto;
}
.mcp-tool-item {
display: flex;
align-items: center;
gap: var(--spacing-3);
padding: var(--spacing-3);
background-color: var(--color-surface-0);
border-radius: var(--radius-md);
border: 1px solid var(--color-border);
cursor: pointer;
transition: all var(--duration-fast) var(--timing-out);
}
.mcp-tool-item:hover {
background-color: var(--color-surface-1);
border-color: var(--color-ring);
}
.mcp-tool-item.selected {
border-color: var(--color-primary);
background-color: var(--color-surface-1);
}
.mcp-tool-info {
flex: 1;
display: flex;
flex-direction: column;
gap: var(--spacing-0-5);
}
.mcp-tool-name {
font-weight: var(--font-weight-medium);
color: var(--color-foreground);
}
.mcp-tool-desc {
font-size: var(--font-size-xs);
color: var(--color-muted-foreground);
}
.mcp-tool-params {
display: flex;
flex-direction: column;
gap: var(--spacing-4);
}
.mcp-result {
padding: var(--spacing-4);
background-color: var(--color-surface-2);
border-radius: var(--radius-md);
font-family: var(--font-family-mono);
font-size: var(--font-size-xs);
overflow-x: auto;
max-height: 300px;
overflow-y: auto;
white-space: pre-wrap;
word-break: break-all;
}
.tools-count {
font-size: var(--font-size-sm);
color: var(--color-muted-foreground);
margin-left: var(--spacing-3);
}
/* ============ Asset List ============ */
.asset-controls {
display: flex;
gap: var(--spacing-2);
align-items: center;
}
.select-sm {
height: 32px;
padding: 0 var(--spacing-2);
font-size: var(--font-size-sm);
background-color: var(--color-surface-0);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
color: var(--color-foreground);
}
.assets-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
gap: var(--spacing-4);
}
.asset-item {
display: flex;
flex-direction: column;
gap: var(--spacing-2);
padding: var(--spacing-3);
background-color: var(--color-surface-0);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
}
.asset-preview {
display: flex;
align-items: center;
justify-content: center;
height: 80px;
background-color: var(--color-surface-1);
border-radius: var(--radius-sm);
}
.asset-icon {
font-size: var(--font-size-3xl);
color: var(--color-muted-foreground);
}
.asset-info {
display: flex;
flex-direction: column;
gap: var(--spacing-0-5);
}
.asset-name {
font-weight: var(--font-weight-medium);
font-size: var(--font-size-sm);
}
.asset-meta {
font-size: var(--font-size-xs);
color: var(--color-muted-foreground);
}
/* ============ Component List ============ */
.components-list {
display: flex;
flex-direction: column;
gap: var(--spacing-3);
}
.component-item {
display: flex;
align-items: center;
justify-content: space-between;
gap: var(--spacing-4);
padding: var(--spacing-3);
background-color: var(--color-surface-0);
border-radius: var(--radius-md);
border: 1px solid var(--color-border);
}
.component-info {
flex: 1;
display: flex;
flex-direction: column;
gap: var(--spacing-0-5);
}
.component-name {
font-weight: var(--font-weight-medium);
}
.component-desc {
font-size: var(--font-size-xs);
color: var(--color-muted-foreground);
}
.component-meta {
display: flex;
align-items: center;
gap: var(--spacing-2);
}
.component-variants {
font-size: var(--font-size-xs);
color: var(--color-muted-foreground);
}
/* ============ Loading State ============ */
.workdesk-loading {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: var(--spacing-4);
min-height: 300px;
color: var(--color-muted-foreground);
}
/* ============ Connection Status ============ */
.connection-status {
display: flex;
align-items: center;
gap: var(--spacing-3);
padding: var(--spacing-3) var(--spacing-4);
background-color: var(--color-surface-1);
border-radius: var(--radius-md);
border: 1px solid var(--color-border);
}
/* ============ Code Preview ============ */
.code-preview {
padding: var(--spacing-4);
background-color: var(--color-surface-2);
border-radius: var(--radius-md);
font-family: var(--font-family-mono);
font-size: var(--font-size-xs);
overflow-x: auto;
max-height: 300px;
overflow-y: auto;
white-space: pre-wrap;
word-break: break-all;
}
/* ============ Token Drift ============ */
.drift-list {
display: flex;
flex-direction: column;
gap: var(--spacing-3);
}
.drift-item {
display: flex;
flex-direction: column;
gap: var(--spacing-2);
padding: var(--spacing-3);
background-color: var(--color-surface-0);
border-radius: var(--radius-md);
border: 1px solid var(--color-border);
}
.drift-info {
display: flex;
flex-direction: column;
gap: var(--spacing-0-5);
}
.drift-token {
font-weight: var(--font-weight-medium);
}
.drift-file {
font-size: var(--font-size-xs);
color: var(--color-muted-foreground);
font-family: var(--font-family-mono);
}
.drift-values {
display: flex;
gap: var(--spacing-4);
font-size: var(--font-size-sm);
}
.drift-expected,
.drift-actual {
padding: var(--spacing-1) var(--spacing-2);
background-color: var(--color-surface-1);
border-radius: var(--radius-sm);
font-family: var(--font-family-mono);
font-size: var(--font-size-xs);
}
.drift-actions {
display: flex;
align-items: center;
justify-content: space-between;
}
/* ============ Health Overview ============ */
.health-overview {
display: flex;
align-items: center;
justify-content: space-between;
}
.health-timestamp {
font-size: var(--font-size-sm);
color: var(--color-muted-foreground);
}
.service-status-indicator[data-status="warning"] {
background-color: var(--color-warning);
}
/* ============ Alerts ============ */
.alert {
padding: var(--spacing-3) var(--spacing-4);
border-radius: var(--radius-md);
margin-bottom: var(--spacing-4);
}
.alert-success {
background-color: hsla(142, 76%, 36%, 0.1);
border: 1px solid var(--color-success);
}
.alert-error {
background-color: hsla(0, 84%, 60%, 0.1);
border: 1px solid var(--color-error);
}
/* ============ Form Error ============ */
.form-error {
margin-top: var(--spacing-3);
}
/* ============ Responsive ============ */
@media (max-width: 768px) {
.workdesk {
gap: var(--spacing-4);
}
.workdesk-title {
font-size: var(--font-size-xl);
}
.metrics-grid {
grid-template-columns: repeat(2, 1fr);
}
.metric-value {
font-size: var(--font-size-2xl);
}
.quick-actions-grid {
flex-direction: column;
}
.quick-actions-grid .btn {
width: 100%;
}
.figma-file-item,
.esre-item,
.project-item,
.component-item {
flex-direction: column;
align-items: flex-start;
}
.figma-file-status,
.esre-actions,
.project-actions,
.component-meta {
margin-top: var(--spacing-2);
}
.integrations-grid {
grid-template-columns: 1fr;
}
.cache-stats {
grid-template-columns: 1fr;
}
.audit-controls,
.console-controls,
.network-controls,
.asset-controls {
flex-wrap: wrap;
}
.audit-table {
font-size: var(--font-size-xs);
}
.audit-table th,
.audit-table td {
padding: var(--spacing-2);
}
.network-item {
flex-wrap: wrap;
}
.network-url {
flex-basis: 100%;
order: 1;
margin-top: var(--spacing-1);
}
.assets-grid {
grid-template-columns: repeat(2, 1fr);
}
.drift-values {
flex-direction: column;
gap: var(--spacing-2);
}
}
@media (max-width: 480px) {
.metrics-grid {
grid-template-columns: 1fr;
}
.assets-grid {
grid-template-columns: 1fr;
}
.test-item {
flex-direction: column;
align-items: flex-start;
}
.test-status {
margin-top: var(--spacing-2);
}
}
/* ===========================================
Live Canvas Tool Styles
=========================================== */
.live-canvas-workdesk {
max-width: 100%;
}
/* Prompt Area */
.canvas-prompt-area {
display: flex;
flex-direction: column;
gap: var(--spacing-3);
}
.canvas-prompt-input {
width: 100%;
min-height: 80px;
padding: var(--spacing-3);
font-family: inherit;
font-size: var(--font-size-base);
background-color: var(--color-surface-0);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
color: var(--color-foreground);
resize: vertical;
transition: border-color 0.15s ease;
}
.canvas-prompt-input:focus {
outline: none;
border-color: var(--color-primary);
}
.canvas-prompt-input::placeholder {
color: var(--color-muted-foreground);
}
.canvas-prompt-input:disabled {
opacity: 0.6;
cursor: not-allowed;
}
.canvas-prompt-actions {
display: flex;
align-items: center;
justify-content: space-between;
gap: var(--spacing-3);
}
.canvas-hint {
font-size: var(--font-size-sm);
color: var(--color-muted-foreground);
}
.canvas-error {
margin-top: var(--spacing-2);
}
/* Canvas Preview Card */
.canvas-preview-card {
flex: 1;
}
.canvas-toolbar {
display: flex;
align-items: center;
gap: var(--spacing-2);
}
.viewport-switcher {
display: flex;
align-items: center;
gap: var(--spacing-1);
padding: var(--spacing-1);
background-color: var(--color-surface-1);
border-radius: var(--radius-md);
}
.viewport-btn {
display: flex;
align-items: center;
justify-content: center;
width: 32px;
height: 28px;
padding: 0;
background: transparent;
border: none;
border-radius: var(--radius-sm);
color: var(--color-muted-foreground);
cursor: pointer;
transition: all 0.15s ease;
}
.viewport-btn:hover {
background-color: var(--color-surface-2);
color: var(--color-foreground);
}
.viewport-btn.active {
background-color: var(--color-primary);
color: var(--color-primary-foreground);
}
/* Canvas Frame */
.canvas-frame-container {
display: flex;
justify-content: center;
padding: var(--spacing-4);
background-color: var(--color-surface-1);
border-radius: var(--radius-md);
min-height: 400px;
}
.canvas-frame {
background-color: #ffffff;
border-radius: var(--radius-md);
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1);
transition: all 0.3s ease;
overflow: hidden;
}
.canvas-iframe {
width: 100%;
height: 500px;
border: none;
display: block;
}
.canvas-placeholder {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: var(--spacing-8);
text-align: center;
min-height: 400px;
color: var(--color-muted-foreground);
}
.canvas-placeholder-icon {
font-size: 48px;
margin-bottom: var(--spacing-4);
}
.canvas-placeholder p {
margin: var(--spacing-1) 0;
}
/* Code Preview */
.canvas-code-preview {
max-height: 300px;
overflow: auto;
padding: var(--spacing-4);
background-color: var(--color-surface-1);
border-radius: var(--radius-md);
font-family: ui-monospace, 'SF Mono', Menlo, Monaco, monospace;
font-size: var(--font-size-sm);
line-height: 1.6;
white-space: pre-wrap;
word-break: break-word;
}
.canvas-code-preview code {
color: var(--color-foreground);
}
/* History */
.canvas-history {
display: flex;
flex-direction: column;
gap: var(--spacing-2);
}
.canvas-history-item {
display: flex;
align-items: center;
justify-content: space-between;
padding: var(--spacing-3);
background-color: var(--color-surface-0);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
cursor: pointer;
transition: all 0.15s ease;
}
.canvas-history-item:hover {
border-color: var(--color-primary);
background-color: var(--color-surface-1);
}
.history-prompt {
font-size: var(--font-size-sm);
color: var(--color-foreground);
flex: 1;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.history-time {
font-size: var(--font-size-xs);
color: var(--color-muted-foreground);
margin-left: var(--spacing-3);
white-space: nowrap;
}
/* Responsive */
@media (max-width: 768px) {
.canvas-toolbar {
flex-wrap: wrap;
}
.canvas-prompt-actions {
flex-direction: column;
align-items: stretch;
}
.canvas-hint {
text-align: center;
}
.canvas-frame-container {
padding: var(--spacing-2);
}
.canvas-iframe {
height: 350px;
}
}

View File

@@ -57,11 +57,11 @@ export default defineConfig(({ mode }) => ({
},
server: {
host: '0.0.0.0', // Bind to all interfaces for nginx proxy compatibility
port: 3456,
port: 6221, // DSS Admin UI port
allowedHosts: ['dss.overbits.luz.uy', 'localhost', '.localhost'], // Allow external domain and localhost
proxy: {
'/api': {
target: 'http://localhost:8002',
target: 'http://localhost:6220', // DSS API port
changeOrigin: true
}
}

1
analyze Symbolic link
View File

@@ -0,0 +1 @@
dss/analyze

View File

@@ -7,6 +7,7 @@ Handles model-specific API calls and tool execution
import asyncio
import json
import os
import subprocess
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
@@ -39,19 +40,68 @@ class AIProvider(ABC):
class ClaudeProvider(AIProvider):
"""Anthropic Claude provider."""
# SoFi LLM Proxy configuration
PROXY_BASE_URL = "https://internal.sofitest.com/llm-proxy"
API_KEY_HELPER = os.path.expanduser("~/.local/bin/llm-proxy-keys")
def __init__(self):
self.api_key = os.getenv("ANTHROPIC_API_KEY")
self.base_url = os.getenv("ANTHROPIC_BASE_URL", self.PROXY_BASE_URL)
self.default_model = "claude-sonnet-4-5-20250929"
self._proxy_key = None
def _get_proxy_key(self) -> Optional[str]:
"""Get API key from SoFi LLM proxy helper script"""
if self._proxy_key:
return self._proxy_key
try:
if os.path.exists(self.API_KEY_HELPER):
result = subprocess.run(
[self.API_KEY_HELPER],
capture_output=True,
text=True,
timeout=10
)
if result.returncode == 0:
# Extract the key from output (last line with sk- prefix)
for line in result.stdout.strip().split('\n'):
if line.startswith('sk-'):
self._proxy_key = line.strip()
return self._proxy_key
except Exception as e:
print(f"Error getting proxy key: {e}")
return None
def is_available(self) -> bool:
"""Check if Claude is available."""
try:
from anthropic import Anthropic
return bool(self.api_key)
# Available if SDK is installed (proxy may have keys)
return True
except ImportError:
return False
def _create_client(self):
"""Create Anthropic client configured for SoFi proxy"""
from anthropic import Anthropic
import httpx
# Create httpx client that skips SSL verification (for corporate proxy)
http_client = httpx.Client(verify=False)
# Get API key: prefer env var, then proxy helper
api_key = self.api_key or self._get_proxy_key()
if not api_key:
raise ValueError("No API key available. Set ANTHROPIC_API_KEY or ensure llm-proxy-keys is installed.")
return Anthropic(
api_key=api_key,
base_url=self.base_url,
http_client=http_client
)
async def chat(
self,
message: str,
@@ -67,7 +117,7 @@ class ClaudeProvider(AIProvider):
if not self.is_available():
return {
"success": False,
"response": "Claude not available. Install anthropic SDK or set ANTHROPIC_API_KEY.",
"response": "Claude not available. Install anthropic SDK.",
"model": "error",
"tools_used": [],
"stop_reason": "error",
@@ -75,7 +125,17 @@ class ClaudeProvider(AIProvider):
from anthropic import Anthropic
client = Anthropic(api_key=self.api_key)
# Create client with SoFi proxy settings
try:
client = self._create_client()
except ValueError as e:
return {
"success": False,
"response": str(e),
"model": "error",
"tools_used": [],
"stop_reason": "error"
}
# Build messages
messages = []
@@ -99,8 +159,20 @@ class ClaudeProvider(AIProvider):
if tools:
api_params["tools"] = tools
# Initial call
response = await asyncio.to_thread(client.messages.create, **api_params)
# Make API call via SoFi proxy
try:
response = await asyncio.to_thread(
client.messages.create,
**api_params
)
except Exception as e:
return {
"success": False,
"response": f"Claude API error: {str(e)}",
"model": "error",
"tools_used": [],
"stop_reason": "error"
}
# Handle tool use loop
tools_used = []

View File

@@ -494,8 +494,7 @@ async def health():
if str(project_root) not in sys.path:
sys.path.insert(0, str(project_root))
from dss.mcp.handler import get_mcp_handler
from dss.mcp_server.handler import get_mcp_handler
handler = get_mcp_handler()
mcp_ok = handler is not None
except Exception as e:
@@ -2425,10 +2424,16 @@ async def claude_chat(request_data: ClaudeChatRequest):
"model": "error",
}
# Import MCP handler
from dss_mcp.handler import MCPContext, get_mcp_handler
mcp_handler = get_mcp_handler()
# Import MCP handler (may fail if database not migrated)
mcp_handler = None
MCPContext = None
try:
from dss_mcp.handler import get_mcp_handler, MCPContext as _MCPContext
MCPContext = _MCPContext
mcp_handler = get_mcp_handler()
except Exception as e:
# MCP handler not available, proceed without tools
enable_tools = False
# Build system prompt with design system context
system_prompt = """You are a design system assistant with access to DSS (Design System Server) tools.
@@ -2449,7 +2454,7 @@ RULES:
- Always provide actionable insights from tool data"""
# Add project context if available
if project_id:
if project_id and mcp_handler:
try:
project_context = await mcp_handler.get_project_context(project_id, user_id)
if project_context:
@@ -2462,6 +2467,8 @@ CURRENT PROJECT CONTEXT:
- Integrations: {', '.join(project_context.integrations.keys()) if project_context.integrations else 'None configured'}"""
except:
system_prompt += f"\n\nProject ID: {project_id} (context not loaded)"
elif project_id:
system_prompt += f"\n\nProject ID: {project_id}"
# Add user context
if context:
@@ -2477,11 +2484,16 @@ CURRENT PROJECT CONTEXT:
# Get tools if enabled
tools = None
if enable_tools and project_id:
if enable_tools and project_id and mcp_handler:
tools = mcp_handler.get_tools_for_claude()
# Create MCP context
mcp_context = MCPContext(project_id=project_id, user_id=user_id)
# Create MCP context (or None if MCP not available)
mcp_context = None
if MCPContext is not None:
mcp_context = MCPContext(
project_id=project_id,
user_id=user_id
)
# Call AI provider with all context
result = await provider.chat(
@@ -3087,7 +3099,8 @@ def kill_port(port: int, wait: float = 0.5) -> None:
if __name__ == "__main__":
import uvicorn
port = int(os.getenv("PORT", "3456"))
# DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226
port = int(os.getenv("DSS_API_PORT", "6220"))
host = os.getenv("HOST", "0.0.0.0")
# Kill any existing process on the port (twice to handle respawning)

View File

@@ -16,7 +16,8 @@ export class DSSApiClient {
constructor(options: ApiOptions = {}) {
const config = getConfig();
const port = options.port || config.port || 3456;
// DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226
const port = options.port || config.port || 6220;
this.baseUrl = options.baseUrl || `http://localhost:${port}/api`;
}

View File

@@ -9,11 +9,12 @@ import { existsSync, readFileSync, writeFileSync } from 'fs';
import { join } from 'path';
// Global user config (stored in home directory)
// DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226
const globalConfig = new Conf({
projectName: 'dss',
schema: {
figmaToken: { type: 'string' },
defaultPort: { type: 'number', default: 3456 },
defaultPort: { type: 'number', default: 6220 },
defaultFormat: { type: 'string', default: 'css' },
},
});

View File

@@ -9,11 +9,12 @@ import { existsSync, readFileSync, writeFileSync } from 'fs';
import { join } from 'path';
// Global user config (stored in home directory)
// DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226
const globalConfig = new Conf({
projectName: 'dss',
schema: {
figmaToken: { type: 'string' },
defaultPort: { type: 'number', default: 3456 },
defaultPort: { type: 'number', default: 6220 },
defaultFormat: { type: 'string', default: 'css' },
},
});

View File

@@ -0,0 +1 @@
../commands

View File

@@ -0,0 +1,16 @@
{
"name": "dss",
"description": "Design System Server marketplace - tools for design token management and component analysis",
"owner": {
"name": "overbits",
"email": "bruno@overbits.luz.uy"
},
"plugins": [
{
"name": "dss-claude-plugin",
"source": "./",
"description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components",
"version": "1.0.0"
}
]
}

View File

@@ -1,6 +1,6 @@
{
"name": "dss-claude-plugin",
"version": "1.0.0",
"version": "1.0.1",
"description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components",
"author": {
"name": "overbits",

View File

@@ -0,0 +1 @@
../servers

View File

@@ -1,22 +1,8 @@
{
"x-immutable-notice": {
"protected": true,
"reason": "MCP server configuration - maintains Claude Code integration stability",
"lastModified": "2025-12-09",
"bypassMethod": "Use 'DSS_IMMUTABLE_BYPASS=1 git commit' or commit message '[IMMUTABLE-UPDATE] reason'"
},
"mcpServers": {
"dss": {
"command": "python3",
"args": ["${CLAUDE_PLUGIN_ROOT}/servers/dss-mcp-server.py"],
"env": {
"PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/..:${CLAUDE_PLUGIN_ROOT}",
"DSS_HOME": "${CLAUDE_PLUGIN_ROOT}/../.dss",
"DSS_DATABASE": "${CLAUDE_PLUGIN_ROOT}/../.dss/dss.db",
"DSS_CACHE": "${CLAUDE_PLUGIN_ROOT}/../.dss/cache",
"DSS_BASE_PATH": "${CLAUDE_PLUGIN_ROOT}/.."
},
"description": "Design System Server MCP server providing design token and component analysis tools"
}
"reason": "MCP server configuration is handled by setup-mcp.sh - see .claude/mcp.json",
"lastModified": "2025-12-11",
"note": "Plugin commands/skills work without MCP. Run ./scripts/setup-mcp.sh to configure DSS MCP server."
}
}

View File

@@ -0,0 +1,20 @@
{
"name": "dss-claude-plugin",
"version": "1.0.1",
"description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components",
"author": {
"name": "overbits",
"url": "https://github.com/overbits"
},
"homepage": "https://dss.overbits.luz.uy",
"keywords": [
"design-system",
"tokens",
"css",
"scss",
"tailwind",
"figma",
"storybook"
],
"commands": "./commands/"
}

View File

@@ -6,7 +6,7 @@ Supports local development and remote team deployment.
Usage:
from dss import settings, Projects, Components
from dss.mcp import MCPServer
from dss.mcp_server import MCPServer
from dss.storage import Projects, Components, Tokens
"""

View File

@@ -1,8 +0,0 @@
"""
DSS MCP Server.
Model Context Protocol server for Design System Server.
Provides project-isolated context and tools to Claude chat instances.
"""
__version__ = "0.8.0"

View File

@@ -1,349 +0,0 @@
"""
DSS MCP Audit Module.
Tracks all operations for compliance, debugging, and audit trails.
Maintains immutable logs of all state-changing operations with before/after snapshots.
"""
import json
import uuid
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
class AuditEventType(Enum):
"""Types of auditable events."""
TOOL_CALL = "tool_call"
CREDENTIAL_ACCESS = "credential_access"
CREDENTIAL_CREATE = "credential_create"
CREDENTIAL_DELETE = "credential_delete"
PROJECT_CREATE = "project_create"
PROJECT_UPDATE = "project_update"
PROJECT_DELETE = "project_delete"
COMPONENT_SYNC = "component_sync"
TOKEN_SYNC = "token_sync"
STATE_TRANSITION = "state_transition"
ERROR = "error"
SECURITY_EVENT = "security_event"
class AuditLog:
"""
Persistent operation audit trail.
All operations are logged with:
- Full operation details
- User who performed it
- Timestamp
- Before/after state snapshots
- Result status
"""
@staticmethod
def log_operation(
event_type: AuditEventType,
operation_name: str,
operation_id: str,
user_id: Optional[str],
project_id: Optional[str],
args: Dict[str, Any],
result: Optional[Dict[str, Any]] = None,
error: Optional[str] = None,
before_state: Optional[Dict[str, Any]] = None,
after_state: Optional[Dict[str, Any]] = None,
) -> str:
"""
Log an operation to the audit trail.
Args:
event_type: Type of event
operation_name: Human-readable operation name
operation_id: Unique operation ID
user_id: User who performed the operation
project_id: Associated project ID
args: Operation arguments (will be scrubbed of sensitive data)
result: Operation result
error: Error message if operation failed
before_state: State before operation
after_state: State after operation
Returns:
Audit log entry ID
"""
audit_id = str(uuid.uuid4())
# Scrub sensitive data from args
scrubbed_args = AuditLog._scrub_sensitive_data(args)
with get_connection() as conn:
conn.execute(
"""
INSERT INTO audit_log (
id, event_type, operation_name, operation_id, user_id,
project_id, args, result, error, before_state, after_state,
created_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
audit_id,
event_type.value,
operation_name,
operation_id,
user_id,
project_id,
json.dumps(scrubbed_args),
json.dumps(result) if result else None,
error,
json.dumps(before_state) if before_state else None,
json.dumps(after_state) if after_state else None,
datetime.utcnow().isoformat(),
),
)
return audit_id
@staticmethod
def get_operation_history(
project_id: Optional[str] = None,
user_id: Optional[str] = None,
operation_name: Optional[str] = None,
limit: int = 100,
offset: int = 0,
) -> list:
"""
Get operation history with optional filtering.
Args:
project_id: Filter by project
user_id: Filter by user
operation_name: Filter by operation
limit: Number of records to return
offset: Pagination offset
Returns:
List of audit log entries
"""
with get_connection() as conn:
cursor = conn.cursor()
query = "SELECT * FROM audit_log WHERE 1=1"
params = []
if project_id:
query += " AND project_id = ?"
params.append(project_id)
if user_id:
query += " AND user_id = ?"
params.append(user_id)
if operation_name:
query += " AND operation_name = ?"
params.append(operation_name)
query += " ORDER BY created_at DESC LIMIT ? OFFSET ?"
params.extend([limit, offset])
cursor.execute(query, params)
return [dict(row) for row in cursor.fetchall()]
@staticmethod
def get_audit_trail(
start_date: datetime, end_date: datetime, event_type: Optional[str] = None
) -> list:
"""
Get audit trail for a date range.
Useful for compliance reports and security audits.
Args:
start_date: Start of date range
end_date: End of date range
event_type: Optional event type filter
Returns:
List of audit log entries
"""
with get_connection() as conn:
cursor = conn.cursor()
query = """
SELECT * FROM audit_log
WHERE created_at >= ? AND created_at <= ?
"""
params = [start_date.isoformat(), end_date.isoformat()]
if event_type:
query += " AND event_type = ?"
params.append(event_type)
query += " ORDER BY created_at DESC"
cursor.execute(query, params)
return [dict(row) for row in cursor.fetchall()]
@staticmethod
def get_user_activity(user_id: str, days: int = 30) -> Dict[str, Any]:
    """
    Get user activity summary for the past N days.

    Runs four aggregate queries over audit_log, all filtered by user_id
    and a cutoff timestamp derived from `days`.

    Args:
        user_id: User to analyze
        days: Number of past days to include

    Returns:
        Activity summary including operation counts and patterns
    """
    from datetime import timedelta

    # Cutoff instant; rows with created_at >= this are counted.
    start_date = datetime.utcnow() - timedelta(days=days)
    with get_connection() as conn:
        cursor = conn.cursor()
        # Get total operations
        cursor.execute(
            """
            SELECT COUNT(*) FROM audit_log
            WHERE user_id = ? AND created_at >= ?
            """,
            (user_id, start_date.isoformat()),
        )
        total_ops = cursor.fetchone()[0]
        # Get operations by type, most frequent first
        cursor.execute(
            """
            SELECT event_type, COUNT(*) as count
            FROM audit_log
            WHERE user_id = ? AND created_at >= ?
            GROUP BY event_type
            ORDER BY count DESC
            """,
            (user_id, start_date.isoformat()),
        )
        ops_by_type = {row[0]: row[1] for row in cursor.fetchall()}
        # Get error count (rows where an error was recorded)
        cursor.execute(
            """
            SELECT COUNT(*) FROM audit_log
            WHERE user_id = ? AND created_at >= ? AND error IS NOT NULL
            """,
            (user_id, start_date.isoformat()),
        )
        errors = cursor.fetchone()[0]
        # Get number of distinct projects the user touched
        cursor.execute(
            """
            SELECT COUNT(DISTINCT project_id) FROM audit_log
            WHERE user_id = ? AND created_at >= ?
            """,
            (user_id, start_date.isoformat()),
        )
        projects = cursor.fetchone()[0]
        return {
            "user_id": user_id,
            "days": days,
            "total_operations": total_ops,
            "operations_by_type": ops_by_type,
            "errors": errors,
            "projects_touched": projects,
            # Guard days == 0 to avoid ZeroDivisionError.
            "average_ops_per_day": round(total_ops / days, 2) if days > 0 else 0,
        }
@staticmethod
def search_audit_log(search_term: str, limit: int = 50) -> list:
    """
    Search audit log by operation name or error message.

    The term is matched as a literal substring: SQL LIKE wildcards
    (% and _) in the user-supplied term are escaped, so searching for
    e.g. "100%" no longer matches every row.

    Args:
        search_term: Term to search for (matched literally, case per
            SQLite LIKE semantics)
        limit: Maximum results

    Returns:
        List of matching audit entries, newest first
    """
    # Escape backslash first, then the LIKE metacharacters.
    escaped = (
        search_term.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
    )
    pattern = f"%{escaped}%"
    with get_connection() as conn:
        cursor = conn.cursor()
        cursor.execute(
            """
            SELECT * FROM audit_log
            WHERE operation_name LIKE ? ESCAPE '\\' OR error LIKE ? ESCAPE '\\'
            ORDER BY created_at DESC
            LIMIT ?
            """,
            (pattern, pattern, limit),
        )
        return [dict(row) for row in cursor.fetchall()]
@staticmethod
def _scrub_sensitive_data(data: Dict[str, Any]) -> Dict[str, Any]:
"""
Remove sensitive data from arguments for safe logging.
Removes API tokens, passwords, and other secrets.
"""
sensitive_keys = {
"token",
"api_key",
"secret",
"password",
"credential",
"auth",
"figma_token",
"encrypted_data",
}
scrubbed = {}
for key, value in data.items():
if any(sensitive in key.lower() for sensitive in sensitive_keys):
scrubbed[key] = "***REDACTED***"
elif isinstance(value, dict):
scrubbed[key] = AuditLog._scrub_sensitive_data(value)
elif isinstance(value, list):
scrubbed[key] = [
AuditLog._scrub_sensitive_data(item) if isinstance(item, dict) else item
for item in value
]
else:
scrubbed[key] = value
return scrubbed
@staticmethod
def ensure_audit_log_table():
    """Ensure audit_log table exists (idempotent; safe to call repeatedly)."""
    with get_connection() as conn:
        # JSON payloads (args/result/before_state/after_state) are stored
        # as TEXT; created_at is an ISO-8601/SQLite timestamp string.
        conn.execute(
            """
            CREATE TABLE IF NOT EXISTS audit_log (
                id TEXT PRIMARY KEY,
                event_type TEXT NOT NULL,
                operation_name TEXT NOT NULL,
                operation_id TEXT,
                user_id TEXT,
                project_id TEXT,
                args TEXT,
                result TEXT,
                error TEXT,
                before_state TEXT,
                after_state TEXT,
                created_at TEXT DEFAULT CURRENT_TIMESTAMP
            )
            """
        )
        # Indexes cover the filters used by the query helpers above.
        conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_user ON audit_log(user_id)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_project ON audit_log(project_id)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_type ON audit_log(event_type)")
        conn.execute("CREATE INDEX IF NOT EXISTS idx_audit_date ON audit_log(created_at)")
# Initialize table on import
# NOTE(review): this performs database I/O at import time, so importing the
# module requires a reachable database — confirm this is intended.
AuditLog.ensure_audit_log_table()

View File

@@ -1,143 +0,0 @@
"""
MCP Server Configuration.
Loads configuration from environment variables and provides settings
for the MCP server, integrations, and security.
"""
import os
from pathlib import Path
from typing import Optional
from cryptography.fernet import Fernet
from dotenv import load_dotenv
# Load environment variables from a .env file, if present.
load_dotenv()

# Base paths
PROJECT_ROOT = Path(__file__).parent.parent.parent  # three levels above this file
TOOLS_DIR = PROJECT_ROOT / "tools"
STORAGE_DIR = PROJECT_ROOT / "tools" / "storage"
# Cache directory is relative to PROJECT_ROOT unless DSS_CACHE_DIR is absolute.
CACHE_DIR = PROJECT_ROOT / os.getenv("DSS_CACHE_DIR", ".dss/cache")
class MCPConfig:
    """MCP server settings, read from environment variables at import time."""

    # --- Server ---
    HOST: str = os.getenv("DSS_MCP_HOST", "127.0.0.1")
    PORT: int = int(os.getenv("DSS_MCP_PORT", "3457"))

    # --- Database ---
    DATABASE_PATH: str = os.getenv("DATABASE_PATH", str(STORAGE_DIR / "dss.db"))

    # --- Context caching (seconds) ---
    CONTEXT_CACHE_TTL: int = int(os.getenv("DSS_CONTEXT_CACHE_TTL", "300"))

    # --- Encryption ---
    ENCRYPTION_KEY: Optional[str] = os.getenv("DSS_ENCRYPTION_KEY")

    @classmethod
    def get_cipher(cls) -> Optional[Fernet]:
        """Return a Fernet cipher, or None when no key is configured."""
        key = cls.ENCRYPTION_KEY
        return Fernet(key.encode()) if key else None

    @classmethod
    def generate_encryption_key(cls) -> str:
        """Generate a fresh Fernet key suitable for DSS_ENCRYPTION_KEY."""
        return Fernet.generate_key().decode()

    # --- Redis / Celery worker pool ---
    REDIS_URL: str = os.getenv("REDIS_URL", "redis://localhost:6379/0")
    CELERY_BROKER_URL: str = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0")
    CELERY_RESULT_BACKEND: str = os.getenv("CELERY_RESULT_BACKEND", "redis://localhost:6379/0")

    # --- Circuit breaker ---
    CIRCUIT_BREAKER_FAILURE_THRESHOLD: int = int(
        os.getenv("CIRCUIT_BREAKER_FAILURE_THRESHOLD", "5")
    )
    CIRCUIT_BREAKER_TIMEOUT_SECONDS: int = int(os.getenv("CIRCUIT_BREAKER_TIMEOUT_SECONDS", "60"))

    # --- Logging ---
    LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO").upper()
class IntegrationConfig:
    """External integration configuration.

    All values are read from environment variables at import time. Jira and
    Confluence values are global defaults that can be overridden per-user.
    """

    # Figma
    FIGMA_TOKEN: Optional[str] = os.getenv("FIGMA_TOKEN")
    FIGMA_CACHE_TTL: int = int(os.getenv("FIGMA_CACHE_TTL", "300"))
    # Anthropic (for Sequential Thinking)
    ANTHROPIC_API_KEY: Optional[str] = os.getenv("ANTHROPIC_API_KEY")
    # Jira (defaults, can be overridden per-user)
    JIRA_URL: Optional[str] = os.getenv("JIRA_URL")
    JIRA_USERNAME: Optional[str] = os.getenv("JIRA_USERNAME")
    JIRA_API_TOKEN: Optional[str] = os.getenv("JIRA_API_TOKEN")
    # Confluence (defaults, can be overridden per-user)
    CONFLUENCE_URL: Optional[str] = os.getenv("CONFLUENCE_URL")
    CONFLUENCE_USERNAME: Optional[str] = os.getenv("CONFLUENCE_USERNAME")
    CONFLUENCE_API_TOKEN: Optional[str] = os.getenv("CONFLUENCE_API_TOKEN")
# Singleton instances
# Both classes hold only class-level attributes, so these instances are
# interchangeable with the classes; they exist for import-site readability.
mcp_config = MCPConfig()
integration_config = IntegrationConfig()
def validate_config() -> list[str]:
    """
    Validate configuration and return list of warnings.

    Returns:
        List of warning messages for missing optional config
    """
    # (value, warning-if-missing) pairs; a falsy value triggers its warning.
    checks = [
        (
            mcp_config.ENCRYPTION_KEY,
            "DSS_ENCRYPTION_KEY not set. Integration credentials will not be encrypted. "
            'Generate one with: python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"',
        ),
        (
            integration_config.ANTHROPIC_API_KEY,
            "ANTHROPIC_API_KEY not set. Sequential Thinking tools will not be available.",
        ),
        (
            integration_config.FIGMA_TOKEN,
            "FIGMA_TOKEN not set. Figma tools will not be available.",
        ),
    ]
    return [message for value, message in checks if not value]
if __name__ == "__main__":
    # Diagnostic dump: print the effective configuration and any warnings.
    # Secrets are shown only as set/not-set, never as values.
    print("=== DSS MCP Configuration ===\n")
    print(f"MCP Server: {mcp_config.HOST}:{mcp_config.PORT}")
    print(f"Database: {mcp_config.DATABASE_PATH}")
    print(f"Context Cache TTL: {mcp_config.CONTEXT_CACHE_TTL}s")
    print(f"Encryption Key: {'✓ Set' if mcp_config.ENCRYPTION_KEY else '✗ Not Set'}")
    print(f"Redis URL: {mcp_config.REDIS_URL}")
    print("\nCircuit Breaker:")
    print(f"  Failure Threshold: {mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD}")
    print(f"  Timeout: {mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS}s")
    print("\n=== Integration Configuration ===\n")
    print(f"Figma Token: {'✓ Set' if integration_config.FIGMA_TOKEN else '✗ Not Set'}")
    print(f"Anthropic API Key: {'✓ Set' if integration_config.ANTHROPIC_API_KEY else '✗ Not Set'}")
    print(f"Jira URL: {integration_config.JIRA_URL or '✗ Not Set'}")
    print(f"Confluence URL: {integration_config.CONFLUENCE_URL or '✗ Not Set'}")
    warnings = validate_config()
    if warnings:
        print("\n⚠️  Warnings:")
        for warning in warnings:
            print(f"  - {warning}")
    else:
        print("\n✓ Configuration is valid")

View File

@@ -1,429 +0,0 @@
"""
Project Context Manager.
Provides cached, project-isolated context for Claude MCP sessions.
Loads all relevant project data (components, tokens, config, health, etc.)
and caches it for performance.
"""
import asyncio
import json

# Import from existing DSS modules
import sys
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional

from analyze.scanner import ProjectScanner

from ..config import mcp_config

# NOTE(review): this sys.path tweak runs AFTER the `from analyze.scanner`
# import above, so it cannot help that import resolve — confirm ordering.
# NOTE(review): `get_connection` is used throughout this module but is not
# imported here; presumably provided by a sibling module — verify.
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
@dataclass
class ProjectContext:
    """Complete project context for MCP sessions."""

    project_id: str
    name: str
    description: Optional[str]
    path: Optional[Path]
    # Component data
    components: List[Dict[str, Any]]
    component_count: int
    # Token/Style data
    tokens: Dict[str, Any]
    styles: List[Dict[str, Any]]
    # Project configuration
    config: Dict[str, Any]
    # User's enabled integrations (user-scoped)
    integrations: Dict[str, Any]
    # Project health & metrics
    health: Dict[str, Any]
    stats: Dict[str, Any]
    # Discovery/scan results
    discovery: Dict[str, Any]
    # Cache bookkeeping
    loaded_at: datetime
    cache_expires_at: datetime

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        payload = asdict(self)
        # datetime and Path values are not JSON-serializable; stringify them.
        payload["loaded_at"] = self.loaded_at.isoformat()
        payload["cache_expires_at"] = self.cache_expires_at.isoformat()
        if self.path:
            payload["path"] = str(self.path)
        return payload

    def is_expired(self) -> bool:
        """True once the wall clock reaches cache_expires_at."""
        return datetime.now() >= self.cache_expires_at
class ProjectContextManager:
    """
    Manages project contexts with TTL-based caching.

    Provides fast access to project data for MCP tools while ensuring
    data freshness and project isolation. Cache entries are keyed by
    (project_id, user_id) so user-scoped integration data never leaks
    between users.
    """

    def __init__(self):
        # cache key -> ProjectContext; key format is "<project_id>:<user_id>"
        self._cache: Dict[str, ProjectContext] = {}
        self._cache_ttl = timedelta(seconds=mcp_config.CONTEXT_CACHE_TTL)

    async def get_context(
        self, project_id: str, user_id: Optional[int] = None, force_refresh: bool = False
    ) -> Optional[ProjectContext]:
        """
        Get project context, using cache if available.

        Args:
            project_id: Project ID
            user_id: User ID for loading user-scoped integrations
            force_refresh: Force cache refresh

        Returns:
            ProjectContext or None if project not found
        """
        # Check cache first; expired entries fall through to a fresh load.
        cache_key = f"{project_id}:{user_id or 'anonymous'}"
        if not force_refresh and cache_key in self._cache:
            ctx = self._cache[cache_key]
            if not ctx.is_expired():
                return ctx
        # Load fresh context (None when the project does not exist).
        context = await self._load_context(project_id, user_id)
        if context:
            self._cache[cache_key] = context
        return context

    async def _load_context(
        self, project_id: str, user_id: Optional[int] = None
    ) -> Optional[ProjectContext]:
        """Load complete project context from database and filesystem."""
        # Run database queries in thread pool to avoid blocking the event loop.
        loop = asyncio.get_event_loop()
        # Load project metadata; bail out early if the project is unknown.
        project = await loop.run_in_executor(None, self._load_project, project_id)
        if not project:
            return None
        # Load components, styles, stats, integrations concurrently.
        components_task = loop.run_in_executor(None, self._load_components, project_id)
        styles_task = loop.run_in_executor(None, self._load_styles, project_id)
        stats_task = loop.run_in_executor(None, self._load_stats, project_id)
        integrations_task = loop.run_in_executor(None, self._load_integrations, project_id, user_id)
        components = await components_task
        styles = await styles_task
        stats = await stats_task
        integrations = await integrations_task
        # Load tokens from filesystem if project has a path.
        tokens = {}
        project_path = None
        if project.get("figma_file_key"):
            # Try to find project path based on naming convention
            # (This can be enhanced based on actual project structure)
            # NOTE(review): Path.cwd() is a placeholder, not the real project
            # directory — token/discovery data depends on the server's CWD.
            project_path = Path.cwd()
            tokens = await loop.run_in_executor(None, self._load_tokens, project_path)
        # Load discovery/scan data (no-op dict when project_path is None).
        discovery = await loop.run_in_executor(None, self._load_discovery, project_path)
        # Compute health score from the data gathered above.
        health = self._compute_health(components, tokens, stats)
        # Build context with cache-expiry bookkeeping.
        now = datetime.now()
        context = ProjectContext(
            project_id=project_id,
            name=project["name"],
            description=project.get("description"),
            path=project_path,
            components=components,
            component_count=len(components),
            tokens=tokens,
            styles=styles,
            config={
                "figma_file_key": project.get("figma_file_key"),
                "status": project.get("status", "active"),
            },
            integrations=integrations,
            health=health,
            stats=stats,
            discovery=discovery,
            loaded_at=now,
            cache_expires_at=now + self._cache_ttl,
        )
        return context

    def _load_project(self, project_id: str) -> Optional[Dict[str, Any]]:
        """Load project metadata from database (None on miss or error)."""
        try:
            with get_connection() as conn:
                row = conn.execute("SELECT * FROM projects WHERE id = ?", (project_id,)).fetchone()
                if row:
                    return dict(row)
                return None
        except Exception as e:
            print(f"Error loading project: {e}")
            return None

    def _load_components(self, project_id: str) -> List[Dict[str, Any]]:
        """Load all components for project; JSON columns are parsed in place."""
        try:
            with get_connection() as conn:
                rows = conn.execute(
                    """
                    SELECT id, name, figma_key, description,
                           properties, variants, code_generated,
                           created_at, updated_at
                    FROM components
                    WHERE project_id = ?
                    ORDER BY name
                    """,
                    (project_id,),
                ).fetchall()
                components = []
                for row in rows:
                    comp = dict(row)
                    # Parse JSON fields stored as TEXT.
                    if comp.get("properties"):
                        comp["properties"] = json.loads(comp["properties"])
                    if comp.get("variants"):
                        comp["variants"] = json.loads(comp["variants"])
                    components.append(comp)
                return components
        except Exception as e:
            print(f"Error loading components: {e}")
            return []

    def _load_styles(self, project_id: str) -> List[Dict[str, Any]]:
        """Load all styles for project; `properties` JSON is parsed in place."""
        try:
            with get_connection() as conn:
                rows = conn.execute(
                    """
                    SELECT id, name, type, figma_key, properties, created_at
                    FROM styles
                    WHERE project_id = ?
                    ORDER BY type, name
                    """,
                    (project_id,),
                ).fetchall()
                styles = []
                for row in rows:
                    style = dict(row)
                    if style.get("properties"):
                        style["properties"] = json.loads(style["properties"])
                    styles.append(style)
                return styles
        except Exception as e:
            print(f"Error loading styles: {e}")
            return []

    def _load_stats(self, project_id: str) -> Dict[str, Any]:
        """Load project statistics (component totals and style counts by type)."""
        try:
            with get_connection() as conn:
                # Component count: total plus how many have generated code.
                component_stats = conn.execute(
                    """
                    SELECT COUNT(*) as total,
                           SUM(CASE WHEN code_generated = 1 THEN 1 ELSE 0 END) as generated
                    FROM components
                    WHERE project_id = ?
                    """,
                    (project_id,),
                ).fetchone()
                # Style count by type
                style_stats = conn.execute(
                    """
                    SELECT type, COUNT(*) as count
                    FROM styles
                    WHERE project_id = ?
                    GROUP BY type
                    """,
                    (project_id,),
                ).fetchall()
                return {
                    "components": dict(component_stats)
                    if component_stats
                    else {"total": 0, "generated": 0},
                    "styles": {row["type"]: row["count"] for row in style_stats},
                }
        except Exception as e:
            print(f"Error loading stats: {e}")
            return {"components": {"total": 0, "generated": 0}, "styles": {}}

    def _load_integrations(self, project_id: str, user_id: Optional[int]) -> Dict[str, Any]:
        """Load user's enabled integrations for this project (decrypted)."""
        if not user_id:
            return {}
        try:
            with get_connection() as conn:
                rows = conn.execute(
                    """
                    SELECT integration_type, config, enabled, last_used_at
                    FROM project_integrations
                    WHERE project_id = ? AND user_id = ? AND enabled = 1
                    """,
                    (project_id, user_id),
                ).fetchall()
                # Return decrypted config for each integration.
                integrations = {}
                cipher = mcp_config.get_cipher()
                for row in rows:
                    integration_type = row["integration_type"]
                    encrypted_config = row["config"]
                    # Decrypt config; fall back to empty dict on failure.
                    if cipher:
                        try:
                            decrypted_config = cipher.decrypt(encrypted_config.encode()).decode()
                            config = json.loads(decrypted_config)
                        except Exception as e:
                            print(f"Error decrypting integration config: {e}")
                            config = {}
                    else:
                        # No encryption key, try to parse as plain JSON.
                        # NOTE(review): bare except silently swallows all errors.
                        try:
                            config = json.loads(encrypted_config)
                        except:
                            config = {}
                    integrations[integration_type] = {
                        "enabled": True,
                        "config": config,
                        "last_used_at": row["last_used_at"],
                    }
                return integrations
        except Exception as e:
            print(f"Error loading integrations: {e}")
            return {}

    def _load_tokens(self, project_path: Optional[Path]) -> Dict[str, Any]:
        """Load design tokens from the first token file found on disk."""
        if not project_path:
            return {}
        tokens = {}
        # Checked in priority order; the first readable file wins.
        token_files = ["tokens.json", "design-tokens.json", "variables.json"]
        for token_file in token_files:
            token_path = project_path / token_file
            if token_path.exists():
                try:
                    with open(token_path) as f:
                        tokens = json.load(f)
                    break
                except Exception as e:
                    print(f"Error loading tokens from {token_path}: {e}")
        return tokens

    def _load_discovery(self, project_path: Optional[Path]) -> Dict[str, Any]:
        """Run a ProjectScanner scan over the project directory ({} on error)."""
        if not project_path:
            return {}
        try:
            scanner = ProjectScanner(str(project_path))
            discovery = scanner.scan()
            return discovery
        except Exception as e:
            print(f"Error running discovery scan: {e}")
            return {}

    def _compute_health(self, components: List[Dict], tokens: Dict, stats: Dict) -> Dict[str, Any]:
        """Compute project health score (0-100) with letter grade and issues."""
        score = 100
        issues = []
        # Deduct points for missing components
        if stats["components"]["total"] == 0:
            score -= 30
            issues.append("No components defined")
        # Deduct points for no tokens
        if not tokens:
            score -= 20
            issues.append("No design tokens defined")
        # Deduct points for ungenerated components
        total = stats["components"]["total"]
        generated = stats["components"]["generated"]
        if total > 0 and generated < total:
            percentage = (generated / total) * 100
            if percentage < 50:
                score -= 20
                issues.append(f"Low code generation: {percentage:.1f}%")
            elif percentage < 80:
                score -= 10
                issues.append(f"Medium code generation: {percentage:.1f}%")
        # Compute grade from the final score.
        if score >= 90:
            grade = "A"
        elif score >= 80:
            grade = "B"
        elif score >= 70:
            grade = "C"
        elif score >= 60:
            grade = "D"
        else:
            grade = "F"
        return {"score": max(0, score), "grade": grade, "issues": issues}

    def clear_cache(self, project_id: Optional[str] = None):
        """Clear cache for specific project or all projects."""
        if project_id:
            # Clear all cache entries for this project (every user's entry).
            keys_to_remove = [k for k in self._cache.keys() if k.startswith(f"{project_id}:")]
            for key in keys_to_remove:
                del self._cache[key]
        else:
            # Clear all cache
            self._cache.clear()
# Singleton instance (module-level; created lazily below)
_context_manager = None


def get_context_manager() -> ProjectContextManager:
    """Get singleton context manager instance (lazy; creation is unguarded)."""
    global _context_manager
    if _context_manager is None:
        _context_manager = ProjectContextManager()
    return _context_manager

View File

@@ -1,471 +0,0 @@
"""
Unified MCP Handler.
Central handler for all MCP tool execution. Used by:
- Direct API calls (/api/mcp/tools/{name}/execute)
- Claude chat (inline tool execution)
- SSE streaming connections
This module ensures all MCP requests go through a single code path
for consistent logging, error handling, and security.
"""
import asyncio
import json
from dataclasses import asdict, dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional
from .config import integration_config, mcp_config
from .context.project_context import ProjectContext, get_context_manager
from .integrations.base import CircuitBreakerOpen
from .integrations.confluence import CONFLUENCE_TOOLS, ConfluenceTools
from .integrations.figma import FIGMA_TOOLS, FigmaTools
from .integrations.jira import JIRA_TOOLS, JiraTools
from .integrations.storybook import STORYBOOK_TOOLS, StorybookTools
from .integrations.translations import TRANSLATION_TOOLS, TranslationTools
from .tools.analysis_tools import ANALYSIS_TOOLS, AnalysisTools
from .tools.project_tools import PROJECT_TOOLS, ProjectTools
# Note: sys.path is set up by the importing module (server.py)
# Do NOT modify sys.path here as it causes relative import issues
@dataclass
class ToolResult:
    """Outcome of a single MCP tool execution."""

    tool_name: str
    success: bool
    result: Any
    error: Optional[str] = None
    duration_ms: int = 0
    timestamp: str = None  # filled in by __post_init__ when omitted

    def __post_init__(self):
        # Stamp the creation time whenever the caller did not supply one.
        self.timestamp = self.timestamp or datetime.now().isoformat()

    def to_dict(self) -> Dict[str, Any]:
        """Plain-dict view, suitable for JSON serialization."""
        return asdict(self)
@dataclass
class MCPContext:
    """Context for MCP operations (who is calling, against which project)."""

    # Project the tool call is scoped to.
    project_id: str
    # Calling user; None for anonymous/system calls.
    user_id: Optional[int] = None
    # Chat/SSE session identifier, when invoked from a session.
    session_id: Optional[str] = None
class MCPHandler:
    """
    Unified MCP tool handler.

    Provides:
    - Tool discovery (list all available tools)
    - Tool execution with proper context
    - Integration management
    - Logging and metrics

    All tool categories share one execution path (execute_tool) so that
    logging, timing, and error handling are consistent.
    """

    def __init__(self):
        self.context_manager = get_context_manager()
        # name -> {"tool": <tool def>, "category": str, "requires_integration": bool, ...}
        self._tool_registry: Dict[str, Dict[str, Any]] = {}
        self._initialize_tools()

    def _initialize_tools(self):
        """Initialize tool registry with all available tools."""
        # Register base project tools
        for tool in PROJECT_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "project",
                "requires_integration": False,
            }
        # Register analysis tools
        for tool in ANALYSIS_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "analysis",
                "requires_integration": False,
            }
        # Register Figma tools (need a per-user or global Figma token)
        for tool in FIGMA_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "figma",
                "requires_integration": True,
                "integration_type": "figma",
            }
        # Register Storybook tools
        for tool in STORYBOOK_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "storybook",
                "requires_integration": False,
            }
        # Register Jira tools (per-user integration config required)
        for tool in JIRA_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "jira",
                "requires_integration": True,
                "integration_type": "jira",
            }
        # Register Confluence tools (per-user integration config required)
        for tool in CONFLUENCE_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "confluence",
                "requires_integration": True,
                "integration_type": "confluence",
            }
        # Register Translation tools
        for tool in TRANSLATION_TOOLS:
            self._tool_registry[tool.name] = {
                "tool": tool,
                "category": "translations",
                "requires_integration": False,
            }

    def list_tools(self, include_details: bool = False) -> Dict[str, Any]:
        """
        List all available MCP tools.

        Args:
            include_details: Include full tool schemas

        Returns:
            Tool listing by category
        """
        tools_by_category = {}
        for name, info in self._tool_registry.items():
            category = info["category"]
            if category not in tools_by_category:
                tools_by_category[category] = []
            tool_info = {
                "name": name,
                "description": info["tool"].description,
                "requires_integration": info.get("requires_integration", False),
            }
            if include_details:
                tool_info["input_schema"] = info["tool"].inputSchema
            tools_by_category[category].append(tool_info)
        return {"tools": tools_by_category, "total_count": len(self._tool_registry)}

    def get_tool_info(self, tool_name: str) -> Optional[Dict[str, Any]]:
        """Get information about a specific tool (None for unknown names)."""
        if tool_name not in self._tool_registry:
            return None
        info = self._tool_registry[tool_name]
        return {
            "name": tool_name,
            "description": info["tool"].description,
            "category": info["category"],
            "input_schema": info["tool"].inputSchema,
            "requires_integration": info.get("requires_integration", False),
            "integration_type": info.get("integration_type"),
        }

    async def execute_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> ToolResult:
        """
        Execute an MCP tool.

        Args:
            tool_name: Name of the tool to execute
            arguments: Tool arguments
            context: MCP context (project_id, user_id)

        Returns:
            ToolResult with success/failure and data
        """
        start_time = datetime.now()
        # Check if tool exists
        if tool_name not in self._tool_registry:
            return ToolResult(
                tool_name=tool_name, success=False, result=None, error=f"Unknown tool: {tool_name}"
            )
        tool_info = self._tool_registry[tool_name]
        category = tool_info["category"]
        try:
            # Dispatch on category; each branch wraps its own tool class.
            if category == "project":
                result = await self._execute_project_tool(tool_name, arguments, context)
            elif category == "analysis":
                result = await self._execute_analysis_tool(tool_name, arguments, context)
            elif category == "figma":
                result = await self._execute_figma_tool(tool_name, arguments, context)
            elif category == "storybook":
                result = await self._execute_storybook_tool(tool_name, arguments, context)
            elif category == "jira":
                result = await self._execute_jira_tool(tool_name, arguments, context)
            elif category == "confluence":
                result = await self._execute_confluence_tool(tool_name, arguments, context)
            elif category == "translations":
                result = await self._execute_translations_tool(tool_name, arguments, context)
            else:
                result = {"error": f"Unknown tool category: {category}"}
            # Convention: a dict containing an "error" key signals failure.
            # NOTE(review): a tool legitimately returning an "error" field in
            # its payload would be misread as a failure — confirm convention.
            success = "error" not in result
            error = result.get("error") if not success else None
            # Calculate duration
            duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
            # Log execution
            await self._log_tool_usage(
                tool_name=tool_name,
                category=category,
                project_id=context.project_id,
                user_id=context.user_id,
                success=success,
                duration_ms=duration_ms,
                error=error,
            )
            return ToolResult(
                tool_name=tool_name,
                success=success,
                result=result if success else None,
                error=error,
                duration_ms=duration_ms,
            )
        except CircuitBreakerOpen as e:
            # NOTE(review): this path returns without calling _log_tool_usage,
            # so breaker rejections are absent from mcp_tool_usage — confirm.
            duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
            return ToolResult(
                tool_name=tool_name,
                success=False,
                result=None,
                error=str(e),
                duration_ms=duration_ms,
            )
        except Exception as e:
            duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
            await self._log_tool_usage(
                tool_name=tool_name,
                category=category,
                project_id=context.project_id,
                user_id=context.user_id,
                success=False,
                duration_ms=duration_ms,
                error=str(e),
            )
            return ToolResult(
                tool_name=tool_name,
                success=False,
                result=None,
                error=str(e),
                duration_ms=duration_ms,
            )

    async def _execute_project_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a project tool."""
        # Ensure project_id is set (fall back to the call context).
        if "project_id" not in arguments:
            arguments["project_id"] = context.project_id
        project_tools = ProjectTools(context.user_id)
        return await project_tools.execute_tool(tool_name, arguments)

    async def _execute_analysis_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> Dict[str, Any]:
        """Execute an analysis tool."""
        # Ensure project_id is set for context if needed, though project_path is explicit
        if "project_id" not in arguments:
            arguments["project_id"] = context.project_id
        analysis_tools = AnalysisTools(context.user_id)
        return await analysis_tools.execute_tool(tool_name, arguments)

    async def _execute_figma_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Figma tool; per-user config wins over the global token."""
        # Get Figma config
        config = await self._get_integration_config("figma", context)
        if not config:
            # Try global config
            if integration_config.FIGMA_TOKEN:
                config = {"api_token": integration_config.FIGMA_TOKEN}
            else:
                return {"error": "Figma not configured. Please add Figma API token."}
        figma_tools = FigmaTools(config)
        return await figma_tools.execute_tool(tool_name, arguments)

    async def _execute_storybook_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Storybook tool."""
        # Ensure project_id is set
        if "project_id" not in arguments:
            arguments["project_id"] = context.project_id
        storybook_tools = StorybookTools()
        return await storybook_tools.execute_tool(tool_name, arguments)

    async def _execute_jira_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Jira tool (requires per-user integration config)."""
        config = await self._get_integration_config("jira", context)
        if not config:
            return {"error": "Jira not configured. Please configure Jira integration."}
        jira_tools = JiraTools(config)
        return await jira_tools.execute_tool(tool_name, arguments)

    async def _execute_confluence_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Confluence tool (requires per-user integration config)."""
        config = await self._get_integration_config("confluence", context)
        if not config:
            return {"error": "Confluence not configured. Please configure Confluence integration."}
        confluence_tools = ConfluenceTools(config)
        return await confluence_tools.execute_tool(tool_name, arguments)

    async def _execute_translations_tool(
        self, tool_name: str, arguments: Dict[str, Any], context: MCPContext
    ) -> Dict[str, Any]:
        """Execute a Translation tool."""
        # Ensure project_id is set
        if "project_id" not in arguments:
            arguments["project_id"] = context.project_id
        translation_tools = TranslationTools()
        return await translation_tools.execute_tool(tool_name, arguments)

    async def _get_integration_config(
        self, integration_type: str, context: MCPContext
    ) -> Optional[Dict[str, Any]]:
        """Get decrypted integration config for user/project (None if absent)."""
        if not context.user_id or not context.project_id:
            return None
        loop = asyncio.get_event_loop()

        def get_config():
            # Runs on a worker thread; every failure maps to None so the
            # caller falls back to "not configured".
            # NOTE(review): bare excepts here hide real DB/crypto errors.
            try:
                with get_connection() as conn:
                    row = conn.execute(
                        """
                        SELECT config FROM project_integrations
                        WHERE project_id = ? AND user_id = ? AND integration_type = ? AND enabled = 1
                        """,
                        (context.project_id, context.user_id, integration_type),
                    ).fetchone()
                    if not row:
                        return None
                    encrypted_config = row["config"]
                    # Decrypt
                    cipher = mcp_config.get_cipher()
                    if cipher:
                        try:
                            decrypted = cipher.decrypt(encrypted_config.encode()).decode()
                            return json.loads(decrypted)
                        except:
                            pass
                    # Try parsing as plain JSON
                    try:
                        return json.loads(encrypted_config)
                    except:
                        return None
            except:
                return None

        return await loop.run_in_executor(None, get_config)

    async def _log_tool_usage(
        self,
        tool_name: str,
        category: str,
        project_id: str,
        user_id: Optional[int],
        success: bool,
        duration_ms: int,
        error: Optional[str] = None,
    ):
        """Log tool execution to database (best-effort; never raises)."""
        loop = asyncio.get_event_loop()

        def log():
            try:
                with get_connection() as conn:
                    conn.execute(
                        """
                        INSERT INTO mcp_tool_usage
                        (project_id, user_id, tool_name, tool_category, duration_ms, success, error_message)
                        VALUES (?, ?, ?, ?, ?, ?, ?)
                        """,
                        (project_id, user_id, tool_name, category, duration_ms, success, error),
                    )
            except:
                pass  # Don't fail on logging errors

        await loop.run_in_executor(None, log)

    async def get_project_context(
        self, project_id: str, user_id: Optional[int] = None
    ) -> Optional[ProjectContext]:
        """Get project context for Claude system prompt."""
        return await self.context_manager.get_context(project_id, user_id)

    def get_tools_for_claude(self) -> List[Dict[str, Any]]:
        """
        Get tools formatted for Claude's tool_use feature.

        Returns:
            List of tools in Anthropic's tool format
        """
        tools = []
        for name, info in self._tool_registry.items():
            tools.append(
                {
                    "name": name,
                    "description": info["tool"].description,
                    "input_schema": info["tool"].inputSchema,
                }
            )
        return tools
# Singleton instance (module-level; created lazily below)
_mcp_handler: Optional[MCPHandler] = None


def get_mcp_handler() -> MCPHandler:
    """Get singleton MCP handler instance (lazy; creation is unguarded)."""
    global _mcp_handler
    if _mcp_handler is None:
        _mcp_handler = MCPHandler()
    return _mcp_handler

View File

@@ -1,268 +0,0 @@
"""
Base Integration Classes.
Provides circuit breaker pattern and base classes for external integrations.
"""
import asyncio
import time
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Callable, Dict, Optional
from ..config import mcp_config
class CircuitState(Enum):
    """Circuit breaker states (see CircuitBreaker for transitions)."""

    CLOSED = "closed"  # Normal operation
    OPEN = "open"  # Failing, reject requests
    HALF_OPEN = "half_open"  # Testing if service recovered
@dataclass
class CircuitBreakerStats:
    """Point-in-time snapshot of a circuit breaker's counters and timestamps."""

    state: CircuitState
    failure_count: int
    success_count: int
    # Timestamps below are time.time() epoch seconds; None if never set.
    last_failure_time: Optional[float]
    last_success_time: Optional[float]
    opened_at: Optional[float]
    next_retry_time: Optional[float]
class CircuitBreakerOpen(Exception):
    """Raised when a call is rejected because the circuit breaker is open."""

    pass
class CircuitBreaker:
    """
    Circuit Breaker pattern implementation.

    Protects external service calls from cascading failures.
    Three states: CLOSED (normal), OPEN (failing), HALF_OPEN (testing).
    State is kept in memory per instance and mirrored to the
    ``integration_health`` table on every recorded success/failure.
    """

    def __init__(
        self,
        integration_type: str,
        failure_threshold: int = None,
        timeout_seconds: int = None,
        half_open_max_calls: int = 3,
    ):
        """
        Args:
            integration_type: Type of integration (figma, jira, confluence, etc.)
            failure_threshold: Number of failures before opening circuit
                (falls back to mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD)
            timeout_seconds: Seconds to wait before trying again
                (falls back to mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS)
            half_open_max_calls: Max successful calls in half-open before closing
        """
        self.integration_type = integration_type
        self.failure_threshold = failure_threshold or mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD
        self.timeout_seconds = timeout_seconds or mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS
        self.half_open_max_calls = half_open_max_calls

        # In-memory state (could be moved to Redis for a distributed setup).
        self.state = CircuitState.CLOSED
        self.failure_count = 0
        self.success_count = 0
        self.last_failure_time: Optional[float] = None
        self.last_success_time: Optional[float] = None
        self.opened_at: Optional[float] = None

    async def call(self, func: Callable, *args, **kwargs) -> Any:
        """
        Call a function through the circuit breaker.

        Args:
            func: Function to call (can be sync or async)
            *args, **kwargs: Arguments to pass to func

        Returns:
            Function result

        Raises:
            CircuitBreakerOpen: If circuit is open
            Exception: Original exception from func if it fails
        """
        if self.state == CircuitState.OPEN:
            # Defensive guard: OPEN should always carry a timestamp, but if
            # opened_at is somehow unset, treat the circuit as past timeout
            # instead of crashing on None arithmetic.
            opened_at = self.opened_at if self.opened_at is not None else 0.0
            if time.time() - opened_at < self.timeout_seconds:
                # Record the rejection in the DB without inflating failure_count.
                await self._record_failure("Circuit breaker is OPEN", db_only=True)
                raise CircuitBreakerOpen(
                    f"{self.integration_type} service is temporarily unavailable. "
                    f"Retry after {self._seconds_until_retry():.0f}s"
                )
            else:
                # Timeout elapsed: probe the service in HALF_OPEN.
                self.state = CircuitState.HALF_OPEN
                self.success_count = 0

        try:
            # Handle both sync and async callables.
            if asyncio.iscoroutinefunction(func):
                result = await func(*args, **kwargs)
            else:
                result = func(*args, **kwargs)

            await self._record_success()

            # Enough consecutive probes succeeded: close the circuit again.
            if self.state == CircuitState.HALF_OPEN:
                if self.success_count >= self.half_open_max_calls:
                    self.state = CircuitState.CLOSED
                    self.failure_count = 0
            return result
        except Exception as e:
            await self._record_failure(str(e))
            # Trip the breaker once failures reach the configured threshold.
            if self.failure_count >= self.failure_threshold:
                self.state = CircuitState.OPEN
                self.opened_at = time.time()
            raise

    async def _record_success(self):
        """Record a successful call and mirror healthy status to the DB."""
        self.success_count += 1
        self.last_success_time = time.time()
        await self._update_health_db(is_healthy=True, error=None)

    async def _record_failure(self, error_message: str, db_only: bool = False):
        """Record a failed call.

        Args:
            error_message: Description persisted with the health row.
            db_only: When True (rejections while OPEN), update the DB but do
                not increment the in-memory failure counter.
        """
        if not db_only:
            self.failure_count += 1
            self.last_failure_time = time.time()
        await self._update_health_db(is_healthy=False, error=error_message)

    async def _update_health_db(self, is_healthy: bool, error: Optional[str]):
        """Persist current health to ``integration_health``.

        The blocking DB work runs in the default executor so the event loop
        is never blocked; DB errors are swallowed (best-effort telemetry).
        """
        # Bug fix: asyncio.get_event_loop() is deprecated when called from a
        # coroutine (Python 3.10+); get_running_loop() is the correct API here.
        loop = asyncio.get_running_loop()

        def update_db():
            try:
                # NOTE(review): get_connection is not imported in this module —
                # confirm the intended DB helper import. Any NameError/DB error
                # is caught below and only printed.
                with get_connection() as conn:
                    circuit_open_until = None
                    if self.state == CircuitState.OPEN and self.opened_at:
                        circuit_open_until = datetime.fromtimestamp(
                            self.opened_at + self.timeout_seconds
                        ).isoformat()
                    if is_healthy:
                        conn.execute(
                            """
                            UPDATE integration_health
                            SET is_healthy = 1,
                                failure_count = 0,
                                last_success_at = CURRENT_TIMESTAMP,
                                circuit_open_until = NULL,
                                updated_at = CURRENT_TIMESTAMP
                            WHERE integration_type = ?
                            """,
                            (self.integration_type,),
                        )
                    else:
                        conn.execute(
                            """
                            UPDATE integration_health
                            SET is_healthy = 0,
                                failure_count = ?,
                                last_failure_at = CURRENT_TIMESTAMP,
                                circuit_open_until = ?,
                                updated_at = CURRENT_TIMESTAMP
                            WHERE integration_type = ?
                            """,
                            (self.failure_count, circuit_open_until, self.integration_type),
                        )
            except Exception as e:
                print(f"Error updating integration health: {e}")

        await loop.run_in_executor(None, update_db)

    def _seconds_until_retry(self) -> float:
        """Seconds remaining until an OPEN circuit may be probed (0 if not OPEN)."""
        if self.state != CircuitState.OPEN or not self.opened_at:
            return 0
        elapsed = time.time() - self.opened_at
        remaining = self.timeout_seconds - elapsed
        return max(0, remaining)

    def get_stats(self) -> CircuitBreakerStats:
        """Get current circuit breaker statistics as an immutable snapshot."""
        next_retry_time = None
        if self.state == CircuitState.OPEN and self.opened_at:
            next_retry_time = self.opened_at + self.timeout_seconds
        return CircuitBreakerStats(
            state=self.state,
            failure_count=self.failure_count,
            success_count=self.success_count,
            last_failure_time=self.last_failure_time,
            last_success_time=self.last_success_time,
            opened_at=self.opened_at,
            next_retry_time=next_retry_time,
        )
class BaseIntegration:
    """Common base for external integrations (Figma, Jira, Confluence, ...).

    Routes every outbound API call through a per-integration
    CircuitBreaker and exposes a uniform health snapshot.
    """

    def __init__(self, integration_type: str, config: Dict[str, Any]):
        """
        Args:
            integration_type: Type of integration (figma, jira, etc.)
            config: Integration configuration (decrypted)
        """
        self.integration_type = integration_type
        self.config = config
        self.circuit_breaker = CircuitBreaker(integration_type)

    async def call_api(self, func: Callable, *args, **kwargs) -> Any:
        """Invoke ``func`` through this integration's circuit breaker.

        Returns:
            The API response from ``func``.

        Raises:
            CircuitBreakerOpen: If the breaker is currently rejecting calls.
            Exception: Whatever ``func`` itself raises.
        """
        return await self.circuit_breaker.call(func, *args, **kwargs)

    def get_health(self) -> Dict[str, Any]:
        """Return a flat dict describing the breaker's current health."""
        snapshot = self.circuit_breaker.get_stats()
        healthy = snapshot.state == CircuitState.CLOSED
        return {
            "integration_type": self.integration_type,
            "state": snapshot.state.value,
            "is_healthy": healthy,
            "failure_count": snapshot.failure_count,
            "success_count": snapshot.success_count,
            "last_failure_time": snapshot.last_failure_time,
            "last_success_time": snapshot.last_success_time,
            "next_retry_time": snapshot.next_retry_time,
        }

View File

@@ -1,218 +0,0 @@
"""
Confluence Integration for MCP.
Provides Confluence API tools for documentation and knowledge base.
"""
from typing import Any, Dict, Optional
from atlassian import Confluence
from mcp import types
from .base import BaseIntegration
# Confluence MCP Tool Definitions
#
# Each entry is an MCP Tool schema describing one Confluence operation;
# calls are dispatched to ConfluenceIntegration via ConfluenceTools.execute_tool.
CONFLUENCE_TOOLS = [
    # Create a page in a space (optionally nested under parent_id).
    types.Tool(
        name="confluence_create_page",
        description="Create a new Confluence page",
        inputSchema={
            "type": "object",
            "properties": {
                "space_key": {"type": "string", "description": "Confluence space key"},
                "title": {"type": "string", "description": "Page title"},
                "body": {"type": "string", "description": "Page content (HTML or wiki markup)"},
                "parent_id": {"type": "string", "description": "Optional parent page ID"},
            },
            "required": ["space_key", "title", "body"],
        },
    ),
    # Fetch a page either by page_id or by (space_key + title).
    types.Tool(
        name="confluence_get_page",
        description="Get Confluence page by ID or title",
        inputSchema={
            "type": "object",
            "properties": {
                "page_id": {"type": "string", "description": "Page ID (use this OR title)"},
                "space_key": {
                    "type": "string",
                    "description": "Space key (required if using title)",
                },
                "title": {"type": "string", "description": "Page title (use this OR page_id)"},
                "expand": {
                    "type": "string",
                    "description": "Comma-separated list of expansions (body.storage, version, etc.)",
                    "default": "body.storage,version",
                },
            },
        },
    ),
    # Replace title/body of an existing page (version bump handled server-side).
    types.Tool(
        name="confluence_update_page",
        description="Update an existing Confluence page",
        inputSchema={
            "type": "object",
            "properties": {
                "page_id": {"type": "string", "description": "Page ID to update"},
                "title": {"type": "string", "description": "New page title"},
                "body": {"type": "string", "description": "New page content"},
            },
            "required": ["page_id", "title", "body"],
        },
    ),
    # Full-text/metadata search using Confluence Query Language.
    types.Tool(
        name="confluence_search",
        description="Search Confluence pages using CQL",
        inputSchema={
            "type": "object",
            "properties": {
                "cql": {
                    "type": "string",
                    "description": "CQL query (e.g., 'space=DSS AND type=page')",
                },
                "limit": {
                    "type": "integer",
                    "description": "Maximum number of results",
                    "default": 25,
                },
            },
            "required": ["cql"],
        },
    ),
    # Fetch metadata for a single space.
    types.Tool(
        name="confluence_get_space",
        description="Get Confluence space details",
        inputSchema={
            "type": "object",
            "properties": {"space_key": {"type": "string", "description": "Space key"}},
            "required": ["space_key"],
        },
    ),
]
class ConfluenceIntegration(BaseIntegration):
    """Confluence API integration with circuit breaker."""

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize Confluence integration.

        Args:
            config: Must contain 'url', 'username', 'api_token'

        Raises:
            ValueError: If any required config key is missing.
        """
        super().__init__("confluence", config)
        url = config.get("url")
        username = config.get("username")
        api_token = config.get("api_token")
        if not all([url, username, api_token]):
            raise ValueError(
                "Confluence configuration incomplete: url, username, api_token required"
            )
        self.confluence = Confluence(url=url, username=username, password=api_token, cloud=True)

    async def create_page(
        self, space_key: str, title: str, body: str, parent_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """Create a page in ``space_key``; nest under ``parent_id`` when given."""

        def do_create():
            return self.confluence.create_page(
                space=space_key,
                title=title,
                body=body,
                parent_id=parent_id,
                representation="storage",
            )

        return await self.call_api(do_create)

    async def get_page(
        self,
        page_id: Optional[str] = None,
        space_key: Optional[str] = None,
        title: Optional[str] = None,
        expand: str = "body.storage,version",
    ) -> Dict[str, Any]:
        """Fetch a page by ID, or by (space_key, title) when no ID is given."""

        def do_fetch():
            if page_id:
                return self.confluence.get_page_by_id(page_id=page_id, expand=expand)
            if space_key and title:
                return self.confluence.get_page_by_title(
                    space=space_key, title=title, expand=expand
                )
            raise ValueError("Must provide either page_id or (space_key + title)")

        return await self.call_api(do_fetch)

    async def update_page(self, page_id: str, title: str, body: str) -> Dict[str, Any]:
        """Replace a page's title/body, bumping its version number by one."""

        def do_update():
            # Read the current version first; the update must target version + 1.
            page = self.confluence.get_page_by_id(page_id, expand="version")
            current_version = page["version"]["number"]
            return self.confluence.update_page(
                page_id=page_id,
                title=title,
                body=body,
                parent_id=None,
                type="page",
                representation="storage",
                minor_edit=False,
                version_comment="Updated via DSS MCP",
                version_number=current_version + 1,
            )

        return await self.call_api(do_update)

    async def search(self, cql: str, limit: int = 25) -> Dict[str, Any]:
        """Run a CQL search and return at most ``limit`` results."""

        def do_search():
            return self.confluence.cql(cql, limit=limit)

        return await self.call_api(do_search)

    async def get_space(self, space_key: str) -> Dict[str, Any]:
        """Fetch metadata for a single space."""

        def do_fetch():
            return self.confluence.get_space(space_key)

        return await self.call_api(do_fetch)
class ConfluenceTools:
    """MCP tool executor for Confluence integration."""

    def __init__(self, config: Dict[str, Any]):
        self.confluence = ConfluenceIntegration(config)

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch ``tool_name`` to the matching Confluence method.

        Keys beginning with ``_`` are stripped from ``arguments`` before the
        call; any exception is converted into an ``{"error": ...}`` dict.
        """
        dispatch = {
            "confluence_create_page": self.confluence.create_page,
            "confluence_get_page": self.confluence.get_page,
            "confluence_update_page": self.confluence.update_page,
            "confluence_search": self.confluence.search,
            "confluence_get_space": self.confluence.get_space,
        }
        if tool_name not in dispatch:
            return {"error": f"Unknown Confluence tool: {tool_name}"}
        try:
            visible_args = {
                key: value for key, value in arguments.items() if not key.startswith("_")
            }
            return await dispatch[tool_name](**visible_args)
        except Exception as exc:
            return {"error": str(exc)}

View File

@@ -1,236 +0,0 @@
"""
Figma Integration for MCP.
Provides Figma API tools through circuit breaker pattern.
"""
from typing import Any, Dict
import httpx
from mcp import types
from ..config import integration_config
from .base import BaseIntegration
# Figma MCP Tool Definitions
#
# MCP Tool schemas for the Figma REST endpoints exposed below; calls are
# dispatched to FigmaIntegration via FigmaTools.execute_tool.
FIGMA_TOOLS = [
    # File metadata + full document tree.
    types.Tool(
        name="figma_get_file",
        description="Get Figma file metadata and structure",
        inputSchema={
            "type": "object",
            "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
            "required": ["file_key"],
        },
    ),
    # Published styles (color/text/effect).
    types.Tool(
        name="figma_get_styles",
        description="Get design styles (colors, text, effects) from Figma file",
        inputSchema={
            "type": "object",
            "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
            "required": ["file_key"],
        },
    ),
    # Published component definitions.
    types.Tool(
        name="figma_get_components",
        description="Get component definitions from Figma file",
        inputSchema={
            "type": "object",
            "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
            "required": ["file_key"],
        },
    ),
    # Local variables (design tokens).
    types.Tool(
        name="figma_extract_tokens",
        description="Extract design tokens (variables) from Figma file",
        inputSchema={
            "type": "object",
            "properties": {"file_key": {"type": "string", "description": "Figma file key"}},
            "required": ["file_key"],
        },
    ),
    # Single node lookup by node ID.
    types.Tool(
        name="figma_get_node",
        description="Get specific node/component by ID from Figma file",
        inputSchema={
            "type": "object",
            "properties": {
                "file_key": {"type": "string", "description": "Figma file key"},
                "node_id": {"type": "string", "description": "Node ID to fetch"},
            },
            "required": ["file_key", "node_id"],
        },
    ),
]
class FigmaIntegration(BaseIntegration):
    """Figma REST API integration with circuit breaker.

    All endpoint methods delegate to a single ``_get_json`` helper; the
    original implementation duplicated the same client/fetch boilerplate
    in five nearly identical closures.
    """

    FIGMA_API_BASE = "https://api.figma.com/v1"

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize Figma integration.

        Args:
            config: Must contain 'api_token' or use FIGMA_TOKEN from env

        Raises:
            ValueError: If no API token is available from either source.
        """
        super().__init__("figma", config)
        self.api_token = config.get("api_token") or integration_config.FIGMA_TOKEN
        if not self.api_token:
            raise ValueError("Figma API token not configured")
        self.headers = {"X-Figma-Token": self.api_token}

    async def _get_json(self, path: str, params: Dict[str, Any] = None) -> Dict[str, Any]:
        """GET ``FIGMA_API_BASE + path`` through the circuit breaker.

        Args:
            path: URL path starting with '/', appended to FIGMA_API_BASE.
            params: Optional query parameters.

        Returns:
            Decoded JSON body.

        Raises:
            httpx.HTTPStatusError: On non-2xx responses (via raise_for_status).
            CircuitBreakerOpen: If the breaker is rejecting calls.
        """

        async def _fetch():
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.FIGMA_API_BASE}{path}",
                    headers=self.headers,
                    params=params,
                    timeout=30.0,
                )
                response.raise_for_status()
                return response.json()

        return await self.call_api(_fetch)

    async def get_file(self, file_key: str) -> Dict[str, Any]:
        """Get Figma file metadata and structure."""
        return await self._get_json(f"/files/{file_key}")

    async def get_styles(self, file_key: str) -> Dict[str, Any]:
        """Get all styles (colors, text, effects) from a Figma file."""
        return await self._get_json(f"/files/{file_key}/styles")

    async def get_components(self, file_key: str) -> Dict[str, Any]:
        """Get all component definitions from a Figma file."""
        return await self._get_json(f"/files/{file_key}/components")

    async def extract_tokens(self, file_key: str) -> Dict[str, Any]:
        """Extract design tokens (local variables) from a Figma file."""
        return await self._get_json(f"/files/{file_key}/variables/local")

    async def get_node(self, file_key: str, node_id: str) -> Dict[str, Any]:
        """Get a specific node from a Figma file.

        Args:
            file_key: Figma file key
            node_id: Node ID

        Returns:
            Node data
        """
        return await self._get_json(f"/files/{file_key}/nodes", params={"ids": node_id})
class FigmaTools:
    """MCP tool executor for Figma integration."""

    def __init__(self, config: Dict[str, Any]):
        """
        Args:
            config: Figma configuration (with api_token)
        """
        self.figma = FigmaIntegration(config)

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch ``tool_name`` to the matching Figma method.

        Keys beginning with ``_`` are stripped from ``arguments``; any
        exception is converted into an ``{"error": ...}`` dict.
        """
        dispatch = {
            "figma_get_file": self.figma.get_file,
            "figma_get_styles": self.figma.get_styles,
            "figma_get_components": self.figma.get_components,
            "figma_extract_tokens": self.figma.extract_tokens,
            "figma_get_node": self.figma.get_node,
        }
        if tool_name not in dispatch:
            return {"error": f"Unknown Figma tool: {tool_name}"}
        try:
            visible_args = {
                key: value for key, value in arguments.items() if not key.startswith("_")
            }
            return await dispatch[tool_name](**visible_args)
        except Exception as exc:
            return {"error": str(exc)}

View File

@@ -1,190 +0,0 @@
"""
Jira Integration for MCP.
Provides Jira API tools for issue tracking and project management.
"""
from typing import Any, Dict
from atlassian import Jira
from mcp import types
from .base import BaseIntegration
# Jira MCP Tool Definitions
#
# MCP Tool schemas for the Jira operations exposed below; calls are
# dispatched to JiraIntegration via JiraTools.execute_tool.
JIRA_TOOLS = [
    # Create an issue in a project.
    types.Tool(
        name="jira_create_issue",
        description="Create a new Jira issue",
        inputSchema={
            "type": "object",
            "properties": {
                "project_key": {"type": "string", "description": "Jira project key (e.g., 'DSS')"},
                "summary": {"type": "string", "description": "Issue summary/title"},
                "description": {"type": "string", "description": "Issue description"},
                "issue_type": {
                    "type": "string",
                    "description": "Issue type (Story, Task, Bug, etc.)",
                    "default": "Task",
                },
            },
            "required": ["project_key", "summary"],
        },
    ),
    # Fetch one issue by key.
    types.Tool(
        name="jira_get_issue",
        description="Get Jira issue details by key",
        inputSchema={
            "type": "object",
            "properties": {
                "issue_key": {"type": "string", "description": "Issue key (e.g., 'DSS-123')"}
            },
            "required": ["issue_key"],
        },
    ),
    # JQL search across issues.
    types.Tool(
        name="jira_search_issues",
        description="Search Jira issues using JQL",
        inputSchema={
            "type": "object",
            "properties": {
                "jql": {
                    "type": "string",
                    "description": "JQL query (e.g., 'project=DSS AND status=Open')",
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results",
                    "default": 50,
                },
            },
            "required": ["jql"],
        },
    ),
    # Update arbitrary fields of an issue.
    types.Tool(
        name="jira_update_issue",
        description="Update a Jira issue",
        inputSchema={
            "type": "object",
            "properties": {
                "issue_key": {"type": "string", "description": "Issue key to update"},
                "fields": {
                    "type": "object",
                    "description": "Fields to update (summary, description, status, etc.)",
                },
            },
            "required": ["issue_key", "fields"],
        },
    ),
    # Append a comment to an issue.
    types.Tool(
        name="jira_add_comment",
        description="Add a comment to a Jira issue",
        inputSchema={
            "type": "object",
            "properties": {
                "issue_key": {"type": "string", "description": "Issue key"},
                "comment": {"type": "string", "description": "Comment text"},
            },
            "required": ["issue_key", "comment"],
        },
    ),
]
class JiraIntegration(BaseIntegration):
    """Jira API integration with circuit breaker."""

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize Jira integration.

        Args:
            config: Must contain 'url', 'username', 'api_token'

        Raises:
            ValueError: If any required config key is missing.
        """
        super().__init__("jira", config)
        url = config.get("url")
        username = config.get("username")
        api_token = config.get("api_token")
        if not all([url, username, api_token]):
            raise ValueError("Jira configuration incomplete: url, username, api_token required")
        self.jira = Jira(url=url, username=username, password=api_token, cloud=True)

    async def create_issue(
        self, project_key: str, summary: str, description: str = "", issue_type: str = "Task"
    ) -> Dict[str, Any]:
        """Create an issue in ``project_key`` and return the API response."""

        def do_create():
            payload = {
                "project": {"key": project_key},
                "summary": summary,
                "description": description,
                "issuetype": {"name": issue_type},
            }
            return self.jira.create_issue(payload)

        return await self.call_api(do_create)

    async def get_issue(self, issue_key: str) -> Dict[str, Any]:
        """Fetch full details for one issue."""

        def do_fetch():
            return self.jira.get_issue(issue_key)

        return await self.call_api(do_fetch)

    async def search_issues(self, jql: str, max_results: int = 50) -> Dict[str, Any]:
        """Run a JQL search, returning at most ``max_results`` issues."""

        def do_search():
            return self.jira.jql(jql, limit=max_results)

        return await self.call_api(do_search)

    async def update_issue(self, issue_key: str, fields: Dict[str, Any]) -> Dict[str, Any]:
        """Update the given fields on an issue; returns a status dict."""

        def do_update():
            self.jira.update_issue_field(issue_key, fields)
            return {"status": "updated", "issue_key": issue_key}

        return await self.call_api(do_update)

    async def add_comment(self, issue_key: str, comment: str) -> Dict[str, Any]:
        """Append a comment to an issue."""

        def do_comment():
            return self.jira.issue_add_comment(issue_key, comment)

        return await self.call_api(do_comment)
class JiraTools:
    """MCP tool executor for Jira integration."""

    def __init__(self, config: Dict[str, Any]):
        self.jira = JiraIntegration(config)

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch ``tool_name`` to the matching Jira method.

        Keys beginning with ``_`` are stripped from ``arguments``; any
        exception is converted into an ``{"error": ...}`` dict.
        """
        dispatch = {
            "jira_create_issue": self.jira.create_issue,
            "jira_get_issue": self.jira.get_issue,
            "jira_search_issues": self.jira.search_issues,
            "jira_update_issue": self.jira.update_issue,
            "jira_add_comment": self.jira.add_comment,
        }
        if tool_name not in dispatch:
            return {"error": f"Unknown Jira tool: {tool_name}"}
        try:
            visible_args = {
                key: value for key, value in arguments.items() if not key.startswith("_")
            }
            return await dispatch[tool_name](**visible_args)
        except Exception as exc:
            return {"error": str(exc)}

View File

@@ -1,513 +0,0 @@
"""
Storybook Integration for MCP.
Provides Storybook tools for scanning, generating stories, creating themes, and configuration.
"""
from pathlib import Path
from typing import Any, Dict, Optional
from mcp import types
from ..context.project_context import get_context_manager
from .base import BaseIntegration
# Storybook MCP Tool Definitions
#
# MCP Tool schemas for the Storybook operations exposed below; calls are
# dispatched to StorybookIntegration via StorybookTools.execute_tool.
STORYBOOK_TOOLS = [
    # Inventory existing stories and configuration for a project.
    types.Tool(
        name="storybook_scan",
        description="Scan project for existing Storybook configuration and stories. Returns story inventory, configuration details, and coverage statistics.",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "path": {
                    "type": "string",
                    "description": "Optional: Specific path to scan (defaults to project root)",
                },
            },
            "required": ["project_id"],
        },
    ),
    # Generate stories for a component file or a directory of components.
    types.Tool(
        name="storybook_generate_stories",
        description="Generate Storybook stories for React components. Supports CSF3, CSF2, and MDX formats with automatic prop detection.",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "component_path": {
                    "type": "string",
                    "description": "Path to component file or directory",
                },
                "template": {
                    "type": "string",
                    "description": "Story format template",
                    "enum": ["csf3", "csf2", "mdx"],
                    "default": "csf3",
                },
                "include_variants": {
                    "type": "boolean",
                    "description": "Generate variant stories (default: true)",
                    "default": True,
                },
                "dry_run": {
                    "type": "boolean",
                    "description": "Preview without writing files (default: true)",
                    "default": True,
                },
            },
            "required": ["project_id", "component_path"],
        },
    ),
    # Build a Storybook theme (manager/preview/theme files) from design tokens.
    types.Tool(
        name="storybook_generate_theme",
        description="Generate Storybook theme configuration from design tokens. Creates manager.ts, preview.ts, and theme files.",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "brand_title": {
                    "type": "string",
                    "description": "Brand title for Storybook UI",
                    "default": "Design System",
                },
                "base_theme": {
                    "type": "string",
                    "description": "Base theme (light or dark)",
                    "enum": ["light", "dark"],
                    "default": "light",
                },
                "output_dir": {
                    "type": "string",
                    "description": "Output directory (default: .storybook)",
                },
                "write_files": {
                    "type": "boolean",
                    "description": "Write files to disk (default: false - preview only)",
                    "default": False,
                },
            },
            "required": ["project_id"],
        },
    ),
    # Report installation/configuration status.
    types.Tool(
        name="storybook_get_status",
        description="Get Storybook installation and configuration status for a project.",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    # Initialize or update Storybook configuration for a project.
    types.Tool(
        name="storybook_configure",
        description="Configure or update Storybook for a project with DSS integration.",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "action": {
                    "type": "string",
                    "description": "Configuration action",
                    "enum": ["init", "update", "add_theme"],
                    "default": "init",
                },
                "options": {
                    "type": "object",
                    "description": "Configuration options",
                    "properties": {
                        "framework": {"type": "string", "enum": ["react", "vue", "angular"]},
                        "builder": {"type": "string", "enum": ["vite", "webpack5"]},
                        "typescript": {"type": "boolean"},
                    },
                },
            },
            "required": ["project_id"],
        },
    ),
]
class StorybookIntegration(BaseIntegration):
    """Storybook integration wrapper for DSS tools.

    Every method returns a plain dict; failures are reported as
    ``{"error": ..., "project_id": ...}`` rather than raised, so the MCP
    layer can pass results straight through.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize Storybook integration.

        Args:
            config: Optional Storybook configuration
        """
        super().__init__("storybook", config or {})
        self.context_manager = get_context_manager()

    @staticmethod
    async def _maybe_await(value: Any) -> Any:
        """Await ``value`` if it is awaitable, otherwise return it unchanged.

        Bug fix: the original code used ``hasattr(func, "__await__")`` on
        *functions*, which is always False (only coroutine objects define
        ``__await__``), so async implementations of the dss.* helpers were
        returned as un-awaited coroutines. Calling the helper first and then
        inspecting the *result* handles both sync and async implementations.
        """
        import inspect  # local import: keeps the module's import surface unchanged

        if inspect.isawaitable(value):
            return await value
        return value

    async def _get_project_path(self, project_id: str) -> Path:
        """
        Get project path from context manager.

        Args:
            project_id: Project ID

        Returns:
            Project path as Path object

        Raises:
            ValueError: If the project is unknown or has no path.
        """
        context = await self.context_manager.get_context(project_id)
        if not context or not context.path:
            raise ValueError(f"Project not found: {project_id}")
        return Path(context.path)

    async def scan_storybook(self, project_id: str, path: Optional[str] = None) -> Dict[str, Any]:
        """
        Scan for Storybook config and stories.

        Args:
            project_id: Project ID
            path: Optional specific path to scan (must stay inside the project)

        Returns:
            Storybook scan results, or an ``error`` dict on failure.
        """
        try:
            from dss.storybook.scanner import StorybookScanner

            project_path = await self._get_project_path(project_id)
            if path:
                scan_path = project_path / path
                # Security: reject paths that escape the project directory.
                if not scan_path.resolve().is_relative_to(project_path.resolve()):
                    raise ValueError("Path must be within project directory")
            else:
                scan_path = project_path

            scanner = StorybookScanner(str(scan_path))
            result = await self._maybe_await(scanner.scan())
            coverage = await self._maybe_await(scanner.get_story_coverage())

            return {
                "project_id": project_id,
                "path": str(scan_path),
                "config": result.get("config") if isinstance(result, dict) else None,
                "stories_count": result.get("stories_count", 0) if isinstance(result, dict) else 0,
                "components_with_stories": result.get("components_with_stories", [])
                if isinstance(result, dict)
                else [],
                "stories": result.get("stories", []) if isinstance(result, dict) else [],
                "coverage": coverage if coverage else {},
            }
        except Exception as e:
            return {"error": f"Failed to scan Storybook: {str(e)}", "project_id": project_id}

    async def generate_stories(
        self,
        project_id: str,
        component_path: str,
        template: str = "csf3",
        include_variants: bool = True,
        dry_run: bool = True,
    ) -> Dict[str, Any]:
        """
        Generate stories for components.

        Args:
            project_id: Project ID
            component_path: Path to component file or directory
            template: Story format (csf3, csf2, mdx)
            include_variants: Whether to generate variant stories
            dry_run: Preview without writing files

        Returns:
            Generation results, or an ``error`` dict on failure.
        """
        try:
            from dss.storybook.generator import StoryGenerator

            project_path = await self._get_project_path(project_id)
            generator = StoryGenerator(str(project_path))
            full_path = project_path / component_path

            if not full_path.exists():
                return {"error": f"Path not found: {component_path}", "project_id": project_id}

            if full_path.is_dir():
                # Directory: generate one story per component found.
                results = await self._maybe_await(
                    generator.generate_stories_for_directory(
                        component_path, template=template.upper(), dry_run=dry_run
                    )
                )
                result_list = results if isinstance(results, list) else []
                return {
                    "project_id": project_id,
                    "path": component_path,
                    "generated_count": len([r for r in result_list if "story" in str(r)]),
                    "results": result_list,
                    "dry_run": dry_run,
                    "template": template,
                }

            # Single component file.
            story = await self._maybe_await(
                generator.generate_story(
                    component_path, template=template.upper(), include_variants=include_variants
                )
            )
            return {
                "project_id": project_id,
                "component": component_path,
                "story": story,
                "template": template,
                "include_variants": include_variants,
                "dry_run": dry_run,
            }
        except Exception as e:
            return {
                "error": f"Failed to generate stories: {str(e)}",
                "project_id": project_id,
                "component_path": component_path,
            }

    async def generate_theme(
        self,
        project_id: str,
        brand_title: str = "Design System",
        base_theme: str = "light",
        output_dir: Optional[str] = None,
        write_files: bool = False,
    ) -> Dict[str, Any]:
        """
        Generate Storybook theme from design tokens.

        Args:
            project_id: Project ID
            brand_title: Brand title for Storybook
            base_theme: Base theme (light or dark)
            output_dir: Output directory for theme files
            write_files: Write files to disk or preview only

        Returns:
            Theme generation results, or an ``error`` dict on failure.
        """
        try:
            from dss.storybook.theme import ThemeGenerator
            from dss.themes import get_default_dark_theme, get_default_light_theme

            context = await self.context_manager.get_context(project_id)
            if not context:
                return {"error": f"Project not found: {project_id}"}

            # Flatten the project token map into [{"name", "value"}, ...].
            raw_tokens = context.tokens if hasattr(context, "tokens") else {}
            tokens_list = [
                {"name": name, "value": token.get("value") if isinstance(token, dict) else token}
                for name, token in raw_tokens.items()
            ]

            generator = ThemeGenerator()

            if write_files and output_dir:
                # Generate and write the full config to disk.
                files = await self._maybe_await(
                    generator.generate_full_config(
                        tokens=tokens_list, brand_title=brand_title, output_dir=output_dir
                    )
                )
                return {
                    "project_id": project_id,
                    "files_written": list(files.keys()) if isinstance(files, dict) else [],
                    "output_dir": output_dir,
                    "brand_title": brand_title,
                }

            # Preview mode - generate file contents without touching disk.
            try:
                theme = await self._maybe_await(
                    generator.generate_from_tokens(tokens_list, brand_title, base_theme)
                )
            except Exception:
                # Fallback to a bundled default theme if token-based generation fails.
                theme_obj = (
                    get_default_light_theme() if base_theme == "light" else get_default_dark_theme()
                )
                theme = {
                    "name": theme_obj.name if hasattr(theme_obj, "name") else "Default",
                    "colors": {},
                }

            theme_file = f"// Storybook theme for {brand_title}\nexport default {str(theme)};"
            manager_file = "import addons from '@storybook/addons';\nimport theme from './dss-theme';\naddons.setConfig({ theme });"
            preview_file = "import '../dss-theme';\nexport default { parameters: { actions: { argTypesRegex: '^on[A-Z].*' } } };"

            return {
                "project_id": project_id,
                "preview": True,
                "brand_title": brand_title,
                "base_theme": base_theme,
                "files": {
                    "dss-theme.ts": theme_file,
                    "manager.ts": manager_file,
                    "preview.ts": preview_file,
                },
                "token_count": len(tokens_list),
            }
        except Exception as e:
            return {"error": f"Failed to generate theme: {str(e)}", "project_id": project_id}

    async def get_status(self, project_id: str) -> Dict[str, Any]:
        """
        Get Storybook installation and configuration status.

        Args:
            project_id: Project ID

        Returns:
            Storybook status information, or an ``error`` dict on failure.
        """
        try:
            from dss.storybook.config import get_storybook_status

            project_path = await self._get_project_path(project_id)
            status = await self._maybe_await(get_storybook_status(str(project_path)))
            return {
                "project_id": project_id,
                "path": str(project_path),
                **(status if isinstance(status, dict) else {}),
            }
        except Exception as e:
            return {
                "error": f"Failed to get Storybook status: {str(e)}",
                "project_id": project_id,
                "installed": False,
            }

    async def configure(
        self, project_id: str, action: str = "init", options: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Configure or update Storybook for project.

        Args:
            project_id: Project ID
            action: Configuration action (init, update, add_theme)
            options: Configuration options

        Returns:
            Configuration results, or an ``error`` dict on failure.
        """
        try:
            from dss.storybook.config import write_storybook_config_file

            project_path = await self._get_project_path(project_id)
            options = options or {}
            config = {
                "action": action,
                "framework": options.get("framework", "react"),
                "builder": options.get("builder", "vite"),
                "typescript": options.get("typescript", True),
            }
            # The helper's return value was unused in the original; we only
            # care that it completes without raising.
            await self._maybe_await(write_storybook_config_file(str(project_path), config))
            return {
                "project_id": project_id,
                "action": action,
                "success": True,
                "path": str(project_path),
                "config_path": str(project_path / ".storybook"),
                "options": config,
            }
        except Exception as e:
            return {
                "error": f"Failed to configure Storybook: {str(e)}",
                "project_id": project_id,
                "action": action,
                "success": False,
            }
class StorybookTools:
    """MCP tool executor for Storybook integration.

    Thin dispatch layer that maps MCP tool names onto methods of a
    StorybookIntegration instance and forwards sanitized arguments.
    """

    # Tool name -> StorybookIntegration method name.
    _DISPATCH = {
        "storybook_scan": "scan_storybook",
        "storybook_generate_stories": "generate_stories",
        "storybook_generate_theme": "generate_theme",
        "storybook_get_status": "get_status",
        "storybook_configure": "configure",
    }

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Args:
            config: Optional Storybook configuration forwarded to the
                underlying StorybookIntegration.
        """
        self.storybook = StorybookIntegration(config)

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a Storybook tool by name.

        Args:
            tool_name: Name of tool to execute
            arguments: Tool arguments; keys starting with "_" are internal
                metadata and are stripped before dispatch.

        Returns:
            The handler's result dict, or an error dict for unknown tools
            and handler failures.
        """
        method_name = self._DISPATCH.get(tool_name)
        if method_name is None:
            return {"error": f"Unknown Storybook tool: {tool_name}"}
        handler = getattr(self.storybook, method_name)
        try:
            # Strip internal-prefixed keys before calling the handler.
            public_args = {
                key: value for key, value in arguments.items() if not key.startswith("_")
            }
            return await handler(**public_args)
        except Exception as e:
            return {"error": f"Tool execution failed: {str(e)}", "tool": tool_name}

File diff suppressed because it is too large Load Diff

View File

@@ -1,313 +0,0 @@
"""
DSS MCP Operations Module.
Handles long-running operations with status tracking, result storage, and cancellation support.
Operations are queued and executed asynchronously with persistent state.
"""
import asyncio
import json
import uuid
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
class OperationStatus(Enum):
    """Operation execution status lifecycle."""

    PENDING = "pending"      # queued, not yet picked up by a worker
    RUNNING = "running"      # currently executing
    COMPLETED = "completed"  # finished successfully, result available
    FAILED = "failed"        # finished with an error
    CANCELLED = "cancelled"  # cancelled before execution started


class Operation:
    """Represents a single queued operation and its execution state."""

    def __init__(self, operation_type: str, args: Dict[str, Any], user_id: Optional[str] = None):
        """
        Args:
            operation_type: Type of operation (e.g. 'sync_tokens')
            args: JSON-serializable operation arguments
            user_id: Optional user ID for tracking/filtering
        """
        self.id = str(uuid.uuid4())
        self.operation_type = operation_type
        self.args = args
        self.user_id = user_id
        self.status = OperationStatus.PENDING
        self.result = None  # set by the worker on completion
        self.error = None   # set by the worker on failure
        self.progress = 0   # 0-100
        self.created_at = datetime.utcnow()
        self.started_at = None
        self.completed_at = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a flat dictionary suitable for DB storage.

        JSON-encodes ``args``/``result`` and ISO-formats the timestamps.
        """
        return {
            "id": self.id,
            "operation_type": self.operation_type,
            "args": json.dumps(self.args),
            "user_id": self.user_id,
            "status": self.status.value,
            # BUG FIX: use an explicit None check so falsy-but-valid results
            # ({}, [], 0, "") are persisted instead of silently dropped.
            "result": json.dumps(self.result) if self.result is not None else None,
            "error": self.error,
            "progress": self.progress,
            "created_at": self.created_at.isoformat(),
            "started_at": self.started_at.isoformat() if self.started_at else None,
            "completed_at": self.completed_at.isoformat() if self.completed_at else None,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Operation":
        """Reconstruct an Operation from a dictionary produced by to_dict()."""
        op = cls(
            operation_type=data["operation_type"],
            args=json.loads(data["args"]),
            user_id=data.get("user_id"),
        )
        op.id = data["id"]
        op.status = OperationStatus(data["status"])
        # Mirror of the fix in to_dict(): only a stored NULL means "no result".
        stored_result = data.get("result")
        op.result = json.loads(stored_result) if stored_result is not None else None
        op.error = data.get("error")
        op.progress = data.get("progress", 0)
        op.created_at = datetime.fromisoformat(data["created_at"])
        if data.get("started_at"):
            op.started_at = datetime.fromisoformat(data["started_at"])
        if data.get("completed_at"):
            op.completed_at = datetime.fromisoformat(data["completed_at"])
        return op
class OperationQueue:
    """
    Manages async operations with status tracking.
    Operations are stored in database for persistence and recovery.
    Multiple workers can process operations in parallel while respecting
    per-resource locks to prevent concurrent modifications.
    """
    # NOTE(review): this class calls `get_connection` but no import for it is
    # visible in this module's import block — presumably a sqlite helper from
    # a sibling db module; confirm the import exists in the full file.
    # In-memory queue for active operations
    _active_operations: Dict[str, Operation] = {}
    _queue: asyncio.Queue = None  # created by initialize(); None until then
    _workers: list = []  # asyncio Tasks draining _queue
    @classmethod
    async def initialize(cls, num_workers: int = 4):
        """Initialize operation queue with worker pool of `num_workers` tasks."""
        cls._queue = asyncio.Queue()
        cls._workers = []
        for i in range(num_workers):
            worker = asyncio.create_task(cls._worker(i))
            cls._workers.append(worker)
    @classmethod
    async def enqueue(
        cls, operation_type: str, args: Dict[str, Any], user_id: Optional[str] = None
    ) -> str:
        """
        Enqueue a new operation.
        Args:
            operation_type: Type of operation (e.g., 'sync_tokens')
            args: Operation arguments (must be JSON-serializable)
            user_id: Optional user ID for tracking
        Returns:
            Operation ID for status checking
        """
        operation = Operation(operation_type, args, user_id)
        # Save to database
        cls._save_operation(operation)
        # Add to in-memory tracking
        cls._active_operations[operation.id] = operation
        # Queue for processing
        await cls._queue.put(operation)
        return operation.id
    @classmethod
    def get_status(cls, operation_id: str) -> Optional[Dict[str, Any]]:
        """Get operation status and result, or None if the ID is unknown."""
        # Check in-memory first
        if operation_id in cls._active_operations:
            op = cls._active_operations[operation_id]
            return {
                "id": op.id,
                "status": op.status.value,
                "progress": op.progress,
                "result": op.result,
                "error": op.error,
            }
        # Check database for completed operations
        with get_connection() as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT * FROM operations WHERE id = ?", (operation_id,))
            row = cursor.fetchone()
            if not row:
                return None
            # dict(row) assumes a Row-style row factory — TODO confirm
            # get_connection configures one.
            op = Operation.from_dict(dict(row))
            return {
                "id": op.id,
                "status": op.status.value,
                "progress": op.progress,
                "result": op.result,
                "error": op.error,
            }
    @classmethod
    def get_result(cls, operation_id: str) -> Optional[Any]:
        """Get the result of a completed operation.

        Note: despite the original "blocks if still running" wording, this
        does not wait — pending/running operations raise RuntimeError.

        Raises:
            ValueError: If the operation does not exist.
            RuntimeError: If the operation failed or has not finished.
        """
        status = cls.get_status(operation_id)
        if not status:
            raise ValueError(f"Operation not found: {operation_id}")
        if status["status"] == OperationStatus.COMPLETED.value:
            return status["result"]
        elif status["status"] == OperationStatus.FAILED.value:
            raise RuntimeError(f"Operation failed: {status['error']}")
        else:
            raise RuntimeError(f"Operation still {status['status']}: {operation_id}")
    @classmethod
    def cancel(cls, operation_id: str) -> bool:
        """Cancel a pending operation.

        Only operations still in PENDING state can be cancelled; running or
        finished operations return False.
        """
        if operation_id not in cls._active_operations:
            return False
        op = cls._active_operations[operation_id]
        if op.status == OperationStatus.PENDING:
            op.status = OperationStatus.CANCELLED
            op.completed_at = datetime.utcnow()
            cls._save_operation(op)
            return True
        return False
    @classmethod
    def list_operations(
        cls,
        operation_type: Optional[str] = None,
        status: Optional[str] = None,
        user_id: Optional[str] = None,
        limit: int = 100,
    ) -> list:
        """List operations (as dicts) with optional filtering, newest first."""
        with get_connection() as conn:
            cursor = conn.cursor()
            # Filters are appended as parameterized AND clauses.
            query = "SELECT * FROM operations WHERE 1=1"
            params = []
            if operation_type:
                query += " AND operation_type = ?"
                params.append(operation_type)
            if status:
                query += " AND status = ?"
                params.append(status)
            if user_id:
                query += " AND user_id = ?"
                params.append(user_id)
            query += " ORDER BY created_at DESC LIMIT ?"
            params.append(limit)
            cursor.execute(query, params)
            return [Operation.from_dict(dict(row)).to_dict() for row in cursor.fetchall()]
    # Private helper methods
    @classmethod
    def _save_operation(cls, operation: Operation):
        """Save operation to database (INSERT OR REPLACE keyed by id)."""
        data = operation.to_dict()
        with get_connection() as conn:
            conn.execute(
                """
                INSERT OR REPLACE INTO operations (
                    id, operation_type, args, user_id, status, result,
                    error, progress, created_at, started_at, completed_at
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                # Relies on Operation.to_dict()'s insertion order matching the
                # column list above exactly — keep the two in sync.
                tuple(data.values()),
            )
    @classmethod
    async def _worker(cls, worker_id: int):
        """Worker coroutine that processes operations from queue."""
        while True:
            try:
                operation = await cls._queue.get()
                # Mark as running
                operation.status = OperationStatus.RUNNING
                operation.started_at = datetime.utcnow()
                cls._save_operation(operation)
                # Execute operation (placeholder - would call actual handlers)
                try:
                    # TODO: Implement actual operation execution
                    # based on operation_type
                    operation.result = {
                        "message": f"Operation {operation.operation_type} completed"
                    }
                    operation.status = OperationStatus.COMPLETED
                    operation.progress = 100
                except Exception as e:
                    operation.error = str(e)
                    operation.status = OperationStatus.FAILED
                # Mark as completed
                operation.completed_at = datetime.utcnow()
                cls._save_operation(operation)
                cls._queue.task_done()
            except asyncio.CancelledError:
                # Normal shutdown path: callers cancel the worker tasks.
                break
            except Exception as e:
                # Log error and continue
                print(f"Worker {worker_id} error: {str(e)}")
                await asyncio.sleep(1)
    @classmethod
    def ensure_operations_table(cls):
        """Ensure operations table and its indexes exist (idempotent)."""
        with get_connection() as conn:
            conn.execute(
                """
                CREATE TABLE IF NOT EXISTS operations (
                    id TEXT PRIMARY KEY,
                    operation_type TEXT NOT NULL,
                    args TEXT NOT NULL,
                    user_id TEXT,
                    status TEXT DEFAULT 'pending',
                    result TEXT,
                    error TEXT,
                    progress INTEGER DEFAULT 0,
                    created_at TEXT DEFAULT CURRENT_TIMESTAMP,
                    started_at TEXT,
                    completed_at TEXT
                )
                """
            )
            conn.execute(
                "CREATE INDEX IF NOT EXISTS idx_operations_type ON operations(operation_type)"
            )
            conn.execute("CREATE INDEX IF NOT EXISTS idx_operations_status ON operations(status)")
            conn.execute("CREATE INDEX IF NOT EXISTS idx_operations_user ON operations(user_id)")


# Initialize table on import
OperationQueue.ensure_operations_table()

View File

@@ -1,279 +0,0 @@
"""
Dynamic Plugin Registry for DSS MCP Server.
Automatically discovers and registers MCP tools from the plugins/ directory.
Plugins follow a simple contract: export TOOLS list and a handler class with execute_tool() method.
"""
import importlib
import inspect
import logging
import pkgutil
import types as python_types
from typing import Any, Dict, List, Optional
from mcp import types
logger = logging.getLogger("dss.mcp.plugins")
class PluginRegistry:
    """
    Discovers and manages dynamically loaded plugins.
    Plugin Contract:
        - Must export TOOLS: List[types.Tool] - MCP tool definitions
        - Must have a class with execute_tool(name: str, arguments: dict) method
        - Optional: PLUGIN_METADATA dict with name, version, author
    Example Plugin Structure:
        ```python
        from mcp import types
        PLUGIN_METADATA = {
            "name": "Example Plugin",
            "version": "1.0.0",
            "author": "DSS Team"
        }
        TOOLS = [
            types.Tool(
                name="example_tool",
                description="Example tool",
                inputSchema={...}
            )
        ]
        class PluginTools:
            async def execute_tool(self, name: str, arguments: dict):
                if name == "example_tool":
                    return {"result": "success"}
                raise ValueError(f"Unknown tool: {name}")
        ```
    """

    def __init__(self):
        self.tools: List[types.Tool] = []  # merged tool definitions from all plugins
        self.handlers: Dict[str, Any] = {}  # tool_name -> handler_instance
        self.plugins: List[Dict[str, Any]] = []  # plugin metadata
        self._loaded_modules: set = set()  # fully-qualified module names already registered

    def load_plugins(self, plugins_package_name: str = "dss_mcp.plugins"):
        """
        Scans the plugins directory and registers valid tool modules.
        Args:
            plugins_package_name: Fully qualified name of plugins package
                Default: "dss_mcp.plugins" (works when called from tools/ dir)
        """
        try:
            # Dynamically import the plugins package
            plugins_pkg = importlib.import_module(plugins_package_name)
            path = plugins_pkg.__path__
            prefix = plugins_pkg.__name__ + "."
            logger.info(f"Scanning for plugins in: {path}")
            # Iterate through all modules in the plugins directory
            for _, name, is_pkg in pkgutil.iter_modules(path, prefix):
                # Skip packages (only load .py files)
                if is_pkg:
                    continue
                # Skip template and private modules
                module_basename = name.split(".")[-1]
                if module_basename.startswith("_"):
                    logger.debug(f"Skipping private module: {module_basename}")
                    continue
                try:
                    module = importlib.import_module(name)
                    self._register_module(module)
                except Exception as e:
                    # One broken plugin must not prevent the others loading.
                    logger.error(f"Failed to load plugin module {name}: {e}", exc_info=True)
        except ImportError as e:
            # Missing plugins package is non-fatal: run without plugins.
            logger.warning(f"Plugins package not found: {plugins_package_name} ({e})")
            logger.info("Server will run without plugins")

    def _register_module(self, module: python_types.ModuleType):
        """
        Validates and registers a single plugin module.

        Applies the plugin contract checks in order and registers each tool
        that passes validation; silently skips modules without TOOLS.
        Args:
            module: The imported plugin module
        """
        module_name = module.__name__
        # Check if already loaded
        if module_name in self._loaded_modules:
            logger.debug(f"Module already loaded: {module_name}")
            return
        # Contract Check 1: Must export TOOLS list
        if not hasattr(module, "TOOLS"):
            logger.debug(f"Module {module_name} has no TOOLS export, skipping")
            return
        if not isinstance(module.TOOLS, list):
            logger.error(f"Module {module_name} TOOLS must be a list, got {type(module.TOOLS)}")
            return
        if len(module.TOOLS) == 0:
            logger.warning(f"Module {module_name} has empty TOOLS list")
            return
        # Contract Check 2: Must have a class with execute_tool method
        handler_instance = self._find_and_instantiate_handler(module)
        if not handler_instance:
            logger.warning(f"Plugin {module_name} has TOOLS but no valid handler class")
            return
        # Contract Check 3: execute_tool must be async (coroutine)
        execute_tool_method = getattr(handler_instance, "execute_tool", None)
        if execute_tool_method and not inspect.iscoroutinefunction(execute_tool_method):
            logger.error(
                f"Plugin '{module_name}' is invalid: 'PluginTools.execute_tool' must be "
                f"an async function ('async def'). Skipping plugin."
            )
            return
        # Extract metadata
        metadata = getattr(module, "PLUGIN_METADATA", {})
        plugin_name = metadata.get("name", module_name.split(".")[-1])
        plugin_version = metadata.get("version", "unknown")
        # Validate tools and check for name collisions
        registered_count = 0
        for tool in module.TOOLS:
            if not hasattr(tool, "name"):
                logger.error(f"Tool in {module_name} missing 'name' attribute")
                continue
            # Check for name collision (first registration wins)
            if tool.name in self.handlers:
                logger.error(
                    f"Tool name collision: '{tool.name}' already registered. "
                    f"Skipping duplicate from {module_name}"
                )
                continue
            # Register tool
            self.tools.append(tool)
            self.handlers[tool.name] = handler_instance
            registered_count += 1
            logger.debug(f"Registered tool: {tool.name}")
        # Track plugin metadata
        self.plugins.append(
            {
                "name": plugin_name,
                "version": plugin_version,
                "module": module_name,
                "tools_count": registered_count,
                "author": metadata.get("author", "unknown"),
            }
        )
        self._loaded_modules.add(module_name)
        logger.info(
            f"Loaded plugin: {plugin_name} v{plugin_version} "
            f"({registered_count} tools from {module_name})"
        )

    def _find_and_instantiate_handler(self, module: python_types.ModuleType) -> Optional[Any]:
        """
        Finds a class implementing execute_tool and instantiates it.
        Args:
            module: The plugin module to search
        Returns:
            Instantiated handler class or None if not found
        """
        for name, obj in inspect.getmembers(module, inspect.isclass):
            # Only consider classes defined in this module (not imports)
            if obj.__module__ != module.__name__:
                continue
            # Look for execute_tool method
            if hasattr(obj, "execute_tool"):
                try:
                    # Try to instantiate with no args
                    instance = obj()
                    logger.debug(f"Instantiated handler class: {name}")
                    return instance
                except TypeError:
                    # NOTE(review): `obj(**{})` is the exact same call as
                    # `obj()`, so this fallback can never succeed where the
                    # first attempt raised TypeError — candidate for cleanup.
                    try:
                        instance = obj(**{})
                        logger.debug(f"Instantiated handler class with kwargs: {name}")
                        return instance
                    except Exception as e:
                        logger.error(
                            f"Failed to instantiate handler {name} in {module.__name__}: {e}"
                        )
                        return None
                except Exception as e:
                    logger.error(f"Failed to instantiate handler {name} in {module.__name__}: {e}")
                    return None
        return None

    async def execute_tool(self, name: str, arguments: dict) -> Any:
        """
        Routes tool execution to the correct plugin handler.
        Args:
            name: Tool name
            arguments: Tool arguments
        Returns:
            Tool execution result
        Raises:
            ValueError: If tool not found in registry
        """
        if name not in self.handlers:
            raise ValueError(f"Tool '{name}' not found in plugin registry")
        handler = self.handlers[name]
        # Support both async and sync implementations. Registration already
        # rejects sync execute_tool (Contract Check 3), so the sync branch is
        # defensive only.
        if inspect.iscoroutinefunction(handler.execute_tool):
            return await handler.execute_tool(name, arguments)
        else:
            return handler.execute_tool(name, arguments)

    def get_all_tools(self) -> List[types.Tool]:
        """Get merged list of all plugin tools (a copy; safe to mutate)."""
        return self.tools.copy()

    def get_plugin_info(self) -> List[Dict[str, Any]]:
        """Get metadata for all loaded plugins (a copy; safe to mutate)."""
        return self.plugins.copy()

    def reload_plugins(self, plugins_package_name: str = "dss_mcp.plugins"):
        """
        Reload all plugins (useful for development).
        WARNING: This clears all registered plugins and reloads from scratch.
        Args:
            plugins_package_name: Fully qualified name of plugins package
        """
        logger.info("Reloading all plugins...")
        # Clear existing registrations
        self.tools.clear()
        self.handlers.clear()
        self.plugins.clear()
        self._loaded_modules.clear()
        # Reload
        self.load_plugins(plugins_package_name)
        logger.info(
            f"Plugin reload complete. Loaded {len(self.plugins)} plugins, {len(self.tools)} tools"
        )

View File

@@ -1,55 +0,0 @@
"""
DSS MCP Server Plugins.
This directory contains dynamically loaded plugins for the DSS MCP server.
Plugin Contract:
- Each plugin is a .py file in this directory
- Must export TOOLS: List[types.Tool] with MCP tool definitions
- Must have a handler class with execute_tool(name, arguments) method
- Optional: export PLUGIN_METADATA dict with name, version, author
Example Plugin Structure:
from mcp import types
PLUGIN_METADATA = {
"name": "My Plugin",
"version": "1.0.0",
"author": "DSS Team"
}
TOOLS = [
types.Tool(name="my_tool", description="...", inputSchema={...})
]
class PluginTools:
async def execute_tool(self, name, arguments):
if name == "my_tool":
return {"result": "success"}
Developer Workflow:
1. Copy _template.py to new_plugin.py
2. Edit TOOLS list and PluginTools class
3. (Optional) Create requirements.txt if plugin needs dependencies
4. Run: ../install_plugin_deps.sh (if dependencies added)
5. Restart MCP server: supervisorctl restart dss-mcp
6. Plugin tools are immediately available to all clients
Dependency Management:
- If your plugin needs Python packages, create a requirements.txt file
- Place it in the same directory as your plugin (e.g., plugins/my_plugin/requirements.txt)
- Run ../install_plugin_deps.sh to install all plugin dependencies
- Use --check flag to see which plugins have dependencies without installing
Example plugin with dependencies:
plugins/
├── my_plugin/
│ ├── __init__.py
│ ├── tool.py (exports TOOLS and PluginTools)
│ └── requirements.txt (jinja2>=3.1.2, httpx>=0.25.0)
└── _template.py
See _template.py for a complete example.
"""
__all__ = [] # Plugins are auto-discovered, not explicitly exported

View File

@@ -1,205 +0,0 @@
"""
Plugin Template for DSS MCP Server.
This file serves as both documentation and a starting point for new plugins.
To create a new plugin:
1. Copy this file: cp _template.py my_plugin.py
2. Update PLUGIN_METADATA with your plugin details
3. Define your tools in the TOOLS list
4. Implement tool logic in the PluginTools class
5. Restart the MCP server
The plugin will be automatically discovered and registered.
"""
from typing import Any, Dict, List
from mcp import types
# =============================================================================
# 1. PLUGIN METADATA (Optional but recommended)
# =============================================================================
# Optional descriptive metadata surfaced by the plugin registry.
PLUGIN_METADATA = {
    "name": "Template Plugin",
    "version": "1.0.0",
    "author": "DSS Team",
    "description": "Template plugin demonstrating the plugin contract",
}
# =============================================================================
# 2. TOOLS DEFINITION (Required)
# =============================================================================
# Each entry is an MCP tool definition; `inputSchema` is a JSON Schema object
# describing the tool's arguments. Tool names here must match the names routed
# in PluginTools.execute_tool().
TOOLS = [
    types.Tool(
        name="template_hello",
        description="A simple hello world tool to verify the plugin system works",
        inputSchema={
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "Name to greet (optional)",
                    "default": "World",
                }
            },
        },
    ),
    types.Tool(
        name="template_echo",
        description="Echo back the provided message",
        inputSchema={
            "type": "object",
            "properties": {
                "message": {"type": "string", "description": "Message to echo back"},
                "uppercase": {
                    "type": "boolean",
                    "description": "Convert to uppercase (optional)",
                    "default": False,
                },
            },
            "required": ["message"],
        },
    ),
]
# =============================================================================
# 3. PLUGIN TOOLS HANDLER (Required)
# =============================================================================
class PluginTools:
    """
    Handler class for plugin tools.

    The PluginRegistry instantiates this class and calls execute_tool()
    to service tool invocations.

    Contract:
        - Must expose async execute_tool(name: str, arguments: dict)
        - Should return list[types.TextContent | types.ImageContent | types.EmbeddedResource]
        - May raise; the MCP server catches and logs exceptions
    """

    def __init__(self, **kwargs):
        """
        Initialize the plugin tools handler.

        Args:
            **kwargs: Optional injected dependencies (context_manager,
                user_id, audit_log, ...).
        """
        # Dependencies are optional; missing ones come through as None.
        self.context_manager = kwargs.get("context_manager")
        self.user_id = kwargs.get("user_id")
        self.audit_log = kwargs.get("audit_log")
        # Plugin-local state: number of execute_tool invocations served.
        self.call_count = 0

    async def execute_tool(self, name: str, arguments: Dict[str, Any]) -> List:
        """
        Route a tool call to its implementation method.

        Args:
            name: Tool name (matches TOOLS[].name)
            arguments: Tool arguments from the client

        Returns:
            List of MCP content objects (TextContent, ImageContent, etc.)

        Raises:
            ValueError: If the tool name is unknown.
        """
        self.call_count += 1
        dispatch = {
            "template_hello": self._handle_hello,
            "template_echo": self._handle_echo,
        }
        handler = dispatch.get(name)
        if handler is None:
            raise ValueError(f"Unknown tool: {name}")
        return await handler(arguments)

    async def _handle_hello(self, arguments: Dict[str, Any]) -> List[types.TextContent]:
        """
        Implementation of template_hello: greet the caller by name.

        Args:
            arguments: Tool arguments (optional 'name', defaults to "World")

        Returns:
            Single TextContent greeting.
        """
        who = arguments.get("name", "World")
        message = f"Hello, {who}! The plugin system is operational. (Call #{self.call_count})"
        return [types.TextContent(type="text", text=message)]

    async def _handle_echo(self, arguments: Dict[str, Any]) -> List[types.TextContent]:
        """
        Implementation of template_echo: echo the message back.

        Args:
            arguments: Tool arguments ('message' required, 'uppercase' optional)

        Returns:
            Single TextContent with the (optionally uppercased) message.
        """
        text = arguments["message"]
        if arguments.get("uppercase", False):
            text = text.upper()
        return [types.TextContent(type="text", text=f"Echo: {text}")]
# =============================================================================
# NOTES FOR PLUGIN DEVELOPERS
# =============================================================================
"""
## Plugin Development Tips
### Error Handling
- The plugin loader catches exceptions during loading, so syntax errors won't crash the server
- Runtime exceptions in execute_tool() are caught and logged by the MCP server
- Return clear error messages to help users understand what went wrong
### Dependencies
- You can import from other DSS modules: from ..context.project_context import get_context_manager
- Keep dependencies minimal - plugins should be self-contained
- Standard library and existing DSS dependencies only (no new pip packages without discussion)
### Testing
- Test your plugin by:
1. Restarting the MCP server: supervisorctl restart dss-mcp
2. Using the MCP server directly via API: POST /api/tools/your_tool_name
3. Via Claude Code if connected to the MCP server
### Best Practices
- Use clear, descriptive tool names prefixed with your plugin name (e.g., "analytics_track_event")
- Provide comprehensive inputSchema with descriptions
- Return structured data using types.TextContent
- Log errors with logger.error() for debugging
- Keep tools focused - one tool should do one thing well
### Advanced Features
- For image results, use types.ImageContent
- For embedded resources, use types.EmbeddedResource
- Access project context via self.context_manager if injected
- Use async/await for I/O operations (API calls, database queries, etc.)
## Example Plugin Ideas
- **Network Logger**: Capture and analyze browser network requests
- **Performance Analyzer**: Measure component render times, bundle sizes
- **Workflow Helper**: Automate common development workflows
- **Integration Tools**: Connect to external services (Slack, GitHub, etc.)
- **Custom Validators**: Project-specific validation rules
"""

View File

@@ -1,82 +0,0 @@
"""
Hello World Plugin - Test Plugin for DSS MCP Server.
Simple plugin to validate the plugin loading system is working correctly.
"""
from typing import Any, Dict, List
from mcp import types
# Descriptive metadata surfaced by the plugin registry.
PLUGIN_METADATA = {
    "name": "Hello World Plugin",
    "version": "1.0.0",
    "author": "DSS Team",
    "description": "Simple test plugin to validate plugin system",
}
# MCP tool definitions; names must match the routing in PluginTools.execute_tool().
TOOLS = [
    types.Tool(
        name="hello_world",
        description="Simple hello world tool to test plugin loading",
        inputSchema={
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "Name to greet", "default": "World"}
            },
        },
    ),
    types.Tool(
        name="plugin_status",
        description="Get status of the plugin system",
        inputSchema={"type": "object", "properties": {}},
    ),
]
class PluginTools:
    """Handler for hello world plugin tools."""

    def __init__(self, **kwargs):
        # Number of tool invocations served by this handler instance.
        self.call_count = 0

    async def execute_tool(self, name: str, arguments: Dict[str, Any]) -> List:
        """Execute tool by name; raises ValueError for unknown names."""
        self.call_count += 1
        routes = {
            "hello_world": self._hello_world,
            "plugin_status": self._plugin_status,
        }
        handler = routes.get(name)
        if handler is None:
            raise ValueError(f"Unknown tool: {name}")
        return await handler(arguments)

    async def _hello_world(self, arguments: Dict[str, Any]) -> List[types.TextContent]:
        """Simple hello world implementation: greeting plus a system check list."""
        name = arguments.get("name", "World")
        report = [
            f"Hello, {name}!",
            "",
            "✓ Plugin system is operational",
            "✓ Dynamic loading works correctly",
            "✓ Tool routing is functional",
            f"✓ Call count: {self.call_count}",
        ]
        return [types.TextContent(type="text", text="\n".join(report))]

    async def _plugin_status(self, arguments: Dict[str, Any]) -> List[types.TextContent]:
        """Return plugin system status as pretty-printed JSON text."""
        import json

        status = {
            "status": "operational",
            "plugin_name": PLUGIN_METADATA["name"],
            "plugin_version": PLUGIN_METADATA["version"],
            "tools_count": len(TOOLS),
            "call_count": self.call_count,
            "tools": [tool.name for tool in TOOLS],
        }
        return [types.TextContent(type="text", text=json.dumps(status, indent=2))]

View File

@@ -1,36 +0,0 @@
# MCP Server Dependencies
# Model Context Protocol
mcp>=0.9.0
# Anthropic SDK
anthropic>=0.40.0
# FastAPI & SSE
fastapi>=0.104.0
sse-starlette>=1.8.0
uvicorn[standard]>=0.24.0
# HTTP Client
httpx>=0.25.0
aiohttp>=3.9.0
# Atlassian Integrations
atlassian-python-api>=3.41.0
# Encryption
cryptography>=42.0.0
# Async Task Queue (for worker pool)
celery[redis]>=5.3.0
# Caching
redis>=5.0.0
# Environment Variables
python-dotenv>=1.0.0
# Database
aiosqlite>=0.19.0
# Logging
structlog>=23.2.0

View File

@@ -1,248 +0,0 @@
"""
DSS MCP Security Module.
Handles encryption, decryption, and secure storage of sensitive credentials.
Uses cryptography library for AES-256 encryption with per-credential salt.
"""
import json
import os
import secrets
from datetime import datetime
from typing import Any, Dict, Optional
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
class CredentialVault:
"""
Manages encrypted credential storage.
All credentials are encrypted using Fernet (AES-128 in CBC mode)
with PBKDF2-derived keys from a master encryption key.
"""
# Master encryption key (should be set via environment variable)
MASTER_KEY = os.environ.get("DSS_ENCRYPTION_KEY", "").encode()
@classmethod
def _get_cipher_suite(cls, salt: bytes) -> Fernet:
"""Derive encryption cipher from master key and salt."""
if not cls.MASTER_KEY:
raise ValueError(
"DSS_ENCRYPTION_KEY environment variable not set. "
"Required for credential encryption."
)
# Derive key from master key using PBKDF2
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=default_backend(),
)
key = kdf.derive(cls.MASTER_KEY)
# Encode key for Fernet
import base64
key_b64 = base64.urlsafe_b64encode(key)
return Fernet(key_b64)
@classmethod
def encrypt_credential(
cls, credential_type: str, credential_data: Dict[str, Any], user_id: Optional[str] = None
) -> str:
"""
Encrypt and store a credential.
Args:
credential_type: Type of credential (figma_token, jira_token, etc.)
credential_data: Dictionary containing credential details
user_id: Optional user ID for multi-tenant security
Returns:
Credential ID for later retrieval
"""
import base64
import uuid
credential_id = str(uuid.uuid4())
salt = secrets.token_bytes(16) # 128-bit salt
# Serialize credential data
json_data = json.dumps(credential_data)
# Encrypt
cipher = cls._get_cipher_suite(salt)
encrypted = cipher.encrypt(json_data.encode())
# Store in database
with get_connection() as conn:
conn.execute(
"""
INSERT INTO credentials (
id, credential_type, encrypted_data, salt, user_id, created_at
) VALUES (?, ?, ?, ?, ?, ?)
""",
(
credential_id,
credential_type,
encrypted.decode(),
base64.b64encode(salt).decode(),
user_id,
datetime.utcnow().isoformat(),
),
)
return credential_id
@classmethod
def decrypt_credential(cls, credential_id: str) -> Optional[Dict[str, Any]]:
"""
Decrypt and retrieve a credential.
Args:
credential_id: Credential ID from encrypt_credential()
Returns:
Decrypted credential data or None if not found
"""
import base64
with get_connection() as conn:
cursor = conn.cursor()
cursor.execute(
"""
SELECT encrypted_data, salt FROM credentials WHERE id = ?
""",
(credential_id,),
)
row = cursor.fetchone()
if not row:
return None
encrypted_data, salt_b64 = row
salt = base64.b64decode(salt_b64)
# Decrypt
cipher = cls._get_cipher_suite(salt)
decrypted = cipher.decrypt(encrypted_data.encode())
return json.loads(decrypted.decode())
@classmethod
def delete_credential(cls, credential_id: str) -> bool:
"""Delete a credential."""
with get_connection() as conn:
cursor = conn.cursor()
cursor.execute("DELETE FROM credentials WHERE id = ?", (credential_id,))
return cursor.rowcount > 0
@classmethod
def list_credentials(
cls, credential_type: Optional[str] = None, user_id: Optional[str] = None
) -> list:
"""List credentials (metadata only, not decrypted)."""
with get_connection() as conn:
cursor = conn.cursor()
query = "SELECT id, credential_type, user_id, created_at FROM credentials WHERE 1=1"
params = []
if credential_type:
query += " AND credential_type = ?"
params.append(credential_type)
if user_id:
query += " AND user_id = ?"
params.append(user_id)
cursor.execute(query, params)
return [dict(row) for row in cursor.fetchall()]
@classmethod
def rotate_encryption_key(cls) -> bool:
"""
Rotate the master encryption key.
This re-encrypts all credentials with a new master key.
Requires new key to be set in DSS_ENCRYPTION_KEY_NEW environment variable.
"""
new_key = os.environ.get("DSS_ENCRYPTION_KEY_NEW", "").encode()
if not new_key:
raise ValueError("DSS_ENCRYPTION_KEY_NEW environment variable not set for key rotation")
try:
with get_connection() as conn:
cursor = conn.cursor()
# Get all credentials
cursor.execute("SELECT id, encrypted_data, salt FROM credentials")
rows = cursor.fetchall()
# Re-encrypt with new key
for row in rows:
credential_id, encrypted_data, salt_b64 = row
import base64
salt = base64.b64decode(salt_b64)
# Decrypt with old key
old_cipher = cls._get_cipher_suite(salt)
decrypted = old_cipher.decrypt(encrypted_data.encode())
# Encrypt with new key (use new master key)
old_master = cls.MASTER_KEY
cls.MASTER_KEY = new_key
try:
new_cipher = cls._get_cipher_suite(salt)
new_encrypted = new_cipher.encrypt(decrypted)
# Update database
conn.execute(
"UPDATE credentials SET encrypted_data = ? WHERE id = ?",
(new_encrypted.decode(), credential_id),
)
finally:
cls.MASTER_KEY = old_master
# Update environment
os.environ["DSS_ENCRYPTION_KEY"] = new_key.decode()
return True
except Exception as e:
raise RuntimeError(f"Key rotation failed: {str(e)}")
@classmethod
def ensure_credentials_table(cls):
    """Create the credentials table and its lookup indexes if absent."""
    with get_connection() as conn:
        conn.execute(
            """
            CREATE TABLE IF NOT EXISTS credentials (
                id TEXT PRIMARY KEY,
                credential_type TEXT NOT NULL,
                encrypted_data TEXT NOT NULL,
                salt TEXT NOT NULL,
                user_id TEXT,
                created_at TEXT DEFAULT CURRENT_TIMESTAMP,
                updated_at TEXT DEFAULT CURRENT_TIMESTAMP
            )
            """
        )
        # Secondary indexes for the common filter columns.
        for ddl in (
            "CREATE INDEX IF NOT EXISTS idx_credentials_type ON credentials(credential_type)",
            "CREATE INDEX IF NOT EXISTS idx_credentials_user ON credentials(user_id)",
        ):
            conn.execute(ddl)
# Initialize table on import
# NOTE(review): import-time side effect — DDL runs as soon as this module is
# imported; requires the database to be reachable at import time.
CredentialVault.ensure_credentials_table()

View File

@@ -1,404 +0,0 @@
"""
DSS MCP Server.
SSE-based Model Context Protocol server for Claude.
Provides project-isolated context and tools with user-scoped integrations.
"""
import asyncio
import json
import logging
from typing import Any, Dict, Optional
import structlog
from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from mcp import types
from mcp.server import Server
from sse_starlette.sse import EventSourceResponse
from .config import mcp_config, validate_config
from .context.project_context import get_context_manager
from .integrations.storybook import STORYBOOK_TOOLS
from .integrations.translations import TRANSLATION_TOOLS
from .plugin_registry import PluginRegistry
from .tools.debug_tools import DEBUG_TOOLS, DebugTools
from .tools.project_tools import PROJECT_TOOLS, ProjectTools
from .tools.workflow_tools import WORKFLOW_TOOLS, WorkflowTools
# Configure logging
logging.basicConfig(
    level=mcp_config.LOG_LEVEL, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = structlog.get_logger()
# FastAPI app for SSE endpoints
app = FastAPI(
    title="DSS MCP Server",
    description="Model Context Protocol server for Design System Server",
    version="0.8.0",
)
# CORS configuration
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# disallowed by the CORS spec (browsers reject wildcard + credentials) and is
# unsafe for production — confirm and restrict origins per environment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # TODO: Configure based on environment
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# MCP Server instance
mcp_server = Server("dss-mcp")
# Initialize Plugin Registry
plugin_registry = PluginRegistry()
plugin_registry.load_plugins()
# Store active sessions
# Maps "project_id:user_id" (see get_session_key) -> session info dict.
_active_sessions: Dict[str, Dict[str, Any]] = {}
def get_session_key(project_id: str, user_id: Optional[int] = None) -> str:
    """Build the cache key for a (project, user) pair; falsy user -> anonymous."""
    owner = user_id if user_id else "anonymous"
    return f"{project_id}:{owner}"
@app.on_event("startup")
async def startup():
    """Log server boot and surface any configuration warnings."""
    logger.info("Starting DSS MCP Server")
    # validate_config() returns a (possibly empty) list of warning strings.
    for warning in validate_config() or []:
        logger.warning(warning)
    logger.info("DSS MCP Server started", host=mcp_config.HOST, port=mcp_config.PORT)
@app.on_event("shutdown")
async def shutdown():
    """Log server shutdown; hook point for future cleanup tasks."""
    logger.info("Shutting down DSS MCP Server")
@app.get("/health")
async def health_check():
    """Liveness probe: reports server identity plus cache/session counters."""
    manager = get_context_manager()
    payload = {
        "status": "healthy",
        "server": "dss-mcp",
        "version": "0.8.0",
    }
    # NOTE: reads a private attribute of the context manager for the metric.
    payload["cache_size"] = len(manager._cache)
    payload["active_sessions"] = len(_active_sessions)
    return payload
@app.get("/sse")
async def sse_endpoint(
    project_id: str = Query(..., description="Project ID for context isolation"),
    user_id: Optional[int] = Query(None, description="User ID for user-scoped integrations"),
):
    """
    Server-Sent Events endpoint for MCP communication.

    Maintains a persistent connection with the client and streams MCP
    protocol events: a "connected" event followed by 30-second heartbeats.

    Raises:
        HTTPException: 404 if the project does not exist, 500 if the
            project context cannot be loaded.
    """
    session_key = get_session_key(project_id, user_id)
    logger.info(
        "SSE connection established",
        project_id=project_id,
        user_id=user_id,
        session_key=session_key,
    )
    # Load project context
    context_manager = get_context_manager()
    try:
        project_context = await context_manager.get_context(project_id, user_id)
        if not project_context:
            raise HTTPException(status_code=404, detail=f"Project not found: {project_id}")
    except HTTPException:
        # BUG FIX: the 404 above was previously caught by the broad handler
        # below and re-raised as a 500. Let HTTP errors propagate unchanged.
        raise
    except Exception as e:
        logger.error("Failed to load project context", error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to load project: {str(e)}")
    # Create project tools instance scoped to this user
    project_tools = ProjectTools(user_id)
    # Track session
    _active_sessions[session_key] = {
        "project_id": project_id,
        "user_id": user_id,
        "connected_at": asyncio.get_event_loop().time(),
        "project_tools": project_tools,
    }

    async def event_generator():
        """Generate SSE events for MCP communication."""
        try:
            # Send initial connection confirmation
            yield {
                "event": "connected",
                "data": json.dumps(
                    {
                        "project_id": project_id,
                        "project_name": project_context.name,
                        "available_tools": len(PROJECT_TOOLS),
                        "integrations_enabled": list(project_context.integrations.keys()),
                    }
                ),
            }
            # Keep connection alive with periodic heartbeats
            while True:
                await asyncio.sleep(30)  # Heartbeat every 30 seconds
                yield {
                    "event": "heartbeat",
                    "data": json.dumps({"timestamp": asyncio.get_event_loop().time()}),
                }
        except asyncio.CancelledError:
            logger.info("SSE connection closed", session_key=session_key)
        finally:
            # Cleanup session on disconnect
            if session_key in _active_sessions:
                del _active_sessions[session_key]

    return EventSourceResponse(event_generator())
# MCP Protocol Handlers
@mcp_server.list_tools()
async def list_tools() -> list[types.Tool]:
    """
    List all available tools.

    The set is assembled from the base DSS project tools, workflow
    orchestration tools, debug tools, Storybook and translation
    integration tools, and any dynamically loaded plugin tools.
    """
    tools = [
        *PROJECT_TOOLS,
        *WORKFLOW_TOOLS,
        *DEBUG_TOOLS,
        *STORYBOOK_TOOLS,
        *TRANSLATION_TOOLS,
        *plugin_registry.get_all_tools(),
    ]
    # TODO: Add integration-specific tools based on user's enabled integrations
    # This will be implemented in Phase 3
    logger.debug("Listed tools", tool_count=len(tools), plugin_count=len(plugin_registry.plugins))
    return tools
@mcp_server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
    """
    Execute a tool by name.

    Args:
        name: Tool name
        arguments: Tool arguments (must include project_id)

    Returns:
        A single-element list of TextContent holding the JSON-encoded
        result, or an error payload in the same shape. Plugin tools may
        return a list of MCP content objects directly.
    """
    logger.info("Tool called", tool_name=name, arguments=arguments)
    project_id = arguments.get("project_id")
    if not project_id:
        return [
            types.TextContent(type="text", text=json.dumps({"error": "project_id is required"}))
        ]
    # Find active session for this project
    # For now, use first matching session (can be enhanced with session management)
    # NOTE(review): lookup ignores user_id — the first session for the project
    # wins, and session_key is not used beyond this loop; confirm intended.
    session_key = None
    project_tools = None
    for key, session in _active_sessions.items():
        if session["project_id"] == project_id:
            session_key = key
            project_tools = session["project_tools"]
            break
    if not project_tools:
        # Create temporary tools instance (no user scoping)
        project_tools = ProjectTools()
    # Build name sets for each tool family to route the call
    workflow_tool_names = [tool.name for tool in WORKFLOW_TOOLS]
    debug_tool_names = [tool.name for tool in DEBUG_TOOLS]
    storybook_tool_names = [tool.name for tool in STORYBOOK_TOOLS]
    translation_tool_names = [tool.name for tool in TRANSLATION_TOOLS]
    # Execute tool
    try:
        if name in workflow_tool_names:
            # Handle workflow orchestration tools
            from .audit import AuditLog

            audit_log = AuditLog()
            workflow_tools = WorkflowTools(audit_log)
            result = await workflow_tools.handle_tool_call(name, arguments)
        elif name in debug_tool_names:
            # Handle debug tools
            debug_tools = DebugTools()
            result = await debug_tools.execute_tool(name, arguments)
        elif name in storybook_tool_names:
            # Handle Storybook tools (imported lazily to avoid import cycles)
            from .integrations.storybook import StorybookTools

            storybook_tools = StorybookTools()
            result = await storybook_tools.execute_tool(name, arguments)
        elif name in translation_tool_names:
            # Handle Translation tools
            from .integrations.translations import TranslationTools

            translation_tools = TranslationTools()
            result = await translation_tools.execute_tool(name, arguments)
        elif name in plugin_registry.handlers:
            # Handle plugin tools
            result = await plugin_registry.execute_tool(name, arguments)
            # Plugin tools return MCP content objects directly, not dicts
            if isinstance(result, list):
                return result
        else:
            # Handle regular project tools
            result = await project_tools.execute_tool(name, arguments)
        return [types.TextContent(type="text", text=json.dumps(result, indent=2))]
    except Exception as e:
        logger.error("Tool execution failed", tool_name=name, error=str(e))
        return [types.TextContent(type="text", text=json.dumps({"error": str(e)}))]
@mcp_server.list_resources()
async def list_resources() -> list[types.Resource]:
    """List available resources (none implemented yet)."""
    # TODO: derive resources from project context — e.g. project docs,
    # component specs, design system guidelines.
    return []
@mcp_server.read_resource()
async def read_resource(uri: str) -> str:
    """Read a specific resource by URI; not yet implemented.

    Args:
        uri: Resource URI (e.g. "dss://project-id/components/Button").
    """
    # TODO: resolve and return real resource content.
    return json.dumps({"error": "Resource reading not yet implemented"})
@mcp_server.list_prompts()
async def list_prompts() -> list[types.Prompt]:
    """List available prompt templates (none defined yet)."""
    # TODO: add DSS-specific prompt templates, e.g. "Analyze component
    # consistency" or "Review token usage".
    return []
@mcp_server.get_prompt()
async def get_prompt(name: str, arguments: dict) -> types.GetPromptResult:
    """Fetch a prompt template by name; not yet implemented.

    Args:
        name: Prompt name.
        arguments: Prompt arguments.
    """
    # TODO: implement prompt templates; currently always reports not found.
    return types.GetPromptResult(description="Prompt not found", messages=[])
# API endpoint to call MCP tools directly (for testing/debugging)
@app.post("/api/tools/{tool_name}")
async def call_tool_api(tool_name: str, arguments: Dict[str, Any]):
    """Invoke an MCP project tool over plain HTTP — testing aid that
    bypasses the MCP client entirely."""
    tools = ProjectTools()
    return await tools.execute_tool(tool_name, arguments)
# API endpoint to list active sessions
@app.get("/api/sessions")
async def list_sessions():
    """Summarize all active SSE sessions (project, user, connect time)."""
    summaries = [
        {
            "project_id": s["project_id"],
            "user_id": s["user_id"],
            "connected_at": s["connected_at"],
        }
        for s in _active_sessions.values()
    ]
    return {"active_sessions": len(_active_sessions), "sessions": summaries}
# API endpoint to clear context cache
@app.post("/api/cache/clear")
async def clear_cache(project_id: Optional[str] = None):
    """Clear the context cache for one project, or for all when omitted."""
    get_context_manager().clear_cache(project_id)
    return {"status": "cache_cleared", "project_id": project_id or "all"}
if __name__ == "__main__":
    import uvicorn

    logger.info("Starting DSS MCP Server", host=mcp_config.HOST, port=mcp_config.PORT)
    # NOTE(review): the "server:app" import string assumes the process is
    # launched from this file's directory, and reload=True is a development
    # setting — confirm both before deploying.
    uvicorn.run(
        "server:app",
        host=mcp_config.HOST,
        port=mcp_config.PORT,
        reload=True,
        log_level=mcp_config.LOG_LEVEL.lower(),
    )

View File

@@ -1,77 +0,0 @@
"""DSS MCP - Code Analysis Tools."""
import asyncio
from typing import Any, Dict
# Adjust the import path to find the project_analyzer
# This assumes the script is run from the project root.
from tools.analysis.project_analyzer import analyze_react_project, save_analysis
class Tool:
    """Minimal MCP tool descriptor: a name, a description, and a JSON
    Schema describing the tool's input."""

    def __init__(self, name: str, description: str, input_schema: Dict[str, Any]):
        self.name = name
        self.description = description
        # MCP protocol uses camelCase for this attribute name.
        self.inputSchema = input_schema
# Define the new tool
# Metadata-only descriptor; execution is handled by AnalysisTools.execute_tool.
analyze_project_tool = Tool(
    name="analyze_project",
    description="Analyzes a given project's structure, components, and styles. This is a long-running operation.",
    input_schema={
        "type": "object",
        "properties": {
            "project_path": {
                "type": "string",
                "description": "The absolute path to the project to be analyzed.",
            }
        },
        "required": ["project_path"],
    },
)
class AnalysisTools:
    """A wrapper class for analysis-related tools."""

    def __init__(self, user_id: str = None):
        # user_id is currently unused; kept for parity with other tool classes.
        self.user_id = user_id

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch a tool call by name; unknown names yield an error dict."""
        if tool_name == "analyze_project":
            return await self.analyze_project(arguments.get("project_path"))
        return {"error": f"Analysis tool '{tool_name}' not found."}

    async def analyze_project(self, project_path: str) -> Dict[str, Any]:
        """Triggers the analysis of a project.

        Args:
            project_path: Absolute path of the project to analyze.

        Returns:
            A summary dict with node/edge counts on success, or a dict
            with an "error" key on failure.
        """
        if not project_path:
            return {"error": "project_path is a required argument."}
        try:
            # This is a potentially long-running task; run it in the default
            # executor so the event loop is not blocked. (Should eventually
            # be offloaded to a background worker.)
            # FIX: get_running_loop() is the supported call inside a coroutine;
            # get_event_loop() is deprecated here since Python 3.10.
            loop = asyncio.get_running_loop()
            analysis_data = await loop.run_in_executor(None, analyze_react_project, project_path)
            # Persist the analysis result alongside the project.
            await loop.run_in_executor(None, save_analysis, project_path, analysis_data)
            return {
                "status": "success",
                "message": f"Analysis complete for project at {project_path}.",
                "graph_nodes": len(analysis_data.get("nodes", [])),
                "graph_edges": len(analysis_data.get("links", [])),
            }
        except Exception as e:
            return {"error": f"An error occurred during project analysis: {str(e)}"}
# A list of all tools in this module
# Exported for registration with the MCP server's tool list.
ANALYSIS_TOOLS = [analyze_project_tool]

View File

@@ -1,459 +0,0 @@
"""
DSS Debug Tools for MCP.
This module implements the MCP tool layer that bridges Claude Code to the DSS Debug API.
It allows the LLM to inspect browser sessions, check server health, and run debug workflows.
Configuration:
DSS_DEBUG_API_URL: Base URL for the DSS Debug API (default: http://localhost:3456)
"""
import json
import logging
import os
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional
from mcp import types
try:
import httpx
except ImportError:
httpx = None
# Configure logging
logger = logging.getLogger(__name__)
# Configuration
DSS_API_URL = os.getenv("DSS_DEBUG_API_URL", "http://localhost:3456")  # Debug API base URL
DEFAULT_LOG_LIMIT = 50  # default cap on log entries returned per query
# Tool definitions (metadata for Claude)
# Static mcp.types.Tool descriptors; each name must match a handler key in
# DebugTools.execute_tool.
DEBUG_TOOLS = [
    types.Tool(
        name="dss_list_browser_sessions",
        description="List all browser log sessions that have been captured. Use this to find session IDs for detailed analysis.",
        inputSchema={"type": "object", "properties": {}, "required": []},
    ),
    types.Tool(
        name="dss_get_browser_diagnostic",
        description="Get diagnostic summary for a specific browser session including log counts, error counts, and session metadata",
        inputSchema={
            "type": "object",
            "properties": {
                "session_id": {
                    "type": "string",
                    "description": "Session ID to inspect. If omitted, uses the most recent session.",
                }
            },
            "required": [],
        },
    ),
    types.Tool(
        name="dss_get_browser_errors",
        description="Get console errors and exceptions from a browser session. Filters logs to show only errors and warnings.",
        inputSchema={
            "type": "object",
            "properties": {
                "session_id": {
                    "type": "string",
                    "description": "Session ID. Defaults to most recent if omitted.",
                },
                "limit": {
                    "type": "integer",
                    "description": "Maximum number of errors to retrieve (default: 50)",
                    "default": 50,
                },
            },
            "required": [],
        },
    ),
    types.Tool(
        name="dss_get_browser_network",
        description="Get network request logs from a browser session. Useful for checking failed API calls (404, 500) or latency issues.",
        inputSchema={
            "type": "object",
            "properties": {
                "session_id": {
                    "type": "string",
                    "description": "Session ID. Defaults to most recent if omitted.",
                },
                "limit": {
                    "type": "integer",
                    "description": "Maximum number of entries to retrieve (default: 50)",
                    "default": 50,
                },
            },
            "required": [],
        },
    ),
    types.Tool(
        name="dss_get_server_status",
        description="Quick check if the DSS Debug Server is up and running. Returns simple UP/DOWN status from health check.",
        inputSchema={"type": "object", "properties": {}, "required": []},
    ),
    types.Tool(
        name="dss_get_server_diagnostic",
        description="Get detailed server health diagnostics including memory usage, database size, process info, and recent errors. Use for deep debugging of infrastructure.",
        inputSchema={"type": "object", "properties": {}, "required": []},
    ),
    types.Tool(
        name="dss_list_workflows",
        description="List available debug workflows that can be executed. Workflows are predefined diagnostic procedures.",
        inputSchema={"type": "object", "properties": {}, "required": []},
    ),
    types.Tool(
        name="dss_run_workflow",
        description="Execute a predefined debug workflow by ID. Workflows contain step-by-step diagnostic procedures.",
        inputSchema={
            "type": "object",
            "properties": {
                "workflow_id": {
                    "type": "string",
                    "description": "The ID of the workflow to run (see dss_list_workflows for available IDs)",
                }
            },
            "required": ["workflow_id"],
        },
    ),
]
class DebugTools:
    """Debug tool implementations.

    Thin async client over the DSS Debug API plus direct reads of the
    browser-log export directory (.dss/browser-logs). All methods return
    plain dicts; failures are reported via an "error" key rather than
    raised exceptions.
    """

    def __init__(self):
        # Base URL of the DSS Debug API (from DSS_DEBUG_API_URL env var).
        self.api_base = DSS_API_URL
        # Lazily resolved path to the browser-logs directory.
        self.browser_logs_dir = None

    def _get_browser_logs_dir(self) -> Path:
        """Get the browser logs directory path (cached after first call)."""
        if self.browser_logs_dir is None:
            # Assuming we're in tools/dss_mcp/tools/debug_tools.py
            # Root is 3 levels up
            root = Path(__file__).parent.parent.parent.parent
            self.browser_logs_dir = root / ".dss" / "browser-logs"
        return self.browser_logs_dir

    async def _request(
        self,
        method: str,
        endpoint: str,
        params: Optional[Dict[str, Any]] = None,
        json_data: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """Internal helper to make safe HTTP requests to the DSS Debug API.

        Never raises: connection, timeout, HTTP, and parse failures are all
        converted into a dict containing an "error" key.
        """
        if httpx is None:
            return {"error": "httpx library not installed. Run: pip install httpx"}
        url = f"{self.api_base.rstrip('/')}/{endpoint.lstrip('/')}"
        async with httpx.AsyncClient(timeout=10.0) as client:
            try:
                response = await client.request(method, url, params=params, json=json_data)
                # Handle non-200 responses
                if response.status_code >= 400:
                    try:
                        error_detail = response.json().get("detail", response.text)
                    except Exception:
                        error_detail = response.text
                    return {
                        "error": f"API returned status {response.status_code}",
                        "detail": error_detail,
                    }
                # Return JSON if possible
                try:
                    return response.json()
                except Exception:
                    return {"result": response.text}
            except httpx.ConnectError:
                return {
                    "error": f"Could not connect to DSS Debug API at {self.api_base}",
                    "suggestion": "Please ensure the debug server is running (cd tools/api && python3 -m uvicorn server:app --port 3456)",
                }
            except httpx.TimeoutException:
                return {"error": f"Request to DSS Debug API timed out ({url})"}
            except Exception as e:
                logger.error(f"DSS API Request failed: {e}")
                return {"error": f"Unexpected error: {str(e)}"}

    def _get_latest_session_id(self) -> Optional[str]:
        """Get the most recent browser session ID from filesystem, or None."""
        logs_dir = self._get_browser_logs_dir()
        if not logs_dir.exists():
            return None
        # Get all .json files
        json_files = list(logs_dir.glob("*.json"))
        if not json_files:
            return None
        # Sort by modification time, most recent first
        json_files.sort(key=lambda p: p.stat().st_mtime, reverse=True)
        # Return filename without .json extension
        return json_files[0].stem

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute a tool by name; unknown names yield an error dict."""
        handlers = {
            "dss_list_browser_sessions": self.list_browser_sessions,
            "dss_get_browser_diagnostic": self.get_browser_diagnostic,
            "dss_get_browser_errors": self.get_browser_errors,
            "dss_get_browser_network": self.get_browser_network,
            "dss_get_server_status": self.get_server_status,
            "dss_get_server_diagnostic": self.get_server_diagnostic,
            "dss_list_workflows": self.list_workflows,
            "dss_run_workflow": self.run_workflow,
        }
        handler = handlers.get(tool_name)
        if not handler:
            return {"error": f"Unknown tool: {tool_name}"}
        try:
            result = await handler(**arguments)
            return result
        except Exception as e:
            logger.error(f"Tool execution failed: {e}")
            return {"error": str(e)}

    async def list_browser_sessions(self) -> Dict[str, Any]:
        """List all browser log sessions found on disk, newest first."""
        logs_dir = self._get_browser_logs_dir()
        if not logs_dir.exists():
            return {
                "sessions": [],
                "count": 0,
                "message": "No browser logs directory found. Browser logger may not have captured any sessions yet.",
            }
        # Get all .json files
        json_files = list(logs_dir.glob("*.json"))
        if not json_files:
            return {
                "sessions": [],
                "count": 0,
                "message": "No sessions found in browser logs directory.",
            }
        # Sort by modification time, most recent first
        json_files.sort(key=lambda p: p.stat().st_mtime, reverse=True)
        sessions = []
        for json_file in json_files:
            try:
                # Read session metadata
                with open(json_file, "r") as f:
                    data = json.load(f)
                sessions.append(
                    {
                        "session_id": json_file.stem,
                        "exported_at": data.get("exportedAt", "unknown"),
                        "log_count": len(data.get("logs", [])),
                        "file_size_bytes": json_file.stat().st_size,
                        "modified_at": datetime.fromtimestamp(
                            json_file.stat().st_mtime
                        ).isoformat(),
                    }
                )
            except Exception as e:
                # Unparseable files are still listed, with the parse error noted.
                logger.warning(f"Could not read session file {json_file}: {e}")
                sessions.append(
                    {"session_id": json_file.stem, "error": f"Could not parse: {str(e)}"}
                )
        return {"sessions": sessions, "count": len(sessions), "directory": str(logs_dir)}

    async def get_browser_diagnostic(self, session_id: Optional[str] = None) -> Dict[str, Any]:
        """Get diagnostic summary for a browser session (latest if omitted)."""
        # Resolve session_id
        if not session_id:
            session_id = self._get_latest_session_id()
            if not session_id:
                return {"error": "No active session found"}
        # Fetch session data from API
        response = await self._request("GET", f"/api/browser-logs/{session_id}")
        if "error" in response:
            return response
        # Extract diagnostic info
        logs = response.get("logs", [])
        diagnostic = response.get("diagnostic", {})
        # Calculate additional metrics: errors + warnings combined
        error_count = sum(1 for log in logs if log.get("level") in ["error", "warn"])
        return {
            "session_id": session_id,
            "exported_at": response.get("exportedAt"),
            "total_logs": len(logs),
            "error_count": error_count,
            "diagnostic": diagnostic,
            "summary": f"Session {session_id}: {len(logs)} logs, {error_count} errors/warnings",
        }

    async def get_browser_errors(
        self, session_id: Optional[str] = None, limit: int = DEFAULT_LOG_LIMIT
    ) -> Dict[str, Any]:
        """Get console errors/warnings from a browser session (latest if omitted)."""
        # Resolve session_id
        if not session_id:
            session_id = self._get_latest_session_id()
            if not session_id:
                return {"error": "No active session found"}
        # Fetch session data from API
        response = await self._request("GET", f"/api/browser-logs/{session_id}")
        if "error" in response:
            return response
        # Filter for errors and warnings
        logs = response.get("logs", [])
        errors = [log for log in logs if log.get("level") in ["error", "warn"]]
        # Apply limit (limit=0 means unlimited)
        errors = errors[:limit] if limit else errors
        if not errors:
            return {
                "session_id": session_id,
                "errors": [],
                "count": 0,
                "message": "No errors or warnings found in this session",
            }
        return {
            "session_id": session_id,
            "errors": errors,
            "count": len(errors),
            "total_logs": len(logs),
        }

    async def get_browser_network(
        self, session_id: Optional[str] = None, limit: int = DEFAULT_LOG_LIMIT
    ) -> Dict[str, Any]:
        """Get network logs from a browser session (latest if omitted)."""
        # Resolve session_id
        if not session_id:
            session_id = self._get_latest_session_id()
            if not session_id:
                return {"error": "No active session found"}
        # Fetch session data from API
        response = await self._request("GET", f"/api/browser-logs/{session_id}")
        if "error" in response:
            return response
        # Check if diagnostic contains network data
        diagnostic = response.get("diagnostic", {})
        network_logs = diagnostic.get("network", [])
        if not network_logs:
            # Fallback: look for logs that mention network/fetch/xhr
            # (keyword heuristic — may include false positives)
            logs = response.get("logs", [])
            network_logs = [
                log
                for log in logs
                if any(
                    keyword in str(log.get("message", "")).lower()
                    for keyword in ["fetch", "xhr", "request", "response", "http"]
                )
            ]
        # Apply limit (limit=0 means unlimited)
        network_logs = network_logs[:limit] if limit else network_logs
        if not network_logs:
            return {
                "session_id": session_id,
                "network_logs": [],
                "count": 0,
                "message": "No network logs recorded in this session",
            }
        return {"session_id": session_id, "network_logs": network_logs, "count": len(network_logs)}

    async def get_server_status(self) -> Dict[str, Any]:
        """Quick health check of the debug server: UP/DOWN plus timestamp."""
        response = await self._request("GET", "/api/debug/diagnostic")
        if "error" in response:
            return {"status": "DOWN", "error": response["error"], "detail": response.get("detail")}
        # Extract just the status
        status = response.get("status", "unknown")
        health = response.get("health", {})
        return {
            "status": status.upper(),
            "health_status": health.get("status"),
            "timestamp": response.get("timestamp"),
            "message": f"Server is {status}",
        }

    async def get_server_diagnostic(self) -> Dict[str, Any]:
        """Get the full server diagnostics payload, unmodified."""
        response = await self._request("GET", "/api/debug/diagnostic")
        if "error" in response:
            return response
        return response

    async def list_workflows(self) -> Dict[str, Any]:
        """List available debug workflows as reported by the API."""
        response = await self._request("GET", "/api/debug/workflows")
        if "error" in response:
            return response
        return response

    async def run_workflow(self, workflow_id: str) -> Dict[str, Any]:
        """Execute a debug workflow.

        Currently returns the workflow's markdown content for the caller to
        follow; it does not execute the steps itself.
        """
        # For now, read the workflow markdown and return its content
        # In the future, this could actually execute the workflow steps
        response = await self._request("GET", "/api/debug/workflows")
        if "error" in response:
            return response
        workflows = response.get("workflows", [])
        workflow = next((w for w in workflows if w.get("id") == workflow_id), None)
        if not workflow:
            return {
                "error": f"Workflow not found: {workflow_id}",
                "available_workflows": [w.get("id") for w in workflows],
            }
        # Read workflow file from the path the API reported
        workflow_path = workflow.get("path")
        if workflow_path and Path(workflow_path).exists():
            with open(workflow_path, "r") as f:
                content = f.read()
            return {
                "workflow_id": workflow_id,
                "title": workflow.get("title"),
                "content": content,
                "message": "Workflow loaded. Follow the steps in the content.",
            }
        return {"error": "Workflow file not found", "workflow": workflow}

View File

@@ -1,527 +0,0 @@
"""
DSS Project Tools for MCP.
Base tools that Claude can use to interact with DSS projects.
All tools are project-scoped and context-aware.
Tools include:
- Project Management (create, list, get, update, delete)
- Figma Integration (setup credentials, discover files, add files)
- Token Management (sync, extract, validate, detect drift)
- Component Analysis (discover, analyze, find quick wins)
- Status & Info (project status, system health)
"""
import uuid
from typing import Any, Dict, Optional
from mcp import types
from dss.storage.json_store import Projects # JSON storage
from ..context.project_context import get_context_manager
from ..handler import MCPContext, get_mcp_handler
# Tool definitions (metadata for Claude)
# Static mcp.types.Tool descriptors. Each name must exactly match a handler
# key in ProjectTools.execute_tool for the tool to be callable.
PROJECT_TOOLS = [
    types.Tool(
        name="dss_get_project_summary",
        description="Get comprehensive project summary including components, tokens, health, and stats",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID to query"},
                "include_components": {
                    "type": "boolean",
                    "description": "Include full component list (default: false)",
                    "default": False,
                },
            },
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_list_components",
        description="List all components in a project with their properties",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "filter_name": {
                    "type": "string",
                    "description": "Optional: Filter by component name (partial match)",
                },
                "code_generated_only": {
                    "type": "boolean",
                    "description": "Optional: Only show components with generated code",
                    "default": False,
                },
            },
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_get_component",
        description="Get detailed information about a specific component",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "component_name": {"type": "string", "description": "Component name (exact match)"},
            },
            "required": ["project_id", "component_name"],
        },
    ),
    types.Tool(
        name="dss_get_design_tokens",
        description="Get all design tokens (colors, typography, spacing, etc.) for a project",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "token_category": {
                    "type": "string",
                    "description": "Optional: Filter by token category (colors, typography, spacing, etc.)",
                    "enum": ["colors", "typography", "spacing", "shadows", "borders", "all"],
                },
            },
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_get_project_health",
        description="Get project health score, grade, and list of issues",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_list_styles",
        description="List design styles (text, fill, effect, grid) from Figma",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "style_type": {
                    "type": "string",
                    "description": "Optional: Filter by style type",
                    "enum": ["TEXT", "FILL", "EFFECT", "GRID", "all"],
                },
            },
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_get_discovery_data",
        description="Get project discovery/scan data (file counts, technologies detected, etc.)",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    # === Project Management Tools ===
    types.Tool(
        name="dss_create_project",
        description="Create a new design system project",
        inputSchema={
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "Project name"},
                "description": {"type": "string", "description": "Project description"},
                "root_path": {
                    "type": "string",
                    "description": "Root directory path for the project. Can be a git URL or a local folder path.",
                },
            },
            "required": ["name", "root_path"],
        },
    ),
    types.Tool(
        name="dss_list_projects",
        description="List all design system projects",
        inputSchema={
            "type": "object",
            "properties": {
                "filter_status": {
                    "type": "string",
                    "description": "Optional: Filter by project status (active, archived)",
                    "enum": ["active", "archived", "all"],
                }
            },
        },
    ),
    types.Tool(
        name="dss_get_project",
        description="Get detailed information about a specific project",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_update_project",
        description="Update project settings and metadata",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID to update"},
                "updates": {
                    "type": "object",
                    "description": "Fields to update (name, description, etc.)",
                },
            },
            "required": ["project_id", "updates"],
        },
    ),
    types.Tool(
        name="dss_delete_project",
        description="Delete a design system project and all its data",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID to delete"},
                "confirm": {
                    "type": "boolean",
                    "description": "Confirmation to delete (must be true)",
                },
            },
            "required": ["project_id", "confirm"],
        },
    ),
    # === Figma Integration Tools ===
    types.Tool(
        name="dss_setup_figma_credentials",
        description="Setup Figma API credentials for a project",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "api_token": {"type": "string", "description": "Figma API token"},
            },
            "required": ["project_id", "api_token"],
        },
    ),
    types.Tool(
        name="dss_discover_figma_files",
        description="Discover Figma files accessible with current credentials",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_add_figma_file",
        description="Add a Figma file to a project for syncing",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "file_key": {"type": "string", "description": "Figma file key"},
                "file_name": {"type": "string", "description": "Display name for the file"},
            },
            "required": ["project_id", "file_key", "file_name"],
        },
    ),
    types.Tool(
        name="dss_list_figma_files",
        description="List all Figma files linked to a project",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    # === Token Management Tools ===
    types.Tool(
        name="dss_sync_tokens",
        description="Synchronize design tokens from Figma to project",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "output_format": {
                    "type": "string",
                    "description": "Output format for tokens (css, json, tailwind)",
                    "enum": ["css", "json", "tailwind", "figma-tokens"],
                },
            },
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_extract_tokens",
        description="Extract design tokens from a Figma file",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "file_key": {"type": "string", "description": "Figma file key"},
            },
            "required": ["project_id", "file_key"],
        },
    ),
    types.Tool(
        name="dss_validate_tokens",
        description="Validate design tokens for consistency and completeness",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_detect_token_drift",
        description="Detect inconsistencies between Figma and project tokens",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    # === Component Analysis Tools ===
    types.Tool(
        name="dss_discover_components",
        description="Discover components in project codebase",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "path": {"type": "string", "description": "Optional: Specific path to scan"},
            },
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_analyze_components",
        description="Analyze components for design system alignment and quality",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_get_quick_wins",
        description="Identify quick wins for improving design system consistency",
        inputSchema={
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project ID"},
                "path": {"type": "string", "description": "Optional: Specific path to analyze"},
            },
            "required": ["project_id"],
        },
    ),
    # === Status & Info Tools ===
    types.Tool(
        name="dss_get_project_status",
        description="Get current project status and progress",
        inputSchema={
            "type": "object",
            "properties": {"project_id": {"type": "string", "description": "Project ID"}},
            "required": ["project_id"],
        },
    ),
    types.Tool(
        name="dss_get_system_health",
        description="Get overall system health and statistics",
        inputSchema={"type": "object", "properties": {}},
    ),
]
# Tool implementations
class ProjectTools:
    """Project tool implementations.

    Dispatches MCP ``dss_*`` tool calls to async handler methods.
    Project CRUD goes through the ``Projects`` JSON store; read-only
    queries go through the shared project context manager.
    """

    def __init__(self, user_id: Optional[int] = None):
        # user_id is forwarded to context lookups and background MCP calls
        # (presumably for permission checks — TODO confirm against callers).
        self.context_manager = get_context_manager()
        self.user_id = user_id
        self.projects_db = Projects()

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute a tool by name.

        Returns the handler's result dict, or an ``{"error": ...}`` dict
        when the tool is unknown or the handler raises.
        """
        handlers = {
            # Project Management
            "dss_create_project": self.create_project,
            "dss_list_projects": self.list_projects,
            "dss_get_project": self.get_project,
            # Read-only tools
            "dss_get_project_summary": self.get_project_summary,
            "dss_list_components": self.list_components,
            "dss_get_component": self.get_component,
            "dss_get_design_tokens": self.get_design_tokens,
            "dss_get_project_health": self.get_project_health,
            "dss_list_styles": self.list_styles,
            # FIX: key was corrupted to "dss_get_discovery_.dat", which made
            # the discovery-data tool unreachable; restored to match the
            # dss_<method> naming convention used by every other entry.
            "dss_get_discovery_data": self.get_discovery_data,
        }
        handler = handlers.get(tool_name)
        if not handler:
            return {"error": f"Unknown or not implemented tool: {tool_name}"}
        try:
            result = await handler(**arguments)
            return result
        except Exception as e:
            # Surface handler failures as a structured error rather than
            # letting exceptions escape to the MCP transport layer.
            return {"error": str(e)}

    async def create_project(
        self, name: str, root_path: str, description: str = ""
    ) -> Dict[str, Any]:
        """Create a new project and trigger initial analysis.

        The analysis runs as a fire-and-forget background task so project
        creation returns immediately.
        """
        project_id = str(uuid.uuid4())
        # The `create` method in json_store handles the creation of the manifest
        self.projects_db.create(id=project_id, name=name, description=description)
        # We may still want to update the root_path if it's not part of the manifest
        self.projects_db.update(project_id, root_path=root_path)
        # Trigger the analysis as a background task
        # We don't want to block the creation call
        mcp_handler = get_mcp_handler()
        # Create a context for the tool call
        # The user_id might be important for permissions later
        mcp_context = MCPContext(project_id=project_id, user_id=self.user_id)
        # It's better to run this in the background and not wait for the result here
        asyncio.create_task(
            mcp_handler.execute_tool(
                tool_name="analyze_project",
                arguments={"project_path": root_path},
                context=mcp_context,
            )
        )
        return {
            "status": "success",
            "message": "Project created successfully. Analysis has been started in the background.",
            "project_id": project_id,
        }

    async def list_projects(self, filter_status: Optional[str] = None) -> Dict[str, Any]:
        """List all projects, optionally filtered by status."""
        all_projects = self.projects_db.list(status=filter_status)
        return {"projects": all_projects}

    async def get_project(self, project_id: str) -> Dict[str, Any]:
        """Get a single project by its ID."""
        project = self.projects_db.get(project_id)
        if not project:
            return {"error": f"Project with ID '{project_id}' not found."}
        return {"project": project}

    async def get_project_summary(
        self, project_id: str, include_components: bool = False
    ) -> Dict[str, Any]:
        """Get comprehensive project summary.

        Includes the full component list only when ``include_components``
        is True, since it can be large.
        """
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}
        summary = {
            "project_id": context.project_id,
            "name": context.name,
            "description": context.description,
            "component_count": context.component_count,
            "health": context.health,
            "stats": context.stats,
            "config": context.config,
            "integrations_enabled": list(context.integrations.keys()),
            "loaded_at": context.loaded_at.isoformat(),
        }
        if include_components:
            summary["components"] = context.components
        return summary

    async def list_components(
        self, project_id: str, filter_name: Optional[str] = None, code_generated_only: bool = False
    ) -> Dict[str, Any]:
        """List components with optional filtering.

        ``filter_name`` is matched case-insensitively as a substring;
        ``code_generated_only`` keeps only components whose
        ``code_generated`` flag is truthy.
        """
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}
        components = context.components
        # Apply filters
        if filter_name:
            components = [c for c in components if filter_name.lower() in c["name"].lower()]
        if code_generated_only:
            components = [c for c in components if c.get("code_generated")]
        return {"project_id": project_id, "total_count": len(components), "components": components}

    async def get_component(self, project_id: str, component_name: str) -> Dict[str, Any]:
        """Get detailed component information by exact name match."""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}
        # Find component by name (first exact match wins)
        component = next((c for c in context.components if c["name"] == component_name), None)
        if not component:
            return {"error": f"Component not found: {component_name}"}
        return {"project_id": project_id, "component": component}

    async def get_design_tokens(
        self, project_id: str, token_category: Optional[str] = None
    ) -> Dict[str, Any]:
        """Get design tokens, optionally filtered by category.

        Passing ``"all"`` (or None) returns every category; an unknown
        category yields an empty token dict rather than an error.
        """
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}
        tokens = context.tokens
        if token_category and token_category != "all":
            # Filter by category
            if token_category in tokens:
                tokens = {token_category: tokens[token_category]}
            else:
                tokens = {}
        return {"project_id": project_id, "tokens": tokens, "categories": list(tokens.keys())}

    async def get_project_health(self, project_id: str) -> Dict[str, Any]:
        """Get project health information."""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}
        return {"project_id": project_id, "health": context.health}

    async def list_styles(
        self, project_id: str, style_type: Optional[str] = None
    ) -> Dict[str, Any]:
        """List design styles with optional type filter ("all" disables it)."""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}
        styles = context.styles
        if style_type and style_type != "all":
            styles = [s for s in styles if s["type"] == style_type]
        return {"project_id": project_id, "total_count": len(styles), "styles": styles}

    async def get_discovery_data(self, project_id: str) -> Dict[str, Any]:
        """Get project discovery/scan data."""
        context = await self.context_manager.get_context(project_id, self.user_id)
        if not context:
            return {"error": f"Project not found: {project_id}"}
        return {"project_id": project_id, "discovery": context.discovery}

View File

@@ -1,64 +0,0 @@
"""
DSS Workflow Orchestration Tools.
(This file has been modified to remove the AI orchestration logic
as per user request. The original file contained complex, multi-step
workflows that have now been stubbed out.)
"""
from typing import Any, Dict
from mcp import types
from ..audit import AuditLog
# Workflow tool definitions
# Input schema for the single remaining workflow tool: callers must supply
# the execution id of the workflow they want to inspect.
_WORKFLOW_STATUS_SCHEMA = {
    "type": "object",
    "properties": {
        "workflow_id": {"type": "string", "description": "Workflow execution ID"}
    },
    "required": ["workflow_id"],
}

# Workflow tool definitions exposed to the MCP server.
WORKFLOW_TOOLS = [
    types.Tool(
        name="dss_workflow_status",
        description="Get status of a running workflow execution",
        inputSchema=_WORKFLOW_STATUS_SCHEMA,
    )
]
class WorkflowOrchestrator:
    """Stubbed-out orchestrator (AI workflow logic removed per user request)."""

    def __init__(self, audit_log: AuditLog):
        # Kept so the constructor signature matches the original API.
        self.audit_log = audit_log
        # Maps workflow_id -> state; always empty in this stub.
        self.active_workflows = {}

    def get_workflow_status(self, workflow_id: str) -> Dict[str, Any]:
        """Get current status of a workflow."""
        entry = self.active_workflows.get(workflow_id)
        if entry:
            return {
                "workflow_id": workflow_id,
                "status": "No active workflows.",
            }
        return {"error": "Workflow not found", "workflow_id": workflow_id}
# Handler class that MCP server will use
class WorkflowTools:
    """Handler for workflow orchestration tools."""

    def __init__(self, audit_log: AuditLog):
        # The orchestrator owns all workflow state; this class only routes.
        self.orchestrator = WorkflowOrchestrator(audit_log)

    async def handle_tool_call(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Route tool calls to appropriate handlers."""
        if tool_name != "dss_workflow_status":
            return {"error": f"Unknown or deprecated workflow tool: {tool_name}"}
        return self.orchestrator.get_workflow_status(arguments["workflow_id"])

View File

@@ -38,19 +38,19 @@ class DSSSettings(BaseSettings):
TEST_DATABASE_PATH: Path = Path.home() / ".dss" / "test.db"
USE_MOCK_APIS: bool = True
# Server Configuration
# Server Configuration (DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226)
SERVER_HOST: str = "0.0.0.0" # Host to bind server to
SERVER_PORT: int = 3456
SERVER_PORT: int = 6220 # DSS API port
SERVER_ENV: str = "development" # development or production
LOG_LEVEL: str = "INFO"
# MCP Server Configuration
MCP_HOST: str = "127.0.0.1"
MCP_PORT: int = 3457
MCP_PORT: int = 6222 # DSS MCP port
# Storybook Configuration
STORYBOOK_HOST: str = "0.0.0.0" # Host for Storybook server (uses SERVER_HOST if not set)
STORYBOOK_PORT: int = 6006 # Default Storybook port
STORYBOOK_PORT: int = 6226 # DSS Storybook port
STORYBOOK_AUTO_OPEN: bool = False # Don't auto-open browser
@property

1
dss_mcp Symbolic link
View File

@@ -0,0 +1 @@
/Users/bsarlo/Documents/SoFi/dss/dss/mcp

View File

@@ -24,15 +24,15 @@ BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
# Service configuration
# Service configuration (DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226)
declare -A SERVICES=(
["api"]="8000"
["admin-ui"]="3456"
["storybook"]="6006"
["api"]="6220"
["admin-ui"]="6221"
["storybook"]="6226"
)
declare -A SERVICE_CMDS=(
["api"]="uvicorn apps.api.server:app --host 0.0.0.0 --port 8000 --reload"
["api"]="uvicorn apps.api.server:app --host 0.0.0.0 --port 6220 --reload"
["admin-ui"]="npm run dev"
["storybook"]="npm run storybook"
)

70
scripts/setup-mcp.sh Executable file
View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Generate .claude/mcp.json with absolute paths for current setup
#
# USAGE:
#   ./scripts/setup-mcp.sh
#
# This script generates the MCP configuration file needed for Claude Code
# to access DSS tools. Run this after cloning or when switching machines.

set -e

# Resolve the repository root relative to this script's location.
DSS_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
MCP_CONFIG_DIR="$DSS_ROOT/.claude"
MCP_CONFIG="$MCP_CONFIG_DIR/mcp.json"

# Ensure .claude directory exists
mkdir -p "$MCP_CONFIG_DIR"

# Detect Python venv location (.venv preferred, venv as fallback)
if [ -d "$DSS_ROOT/.venv" ]; then
    PYTHON_PATH="$DSS_ROOT/.venv/bin/python3"
elif [ -d "$DSS_ROOT/venv" ]; then
    PYTHON_PATH="$DSS_ROOT/venv/bin/python3"
else
    echo "Error: No Python virtual environment found at .venv or venv"
    echo "Create one with: python3 -m venv .venv && source .venv/bin/activate && pip install -r requirements.txt"
    exit 1
fi

# The venv directory may exist without a usable interpreter (e.g. a
# half-created venv); fail early instead of writing a config that points
# at a missing binary.
if [ ! -x "$PYTHON_PATH" ]; then
    echo "Error: Python interpreter not found or not executable at $PYTHON_PATH"
    echo "Recreate the virtual environment with: python3 -m venv .venv"
    exit 1
fi

# Verify MCP server exists
MCP_SERVER="$DSS_ROOT/dss-claude-plugin/servers/dss-mcp-server.py"
if [ ! -f "$MCP_SERVER" ]; then
    echo "Error: MCP server not found at $MCP_SERVER"
    exit 1
fi

# Write the MCP config; paths are interpolated as absolute paths for this
# machine, which is why the file is regenerated rather than committed.
cat > "$MCP_CONFIG" << EOF
{
  "\$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json",
  "mcpServers": {
    "dss": {
      "command": "$PYTHON_PATH",
      "args": ["$MCP_SERVER"],
      "env": {
        "PYTHONPATH": "$DSS_ROOT:$DSS_ROOT/dss-claude-plugin",
        "DSS_HOME": "$DSS_ROOT/.dss",
        "DSS_DATABASE": "$DSS_ROOT/.dss/dss.db",
        "DSS_CACHE": "$DSS_ROOT/.dss/cache",
        "DSS_BASE_PATH": "$DSS_ROOT"
      },
      "description": "Design System Server MCP - local development"
    }
  }
}
EOF

echo "Generated MCP config: $MCP_CONFIG"
echo ""
echo "Configuration:"
echo "  DSS_ROOT: $DSS_ROOT"
echo "  Python: $PYTHON_PATH"
echo "  MCP Server: $MCP_SERVER"
echo ""

# Optionally install the DSS plugin for commands/skills
echo "To install DSS plugin commands (optional):"
echo "  claude plugin marketplace add $DSS_ROOT/dss-claude-plugin"
echo "  claude plugin install dss-claude-plugin@dss"
echo ""
echo "Restart Claude Code to load the DSS MCP server."

View File

@@ -37,8 +37,9 @@ api:
# ==========================================
server:
host: "0.0.0.0" # Allow external connections
port: 3456
mcp_port: 3457
# DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226
port: 6220
mcp_port: 6222
reload: false # Production mode
cors_origins:
- "https://dss.overbits.luz.uy"

1
tools_link Symbolic link
View File

@@ -0,0 +1 @@
tools