Unify MCP across clients; remove legacy plugin server
Some checks failed
DSS Project Analysis / dss-context-update (push) Has been cancelled
.gitignore (vendored): 1 change
@@ -66,6 +66,7 @@ storybook-static/
# Local MCP config (generated by dss-init.sh)
.mcp.json
.claude/mcp.json
+.gemini/settings.json

# =============================================================================
# DSS GENERATED FILES - Created by /dss-init, not committed
.mcp.json: 17 changes (file deleted)
@@ -1,17 +0,0 @@
{
  "$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json",
  "mcpServers": {
    "dss": {
      "command": "/Users/bsarlo/Documents/SoFi/dss/.venv/bin/python3",
      "args": ["/Users/bsarlo/Documents/SoFi/dss/dss-claude-plugin/servers/dss-mcp-server.py"],
      "env": {
        "PYTHONPATH": "/Users/bsarlo/Documents/SoFi/dss:/Users/bsarlo/Documents/SoFi/dss/dss-claude-plugin",
        "DSS_HOME": "/Users/bsarlo/Documents/SoFi/dss/.dss",
        "DSS_DATABASE": "/Users/bsarlo/Documents/SoFi/dss/.dss/dss.db",
        "DSS_CACHE": "/Users/bsarlo/Documents/SoFi/dss/.dss/cache",
        "DSS_BASE_PATH": "/Users/bsarlo/Documents/SoFi/dss"
      },
      "description": "Design System Server MCP - local development"
    }
  }
}
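The committed `.mcp.json` above carried machine-specific absolute paths; after this commit the equivalent config is generated locally instead. A minimal sketch using the two scripts referenced elsewhere in this diff (both script names appear in this commit; their flags and output are not shown here, so treat this as illustrative):

```bash
# Regenerate a machine-local MCP config instead of committing absolute paths.
./scripts/setup-mcp.sh

# Or wire up all installed clients (Claude + Codex + Gemini) at once.
./scripts/enable-mcp-clients.sh
```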
CLAUDE.md: 34 changes
@@ -1,5 +1,11 @@
# DSS - Design System Server

+## Human Docs (start here)
+
+- Overview + setup: `docs/README.md`
+- Quickstart: `docs/quickstart.md`
+- Architecture: `docs/architecture.md`
+
## FIRST: Enable DSS MCP Tools

**Before starting work, check if DSS tools are available.** If you don't see `dss_*` tools in your available MCP tools, run:

@@ -10,20 +16,20 @@

Then tell the user: "I've configured the DSS MCP server. Please restart Claude Code to load the DSS tools, then continue this conversation."

-The DSS MCP server provides 40+ design system tools for Figma sync, token management, component generation, and project analysis.
+The DSS MCP server provides core `dss_*` tools for project analysis, token work, and Storybook automation.

### Manual verification

Check if `.claude/mcp.json` exists and has valid paths:
- `command` should point to `.venv/bin/python3` (must exist)
-- `args` should point to `dss-claude-plugin/servers/dss-mcp-server.py` (must exist)
+- `args` should be `["-m","dss.mcp.server"]`

## Project Structure

```
dss/
├── dss/                 # Core Python library
-│   ├── mcp_server/      # MCP server implementation
+│   ├── mcp/             # Shared MCP tool layer + local MCP server
│   ├── analyze/         # Code analysis tools
│   ├── ingest/          # Token ingestion
│   ├── figma/           # Figma integration

@@ -33,8 +39,7 @@ dss/
│   ├── api/             # FastAPI server (port 6220)
│   └── cli/             # TypeScript CLI
├── admin-ui/            # Admin dashboard (port 6221)
-├── dss-claude-plugin/   # Claude Code MCP plugin
-│   └── servers/         # MCP server scripts
+├── dss-claude-plugin/   # Claude Code plugin assets (commands/skills)
└── scripts/             # Setup & utility scripts
```

@@ -75,7 +80,8 @@ cd admin-ui && npm run dev

## Key Files

-- `dss/mcp_server/handler.py` - MCP tool execution handler
+- `dss/mcp/handler.py` - MCP tool registry + execution
+- `dss/mcp/server.py` - Local MCP stdio server (`python -m dss.mcp.server`)
- `dss/storage/json_store.py` - JSON-based data storage
- `apps/api/server.py` - FastAPI server
- `.claude/mcp.json` - Local MCP configuration (generated)

@@ -96,10 +102,19 @@ If `/mcp` shows "Failed to reconnect to dss", check:

2. **MCP config paths are valid**: Check `.claude/mcp.json` points to existing files:
   - `.venv/bin/python3` must exist
-   - `dss-claude-plugin/servers/dss-mcp-server.py` must exist
+   - `args` should be `["-m","dss.mcp.server"]`

3. **Restart Claude Code** after fixing any configuration issues

+### Proxying tools to a headless DSS server
+
+To run the MCP process locally but execute tools on a remote/headless server, set `DSS_API_URL` in the MCP env.
+
+Example:
+```bash
+./scripts/setup-mcp.sh --api-url https://dss.example.com
+```

### Disabling unwanted MCP servers

MCP servers can be configured in multiple locations. Check all of these:

@@ -109,7 +124,6 @@ MCP servers can be configured in multiple locations. Check all of these:

| `~/.claude/mcp.json` | Claude Code (global) |
| `~/.config/claude/claude_desktop_config.json` | Claude Desktop app |
| `.claude/mcp.json` (project) | Claude Code (project-specific) |
-| `../.mcp.json` | Parent directory inheritance |

To disable a server, remove its entry from the relevant config file and restart Claude Code.

@@ -120,5 +134,5 @@ If you see repeated `MCP server "figma": No token data found` errors, the figma

## Notes

- DSS uses JSON-based storage, not SQL database
-- The `dss/mcp_server/` directory was renamed from `dss/mcp/` to avoid shadowing the pip `mcp` package
-- Integration configs (Figma, Jira, etc.) are stored encrypted when database is configured
+- `dss.mcp` is an internal DSS module; it does not shadow the upstream `mcp` package
+- Integration configs can be stored encrypted when `DSS_MCP_ENCRYPTION_KEY` is configured
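For a quick end-to-end check of the unified setup described above, the stdio entry point can be exercised directly from the repo root. This is a minimal sketch based only on the paths named in this commit (`.venv/bin/python3`, `dss.mcp.server`); its runtime behavior and output are not documented here, so treat it as a smoke test rather than a reference:

```bash
# The generated .claude/mcp.json should point at this interpreter...
ls -la .venv/bin/python3

# ...and at the module form of the server. Importing it verifies PYTHONPATH.
.venv/bin/python3 -c "import dss.mcp.server; print('dss.mcp.server importable')"

# Launch the local MCP stdio server; it should sit waiting on stdin (Ctrl-C to exit).
.venv/bin/python3 -m dss.mcp.server
```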
README.md: 187 changes
@@ -1,158 +1,95 @@
-# DSS - Design System Server
+# DSS (Design System Server)

-Monolithic design system platform. Ingest tokens from Figma/CSS/SCSS/Tailwind, normalize to canonical format, generate outputs.
+DSS is a design-system toolkit that works both as:

-## Quick Start
+- a **local developer tool** (run analysis and generation across many repos), and
+- a **headless server** (so UX/QA/Admin teams can use the web Admin UI and AI-assisted workflows without a local dev environment).
+
+## What DSS does
+
+- Ingest tokens from **Figma / CSS / SCSS / Tailwind**, normalize them, and generate outputs
+- Analyze codebases (components, styles, dependency graph, quick wins)
+- Automate Storybook setup (scan/generate/configure)
+- Expose a consistent set of `dss_*` tools via:
+  - local MCP (Claude Code)
+  - headless server MCP endpoints (`/api/mcp/*`)
+  - Claude chat/tool-calling (`/api/claude/chat`)
+
+## Docs
+
+Human docs live in `docs/README.md`.
+
+AI/agent-oriented docs live in `docs/ai.md` (entry points include `CLAUDE.md` and `admin-ui/AI-REFERENCE.md`).
+
+## Quickstart (local + server)
+
+### 1) Python setup

```bash
-# 1. Create Python virtual environment
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
```

-# 2. Generate MCP config for Claude Code
-./scripts/setup-mcp.sh
+### 2) Run the headless API

-# 3. Start services
+```bash
+source .venv/bin/activate
+PYTHONPATH="$PWD:$PWD/apps/api" uvicorn apps.api.server:app --host 0.0.0.0 --port 6220
+```

-## Claude Code Plugin Integration
+If you want a single-port server that serves the built Admin UI too, see `docs/quickstart.md` (Option B) or run `./scripts/dss start` after building `admin-ui`.

-DSS integrates with Claude Code as a **plugin** that provides MCP tools, slash commands, skills, and agents.
-
-### Installation
-
-**Step 1: Set up the Python environment**
+### 3) Run the Admin UI (dev mode)

```bash
-python3 -m venv .venv
-source .venv/bin/activate
-pip install -r requirements.txt
+cd admin-ui
+npm install
+npm run dev
```

-**Step 2: Run the setup script**
+Admin UI: `http://localhost:6221`
+API: `http://localhost:6220`
+
+### 4) Run analysis on any project
+
+```bash
+./dss-cli.py analyze --project-path /absolute/path/to/your-project
+```
+
+This writes `<project>/.dss/analysis_graph.json` (portable JSON output).
+
+### 5) Claude Code MCP setup

```bash
./scripts/setup-mcp.sh
```

-**Step 3: Add the DSS marketplace and install the plugin**
-
-In Claude Code, run:
-
-```
-/plugin marketplace add /path/to/dss/dss-claude-plugin
-```
-
-Replace `/path/to/dss` with your actual DSS installation path.
-
-Then install the plugin:
-
-```
-/plugin install dss-claude-plugin@dss
-```
-
-**Alternative: Manual configuration**
-
-Add to your `~/.claude/settings.json`:
-
-```json
-{
-  "extraKnownMarketplaces": {
-    "dss": {
-      "source": {
-        "source": "directory",
-        "path": "/path/to/dss/dss-claude-plugin"
-      }
-    }
-  },
-  "enabledPlugins": {
-    "dss-claude-plugin@dss": true
-  }
-}
-```
-
-**Step 4: Restart Claude Code** completely (quit and reopen)
-
-### Verification
-
-After restart, verify the plugin is loaded:
-
-1. Run `/mcp` - DSS server should appear in the list
-2. If DSS shows as disconnected, select it to enable
-3. DSS tools will be available as `dss_*` functions
-
-### Troubleshooting
-
-**Plugin not found error in debug logs?**
-
-The plugin must be discoverable. Ensure the path in `.claude/mcp.json` points to valid files:
+Enable MCP for Claude + Codex + Gemini (when installed):

```bash
-# Verify paths exist
-ls -la .venv/bin/python3
-ls -la dss-claude-plugin/servers/dss-mcp-server.py
+./scripts/enable-mcp-clients.sh
```

-**DSS server not connecting?**
+See `docs/configuration.md` for proxy mode (`--api-url`) and environment variables.

-Add DSS to your global MCP config (`~/.claude/mcp.json`):
+## Storage (JSON-only)

-```json
-{
-  "mcpServers": {
-    "dss": {
-      "command": "/path/to/dss/.venv/bin/python3",
-      "args": ["/path/to/dss/dss-claude-plugin/servers/dss-mcp-server.py"],
-      "env": {
-        "PYTHONPATH": "/path/to/dss:/path/to/dss/dss-claude-plugin",
-        "DSS_HOME": "/path/to/dss/.dss",
-        "DSS_BASE_PATH": "/path/to/dss"
-      }
-    }
-  }
-}
-```
+By default DSS stores data under:
+- `DSS_HOME` (if set), else
+- `./.dss` (if present), else
+- `~/.dss`

-**Test the MCP server manually:**
+See `docs/storage.md` for layout and guidance on what to commit.

-```bash
-source .venv/bin/activate
-PYTHONPATH="$PWD:$PWD/dss-claude-plugin" \
-python3 dss-claude-plugin/servers/dss-mcp-server.py
-```
-
-**Check debug logs:**
-
-```bash
-cat ~/.claude/debug/latest | grep -i "dss\|plugin"
-```
-
-### Available Tools
-
-Once connected, DSS provides tools prefixed with `dss_`:
-- `dss_figma_*` - Figma integration and token extraction
-- `dss_token_*` - Design token management
-- `dss_component_*` - Component generation
-- `dss_project_*` - Project analysis
-
-## Structure
+## Repo layout

```
-tools/              # Python backend (API, ingestion, analysis)
-admin-ui/           # Web dashboard
-cli/                # TypeScript CLI
-dss-claude-plugin/  # Claude Code integration (skills, commands, agents)
-.knowledge/         # AI knowledge base (DSS_CORE.json)
-.dss/               # Runtime data, schemas, database
+dss/                # Core Python library (analysis/ingest/storage/mcp)
+apps/api/           # FastAPI headless server
+admin-ui/           # Preact Admin UI (Vite dev server + build output)
+dss-claude-plugin/  # Claude Code plugin assets (commands/skills)
+scripts/            # Setup and operational scripts
+docs/               # Human documentation
+.knowledge/         # Internal knowledge base (AI-oriented)
```

-## Core Concept
-
-DSS structure is immutable. External systems adapt TO DSS via translation dictionaries.
-
-See `.knowledge/DSS_CORE.json` for complete specification.
-
-# Test Commit to Verify Hooks
-
-- CI/CD Verification Run
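After the quickstart above, a couple of endpoints from this commit make a convenient smoke test of the headless API. A minimal sketch: the URLs assume the default port 6220, `/health` and `/api/stats` are the paths defined in `apps/api/server.py` in this diff, and the response shapes beyond what the diff shows are not guaranteed:

```bash
# Liveness check against the FastAPI server.
curl -s http://localhost:6220/health

# Storage statistics from the JSON store; "database" mirrors "storage"
# as a backwards-compatible alias per this commit.
curl -s http://localhost:6220/api/stats
```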
admin-ui/AI-REFERENCE.md

@@ -1,7 +1,11 @@
# DSS Admin UI - AI Reference Documentation

+Human-facing docs live in `docs/README.md` (start there). This document is an AI-oriented reference for Admin UI structure and API usage.
+
## Overview
-The DSS Admin UI is a Preact + Signals application that provides a team-centric dashboard for managing design system operations. It connects to the DSS backend API (FastAPI server running on port 8002).
+The DSS Admin UI is a Preact + Signals application that provides a team-centric dashboard for managing design system operations. It connects to the DSS backend API (FastAPI server; typically `:6220` in dev setups).
+
+In development, the UI usually runs on `:6221` (Vite) and proxies `/api/*` to the API server. In server/headless mode, the API can serve the built UI from `admin-ui/dist/` on the same port.

## Technology Stack
- **Framework**: Preact 10.x (~3KB)
@@ -1,381 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Design System Server</title>
  <link rel="icon" type="image/svg+xml" href="/admin-ui/favicon.svg">

  <!-- DSS Layered CSS Architecture -->
  <!-- Layer 0: Core/Structural (reset, grid, utilities) -->
  <link rel="stylesheet" href="/admin-ui/css/dss-core.css">
  <!-- Layer 1: Design Tokens (colors, spacing, typography) -->
  <link rel="stylesheet" href="/admin-ui/css/dss-tokens.css">
  <!-- Layer 2: Semantic Theme (token-to-purpose mapping) -->
  <link rel="stylesheet" href="/admin-ui/css/dss-theme.css">
  <!-- Layer 3: Component Styles (styled components using semantic tokens) -->
  <link rel="stylesheet" href="/admin-ui/css/dss-components.css">

  <!-- Markdown & Syntax Highlighting -->
  <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
  <script src="https://cdn.jsdelivr.net/npm/dompurify@3.0.6/dist/purify.min.js"></script>
  <script src="https://cdn.jsdelivr.net/npm/highlight.js@11.9.0/lib/highlight.min.js"></script>

</head>
<body>
  <div id="app" class="app-layout">
    <!-- Sidebar -->
    <aside class="sidebar">
      <div class="sidebar__header">
        <div class="sidebar__logo">
          <div class="sidebar__logo-icon">
            <svg width="18" height="18" fill="none" stroke="currentColor" stroke-width="2" viewBox="0 0 24 24">
              <path d="M12 2L2 7l10 5 10-5-10-5z"/>
              <path d="M2 17l10 5 10-5"/>
              <path d="M2 12l10 5 10-5"/>
            </svg>
          </div>
          <span>DSS</span>
        </div>
      </div>

      <nav class="sidebar__nav" id="main-nav" aria-label="Main navigation">
        <!-- Overview -->
        <div class="nav-section__title">Overview</div>
        <a class="nav-item active" data-page="dashboard" href="#dashboard" tabindex="0">
          <svg class="nav-item__icon" fill="none" stroke="currentColor" stroke-width="2" viewBox="0 0 24 24">
            <rect x="3" y="3" width="7" height="9" rx="1"/>
            <rect x="14" y="3" width="7" height="5" rx="1"/>
            <rect x="14" y="12" width="7" height="9" rx="1"/>
            <rect x="3" y="16" width="7" height="5" rx="1"/>
          </svg>
          Dashboard
        </a>
        <a class="nav-item" data-page="projects" href="#projects" tabindex="0">
          <svg class="nav-item__icon" fill="none" stroke="currentColor" stroke-width="2" viewBox="0 0 24 24">
            <path d="M3 3h18v18H3z"/>
            <path d="M21 9H3"/>
            <path d="M9 21V9"/>
          </svg>
          Projects
        </a>

        <!-- Tools -->
        <div class="nav-section">
          <div class="nav-section__title">Tools</div>
          <div class="nav-section__content">
            <div class="nav-sub-section">
              <div class="nav-sub-section__title">Analysis</div>
              <a class="nav-item nav-item--level-2" data-page="services" href="#services" tabindex="0">Services</a>
              <a class="nav-item nav-item--level-2" data-page="quick-wins" href="#quick-wins" tabindex="0">Quick Wins</a>
            </div>
            <a class="nav-item nav-item--level-1" data-page="chat" href="#chat" tabindex="0">Chat</a>
          </div>
        </div>

        <!-- Design System -->
        <div class="nav-section">
          <div class="nav-section__title">Design System</div>
          <div class="nav-section__content">
            <div class="nav-sub-section">
              <div class="nav-sub-section__title">Foundations</div>
              <a class="nav-item nav-item--level-2" data-page="tokens" href="#tokens" tabindex="0">Tokens</a>
              <a class="nav-item nav-item--level-2" data-page="components" href="#components" tabindex="0">Components</a>
            </div>
            <div class="nav-sub-section">
              <div class="nav-sub-section__title">Integrations</div>
              <a class="nav-item nav-item--level-2" data-page="figma" href="#figma" tabindex="0">Figma</a>
              <a id="storybook-link" class="nav-item nav-item--level-2" href="http://localhost:6006" target="_blank" tabindex="0">Storybook</a>
            </div>
          </div>
        </div>

        <!-- System -->
        <div class="nav-section">
          <div class="nav-section__title">System</div>
          <div class="nav-section__content">
            <a class="nav-item nav-item--level-1" data-page="docs" href="#docs" tabindex="0">Docs</a>
            <div class="nav-sub-section">
              <div class="nav-sub-section__title">Administration</div>
              <a class="nav-item nav-item--level-2" data-page="teams" href="#teams" tabindex="0">Teams</a>
              <a class="nav-item nav-item--level-2" data-page="audit" href="#audit" tabindex="0">Audit</a>
              <a class="nav-item nav-item--level-2" data-page="plugins" href="#plugins" tabindex="0">Plugins</a>
              <a class="nav-item nav-item--level-2" data-page="settings" href="#settings" tabindex="0">Settings</a>
            </div>
          </div>
        </div>
      </nav>

      <div class="sidebar__help">
        <details class="help-panel">
          <summary class="help-panel__toggle" tabindex="0">
            <svg width="16" height="16" fill="none" stroke="currentColor" stroke-width="2" viewBox="0 0 24 24">
              <circle cx="12" cy="12" r="10"/>
              <path d="M9.09 9a3 3 0 0 1 5.83 1c0 2-3 3-3 3"/>
              <line x1="12" y1="17" x2="12.01" y2="17"/>
            </svg>
            Quick Guide
          </summary>
          <div class="help-panel__content">
            <div class="help-section" data-team="ui">
              <strong>UI Team</strong>
              <ul>
                <li>Extract tokens from Figma</li>
                <li>Sync to CSS variables</li>
                <li>Generate components</li>
                <li>Check token drift</li>
              </ul>
            </div>
            <div class="help-section" data-team="ux">
              <strong>UX Team</strong>
              <ul>
                <li>Add Figma files to project</li>
                <li>Run visual diff checks</li>
                <li>Review token consistency</li>
                <li>Validate components</li>
              </ul>
            </div>
            <div class="help-section" data-team="qa">
              <strong>QA Team</strong>
              <ul>
                <li>Define ESRE test cases</li>
                <li>Run component validation</li>
                <li>Review visual regressions</li>
                <li>Export audit logs</li>
              </ul>
            </div>
            <div class="help-section" data-team="all">
              <strong>Getting Started</strong>
              <ol>
                <li>Create a project</li>
                <li>Add Figma file key</li>
                <li>Extract & sync tokens</li>
                <li>Use AI chat for help</li>
              </ol>
            </div>
          </div>
        </details>
      </div>
      <div class="sidebar__footer">
        <ds-badge data-variant="outline">v1.0.0</ds-badge>
      </div>
    </aside>

    <!-- Header -->
    <header class="app-header">
      <div class="app-header__project-selector" id="project-selector-container">
        <!-- Project selector will be rendered here -->
      </div>
      <div class="app-header__team-selector">
        <label for="team-context-select" class="sr-only">Select team context</label>
        <select class="team-select" id="team-context-select" aria-label="Team context">
          <option value="all">All Teams</option>
          <option value="ui">UI Team</option>
          <option value="ux">UX Team</option>
          <option value="qa">QA Team</option>
        </select>
      </div>
      <div class="app-header__actions">
        <ds-button data-variant="ghost" data-size="icon" title="Toggle theme" id="theme-toggle" tabindex="0" aria-label="Toggle dark/light theme">
          <svg width="18" height="18" fill="none" stroke="currentColor" stroke-width="2" viewBox="0 0 24 24" aria-hidden="true">
            <path d="M12 3v1m0 16v1m9-9h-1M4 12H3m15.364 6.364l-.707-.707M6.343 6.343l-.707-.707m12.728 0l-.707.707M6.343 17.657l-.707.707M16 12a4 4 0 1 1-8 0 4 4 0 0 1 8 0z"/>
          </svg>
        </ds-button>
        <div class="notification-toggle-container" style="position: relative;">
          <ds-button data-variant="ghost" data-size="icon" id="notification-toggle" title="Notifications" tabindex="0" aria-label="View notifications">
            <svg width="18" height="18" fill="none" stroke="currentColor" stroke-width="2" viewBox="0 0 24 24" aria-hidden="true">
              <path d="M6 8a6 6 0 0 1 12 0c0 7 3 9 3 9H3s3-2 3-9"/>
              <path d="M10.3 21a1.94 1.94 0 0 0 3.4 0"/>
            </svg>
          </ds-button>
          <span id="notification-indicator" class="status-dot status-dot--error" style="position: absolute; top: 6px; right: 6px; display: none;"></span>
          <ds-notification-center></ds-notification-center>
        </div>
        <ds-button data-variant="ghost" data-size="icon" id="sidebar-toggle" title="Toggle AI Assistant" tabindex="0" aria-label="Toggle AI Assistant sidebar" aria-controls="ai-sidebar" aria-expanded="true">
          <svg width="18" height="18" fill="none" stroke="currentColor" stroke-width="2" viewBox="0 0 24 24" aria-hidden="true">
            <path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z"/>
          </svg>
        </ds-button>
        <div class="ds-avatar" tabindex="0" role="button" aria-label="User profile menu">
          <span>U</span>
        </div>
      </div>
    </header>

    <!-- Main Content Area -->
    <main class="app-main">
      <div id="landing-page" class="landing-page active">
        <!-- Landing page content will be rendered here -->
      </div>
      <div id="page-content" class="app-content" style="display: none;">
        <!-- Page content injected here -->
      </div>

      <!-- Right Sidebar - AI Chat -->
      <aside class="app-sidebar" id="ai-sidebar">
        <ds-ai-chat></ds-ai-chat>
      </aside>
    </main>
  </div>

  <!-- Toast Provider for notifications -->
  <ds-toast-provider></ds-toast-provider>

  <!-- Load Components -->
  <script type="module">
    // Import theme manager first (loads saved theme from cookie)
    import themeManager from '/admin-ui/js/core/theme.js';

    // Import all components
    import '/admin-ui/js/components/ds-button.js';
    import '/admin-ui/js/components/ds-card.js';
    import '/admin-ui/js/components/ds-input.js';
    import '/admin-ui/js/components/ds-badge.js';
    import '/admin-ui/js/components/ds-action-bar.js';
    import '/admin-ui/js/components/ds-toast.js';
    import '/admin-ui/js/components/ds-toast-provider.js';
    import '/admin-ui/js/components/ds-notification-center.js';
    import '/admin-ui/js/components/ds-workflow.js';
    import '/admin-ui/js/core/ai.js';

    // Import stores and services
    import contextStore from '/admin-ui/js/stores/context-store.js';
    import notificationService from '/admin-ui/js/services/notification-service.js';

    // Import browser logger for debugging
    import '/admin-ui/js/core/browser-logger.js';

    // Import navigation manager
    import NavigationManager from '/admin-ui/js/core/navigation.js';

    // Import and initialize app
    import app from '/admin-ui/js/core/app.js';

    // Initialize when DOM is ready
    document.addEventListener('DOMContentLoaded', () => {
      app.init();

      // Initialize navigation manager
      new NavigationManager(document.querySelector('.sidebar__nav'));

      // Setup theme toggle button
      const themeToggle = document.getElementById('theme-toggle');
      if (themeToggle) {
        themeToggle.addEventListener('click', () => {
          themeManager.toggle();
        });
      }

      // Setup team context selector
      const teamSelect = document.getElementById('team-context-select');
      const updateHelpSections = (team) => {
        document.querySelectorAll('.help-section').forEach(section => {
          const sectionTeam = section.dataset.team;
          section.style.display = (team === 'all' || sectionTeam === team || sectionTeam === 'all') ? '' : 'none';
        });
      };

      if (teamSelect) {
        const savedTeam = localStorage.getItem('dss_team_context') || 'all';
        teamSelect.value = savedTeam;
        updateHelpSections(savedTeam);
        contextStore.setContext({ team: savedTeam });

        teamSelect.addEventListener('change', (e) => {
          const team = e.target.value;
          localStorage.setItem('dss_team_context', team);
          updateHelpSections(team);
          contextStore.setContext({ team });
          window.dispatchEvent(new CustomEvent('team-context-changed', {
            detail: { team }
          }));
        });
      }

      // Setup AI sidebar toggle
      const sidebarToggle = document.getElementById('sidebar-toggle');
      const aiSidebar = document.getElementById('ai-sidebar');
      if (sidebarToggle && aiSidebar) {
        // Restore saved state
        const sidebarCollapsed = localStorage.getItem('dss_ai_sidebar_collapsed') === 'true';
        if (sidebarCollapsed) {
          aiSidebar.classList.add('collapsed');
          sidebarToggle.setAttribute('aria-expanded', 'false');
        }

        sidebarToggle.addEventListener('click', () => {
          const isCollapsed = aiSidebar.classList.toggle('collapsed');
          sidebarToggle.setAttribute('aria-expanded', !isCollapsed);
          localStorage.setItem('dss_ai_sidebar_collapsed', isCollapsed);
        });
      }

      // Setup Notification Center toggle
      const notificationToggle = document.getElementById('notification-toggle');
      const notificationCenter = document.querySelector('ds-notification-center');
      const notificationIndicator = document.getElementById('notification-indicator');

      if (notificationToggle && notificationCenter) {
        notificationToggle.addEventListener('click', (e) => {
          e.stopPropagation();
          const isOpen = notificationCenter.hasAttribute('open');
          if (isOpen) {
            notificationCenter.removeAttribute('open');
          } else {
            notificationCenter.setAttribute('open', '');
          }
        });

        // Close when clicking outside
        document.addEventListener('click', (e) => {
          if (!notificationCenter.contains(e.target) && !notificationToggle.contains(e.target)) {
            notificationCenter.removeAttribute('open');
          }
        });

        // Update unread indicator
        notificationService.addEventListener('unread-count-changed', (e) => {
          const { count } = e.detail;
          if (notificationIndicator) {
            notificationIndicator.style.display = count > 0 ? 'block' : 'none';
          }
        });

        // Handle notification actions
        notificationCenter.addEventListener('notification-action', (e) => {
          const { event, payload } = e.detail;
          console.log('Notification action:', event, payload);
          // Handle navigation or other actions based on event type
          if (event.startsWith('navigate:')) {
            const page = event.replace('navigate:', '');
            window.location.hash = page;
          }
        });
      }

      // Listen for "Ask AI" events from anywhere in the app
      window.addEventListener('dss-ask-ai', (e) => {
        const { prompt, openSidebar } = e.detail;
        if (openSidebar && aiSidebar && aiSidebar.classList.contains('collapsed')) {
          aiSidebar.classList.remove('collapsed');
          sidebarToggle?.setAttribute('aria-expanded', 'true');
          localStorage.setItem('dss_ai_sidebar_collapsed', 'false');
        }
        // The ds-ai-chat component should handle the prompt
        const aiChat = document.querySelector('ds-ai-chat');
        if (aiChat && typeof aiChat.setInput === 'function') {
          aiChat.setInput(prompt);
        }
      });

      // Update context store on page navigation
      window.addEventListener('hashchange', () => {
        const page = window.location.hash.substring(1) || 'dashboard';
        contextStore.setContext({ page });
      });
      // Set initial page
      contextStore.setContext({ page: window.location.hash.substring(1) || 'dashboard' });
    });
  </script>
</body>
</html>
@@ -37,9 +37,9 @@ function SettingsTool() {
  const [testing, setTesting] = useState(false);
  const [config, setConfig] = useState<RuntimeConfig>({
    server_host: 'localhost',
-   server_port: 8002,
+   server_port: 6220,
    figma_token: '',
-   storybook_url: 'http://localhost:6006'
+   storybook_url: 'http://localhost:6226'
  });
  const [figmaStatus, setFigmaStatus] = useState<{ configured: boolean } | null>(null);
  const [testResult, setTestResult] = useState<{ success: boolean; message: string } | null>(null);
apps/api/server.py

@@ -25,7 +25,7 @@ from pathlib import Path
from typing import Any, Dict, List, Optional

from dotenv import load_dotenv
-from fastapi import BackgroundTasks, Depends, FastAPI, Header, HTTPException, Query
+from fastapi import BackgroundTasks, Body, Depends, FastAPI, Header, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles

@@ -45,6 +45,7 @@ from dss.storage.json_store import (
    Cache,
    CodeMetrics,
    Components,
    ESREDefinitions,
    FigmaFiles,
    IntegrationHealth,
    Integrations,

@@ -52,6 +53,7 @@ from dss.storage.json_store import (
    SyncHistory,
    Teams,
    TestResults,
    TokenDriftDetector,
    get_stats,
)

@@ -59,6 +61,12 @@ from dss.storage.json_store import (
_server_file = Path(__file__).resolve()
_project_root = _server_file.parent.parent.parent  # /home/.../dss

+# Admin UI static serving (production-like)
+# - In dev, run `admin-ui` via Vite (`npm run dev`) and use its `/api` proxy.
+# - In headless/server mode, serve the built UI bundle from `admin-ui/dist/`.
+_admin_ui_dist = _project_root / "admin-ui" / "dist"
+_admin_ui_dist_index = _admin_ui_dist / "index.html"
+
# Try loading from multiple possible .env locations
env_paths = [
    _project_root / ".env",  # root .env (primary)
@@ -134,7 +142,11 @@ class _ConfigCompat:
                "env": settings.SERVER_ENV,
                "log_level": settings.LOG_LEVEL,
            },
-           "database": {"path": str(settings.DATABASE_PATH)},
+           "storage": {
+               "type": "json",
+               "dss_home": str(settings.DSS_HOME),
+               "data_dir": str(settings.DATA_DIR),
+           },
        }

@@ -226,7 +238,7 @@ runtime_config = RuntimeConfig()
config_service = ConfigService()
project_manager = ProjectManager(Projects, config_service)

-# Ensure database schema is up to date (adds root_path column if missing)
+# Legacy compatibility hook: JSON storage needs no migrations.
ProjectManager.ensure_schema()
@@ -419,7 +431,7 @@ async def login(request: LoginRequest):
    Authenticate with Atlassian credentials.

    Validates credentials against Jira or Confluence API,
-   creates/updates user in database, returns JWT token.
+   creates/updates user in JSON storage, returns JWT token.
    """
    try:
        auth = get_auth()

@@ -451,10 +463,19 @@ async def get_me(user: Dict[str, Any] = Depends(get_current_user)):

@app.get("/")
async def root():
-   """Redirect to Admin UI dashboard."""
-   from fastapi.responses import RedirectResponse
+   """Serve the Admin UI (when built) or show setup guidance."""
+   if _admin_ui_dist_index.exists():
+       from fastapi.responses import RedirectResponse

-       return RedirectResponse(url="/admin-ui/index.html")
+       return RedirectResponse(url="/index.html")

+   return JSONResponse(
+       status_code=200,
+       content={
+           "status": "ok",
+           "message": "Admin UI is not built. Run `cd admin-ui && npm run build` (or `npm run dev` for development).",
+       },
+   )


@app.get("/health")
@@ -494,7 +515,7 @@ async def health():
        if str(project_root) not in sys.path:
            sys.path.insert(0, str(project_root))

-       from dss.mcp_server.handler import get_mcp_handler
+       from dss.mcp.handler import get_mcp_handler

        handler = get_mcp_handler()
        mcp_ok = handler is not None
    except Exception as e:

@@ -558,25 +579,18 @@ async def receive_browser_logs(logs: dict):
        log_file = browser_logs_dir / f"{session_id}.json"
        log_file.write_text(json.dumps(logs, indent=2))

-       # Log to activity (skip if ActivityLog not available)
+       # Log to activity (JSON store)
        try:
-           with get_connection() as conn:
-               conn.execute(
-                   """
-                   INSERT INTO activity_log (category, action, details, metadata, created_at)
-                   VALUES (?, ?, ?, ?, ?)
-                   """,
-                   (
-                       "debug",
-                       "browser_logs_received",
-                       f"Received browser logs for session {session_id}",
-                       json.dumps({"session_id": session_id, "log_count": len(logs.get("logs", []))}),
-                       datetime.utcnow().isoformat(),
-                   ),
-               )
-               conn.commit()
-       except:
-           pass  # Activity logging is optional
+           ActivityLog.log(
+               action="browser_logs_received",
+               entity_type="browser_logs",
+               entity_id=session_id,
+               description=f"Received browser logs for session {session_id}",
+               category="debug",
+               details={"session_id": session_id, "log_count": len(logs.get("logs", []))},
+           )
+       except Exception:
+           pass  # Activity logging is best-effort

        # Check for errors and create notification task
        error_count = logs.get("diagnostic", {}).get("errorCount", 0)
@@ -651,7 +665,7 @@ async def get_debug_diagnostic():
    - Health status (from /health endpoint)
    - Browser log session count
    - API uptime
-   - Database size and stats
+   - Storage size and stats
    - Memory usage
    - Recent errors
    """

@@ -668,31 +682,34 @@ async def get_debug_diagnostic():
    browser_logs_dir.mkdir(parents=True, exist_ok=True)
    browser_sessions = len(list(browser_logs_dir.glob("*.json")))

-   # Get database size
-   db_path = Path(__file__).parent.parent.parent / ".dss" / "dss.db"
-   db_size_bytes = db_path.stat().st_size if db_path.exists() else 0
+   from dss.storage.json_store import DATA_DIR, ActivityLog, get_stats
+
+   storage_stats = get_stats()

    # Get process stats
    process = psutil.Process(os.getpid())
    memory_info = process.memory_info()

-   # Get recent errors from activity log
+   # Get recent errors from activity log (JSON)
    recent_errors: List[Dict[str, Any]] = []
    try:
-       with get_connection() as conn:
-           recent_errors = conn.execute(
-               """
-               SELECT category, action, details, created_at
-               FROM activity_log
-               WHERE category = 'error' OR action LIKE '%error%' OR action LIKE '%fail%'
-               ORDER BY created_at DESC
-               LIMIT 10
-               """
-           ).fetchall()
-           recent_errors = [
-               {"category": row[0], "action": row[1], "details": row[2], "timestamp": row[3]}
-               for row in recent_errors
-           ]
-   except:
+       candidates = ActivityLog.search(days=7, limit=200)
+       for r in candidates:
+           action = (r.get("action") or "").lower()
+           severity = (r.get("severity") or "").lower()
+           if severity in {"error", "critical"} or "error" in action or "fail" in action:
+               recent_errors.append(
+                   {
+                       "category": r.get("category"),
+                       "action": r.get("action"),
+                       "details": r.get("details"),
+                       "timestamp": r.get("timestamp"),
+                       "severity": r.get("severity"),
+                   }
+               )
+           if len(recent_errors) >= 10:
+               break
+   except Exception:
        recent_errors = []

    return {

@@ -700,10 +717,10 @@ async def get_debug_diagnostic():
        "timestamp": datetime.utcnow().isoformat() + "Z",
        "health": health_status,
        "browser": {"session_count": browser_sessions, "logs_directory": str(browser_logs_dir)},
-       "database": {
-           "size_bytes": db_size_bytes,
-           "size_mb": round(db_size_bytes / 1024 / 1024, 2),
-           "path": str(db_path),
+       "storage": {
+           "type": "json",
+           "path": str(DATA_DIR),
+           "stats": storage_stats,
        },
        "process": {
            "pid": os.getpid(),
@@ -793,10 +810,12 @@ async def get_config():

@app.get("/api/stats")
async def get_statistics():
-   """Get database and system statistics."""
-   db_stats = get_stats()
+   """Get storage and system statistics."""
+   storage_stats = get_stats()
    return {
-       "database": db_stats,
+       "storage": storage_stats,
+       # Backwards-compatible alias (historical naming; underlying storage is JSON files).
+       "database": storage_stats,
        "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured},
    }
@@ -1469,97 +1488,80 @@ async def get_storybook_status():


@app.post("/api/storybook/init")
-async def init_storybook(request_data: Dict[str, Any] = None):
+async def init_storybook(request_data: Dict[str, Any] = Body(default_factory=dict)):
    """
-   Initialize Storybook with design system components.
+   Initialize Storybook stories for a project.

-   Clears existing generated stories and generates new ones from
-   the specified component source path.
+   Clears previously auto-generated stories and regenerates them using the
+   shared DSS StoryGenerator.

    Request body (optional):
-       source_path: Path to components directory (defaults to configured path)
+       - project_id: DSS project id (recommended for headless server mode)
+       - path: absolute path to the project directory (local/dev mode)

    Returns:
        JSON with generation status and count
    """
-   import shutil
-   import sys

    try:
-       # Get paths
-       dss_mvp1_path = Path(__file__).parent.parent.parent / "dss-mvp1"
-       generated_dir = dss_mvp1_path / "stories" / "generated"
+       from dss.storage.json_store import Projects
+       from dss.storybook.generator import StoryGenerator, StoryTemplate

-       # Default source path - can be overridden in request
-       source_path = dss_mvp1_path / "dss" / "components"
-       if request_data and request_data.get("source_path"):
-           # Validate path is within allowed directories
-           requested_path = Path(request_data["source_path"]).resolve()
-           if not str(requested_path).startswith(str(dss_mvp1_path.resolve())):
-               raise HTTPException(status_code=400, detail="Source path must be within dss-mvp1")
-           source_path = requested_path

-       # Step 1: Clear existing generated stories
-       if generated_dir.exists():
-           for item in generated_dir.iterdir():
-               if item.name != ".gitkeep":
-                   if item.is_dir():
-                       shutil.rmtree(item)
-                   else:
-                       item.unlink()
+       # Resolve project root (prefer project_id in headless mode)
+       project_root = None
+       if request_data.get("project_id"):
+           project = Projects.get(request_data["project_id"])
+           if not project:
+               raise HTTPException(status_code=404, detail="Project not found")
+           project_root = project.get("root_path")
+       elif request_data.get("path"):
+           project_root = request_data["path"]
        else:
-           generated_dir.mkdir(parents=True, exist_ok=True)
+           # Default: first registered project with a root_path, else repo admin-ui.
+           for project in Projects.list():
+               if project.get("root_path"):
+                   project_root = project["root_path"]
+                   break
+           project_root = project_root or str(_project_root / "admin-ui")

-       # Step 2: Generate stories using StoryGenerator
-       stories_generated = 0
-       errors = []
+       root = Path(project_root).resolve()
+       if not root.exists():
+           raise HTTPException(status_code=400, detail=f"Project path not found: {root}")

-       # Add dss-mvp1 to path for imports
-       sys.path.insert(0, str(dss_mvp1_path))
+       # Clear previously auto-generated stories (do NOT touch hand-written stories)
+       marker = "Auto-generated by DSS Storybook Generator"
+       cleared = 0
+       for pattern in ["**/*.stories.tsx", "**/*.stories.jsx", "**/*.stories.js"]:
+           for story_path in root.rglob(pattern):
+               if any(skip in story_path.parts for skip in {"node_modules", ".git", "dist", "build"}):
+                   continue
+               try:
+                   if marker in story_path.read_text(encoding="utf-8", errors="ignore"):
+                       story_path.unlink()
+                       cleared += 1
+               except Exception:
+                   continue

        try:
-           from dss.storybook.generator import StoryGenerator, StoryTemplate
+           generator = StoryGenerator(str(root))
+           results = generator.generate(template=StoryTemplate.CSF3, dry_run=False)

-           generator = StoryGenerator(str(dss_mvp1_path))

-           # Check if source path exists and has components
-           if source_path.exists():
-               results = await generator.generate_stories_for_directory(
-                   str(source_path.relative_to(dss_mvp1_path)),
-                   template=StoryTemplate.CSF3,
-                   dry_run=False,
-               )

-               # Move generated stories to stories/generated/
-               for result in results:
-                   if "story" in result and "error" not in result:
-                       story_filename = Path(result["component"]).stem + ".stories.js"
-                       output_path = generated_dir / story_filename
-                       output_path.write_text(result["story"])
-                       stories_generated += 1
-                   elif "error" in result:
-                       errors.append(result)
-           else:
-               # No components yet - that's okay, Storybook will show welcome
-               pass

-       except ImportError as e:
-           # StoryGenerator not available - log but don't fail
-           errors.append({"error": f"StoryGenerator import failed: {str(e)}"})
-       finally:
-           # Clean up path
-           if str(dss_mvp1_path) in sys.path:
-               sys.path.remove(str(dss_mvp1_path))
+       stories_generated = len([r for r in results if r.get("written")])
+       errors = [r for r in results if r.get("error")]

        ActivityLog.log(
            action="storybook_initialized",
            entity_type="storybook",
-           details={"stories_generated": stories_generated, "errors_count": len(errors)},
+           details={
+               "project_path": str(root),
+               "stories_generated": stories_generated,
+               "cleared": cleared,
+               "errors_count": len(errors),
+           },
        )

        return {
            "success": True,
            "stories_generated": stories_generated,
+           "cleared": cleared,
            "message": f"Generated {stories_generated} stories"
            if stories_generated > 0
            else "Storybook initialized (no components found)",
@@ -1576,32 +1578,51 @@ async def init_storybook(request_data: Dict[str, Any] = None):


@app.delete("/api/storybook/stories")
-async def clear_storybook_stories():
+async def clear_storybook_stories(request_data: Dict[str, Any] = Body(default_factory=dict)):
    """
    Clear all generated stories from Storybook.

    Returns Storybook to blank state (only Welcome page).
    """
-   import shutil

    try:
-       dss_mvp1_path = Path(__file__).parent.parent.parent / "dss-mvp1"
-       generated_dir = dss_mvp1_path / "stories" / "generated"
+       from dss.storage.json_store import Projects
+
+       project_root = None
+       if request_data.get("project_id"):
+           project = Projects.get(request_data["project_id"])
+           if not project:
+               raise HTTPException(status_code=404, detail="Project not found")
+           project_root = project.get("root_path")
+       elif request_data.get("path"):
+           project_root = request_data["path"]
+       else:
+           for project in Projects.list():
+               if project.get("root_path"):
+                   project_root = project["root_path"]
+                   break
+           project_root = project_root or str(_project_root / "admin-ui")

+       root = Path(project_root).resolve()
+       if not root.exists():
+           raise HTTPException(status_code=400, detail=f"Project path not found: {root}")

+       marker = "Auto-generated by DSS Storybook Generator"
        cleared_count = 0
-       if generated_dir.exists():
-           for item in generated_dir.iterdir():
-               if item.name != ".gitkeep":
-                   if item.is_dir():
-                       shutil.rmtree(item)
-                   else:
-                       item.unlink()
-                   cleared_count += 1
+       for pattern in ["**/*.stories.tsx", "**/*.stories.jsx", "**/*.stories.js"]:
+           for story_path in root.rglob(pattern):
+               if any(skip in story_path.parts for skip in {"node_modules", ".git", "dist", "build"}):
+                   continue
+               try:
+                   if marker in story_path.read_text(encoding="utf-8", errors="ignore"):
+                       story_path.unlink()
+                       cleared_count += 1
+               except Exception:
+                   continue

        ActivityLog.log(
            action="storybook_cleared",
            entity_type="storybook",
-           details={"cleared_count": cleared_count},
+           details={"cleared_count": cleared_count, "project_path": str(root)},
        )

        return {
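Given the new request contract above, exercising the reworked endpoint from the command line would look roughly like this. A hedged sketch: the endpoint path and body keys (`project_id`, `path`) come from the docstring in this diff, the project id value is a placeholder, and response fields beyond `stories_generated` and `cleared` are not guaranteed:

```bash
# Regenerate stories for a registered project (headless server mode).
curl -s -X POST http://localhost:6220/api/storybook/init \
  -H "Content-Type: application/json" \
  -d '{"project_id": "<your-project-id>"}'

# Or point at a project directory directly (local/dev mode).
curl -s -X POST http://localhost:6220/api/storybook/init \
  -H "Content-Type: application/json" \
  -d '{"path": "/absolute/path/to/your-project"}'
```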
@@ -1900,44 +1921,10 @@ async def execute_ingestion(
    tokens_extracted = 0

    if method == "npm" and system:
-       # Import existing token ingestion tools
-       sys.path.insert(0, str(Path(__file__).parent.parent.parent / "dss-mvp1"))

-       try:
-           from dss.ingest import TokenCollection

-           # Create a token collection for this design system
-           collection = TokenCollection(name=system.name)

-           # Based on primary ingestion method, use appropriate source
-           if system.primary_ingestion.value == "css_variables":
-               if system.css_cdn_url:
-                   # Fetch CSS from CDN and parse
-                   import httpx

-                   async with httpx.AsyncClient() as client:
-                       resp = await client.get(system.css_cdn_url)
-                       if resp.status_code == 200:
-                           from dss.ingest.css import CSSTokenSource

-                           # Write temp file and parse
-                           temp_css = Path("/tmp") / f"{system.id}_tokens.css"
-                           temp_css.write_text(resp.text)
-                           source = CSSTokenSource(str(temp_css))
-                           source.parse()
-                           collection.merge(source.tokens)
-                           tokens_extracted = len(collection.tokens)

-           elif system.primary_ingestion.value == "tailwind_config":
-               # For Tailwind-based systems, we'll need their config
-               tokens_extracted = 0  # Placeholder for Tailwind parsing

-       except ImportError:
-           # Token ingestion module not available
-           pass
-       finally:
-           if str(Path(__file__).parent.parent.parent / "dss-mvp1") in sys.path:
-               sys.path.remove(str(Path(__file__).parent.parent.parent / "dss-mvp1"))
+       # MVP: npm ingestion is not implemented yet.
+       # Prefer using the dedicated ingest endpoints (/api/ingest/npm/*) to
+       # discover packages, then add a concrete extraction strategy per system.
+       tokens_extracted = 0

    elif method == "figma" and source_url:
        # Use existing Figma extraction

@@ -1947,23 +1934,13 @@ async def execute_ingestion(
    elif method == "css" and source_url:
        # Fetch and parse CSS
        import httpx
+       from dss.ingest.css import CSSTokenSource

-       sys.path.insert(0, str(Path(__file__).parent.parent.parent / "dss-mvp1"))

-       try:
-           async with httpx.AsyncClient() as client:
-               resp = await client.get(source_url)
-               if resp.status_code == 200:
-                   from dss.ingest.css import CSSTokenSource

-                   temp_css = Path("/tmp") / "ingested_tokens.css"
-                   temp_css.write_text(resp.text)
-                   source = CSSTokenSource(str(temp_css))
-                   source.parse()
-                   tokens_extracted = len(source.tokens.tokens)
-       finally:
-           if str(Path(__file__).parent.parent.parent / "dss-mvp1") in sys.path:
-               sys.path.remove(str(Path(__file__).parent.parent.parent / "dss-mvp1"))
+       async with httpx.AsyncClient(timeout=30.0) as client:
+           resp = await client.get(source_url)
+           resp.raise_for_status()
+       collection = await CSSTokenSource().extract(resp.text)
+       tokens_extracted = len(collection.tokens)

    ActivityLog.log(
        action="ingestion_executed",
@@ -2051,7 +2028,7 @@ async def set_mode(request_data: Dict[str, Any]):
@app.post("/api/system/reset")
async def reset_dss(request_data: Dict[str, Any]):
    """
-   Reset DSS to fresh state by calling the reset command in dss-mvp1.
+   Reset DSS to fresh state by calling the built-in reset command.

    Requires confirmation.
    """

@@ -2061,13 +2038,12 @@ async def reset_dss(request_data: Dict[str, Any]):
        raise HTTPException(status_code=400, detail="Must confirm with 'RESET'")

    try:
-       # Path to dss-mvp1 directory
-       dss_mvp1_path = Path(__file__).parent.parent.parent / "dss-mvp1"
+       repo_root = Path(__file__).resolve().parent.parent.parent

        # Run the reset command
        result = subprocess.run(
            ["python3", "-m", "dss.settings", "reset", "--no-confirm"],
-           cwd=str(dss_mvp1_path),
+           cwd=str(repo_root),
            capture_output=True,
            text=True,
            timeout=60,

@@ -2229,6 +2205,7 @@ async def record_token_drift(project_id: str, drift: TokenDriftCreate):
        raise HTTPException(status_code=404, detail="Project not found")

    created = TokenDriftDetector.record_drift(
        project_id=project_id,
        component_id=drift.component_id,
        property_name=drift.property_name,
        hardcoded_value=drift.hardcoded_value,
@@ -2251,15 +2228,18 @@ async def record_token_drift(project_id: str, drift: TokenDriftCreate):


@app.put("/api/projects/{project_id}/token-drift/{drift_id}/status")
-async def update_drift_status(project_id: str, drift_id: int, status: str):
+async def update_drift_status(
+   project_id: str, drift_id: str, payload: Dict[str, Any] = Body(default_factory=dict)
+):
    """Update token drift status: pending, fixed, ignored (UI Dashboard)."""
    if not Projects.get(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

+   status = payload.get("status") or payload.get("status", "")
    if status not in ["pending", "fixed", "ignored"]:
        raise HTTPException(status_code=400, detail="Invalid status")

-   updated = TokenDriftDetector.update_status(drift_id, status)
+   updated = TokenDriftDetector.update_status(project_id=project_id, drift_id=drift_id, status=status)

    if not updated:
        raise HTTPException(status_code=404, detail="Drift issue not found")

@@ -2314,12 +2294,13 @@ async def create_esre_definition(project_id: str, esre: ESRECreate):


@app.put("/api/projects/{project_id}/esre/{esre_id}")
-async def update_esre_definition(project_id: str, esre_id: int, updates: ESRECreate):
+async def update_esre_definition(project_id: str, esre_id: str, updates: ESRECreate):
    """Update an ESRE definition (QA Dashboard)."""
    if not Projects.get(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    updated = ESREDefinitions.update(
        project_id=project_id,
        esre_id=esre_id,
        name=updates.name,
        definition_text=updates.definition_text,

@@ -2343,12 +2324,12 @@ async def update_esre_definition(project_id: str, esre_id: int, updates: ESRECre


@app.delete("/api/projects/{project_id}/esre/{esre_id}")
-async def delete_esre_definition(project_id: str, esre_id: int):
+async def delete_esre_definition(project_id: str, esre_id: str):
    """Delete an ESRE definition (QA Dashboard)."""
    if not Projects.get(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

-   if not ESREDefinitions.delete(esre_id):
+   if not ESREDefinitions.delete(project_id=project_id, esre_id=esre_id):
        raise HTTPException(status_code=404, detail="ESRE definition not found")

    ActivityLog.log(
@@ -2424,11 +2405,13 @@ async def claude_chat(request_data: ClaudeChatRequest):
            "model": "error",
        }

-   # Import MCP handler (may fail if database not migrated)
+   # Import MCP handler (optional; tools disabled if unavailable)
    mcp_handler = None
    MCPContext = None
    try:
-       from dss_mcp.handler import get_mcp_handler, MCPContext as _MCPContext
+       from dss.mcp.handler import MCPContext as _MCPContext
+       from dss.mcp.handler import get_mcp_handler

        MCPContext = _MCPContext
        mcp_handler = get_mcp_handler()
    except Exception as e:

@@ -2490,10 +2473,7 @@ CURRENT PROJECT CONTEXT:
    # Create MCP context (or None if MCP not available)
    mcp_context = None
    if MCPContext is not None:
-       mcp_context = MCPContext(
-           project_id=project_id,
-           user_id=user_id
-       )
+       mcp_context = MCPContext(project_id=project_id, user_id=user_id)

    # Call AI provider with all context
    result = await provider.chat(
@@ -2538,85 +2518,12 @@ async def execute_mcp_tool(tool_name: str, params: Dict[str, Any] = {}):

    Calls the MCP server running on port 3457.
    """
    try:
        # Import MCP server functions
        from mcp_server import (
            analyze_react_components,
            analyze_style_values,
            build_source_graph,
            check_naming_consistency,
            create_project,
            discover_project,
            export_tokens,
            extract_components,
            extract_tokens,
            find_inline_styles,
            find_style_patterns,
            find_unused_styles,
            generate_component_code,
            generate_stories_batch,
            generate_story,
            generate_storybook_theme,
            get_activity,
            get_project,
            get_quick_wins,
            get_quick_wins_report,
            get_status,
            get_story_coverage,
            get_sync_history,
            ingest_css_tokens,
            ingest_json_tokens,
            ingest_scss_tokens,
            ingest_tailwind_tokens,
            list_projects,
            merge_tokens,
            scan_storybook,
            sync_tokens_to_file,
            validate_tokens,
        )
        # Legacy endpoint: forward to unified MCP handler.
        from dss.mcp.handler import MCPContext, get_mcp_handler

        # Map tool names to functions
        tool_map = {
            "get_status": get_status,
            "list_projects": list_projects,
            "create_project": create_project,
            "get_project": get_project,
            "extract_tokens": extract_tokens,
            "extract_components": extract_components,
            "generate_component_code": generate_component_code,
            "sync_tokens_to_file": sync_tokens_to_file,
            "get_sync_history": get_sync_history,
            "get_activity": get_activity,
            "ingest_css_tokens": ingest_css_tokens,
            "ingest_scss_tokens": ingest_scss_tokens,
            "ingest_tailwind_tokens": ingest_tailwind_tokens,
            "ingest_json_tokens": ingest_json_tokens,
            "merge_tokens": merge_tokens,
            "export_tokens": export_tokens,
            "validate_tokens": validate_tokens,
            "discover_project": discover_project,
            "analyze_react_components": analyze_react_components,
            "find_inline_styles": find_inline_styles,
            "find_style_patterns": find_style_patterns,
            "analyze_style_values": analyze_style_values,
            "find_unused_styles": find_unused_styles,
            "build_source_graph": build_source_graph,
            "get_quick_wins": get_quick_wins,
            "get_quick_wins_report": get_quick_wins_report,
            "check_naming_consistency": check_naming_consistency,
            "scan_storybook": scan_storybook,
            "generate_story": generate_story,
            "generate_stories_batch": generate_stories_batch,
            "generate_storybook_theme": generate_storybook_theme,
            "get_story_coverage": get_story_coverage,
        }

        # Get the tool function
        tool_func = tool_map.get(tool_name)
        if not tool_func:
            raise HTTPException(status_code=404, detail=f"Tool '{tool_name}' not found")

        # Execute tool
        result = await tool_func(**params)
        handler = get_mcp_handler()
        context = MCPContext(project_id=params.get("project_id"), user_id=params.get("user_id"))
        result = await handler.execute_tool(tool_name=tool_name, arguments=params or {}, context=context)

        # Log execution
        ActivityLog.log(
@@ -2626,7 +2533,7 @@ async def execute_mcp_tool(tool_name: str, params: Dict[str, Any] = {}):
            details={"params": list(params.keys())},
        )

        return JSONResponse(content={"success": True, "result": result})
        return JSONResponse(content=result.to_dict())

    except Exception as e:
        ActivityLog.log(
@@ -2645,7 +2552,7 @@ class IntegrationCreate(BaseModel):
    """Create/Update integration configuration."""

    integration_type: str  # figma, jira, confluence, sequential-thinking
    config: Dict[str, Any]  # Encrypted in database
    config: Dict[str, Any]  # Encrypted at rest when DSS_MCP_ENCRYPTION_KEY is configured
    enabled: bool = True


@@ -2697,7 +2604,7 @@ async def create_integration(
    if not Projects.get(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    from dss_mcp.config import mcp_config
    from dss.mcp.config import mcp_config

    # Encrypt config
    config_json = json.dumps(integration.config)
@@ -2744,7 +2651,7 @@ async def update_integration(
    if not Projects.get(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    from dss_mcp.config import mcp_config
    from dss.mcp.config import mcp_config

    try:
        encrypted_config = None
@@ -2811,7 +2718,7 @@ async def list_mcp_tools(
    include_details: bool = Query(False, description="Include full tool schemas"),
):
    """List all available MCP tools via unified handler."""
    from dss_mcp.handler import get_mcp_handler
    from dss.mcp.handler import get_mcp_handler

    handler = get_mcp_handler()
    return handler.list_tools(include_details=include_details)
@@ -2820,7 +2727,7 @@ async def list_mcp_tools(
@app.get("/api/mcp/tools/{tool_name}")
async def get_mcp_tool_info(tool_name: str):
    """Get detailed information about a specific MCP tool."""
    from dss_mcp.handler import get_mcp_handler
    from dss.mcp.handler import get_mcp_handler

    handler = get_mcp_handler()
    info = handler.get_tool_info(tool_name)
@@ -2831,16 +2738,8 @@ async def get_mcp_tool_info(tool_name: str):
    return info


class MCPToolExecuteRequest(BaseModel):
    """Request to execute an MCP tool."""

    arguments: Dict[str, Any]
    project_id: str
    user_id: Optional[int] = 1


@app.post("/api/mcp/tools/{tool_name}/execute")
async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest):
async def execute_mcp_tool(tool_name: str, payload: Dict[str, Any] = Body(default_factory=dict)):
    """
    Execute an MCP tool via unified handler.

@@ -2850,16 +2749,23 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest):
    - Applies circuit breaker protection
    - Logs execution metrics
    """
    from dss_mcp.handler import MCPContext, get_mcp_handler
    from dss.mcp.handler import MCPContext, get_mcp_handler

    handler = get_mcp_handler()

    # Backwards-compatible request parsing:
    # - New: { arguments: {...}, project_id: "...", user_id: 1 }
    # - Old (Admin UI): { ...toolArgs }
    arguments = payload.get("arguments") if isinstance(payload.get("arguments"), dict) else payload
    project_id = payload.get("project_id") or payload.get("projectId")
    user_id = payload.get("user_id") or payload.get("userId") or 1

    # Create execution context
    context = MCPContext(project_id=request.project_id, user_id=request.user_id)
    context = MCPContext(project_id=project_id, user_id=user_id)

    # Execute tool
    result = await handler.execute_tool(
        tool_name=tool_name, arguments=request.arguments, context=context
        tool_name=tool_name, arguments=arguments or {}, context=context
    )

    # Log to activity
@@ -2867,7 +2773,7 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest):
        action="mcp_tool_executed",
        entity_type="tool",
        entity_id=tool_name,
        project_id=request.project_id,
        project_id=project_id,
        details={
            "success": result.success,
            "duration_ms": result.duration_ms,
@@ -2881,28 +2787,43 @@ async def execute_mcp_tool(tool_name: str, request: MCPToolExecuteRequest):
@app.get("/api/mcp/status")
async def get_mcp_status():
    """Get MCP server status and configuration."""
    from dss_mcp.config import integration_config, mcp_config, validate_config
    from dss.mcp.config import integration_config, mcp_config, validate_config

    warnings = validate_config()

    # Admin UI expects a minimal `{ connected, tools }` shape.
    # Keep detailed config under `details` for debugging.
    try:
        from dss.mcp.handler import get_mcp_handler

        tools_count = len(get_mcp_handler().list_tools(include_details=False))
        connected = True
    except Exception:
        tools_count = 0
        connected = False

    return {
        "server": {
            "host": mcp_config.HOST,
            "port": mcp_config.PORT,
            "encryption_enabled": bool(mcp_config.ENCRYPTION_KEY),
            "context_cache_ttl": mcp_config.CONTEXT_CACHE_TTL,
        "connected": connected,
        "tools": tools_count,
        "details": {
            "server": {
                "host": mcp_config.HOST,
                "port": mcp_config.PORT,
                "encryption_enabled": bool(mcp_config.ENCRYPTION_KEY),
                "context_cache_ttl": mcp_config.CONTEXT_CACHE_TTL,
            },
            "integrations": {
                "figma": bool(integration_config.FIGMA_TOKEN),
                "anthropic": bool(integration_config.ANTHROPIC_API_KEY),
                "jira_default": bool(integration_config.JIRA_URL),
                "confluence_default": bool(integration_config.CONFLUENCE_URL),
            },
            "circuit_breaker": {
                "failure_threshold": mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD,
                "timeout_seconds": mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS,
            },
            "warnings": warnings,
        },
        "integrations": {
            "figma": bool(integration_config.FIGMA_TOKEN),
            "anthropic": bool(integration_config.ANTHROPIC_API_KEY),
            "jira_default": bool(integration_config.JIRA_URL),
            "confluence_default": bool(integration_config.CONFLUENCE_URL),
        },
        "circuit_breaker": {
            "failure_threshold": mcp_config.CIRCUIT_BREAKER_FAILURE_THRESHOLD,
            "timeout_seconds": mcp_config.CIRCUIT_BREAKER_TIMEOUT_SECONDS,
        },
        "warnings": warnings,
    }
@@ -3070,9 +2991,8 @@ async def write_project_file(project_id: str, request: FileWriteRequest):
        raise HTTPException(status_code=403, detail=str(e))


UI_DIR = Path(__file__).parent.parent.parent / "admin-ui"
if UI_DIR.exists():
    app.mount("/", StaticFiles(directory=str(UI_DIR), html=True), name="ui")
if _admin_ui_dist_index.exists():
    app.mount("/", StaticFiles(directory=str(_admin_ui_dist), html=True), name="ui")


def kill_port(port: int, wait: float = 0.5) -> None:

@@ -13,7 +13,7 @@ Modes:
- Server: Deployed remotely, serves design systems to teams
- Local: Dev companion, UI advisor, local services

Uses SQLite for persistence, integrates with Figma tools.
Uses JSON storage for persistence, integrates with Figma tools.
"""

import json
@@ -261,10 +261,12 @@ async def health():

@app.get("/api/stats")
async def get_statistics():
    """Get database and system statistics."""
    db_stats = get_stats()
    """Get storage and system statistics."""
    storage_stats = get_stats()
    return {
        "database": db_stats,
        "storage": storage_stats,
        # Backwards-compatible alias (historical naming; underlying storage is JSON files).
        "database": storage_stats,
        "figma": {"mode": figma_suite.mode, "configured": config.figma.is_configured},
    }

35
docs/README.md
Normal file
35
docs/README.md
Normal file
@@ -0,0 +1,35 @@
# DSS Documentation (Human)

This directory contains **human-facing** documentation for DSS.

If you’re looking for AI/agent-oriented notes, see `docs/ai.md`.

## What DSS Is

DSS (Design System Server) is a toolkit that can run in two modes:

1. **Local developer tool**: analyze any project on disk, extract tokens, generate Storybook stories, and expose `dss_*` tools to Claude Code via MCP.
2. **Headless server**: run a FastAPI backend that teams (UX/QA/Admin) can access through the web Admin UI, and that AI assistants can interact with via the `/api/claude/chat` endpoint and MCP tool endpoints.

## MCP across clients

DSS exposes the same `dss_*` toolset to multiple AI clients via MCP:

- Recommended: `./scripts/enable-mcp-clients.sh`
- **Claude Code**: `./scripts/setup-mcp.sh` generates `.claude/mcp.json`.
- **Codex CLI**: `codex mcp add dss -- /absolute/path/to/dss/scripts/dss-mcp`
- **Gemini CLI**: `gemini mcp add dss /absolute/path/to/dss/scripts/dss-mcp`

The Claude plugin system (commands/skills under `dss-claude-plugin/`) is Claude-specific, but DSS makes those guides/hooks available to *any* MCP client via:
- `dss_list_guides` (use `include_meta=true` for hook metadata)
- `dss_get_guide`
- `dss_match_skills`
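
These tools are reachable from any connected MCP client. As a rough sketch, the same call can also go through the headless server’s HTTP MCP endpoint (default port from `docs/configuration.md`; `demo` is a placeholder project id):

```bash
# Hedged example: list plugin guides with hook metadata via the HTTP MCP endpoint.
curl -X POST http://localhost:6220/api/mcp/tools/dss_list_guides/execute \
  -H 'Content-Type: application/json' \
  -d '{"arguments": {"include_meta": true}, "project_id": "demo"}'
```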

## Documentation Map

- `docs/quickstart.md` — install + run locally, and run the headless server.
- `docs/architecture.md` — how CLI, API, Admin UI, and MCP fit together.
- `docs/configuration.md` — environment variables, ports, and operational knobs.
- `docs/storage.md` — JSON-only storage layout and `DSS_HOME` rules.
- `docs/upgrade-notes.md` — recent upgrades and migration notes.
- `docs/ai.md` — where the AI/agent documentation lives.
21
docs/ai.md
Normal file
21
docs/ai.md
Normal file
@@ -0,0 +1,21 @@
# AI / Agent Documentation Index

This repo also includes documentation intended for AI agents (Claude Code plugin skills/commands, internal reference docs, and knowledge base files).

Key entry points:

- `CLAUDE.md` — instructions for Claude Code sessions and MCP setup.
- `admin-ui/AI-REFERENCE.md` — Admin UI architecture and API usage reference.
- `dss-claude-plugin/commands/` — Claude Code slash commands (operator runbooks).
- `dss-claude-plugin/skills/` — skill prompts for tool-using agents.
- `.knowledge/` — internal knowledge base documents.

## Cross-model (Claude, Codex, Gemini)

The DSS MCP server is client-agnostic. For non-Claude clients, the Claude plugin system (slash commands) does not load, but:

- the same MCP tools are available, and
- the plugin guides can be discovered via MCP tools:
  - `dss_list_guides`
  - `dss_get_guide`
  - `dss_match_skills` (emulates skill hook matching via `globs` / `alwaysApply`)
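
For example, a non-Claude agent could ask which skills apply to a file it is about to edit. The argument name `path` below is a guess at the tool’s schema, not confirmed — check `dss_list_guides` with `include_meta=true` first; the project id is a placeholder:

```bash
# Hypothetical call shape for skill matching from any MCP-capable client.
curl -X POST http://localhost:6220/api/mcp/tools/dss_match_skills/execute \
  -H 'Content-Type: application/json' \
  -d '{"arguments": {"path": "src/components/Button.tsx"}, "project_id": "demo"}'
```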
48
docs/architecture.md
Normal file
48
docs/architecture.md
Normal file
@@ -0,0 +1,48 @@
# Architecture

## High-level pieces

- **Python core library** (`dss/`): analysis, ingestion, Storybook generation, storage.
- **Headless API** (`apps/api/server.py`): FastAPI server for Admin UI + AI chat + MCP endpoints.
- **Admin UI** (`admin-ui/`): Preact app used by UX/QA/Admin (talks to the API via `/api/*`).
- **Claude Code integration** (`dss-claude-plugin/`): Claude plugin assets (commands + skills).
- **Local MCP stdio server** (`dss/mcp/server.py`): a minimal MCP process that exposes `dss_*` tools to Claude Code.

## Execution modes

### Local (developer machine)

- Run `dss-cli.py` directly against any repo on disk.
- Or run MCP locally (`python -m dss.mcp.server`) so Claude Code can call `dss_*` tools.

### Headless (team server)

- Run `uvicorn apps.api.server:app ...` to expose:
  - Admin UI API (`/api/*`)
  - AI chat (`/api/claude/chat`)
  - MCP tool listing/execution (`/api/mcp/*`)
- When `admin-ui/dist/` exists, the server can also serve the built Admin UI bundle as static files.
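
A minimal smoke test against a running server (default port assumed; both routes live under `/api/mcp/*`):

```bash
# Check MCP status (returns { connected, tools, details }) and list tools.
curl http://localhost:6220/api/mcp/status
curl http://localhost:6220/api/mcp/tools
```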

### Hybrid (recommended for mixed teams)

- Developers run the MCP process locally.
- Tool execution can be proxied to a team server by setting `DSS_API_URL`.
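
For example (a sketch — substitute your own server URL; the env vars are documented in `docs/configuration.md`):

```bash
# Run the local MCP stdio process in proxy mode against a team server.
DSS_API_URL=https://dss.example.com \
DSS_PROJECT_ID=my-project \
python -m dss.mcp.server
```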

## MCP: unified tool layer

All MCP-facing tool calls run through a shared registry/handler:

- Registry + execution: `dss/mcp/handler.py`
- Local MCP server (stdio): `dss/mcp/server.py`
- Headless server endpoints: `apps/api/server.py` (`/api/mcp/*`)

This avoids “two different DSS tool implementations” drifting over time.

## Storage: JSON-only

DSS stores state as JSON under `DSS_HOME` (see `docs/storage.md`).

Why JSON:
- portable across machines/containers
- reviewable diffs in Git
- simple backup/restore
102
docs/configuration.md
Normal file
102
docs/configuration.md
Normal file
@@ -0,0 +1,102 @@
# Configuration

## Ports (defaults)

| Service | Default |
| --- | --- |
| API server | `6220` |
| Admin UI (Vite dev) | `6221` |
| MCP server (stdio process) | `6222` |
| Storybook (DSS repo) | `6226` |

Notes:
- `./scripts/dss` runs a single-port server (UI + API) on `DSS_PORT` (defaults to `6220`).
- Many target projects run their own Storybook on `6006`/`6007`; DSS service discovery checks those ports.

## Storage

- `DSS_HOME`
  - **Server mode**: set this explicitly (e.g. `/var/lib/dss` or a project volume).
  - **Local mode**: if a project has `./.dss/`, DSS will use it automatically; otherwise it falls back to `~/.dss`.

See `docs/storage.md` for the directory layout.

## AI / Integrations

- `FIGMA_TOKEN` — enables live Figma extraction/sync.
- `ANTHROPIC_API_KEY` — enables `/api/claude/chat` AI chat and tool calling.
- `DSS_MCP_ENCRYPTION_KEY` — optional Fernet key; encrypts integration configs at rest when set.
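
To generate a key, any Fernet-compatible generator works; this sketch assumes the Python `cryptography` package is installed:

```bash
# Print a fresh Fernet key suitable for DSS_MCP_ENCRYPTION_KEY.
python -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())'
```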

## Dev workflow (optional)

- `DSS_ENABLE_DEV_COMMANDS`
  - When set to `1`, DSS exposes dev-only MCP workflow tools that wrap local scripts (`dss_init`, `dss_reset`, `dss_services`).
  - `scripts/dss-mcp` and `./scripts/setup-mcp.sh` set this automatically for local development.

## Single-port launcher

- `DSS_PORT` — port for `./scripts/dss` (single-port server wrapper).

## MCP / Proxy mode

- `DSS_API_URL` (or `DSS_SERVER_URL`) — if set, `dss.mcp.server` forwards tool calls to a headless DSS server.
- `DSS_PROJECT_ID` — default project context for MCP calls (when proxying or when tools omit `path`).
- `DSS_USER_ID` — optional user context for integrations.
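
Rather than exporting these per shell, proxy mode can be baked into the generated client config (URL is a placeholder; the `--api-url` flag is described under "Scripts" below):

```bash
# Persist proxy mode into .claude/mcp.json instead of exporting env vars per run.
./scripts/setup-mcp.sh --api-url https://dss.example.com
```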

## MCP client setup (Claude, Codex, Gemini)

All three clients can run the same DSS MCP stdio server.

Quick option (configures Claude + Codex + Gemini when installed):

```bash
./scripts/enable-mcp-clients.sh
```

### Claude Code

- Generate Claude Code MCP config: `./scripts/setup-mcp.sh`
- Restart Claude Code, then run `/mcp` to verify `dss` is connected.

### Codex CLI

Add DSS as a global MCP server:

```bash
codex mcp add dss -- /absolute/path/to/dss/scripts/dss-mcp
```

Remove it:

```bash
codex mcp remove dss
```

### Gemini CLI

Add DSS as an MCP server:

```bash
gemini mcp add dss /absolute/path/to/dss/scripts/dss-mcp
```

Remove it:

```bash
gemini mcp remove dss
```

### Notes

- The Claude Code *plugin* (commands/skills under `dss-claude-plugin/`) is Claude-specific; Codex/Gemini won’t “load” it as a plugin, but they can still use the same MCP tools.
- `scripts/dss-mcp` sets required env vars (`PYTHONPATH`, `DSS_HOME`, etc.) and runs `python -m dss.mcp.server`.
- DSS also exposes the plugin guides/hooks to all MCP clients via:
  - `dss_list_guides` (use `include_meta=true` to see `globs` / `alwaysApply` / `arguments`)
  - `dss_get_guide`
  - `dss_match_skills`

## Scripts

- `./scripts/setup-mcp.sh`:
  - generates `.claude/mcp.json` to run the local MCP stdio server (`python -m dss.mcp.server`)
  - `--api-url <url>` — enable proxy mode for the local MCP server
108
docs/quickstart.md
Normal file
108
docs/quickstart.md
Normal file
@@ -0,0 +1,108 @@
# Quickstart

## Prerequisites

- Python 3.10+
- Node.js 18+ (for `admin-ui` and Storybook tooling)

## 1) Install Python deps

```bash
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
```

## 2) Local developer workflow (CLI only)

Run analysis for any project on disk:

```bash
./dss-cli.py analyze --project-path /absolute/path/to/your-project
```

This writes a portable JSON artifact:
- `<project>/.dss/analysis_graph.json`
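
A quick way to sanity-check the artifact (assumes `jq` is installed; the top-level `stats` key comes from the dependency graph):

```bash
jq '.stats' /absolute/path/to/your-project/.dss/analysis_graph.json
```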

Generate Storybook stories for a project:

```bash
./dss-cli.py setup-storybook --action generate --project-path /absolute/path/to/your-project
```

## 3) Local developer workflow (Claude Code via MCP)

Generate Claude Code MCP config:

```bash
./scripts/setup-mcp.sh
```

Then restart Claude Code and run `/mcp` to confirm the `dss` server is connected.

### Enable MCP for all supported clients (recommended)

```bash
./scripts/enable-mcp-clients.sh
```

### Proxy mode (MCP process local, tools executed on a server)

```bash
./scripts/setup-mcp.sh --api-url https://your-dss-server.example.com
```

Set `DSS_PROJECT_ID` in the MCP server environment when you want tools to default to a specific registered project.

## 4) Headless server (API for Admin UI + teams)

Choose one of the following:

### Option A: Dev mode (Vite Admin UI + API)

Start the API server:

```bash
source .venv/bin/activate
PYTHONPATH="$PWD:$PWD/apps/api" uvicorn apps.api.server:app --host 0.0.0.0 --port 6220
```

Start the Admin UI (dev mode with `/api` proxy to `:6220`):

```bash
cd admin-ui
npm install
npm run dev
```

Admin UI: `http://localhost:6221`
API: `http://localhost:6220`
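
To confirm the API is up, hit a known endpoint (the headless server exposes `/api/stats`):

```bash
curl http://localhost:6220/api/stats
```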

### Option B: Single-port server (serves built Admin UI)

Build the Admin UI once:

```bash
cd admin-ui
npm install
npm run build
cd ..
```

Start DSS (serves UI + API from one process):

```bash
./scripts/dss start
```

Dashboard: `http://localhost:6220`
API: `http://localhost:6220/api`

## 5) Common environment variables

- `DSS_HOME` — where DSS stores JSON data (defaults to `./.dss` when present, else `~/.dss`)
- `FIGMA_TOKEN` — enables live Figma tooling
- `ANTHROPIC_API_KEY` — enables AI chat/tool calling on the headless server
- `DSS_API_URL` — enables MCP proxy mode (local MCP → headless server)

See `docs/configuration.md` for the full list and details.
59
docs/storage.md
Normal file
59
docs/storage.md
Normal file
@@ -0,0 +1,59 @@
# Storage (JSON-only)

## `DSS_HOME` resolution

DSS chooses a storage root in this order:

1. `$DSS_HOME` if set
2. `./.dss` if it exists in the current working directory
3. `~/.dss`

This supports:
- **project-local** storage (recommended when running DSS inside a repo)
- **shared user** storage (default fallback)
- **server** storage (explicit `DSS_HOME` volume)
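
For example, to force a server-style root for a single run (the path is illustrative):

```bash
DSS_HOME=/var/lib/dss ./scripts/dss start
```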

## Directory layout

Most persistent JSON state lives under:

`$DSS_HOME/data/`

```
data/
  _system/
    activity/                # JSONL activity logs (by day)
    cache/                   # TTL cache entries (JSON)
    users/                   # auth users (JSON-only)
    integration_health.json
  projects/
    <project_id>/
      manifest.json
      components/
      figma/
      integrations.json
      metrics/
      styles/
      tokens/
  teams/
```

## Concurrency / locking

The JSON store uses simple file locking (`*.lock`) to reduce corruption when multiple processes write concurrently.

## Project-local artifacts

In addition to `$DSS_HOME/data/`, DSS also writes project-local artifacts under a repo’s `.dss/` directory, for example:

- `.dss/analysis_graph.json` — portable analysis output (commit-friendly).
- `.dss/config.json` — per-project DSS configuration (used by `ConfigService`).
- `.dss/runtime-config.json` — runtime/server preferences (when using the headless server wrappers).

## What is safe to commit

Typical “commit-worthy” artifacts:
- `<project>/.dss/analysis_graph.json` (generated analysis output)

Typical “do not commit” data:
- per-user caches and secrets under `$DSS_HOME/data/_system/cache`
61
docs/upgrade-notes.md
Normal file
61
docs/upgrade-notes.md
Normal file
@@ -0,0 +1,61 @@
# Upgrade Notes

This file summarizes major upgrades applied to DSS to support:
- JSON-only, portable storage
- a single MCP tool layer used by local + server + plugin
- local developer usage across many projects **and** a headless server for UX/QA/Admin

## Key upgrades

### 1) Unified MCP tool layer (`dss.mcp`)

Created a shared tool registry/execution layer used by:
- Headless server (`apps/api/server.py` → `/api/mcp/*`)
- Local MCP stdio server (`python -m dss.mcp.server`)

Proxy mode is supported via `DSS_API_URL`.

### 2) JSON-only storage (removed SQLite)

All state is stored as JSON under `DSS_HOME`:
- Project data: `$DSS_HOME/data/projects/<project_id>/...`
- System data: `$DSS_HOME/data/_system/...`

Auth user storage is JSON-based (`dss/storage/json_store.py:Users`).

### 3) Consistent storage root (`DSS_HOME`)

Storage now resolves in this order:
1. `DSS_HOME`
2. `./.dss` (project-local, if present)
3. `~/.dss`

### 4) Storybook generation reliability

Story generation now:
- scans component directories recursively
- writes the correct story extension (`.stories.jsx` vs `.stories.tsx`)
- avoids deleting user-authored stories by only clearing files with a DSS marker

### 5) Headless server serves built Admin UI

When `admin-ui/dist/index.html` exists, the headless server now serves the built Admin UI bundle as static files (so teams can access the web dashboard from the server port).

### 6) CLI analysis now produces portable JSON output

`./dss-cli.py analyze --project-path <path>` writes:
- `<project>/.dss/analysis_graph.json`

This is intended to be commit-friendly and shareable across machines.

### 7) Removed deprecated `dss-setup`

The deprecated Claude Code command `/dss-setup` has been removed. Use `/dss-init`.

## Migration notes

- If you previously relied on `.dss/dss.db` or `DSS_DATABASE`: it is no longer used.
- If you previously imported `dss_mcp` or used `dss/mcp_server/*`: use `dss/mcp/*`.
- If you previously referenced `dss-claude-plugin/servers/*` for MCP: use `python -m dss.mcp.server`.
- Re-run `./scripts/setup-mcp.sh` after pulling to refresh `.claude/mcp.json`.
- If you used the single-port launcher on `:3456`, note `./scripts/dss` now defaults to `:6220` (override with `DSS_PORT` or update your reverse proxy).
@@ -10,7 +10,7 @@
      "name": "dss-claude-plugin",
      "source": "./",
      "description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components",
      "version": "1.0.0"
      "version": "1.0.2"
    }
  ]
}

@@ -1,6 +1,6 @@
{
  "name": "dss-claude-plugin",
  "version": "1.0.1",
  "version": "1.0.2",
  "description": "Design System Server (DSS) integration for Claude Code - Analyze, extract, and generate design system tokens and components",
  "author": {
    "name": "overbits",
@@ -20,11 +20,11 @@ Single entry point for all DSS initialization. Handles MCP config, dependencies,
## Full Workflow (15 steps)

1. **Reset** (with --reset) - Clear all DSS data
2. **MCP Config** - Generate `.mcp.json`
2. **MCP Config** - Generate `.claude/mcp.json` (local MCP server)
3. **Dependencies** - Python venv, Node modules, admin-ui build
4. **Environment** - Validate Python, Node, Figma token
5. **Directory Structure** - Create `.dss/` folders
6. **Database** - Initialize SQLite
6. **Storage** - Initialize JSON data directories
7. **Analyze Targets** - admin-ui, storybook stats
8. **Token Structure** - Create base token files
9. **3-Layer Validation** - Core, skins, themes
@@ -22,13 +22,12 @@ Without `--confirm`, runs in dry-run mode showing what would be deleted.
## What This Clears

1. `.dss/data/` - Projects, teams, cache, activity
2. `.dss/dss.db` - SQLite database
3. `admin-ui/css/dss-*.css` - Generated CSS files
4. `admin-ui/src/components/*.stories.js` - Generated stories
5. `admin-ui/src/components/ds-*.js` - Generated components
6. `dss/core_tokens/tokens.json` - Reset to empty
7. `dss-claude-plugin/core/skins/*.json` - Reset to awaiting sync
8. `.dss/logs/` - Clear log files
2. `admin-ui/css/dss-*.css` - Generated CSS files
3. `admin-ui/src/components/*.stories.js` - Generated stories
4. `admin-ui/src/components/ds-*.js` - Generated components
5. `dss/core_tokens/tokens.json` - Reset to empty
6. `dss-claude-plugin/core/skins/*.json` - Reset to awaiting sync
7. `.dss/logs/` - Clear log files

## Instructions for Claude

@@ -59,8 +58,6 @@ DRY RUN MODE - No changes will be made

1. Clearing .dss/data/ structure...
   Would run: rm -rf .dss/data/projects/* ...
2. Resetting database...
   Would run: rm -f .dss/dss.db
...

DRY RUN COMPLETE
@@ -34,9 +34,9 @@ Manage all DSS development services from a single command.

| Service | Port | Description |
|---------|------|-------------|
| api | 8000 | FastAPI REST server |
| admin-ui | 3456 | Vite dev server |
| storybook | 6006 | Storybook design docs |
| api | 6220 | FastAPI REST server |
| admin-ui | 6221 | Vite dev server |
| storybook | 6226 | Storybook design docs |

## Examples

@@ -71,25 +71,25 @@ When the user runs this command:
3. For `status` action, show a table with service states

4. After `start`, provide clickable URLs:
   - API: http://localhost:8000
   - admin-ui: http://localhost:3456
   - Storybook: http://localhost:6006
   - API: http://localhost:6220
   - admin-ui: http://localhost:6221
   - Storybook: http://localhost:6226

## Service Details

### API Server (port 8000)
### API Server (port 6220)
- FastAPI REST API
- Endpoints: projects, figma, health, config
- Command: `uvicorn apps.api.server:app --reload`
- Command: `uvicorn apps.api.server:app --host 0.0.0.0 --port 6220 --reload`
- Log: `/tmp/dss-api.log`

### Admin UI (port 3456)
### Admin UI (port 6221)
- Preact/Vite development server
- Design system management interface
- Command: `npm run dev`
- Log: `/tmp/dss-admin-ui.log`

### Storybook (port 6006)
### Storybook (port 6226)
- Component documentation
- Token visualization
- Command: `npm run storybook`
@@ -104,6 +104,5 @@ If a service fails to start:

## Related Commands

- `/dss-setup` - Full environment setup
- `/dss-init` - Initialize DSS structure
- `/dss-init` - Full environment setup + initialization
- `/dss-reset` - Reset to clean state
@@ -1,38 +0,0 @@
---
name: dss-setup
description: DEPRECATED - Use /dss-init instead
arguments:
  - name: flags
    description: Optional flags (--reset, --skip-servers)
    required: false
---

# DSS Setup Command (DEPRECATED)

**This command is deprecated. Use `/dss-init` instead.**

The `/dss-init` command now handles everything that `/dss-setup` did:
- MCP configuration
- Dependencies (Python venv, Node modules)
- DSS initialization
- Development servers

## Migration

| Old Command | New Command |
|-------------|-------------|
| `/dss-setup` | `/dss-init` |
| `/dss-setup --reset` | `/dss-init --reset` |
| `/dss-setup --skip-servers` | `/dss-init --skip-servers` |

## Instructions for Claude

When the user runs this command:

1. Inform them that `/dss-setup` is deprecated
2. Run `/dss-init` with the same flags instead

```bash
# Just run dss-init.sh directly
scripts/dss-init.sh [flags]
```

@@ -143,5 +143,6 @@ CONFIGURATION CREATED
- Controls configured

Run: npm run storybook
Access: http://localhost:6006
Access: http://localhost:6226
```
Note: DSS uses `6226` as its default Storybook port, but many target projects still run Storybook on `6006`/`6007`.
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
CONFIG_DIR = Path.home() / ".dss"
CONFIG_FILE = CONFIG_DIR / "config.json"
DEFAULT_REMOTE_URL = "https://dss.overbits.luz.uy"
DEFAULT_LOCAL_URL = "http://localhost:6006"
DEFAULT_LOCAL_URL = "http://localhost:6220"


class DSSMode(str, Enum):

@@ -105,7 +105,7 @@ def mcp_get_compiler_status() -> str:


# MCP Tool Registry
# This can be imported by dss-mcp-server.py to register the tools
# These tool definitions can be imported by the unified DSS MCP layer if needed.

MCP_TOOLS = {
    "dss_get_resolved_context": {
File diff suppressed because it is too large
@@ -206,11 +206,13 @@ export default create({

## Server Configuration

DSS Storybook runs on port 6006 by default:
DSS Storybook runs on port `6226` by default:
- Host: 0.0.0.0 (configurable)
- Port: 6006 (configurable)
- Port: 6226 (configurable via `STORYBOOK_PORT`)
- Auto-open: disabled by default

Many target projects still run Storybook on `6006`/`6007`; DSS service discovery can detect those running instances.

## Best Practices

1. **Story Organization**
@@ -1,158 +0,0 @@
#!/usr/bin/env python3
"""Verify that dss-mcp-server.py properly exports Context Compiler tools."""

import sys
from pathlib import Path

# Add the server directory to path
sys.path.insert(0, str(Path(__file__).parent))

# Import the server module
print("=" * 60)
print("CONTEXT COMPILER TOOL VERIFICATION")
print("=" * 60)

# Test imports
print("\n1. Testing Context Compiler imports...")
try:
    from core import (
        get_active_context,
        get_compiler_status,
        list_skins,
        resolve_token,
        validate_manifest,
    )

    print("   ✓ All Context Compiler functions imported successfully")
    CONTEXT_COMPILER_AVAILABLE = True
except ImportError as e:
    print(f"   ✗ Context Compiler import failed: {e}")
    CONTEXT_COMPILER_AVAILABLE = False
    sys.exit(1)

# Test the server's tool list
print("\n2. Checking MCP server tool list...")
try:
    # We need to simulate the MCP server initialization
    # to see what tools it would export

    from mcp.server import Server

    # Create a test server instance
    server = Server("dss-test")

    # Import the list_tools function logic
    print("   Checking if server exports tools properly...")

    # Read the actual server file and check for context_compiler_tools
    with open(Path(__file__).parent / "servers" / "dss-mcp-server.py", "r") as f:
        server_code = f.read()

    if "context_compiler_tools" in server_code:
        print("   ✓ context_compiler_tools defined in server")
    else:
        print("   ✗ context_compiler_tools NOT found in server")
        sys.exit(1)

    if "dss_get_resolved_context" in server_code:
        print("   ✓ dss_get_resolved_context tool defined")
    else:
        print("   ✗ dss_get_resolved_context NOT found")
        sys.exit(1)

    if "dss_resolve_token" in server_code:
        print("   ✓ dss_resolve_token tool defined")
    else:
        print("   ✗ dss_resolve_token NOT found")
        sys.exit(1)

    if "dss_validate_manifest" in server_code:
        print("   ✓ dss_validate_manifest tool defined")
    else:
        print("   ✗ dss_validate_manifest NOT found")
        sys.exit(1)

    if "dss_list_skins" in server_code:
        print("   ✓ dss_list_skins tool defined")
    else:
        print("   ✗ dss_list_skins NOT found")
        sys.exit(1)

    if "dss_get_compiler_status" in server_code:
        print("   ✓ dss_get_compiler_status tool defined")
    else:
        print("   ✗ dss_get_compiler_status NOT found")
        sys.exit(1)

    # Check if tools are returned
    if "return dss_tools + devtools_tools + browser_tools + context_compiler_tools" in server_code:
        print("   ✓ context_compiler_tools added to tool list return")
    else:
        print("   ✗ context_compiler_tools NOT added to return statement")
        sys.exit(1)

except Exception as e:
    print(f"   ✗ Error checking server tools: {e}")
    sys.exit(1)

# Test tool handlers
print("\n3. Checking MCP server tool handlers...")
try:
    with open(Path(__file__).parent / "servers" / "dss-mcp-server.py", "r") as f:
        server_code = f.read()

    handlers = [
        'elif name == "dss_get_resolved_context"',
        'elif name == "dss_resolve_token"',
        'elif name == "dss_validate_manifest"',
        'elif name == "dss_list_skins"',
        'elif name == "dss_get_compiler_status"',
    ]

    for handler in handlers:
        if handler in server_code:
            tool_name = handler.split('"')[1]
            print(f"   ✓ {tool_name} handler implemented")
        else:
            tool_name = handler.split('"')[1]
            print(f"   ✗ {tool_name} handler NOT found")
            sys.exit(1)

except Exception as e:
    print(f"   ✗ Error checking tool handlers: {e}")
    sys.exit(1)

# Test Context Compiler functionality
print("\n4. Testing Context Compiler functionality...")
try:
    import json

    # Test list_skins
    skins_json = list_skins()
    skins = json.loads(skins_json)
    print(f"   ✓ list_skins() returned {len(skins)} skins: {skins}")

    # Test get_compiler_status
    status_json = get_compiler_status()
    status = json.loads(status_json)
    print(f"   ✓ get_compiler_status() returned status: {status['status']}")

    if status["status"] == "active":
        print("   ✓ Context Compiler is active and ready")
    else:
        print(f"   ✗ Context Compiler status is: {status['status']}")
        sys.exit(1)

except Exception as e:
    print(f"   ✗ Context Compiler functionality test failed: {e}")
    sys.exit(1)

print("\n" + "=" * 60)
print("✅ ALL VERIFICATIONS PASSED")
print("=" * 60)
print("\nContext Compiler tools are properly integrated into dss-mcp-server.py")
print("and should be available to Claude Code after MCP server restart.")
print("\nIf tools are not showing up in Claude Code, try:")
print("1. Fully restart Claude Code (not just /mcp restart)")
print("2. Check Claude Code logs for connection errors")
print("3. Verify MCP server configuration in Claude settings")
10
dss-cli.py
10
dss-cli.py
@@ -13,10 +13,8 @@ import json
import sys
from pathlib import Path

# Ensure the script can find the 'dss' module
# This adds the parent directory of 'dss-mvp1' to the Python path
# Assuming the script is run from the project root, this will allow `from dss...` imports
sys.path.insert(0, str(Path(__file__).parent.parent))
# Ensure the script can find the local `dss` package when run from a checkout.
sys.path.insert(0, str(Path(__file__).parent))

try:
    from dss import StorybookScanner, StoryGenerator, ThemeGenerator
@@ -24,7 +22,7 @@ try:
    from dss.project.manager import ProjectManager
except ImportError as e:
    print(
        "Error: Could not import DSS modules. Make sure dss-mvp1 is in the PYTHONPATH.",
        "Error: Could not import DSS modules. Make sure the repo root is in PYTHONPATH.",
        file=sys.stderr,
    )
    print(f"Import error: {e}", file=sys.stderr)
@@ -169,7 +167,7 @@ def main():
        print(json.dumps(result, indent=2))
    elif action == "generate":
        generator = StoryGenerator(project_path)
        result = generator.generate()
        result = generator.generate(dry_run=False)
        print(f"Successfully generated {len(result)} new stories.")
    elif action == "configure":
        theme_gen = ThemeGenerator(project_path)
@@ -1,59 +0,0 @@
### **Situation Handover to Claude**

**Context:** The overarching goal is to enhance DSS (Design System Server) with greater intelligence for analyzing and managing React projects, initially by "dogfooding" DSS itself on its own `admin-ui` project.

**Initial Goal from User:**
1. Implement a robust Python-based analysis engine (`project_analyzer.py`) for React projects.
2. Integrate this into the DSS MCP and CLI.
3. Ensure continuous integration (CI/CD) automates the analysis and commits results (`project_context.json`) back to the repository.
4. Set up DSS to manage its own `admin-ui` project.
5. Link core DSS to its Figma UI Kit.
6. Build a default Storybook skin with DSS atoms and shadcn styles.

---

**Actions Taken & Current Status:**

1. **Analysis Engine & CLI**:
   * **Implemented**: `dss-mvp1/dss/analyze/project_analyzer.py` was created, capable of parsing React/JS/TS files (using a Node.js `@babel/parser` subprocess) and generating a `networkx` graph. It also includes an `export_project_context` function.
   * **Implemented**: `dss-mvp1/dss-cli.py` was created as a command-line interface, including `analyze`, `export-context`, `add-figma-file`, `setup-storybook`, and `sync-tokens` commands.
   * **Implemented**: The `dss-claude-plugin/servers/dss-mcp-server.py` was updated to expose `dss_project_graph_analysis` and `dss_project_export_context` as MCP tools for AI agents.
   * **Implemented**: Unit tests for `project_analyzer.py` were added and are currently passing.

2. **CI/CD Setup**:
   * **Implemented**: `.gitea/workflows/dss-analysis.yml` was created to automate the `dss-cli.py analyze` and `git commit` process for `project_context.json` on every push.
   * **Verified**: Git hooks were fixed and confirmed to be running.
   * **Verified**: SSH key authentication for Git push was correctly set up after troubleshooting.

3. **Dogfooding `admin-ui` Project**:
   * **Goal**: Initialize `admin-ui` as a DSS project, generate its analysis context, link it to Figma, and generate Storybook stories.
   * **Status**:
     * `admin-ui/.dss/analysis_graph.json` was successfully created (by `dss-mvp1/dss-cli.py analyze --project-path ./admin-ui`).
     * `admin-ui/ds.config.json` was manually corrected and populated to resolve Pydantic validation errors during project loading.
     * Figma UI Kit `figd_ScdBk47HlYEItZbQv2CcF9aq-3TfWbBXN3yoRKWA` was successfully linked to `admin-ui` (by `dss-mvp1/dss-cli.py add-figma-file --project-path ./admin-ui --file-key ...`).
     * **Token Synchronization (Blocked)**: `dss-mvp1/dss-cli.py sync-tokens --project-path ./admin-ui` fails with `403 Client Error: Forbidden` from the Figma API due to a placeholder token. This is expected, as a valid `FIGMA_TOKEN` environment variable is required.

4. **Storybook Generation (Current Blocker)**:
   * **Goal**: Build a default Storybook skin with DSS atoms and shadcn styles applied.
   * **Expected Tool**: `dss-mvp1/dss-cli.py setup-storybook --action generate --project-path ./admin-ui`.
   * **Problem**: This command consistently reports `Generated 0 new stories.`
   * **Investigation**:
     * Initial assumption that `dss-mvp1` itself contained components was incorrect.
     * Moved `admin-ui/js/components/ds-button.js` to `admin-ui/src/components/ds-button.js` to match component discovery paths.
     * Re-read `dss/storybook/generator.py` to confirm its logic. It expects components in standard directories like `src/components`.
     * Confirmed that `StoryGenerator.generate` calls `generate_stories_for_directory`, which in turn calls `_parse_component`.
     * Despite placing `ds-button.js` in a recognized path, `0 new stories` are still being generated.
     * The `StoryGenerator` logic in `dss/storybook/generator.py` inspects component files, but it relies on specific patterns (e.g., `interface ButtonProps`, `children`) to extract `PropInfo` and `ComponentMeta`. The output of `@babel/parser` is not currently being used by `StoryGenerator` to populate `ComponentMeta`.

**The core issue preventing Storybook generation is that the `StoryGenerator` is unable to correctly parse the provided JavaScript/TypeScript component files and extract the necessary metadata (props, component name, etc.) to create a story.** The integration between the `@babel/parser` output (which is JSON AST) and the `StoryGenerator`'s `_parse_component` method is either missing or misconfigured. The `_parse_component` method appears to be using regex on the raw file content, which might be insufficient or incorrect for the component's structure.

---

**Recommendation for Claude:**

1. **Investigate `dss/storybook/generator.py`**: Focus on the `_parse_component` method. How does it extract `ComponentMeta` from the component file? It currently uses regex, which is fragile.
2. **Integrate Babel AST**: The `@babel/parser` subprocess call already produces a full AST. The `_parse_component` method should be updated to consume and interpret this AST to reliably extract component metadata (name, props, children, description). This would be much more robust than regex.
3. **Validate Component Structure**: Ensure the `ds-button.js` (or any target component) has a structure that the updated parser can understand and extract metadata from.
4. **Re-run Storybook Generation**: Once `_parse_component` can correctly extract metadata, re-run `setup-storybook --action generate` to confirm stories are created.

I have included the contents of `dss/storybook/generator.py` for direct reference.
@@ -1,12 +1,11 @@
"""
DSS - Design System Server.

A Model Context Protocol (MCP) server that provides Claude Code with 40+ design system tools.
Supports local development and remote team deployment.
Design system tooling for local development and headless server deployments.

Usage:
    from dss import settings, Projects, Components
    from dss.mcp_server import MCPServer
    # MCP stdio server entrypoint: python -m dss.mcp.server
    from dss.storage import Projects, Components, Tokens
"""

@@ -1,113 +1,187 @@
"""This module provides tools for analyzing a project."""
"""High-level project analysis orchestration used by CLI and MCP tooling."""

from __future__ import annotations

import asyncio
import json
import logging
import subprocess
from dataclasses import asdict, is_dataclass
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Dict
from typing import Any, Dict, List, Optional, Tuple

from dss.analyze.base import ProjectAnalysis

log = logging.getLogger(__name__)

# Path to the node.js parser script.
# This assumes the script is located in the same directory as this file.
parser_script_path = Path(__file__).parent / "parser.js"
from .base import ProjectAnalysis
from .graph import DependencyGraph
from .quick_wins import QuickWinFinder
from .react import ReactAnalyzer
from .scanner import ProjectScanner
from .styles import StyleAnalyzer


def analyze_project(
    path: str,
    output_graph: bool = False,
    prune: bool = False,
    visualize: bool = False,
) -> ProjectAnalysis:
    """
    Analyzes a project, including all its components and their dependencies.
def _safe_serialize(obj: Any) -> Any:
    if obj is None or isinstance(obj, (str, int, float, bool)):
        return obj
    if isinstance(obj, datetime):
        return obj.isoformat()
    if isinstance(obj, Enum):
        return obj.value
    if isinstance(obj, Path):
        return str(obj)
    if isinstance(obj, dict):
        return {str(k): _safe_serialize(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple, set)):
        return [_safe_serialize(v) for v in obj]
    if hasattr(obj, "to_dict") and callable(obj.to_dict):
        return _safe_serialize(obj.to_dict())
    if is_dataclass(obj):
        return _safe_serialize(asdict(obj))
    return str(obj)

    Args:
        path: The path to the project to analyze.
        output_graph: Whether to output the dependency graph.
        prune: Whether to prune the dependency graph.
        visualize: Whether to visualize the dependency graph.

    Returns:
        A ProjectAnalysis object containing the analysis results.
    """
    project_path = Path(path).resolve()
    log.info(f"Analyzing project at {project_path}...")
async def _build_analysis(
    project_root: Path,
) -> Tuple[ProjectAnalysis, DependencyGraph, Dict[str, Any], List[Any]]:
    scanner = ProjectScanner(str(project_root), use_cache=False)
    analysis = await scanner.scan()

    # Get all component files in the project.
    component_files = list(project_path.glob("**/*.js")) + list(project_path.glob("**/*.jsx"))
    react = ReactAnalyzer(str(project_root))
    style = StyleAnalyzer(str(project_root))
    graph = DependencyGraph(str(project_root))
    quick_wins_finder = QuickWinFinder(str(project_root))

    # For each component file, get its AST.
    for file_path in component_files:
        if file_path.is_file():
            # Call the external node.js parser
            result = subprocess.run(
                ["node", str(parser_script_path), file_path],
                capture_output=True,
                text=True,
                check=True,
            )
            # The AST is now in result.stdout as a JSON string.
            ast = json.loads(result.stdout)
            # TODO: Do something with the AST.
    components_task = react.analyze()
    style_task = style.analyze()
    graph_task = graph.build()
    quick_wins_task = quick_wins_finder.find_all()

    # TODO: Populate the ProjectAnalysis object with the analysis results.
    analysis = ProjectAnalysis(
        project_name=project_path.name,
        project_path=str(project_path),
        total_files=len(component_files),
        components={},
    components, style_result, _graph_dict, quick_wins = await asyncio.gather(
        components_task, style_task, graph_task, quick_wins_task
    )
    log.info(f"Analysis complete for {project_path.name}.")

    analysis.components = components
    analysis.component_count = len(components)

    analysis.token_candidates = style_result.get("token_candidates", [])  # type: ignore[assignment]
    analysis.stats["token_candidates"] = len(analysis.token_candidates)

    analysis.quick_wins = quick_wins
    analysis.stats["quick_wins_count"] = len(quick_wins)

    return analysis, graph, style_result, quick_wins


def analyze_project(path: str) -> ProjectAnalysis:
    """Synchronous wrapper around the async analyzers."""
    project_root = Path(path).expanduser().resolve()
    if not project_root.exists():
        raise FileNotFoundError(f"Project path not found: {project_root}")
    if not project_root.is_dir():
        raise NotADirectoryError(f"Project path is not a directory: {project_root}")

    analysis, _graph, _style_result, _quick_wins = asyncio.run(_build_analysis(project_root))
    return analysis


def export_project_context(analysis: ProjectAnalysis, output_path: str):
def run_project_analysis(project_path: str, output_file: Optional[str] = None) -> Dict[str, Any]:
    """
    Exports the project context to a JSON file.
    Run full analysis and write a portable graph JSON file to `<project>/.dss/analysis_graph.json`.

    Returns a JSON-serializable dict with both the graph and a summary analysis payload.
    """
    log.info(f"Exporting project context to {output_path}...")
    with open(output_path, "w") as f:
        json.dump(analysis.dict(), f, indent=2)
    log.info("Export complete.")
    project_root = Path(project_path).expanduser().resolve()
    if not project_root.exists():
        raise FileNotFoundError(f"Project path not found: {project_root}")
    if not project_root.is_dir():
        raise NotADirectoryError(f"Project path is not a directory: {project_root}")

    analysis, graph, style_result, quick_wins = asyncio.run(_build_analysis(project_root))

    graph_dict = graph.to_dict()
    insights = {
        "orphans": graph.find_orphans(),
        "hubs": graph.find_hubs(),
        "cycles": graph.find_circular_dependencies(),
    }

    style_summary = {k: v for k, v in style_result.items() if k != "token_candidates"}

    result: Dict[str, Any] = {
        "project_path": str(project_root),
        "generated_at": datetime.now().isoformat(),
        # Keep a stable, graph-friendly top-level shape.
        "nodes": graph_dict.get("nodes", []),
        "edges": graph_dict.get("edges", []),
        "links": graph_dict.get("edges", []),  # legacy alias
        "stats": graph_dict.get("stats", {}),
        # Extended payloads.
        "analysis": _safe_serialize(analysis),
        "style_summary": _safe_serialize(style_summary),
        "quick_wins": _safe_serialize(quick_wins),
        "graph_insights": _safe_serialize(insights),
    }

    if output_file:
        output_path = Path(output_file).expanduser()
    else:
        output_path = project_root / ".dss" / "analysis_graph.json"

    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(json.dumps(result, indent=2), encoding="utf-8")

    return result


def get_ast(file_path: str) -> Dict:
def export_project_context(project_path: str) -> Dict[str, Any]:
    """
    Gets the AST of a file using a node.js parser.
    Export a lightweight, AI-friendly project context as a JSON-serializable dict.

    This intentionally avoids embedding full source files.
    """
    log.info(f"Getting AST for {file_path}...")
    result = subprocess.run(
        ["node", str(parser_script_path), file_path],
        capture_output=True,
        text=True,
        check=True,
    )
    log.info("AST retrieved.")
    return json.loads(result.stdout)
    project_root = Path(project_path).expanduser().resolve()
    if not project_root.exists():
        raise FileNotFoundError(f"Project path not found: {project_root}")
    if not project_root.is_dir():
        raise NotADirectoryError(f"Project path is not a directory: {project_root}")

    analysis, graph, style_result, quick_wins = asyncio.run(_build_analysis(project_root))

def main():
    """
    Main function for the project analyzer.
    """
    import argparse
    graph_dict = graph.to_dict()
    hubs = graph.find_hubs()
    cycles = graph.find_circular_dependencies()
    orphans = graph.find_orphans()

    parser = argparse.ArgumentParser(description="Analyze a project.")
    parser.add_argument("path", help="The path to the project to analyze.")
    parser.add_argument("--output-graph", action="store_true", help="Output the dependency graph.")
    parser.add_argument("--prune", action="store_true", help="Prune the dependency graph.")
    parser.add_argument("--visualize", action="store_true", help="Visualize the dependency graph.")
    parser.add_argument("--export-context", help="Export the project context to a JSON file.")
    args = parser.parse_args()
    # Keep this small enough for prompt injection.
    components_preview = [
        {
            "name": c.name,
            "path": c.path,
            "type": c.type,
            "has_styles": c.has_styles,
            "props": c.props[:10],
        }
        for c in analysis.components[:50]
    ]

    analysis = analyze_project(args.path, args.output_graph, args.prune, args.visualize)
    token_candidates = style_result.get("token_candidates", [])
    token_candidates_preview = [_safe_serialize(c) for c in token_candidates[:25]]

    if args.export_context:
        export_project_context(analysis, args.export_context)
    quick_wins_preview = [_safe_serialize(w) for w in quick_wins[:25]]


if __name__ == "__main__":
    main()
    return {
        "project_path": str(project_root),
        "generated_at": datetime.now().isoformat(),
        "framework": analysis.framework.value,
        "framework_version": analysis.framework_version,
        "primary_styling": analysis.primary_styling.value if analysis.primary_styling else None,
        "stats": _safe_serialize(analysis.stats),
        "components": components_preview,
        "style_summary": _safe_serialize({k: v for k, v in style_result.items() if k != "token_candidates"}),
        "token_candidates": token_candidates_preview,
        "quick_wins": quick_wins_preview,
        "dependency_graph": {
            "stats": graph_dict.get("stats", {}),
            "orphans": orphans[:50],
            "hubs": hubs[:25],
            "cycles": cycles[:10],
        },
    }
@@ -25,7 +25,7 @@ class AtlassianAuth:

    On successful validation, we:
    1. Verify credentials against Atlassian API
    2. Store user in database
    2. Store user in JSON storage
    3. Generate JWT token
    """
@@ -106,56 +106,17 @@ class AtlassianAuth:
        # Hash the API token
        token_hash = self.hash_api_token(api_token)

        # Store or update user in database
        with get_connection() as conn:
            # Check if user exists
            existing = conn.execute(
                "SELECT id, email FROM users WHERE email = ?", (email,)
            ).fetchone()

            if existing:
                # Update existing user
                user_id = existing["id"]
                conn.execute(
                    """
                    UPDATE users
                    SET display_name = ?,
                        atlassian_url = ?,
                        atlassian_service = ?,
                        api_token_hash = ?,
                        last_login = ?
                    WHERE id = ?
                    """,
                    (
                        user_info["display_name"],
                        url,
                        service,
                        token_hash,
                        datetime.utcnow().isoformat(),
                        user_id,
                    ),
                )
            else:
                # Create new user
                cursor = conn.execute(
                    """
                    INSERT INTO users (
                        email, display_name, atlassian_url, atlassian_service,
                        api_token_hash, created_at, last_login
                    )
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                    """,
                    (
                        email,
                        user_info["display_name"],
                        url,
                        service,
                        token_hash,
                        datetime.utcnow().isoformat(),
                        datetime.utcnow().isoformat(),
                    ),
                )
                user_id = cursor.lastrowid
        from dss.storage.json_store import Users

        user_record = Users.upsert(
            email=email,
            display_name=user_info["display_name"],
            atlassian_url=url,
            atlassian_service=service,
            api_token_hash=token_hash,
            last_login=datetime.utcnow().isoformat(),
        )
        user_id = int(user_record["id"])

        # Generate JWT token
        expires_at = datetime.utcnow() + timedelta(hours=self.jwt_expiry_hours)
@@ -198,21 +159,23 @@ class AtlassianAuth:

    async def get_user_by_id(self, user_id: int) -> Optional[Dict[str, Any]]:
        """Get user information by ID."""
        with get_connection() as conn:
            user = conn.execute(
                """
                SELECT id, email, display_name, atlassian_url, atlassian_service,
                       created_at, last_login
                FROM users
                WHERE id = ?
                """,
                (user_id,),
            ).fetchone()

            if user:
                return dict(user)
        from dss.storage.json_store import Users

        user = Users.get(user_id)
        if not user:
            return None

        # Only return safe fields
        return {
            "id": user.get("id"),
            "email": user.get("email"),
            "display_name": user.get("display_name"),
            "atlassian_url": user.get("atlassian_url"),
            "atlassian_service": user.get("atlassian_service"),
            "created_at": user.get("created_at"),
            "last_login": user.get("last_login"),
        }


# Singleton instance
_auth_instance: Optional[AtlassianAuth] = None
@@ -266,13 +266,11 @@ class TimestampConflictResolver:


class DatabaseLockingStrategy:
    """Manages SQLite database locking during import operations.
    """Legacy-named scheduling/locking heuristics for bulk operations.

    Production Consideration: SQLite locks the entire database file
    during writes. Large imports can block other operations.

    Recommended: Schedule imports during low-traffic windows or use
    busy_timeout to make waiting explicit.
    DSS core storage is JSON-file based. This helper remains for:
    - recommending conservative locking/scheduling defaults
    - deciding when operations should run in background workers
    """

    # Configuration

@@ -283,11 +281,7 @@ class DatabaseLockingStrategy:
        self.busy_timeout_ms = busy_timeout_ms

    def get_pragmas(self) -> Dict[str, Any]:
        """Get recommended SQLite pragmas for import operations.

        Returns:
            Dict of pragma names and values
        """
        """Legacy API retained for compatibility (no-op for JSON storage)."""
        return {
            "journal_mode": "WAL",  # Write-Ahead Logging for concurrent access
            "busy_timeout": self.busy_timeout_ms,
@@ -2,11 +2,11 @@
DSSProjectService - High-level API for export/import operations with transaction safety.

This service provides:
1. Transactional wrapper for safe database operations
1. Transaction-like wrapper for safe operations
2. Integration point for API/CLI layers
3. Proper error handling and rollback
4. Background job scheduling for large operations
5. SQLite configuration management
5. Resource/scheduling heuristics (JSON-only storage)
"""

from contextlib import contextmanager

@@ -64,12 +64,11 @@ class MergeSummary:
class DSSProjectService:
    """Service layer for DSS project export/import operations.

    Provides transaction-safe operations with proper error handling,
    database locking management, and memory limit enforcement.
    Provides operation safety with proper error handling,
    scheduling heuristics, and memory limit enforcement.

    Production Features:
    - Transactional safety (rollback on error)
    - SQLite locking configuration
    - Best-effort safety (rollback on error)
    - Memory and resource limits
    - Background job scheduling for large operations
    - Comprehensive error handling

@@ -79,47 +78,19 @@ class DSSProjectService:
        self,
        busy_timeout_ms: int = DatabaseLockingStrategy.DEFAULT_BUSY_TIMEOUT_MS,
    ):
        # Legacy name: used as scheduling heuristic (no DB required).
        self.locking_strategy = DatabaseLockingStrategy(busy_timeout_ms)
        self.memory_manager = MemoryLimitManager()

    @contextmanager
    def _transaction(self):
        """Context manager for transaction-safe database operations.

        Handles:
        - SQLite locking with busy_timeout
        - Automatic rollback on error
        - Connection cleanup
        """
        conn = None
        try:
            # Get connection with locking pragmas
            conn = get_connection()

            # Apply locking pragmas
            pragmas = self.locking_strategy.get_pragmas()
            cursor = conn.cursor()
            for pragma_name, pragma_value in pragmas.items():
                if isinstance(pragma_value, int):
                    cursor.execute(f"PRAGMA {pragma_name} = {pragma_value}")
                else:
                    cursor.execute(f"PRAGMA {pragma_name} = '{pragma_value}'")

            yield conn

            # Commit on success
            conn.commit()

        except Exception as e:
            # Rollback on error
            if conn:
                conn.rollback()
            raise e

        finally:
            # Cleanup
            if conn:
                conn.close()
        """
        Context manager for grouping operations.

        DSS uses JSON-file storage; there is no DB transaction. This wrapper exists
        to preserve the service API while allowing future locking/resource limits.
        """
        yield

    def export_project(
        self,
dss/mcp/__init__.py (new file, 24 lines)
@@ -0,0 +1,24 @@
"""
|
||||
DSS MCP (Model Context Protocol) integration.
|
||||
|
||||
This package contains the shared tool registry and execution layer used by:
|
||||
- The headless DSS API server (Admin UI + AI chat)
|
||||
- The local MCP stdio server (Claude Code / desktop clients)
|
||||
|
||||
Design goal:
|
||||
One canonical definition of tools and their implementations, with optional
|
||||
remote proxying via the DSS headless server.
|
||||
"""
|
||||
|
||||
from dss.mcp.config import integration_config, mcp_config, validate_config
|
||||
from dss.mcp.handler import MCPContext, MCPHandler, get_mcp_handler
|
||||
|
||||
__all__ = [
|
||||
"MCPContext",
|
||||
"MCPHandler",
|
||||
"get_mcp_handler",
|
||||
"mcp_config",
|
||||
"integration_config",
|
||||
"validate_config",
|
||||
]
|
||||
|
||||
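For orientation, consuming this package looks roughly like the sketch below. The handler API (`list_tools`, `execute_tool`, `.to_dict()`) matches what `dss/mcp/server.py` uses later in this commit; the tool name `dss_project_summary` is illustrative, not confirmed by the diff:

```python
# Minimal sketch of the shared MCP layer; the tool name is an assumption.
import asyncio

from dss.mcp import MCPContext, get_mcp_handler, validate_config

for warning in validate_config():
    print(f"[config] {warning}")

handler = get_mcp_handler()
print([t["name"] for t in handler.list_tools(include_details=True)])

ctx = MCPContext(project_id="demo", user_id=1)
result = asyncio.run(handler.execute_tool("dss_project_summary", {}, ctx))
print(result.to_dict())
```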
dss/mcp/config.py (new file, 106 lines)
@@ -0,0 +1,106 @@
"""
|
||||
DSS MCP configuration.
|
||||
|
||||
Used by the headless server to expose tools to:
|
||||
- the Admin UI (tool browser + execution)
|
||||
- the AI chat endpoint (tool calling)
|
||||
|
||||
Also used by the local MCP server when proxying requests to a headless server.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
def _get_env(name: str, default: Optional[str] = None) -> Optional[str]:
|
||||
value = os.getenv(name)
|
||||
if value is None:
|
||||
return default
|
||||
value = value.strip()
|
||||
return value if value else default
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class MCPConfig:
|
||||
"""Core MCP runtime config for DSS."""
|
||||
|
||||
HOST: str = _get_env("DSS_MCP_HOST", "127.0.0.1") or "127.0.0.1"
|
||||
PORT: int = int(_get_env("DSS_MCP_PORT", "6222") or "6222")
|
||||
|
||||
# Tool execution / context
|
||||
CONTEXT_CACHE_TTL: int = int(_get_env("DSS_MCP_CONTEXT_CACHE_TTL", "300") or "300")
|
||||
|
||||
# Circuit breaker (used by handler; conservative defaults)
|
||||
CIRCUIT_BREAKER_FAILURE_THRESHOLD: int = int(
|
||||
_get_env("DSS_MCP_CIRCUIT_BREAKER_FAILURE_THRESHOLD", "5") or "5"
|
||||
)
|
||||
CIRCUIT_BREAKER_TIMEOUT_SECONDS: int = int(
|
||||
_get_env("DSS_MCP_CIRCUIT_BREAKER_TIMEOUT_SECONDS", "60") or "60"
|
||||
)
|
||||
|
||||
# Optional encryption for at-rest integration configs
|
||||
ENCRYPTION_KEY: Optional[str] = _get_env("DSS_MCP_ENCRYPTION_KEY") or _get_env(
|
||||
"DSS_ENCRYPTION_KEY"
|
||||
)
|
||||
|
||||
# Remote proxy (local MCP process -> headless server)
|
||||
API_URL: Optional[str] = _get_env("DSS_API_URL") or _get_env("DSS_SERVER_URL")
|
||||
|
||||
def get_cipher(self):
|
||||
"""Return a Fernet cipher if configured, otherwise None."""
|
||||
if not self.ENCRYPTION_KEY:
|
||||
return None
|
||||
try:
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
return Fernet(self.ENCRYPTION_KEY.encode())
|
||||
except Exception:
|
||||
# Invalid key format or missing dependency
|
||||
return None
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class IntegrationConfig:
|
||||
"""Integration credentials and defaults (read from environment)."""
|
||||
|
||||
FIGMA_TOKEN: Optional[str] = _get_env("FIGMA_TOKEN") or _get_env("DSS_FIGMA_TOKEN")
|
||||
ANTHROPIC_API_KEY: Optional[str] = _get_env("ANTHROPIC_API_KEY") or _get_env(
|
||||
"DSS_ANTHROPIC_API_KEY"
|
||||
)
|
||||
|
||||
# Defaults for Atlassian integrations (optional)
|
||||
JIRA_URL: Optional[str] = _get_env("JIRA_URL") or _get_env("DSS_JIRA_URL")
|
||||
CONFLUENCE_URL: Optional[str] = _get_env("CONFLUENCE_URL") or _get_env("DSS_CONFLUENCE_URL")
|
||||
|
||||
|
||||
mcp_config = MCPConfig()
|
||||
integration_config = IntegrationConfig()
|
||||
|
||||
|
||||
def validate_config() -> List[str]:
|
||||
"""Return user-facing warnings for missing/invalid configuration."""
|
||||
warnings: List[str] = []
|
||||
|
||||
if not mcp_config.ENCRYPTION_KEY:
|
||||
warnings.append(
|
||||
"No encryption key configured (set DSS_MCP_ENCRYPTION_KEY) – integration configs will be stored in plaintext."
|
||||
)
|
||||
elif mcp_config.get_cipher() is None:
|
||||
warnings.append(
|
||||
"Invalid DSS_MCP_ENCRYPTION_KEY – expected a Fernet key (urlsafe base64). Integration encryption is disabled."
|
||||
)
|
||||
|
||||
if not integration_config.FIGMA_TOKEN:
|
||||
warnings.append("FIGMA_TOKEN not configured – Figma tools will run in mock mode.")
|
||||
|
||||
if not integration_config.ANTHROPIC_API_KEY:
|
||||
warnings.append("ANTHROPIC_API_KEY not configured – AI chat/tool calling may be unavailable.")
|
||||
|
||||
if mcp_config.API_URL and not mcp_config.API_URL.startswith(("http://", "https://")):
|
||||
warnings.append("DSS_API_URL should include scheme (http:// or https://).")
|
||||
|
||||
return warnings
|
||||
|
||||
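Since `get_cipher()` silently disables encryption on a malformed key, it is worth noting how a valid key is produced. This is standard `cryptography` usage rather than something the diff itself shows:

```python
# Generate a valid Fernet key for DSS_MCP_ENCRYPTION_KEY (urlsafe base64, 32 bytes).
from cryptography.fernet import Fernet

key = Fernet.generate_key().decode()
print(f"export DSS_MCP_ENCRYPTION_KEY={key}")

# Round-trip check mirroring what MCPConfig.get_cipher() would hand back.
cipher = Fernet(key.encode())
assert cipher.decrypt(cipher.encrypt(b"figma-token")) == b"figma-token"
```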
dss/mcp/guides.py (new file, 304 lines)
@@ -0,0 +1,304 @@
"""
|
||||
MCP guide library (skills + command docs).
|
||||
|
||||
Goal: make Claude plugin "skills" and "commands" discoverable from any MCP client
|
||||
(Claude Code, Codex CLI, Gemini CLI, etc.), without requiring the Claude plugin system.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Guide:
|
||||
id: str
|
||||
type: str # "skill" | "command"
|
||||
name: str
|
||||
description: str
|
||||
source_path: str
|
||||
related_tools: List[str]
|
||||
meta: Dict[str, Any]
|
||||
|
||||
|
||||
def _repo_root() -> Path:
|
||||
# dss/mcp/guides.py -> dss/mcp -> dss -> repo root
|
||||
return Path(__file__).resolve().parent.parent.parent
|
||||
|
||||
|
||||
def _split_frontmatter(text: str) -> Tuple[Dict[str, Any], str]:
|
||||
"""
|
||||
Minimal YAML front-matter splitter/parser.
|
||||
|
||||
We only need the common `name:` and `description:` fields used in DSS guides,
|
||||
plus optional list fields like `globs:` / `arguments:`.
|
||||
"""
|
||||
lines = text.splitlines()
|
||||
if not lines or lines[0].strip() != "---":
|
||||
return {}, text
|
||||
|
||||
end_idx: Optional[int] = None
|
||||
for i in range(1, len(lines)):
|
||||
if lines[i].strip() == "---":
|
||||
end_idx = i
|
||||
break
|
||||
if end_idx is None:
|
||||
return {}, text
|
||||
|
||||
fm_lines = lines[1:end_idx]
|
||||
body = "\n".join(lines[end_idx + 1 :]).lstrip("\n")
|
||||
|
||||
def parse_scalar(value: str) -> Any:
|
||||
v = value.strip()
|
||||
if v == "":
|
||||
return ""
|
||||
|
||||
if (v.startswith('"') and v.endswith('"')) or (v.startswith("'") and v.endswith("'")):
|
||||
return v[1:-1]
|
||||
|
||||
lower = v.lower()
|
||||
if lower in {"true", "yes", "on"}:
|
||||
return True
|
||||
if lower in {"false", "no", "off"}:
|
||||
return False
|
||||
if lower in {"null", "~"}:
|
||||
return None
|
||||
|
||||
try:
|
||||
return int(v)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
return float(v)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return v
|
||||
|
||||
def indent_count(line: str) -> int:
|
||||
return len(line) - len(line.lstrip(" "))
|
||||
|
||||
def parse_block(block_lines: List[str]) -> Any:
|
||||
# Determine base indentation for the block.
|
||||
first = next((ln for ln in block_lines if ln.strip()), "")
|
||||
if not first:
|
||||
return []
|
||||
base_indent = indent_count(first)
|
||||
|
||||
# List block
|
||||
if first[base_indent:].startswith("- "):
|
||||
items: List[Any] = []
|
||||
i = 0
|
||||
while i < len(block_lines):
|
||||
line = block_lines[i]
|
||||
if not line.strip():
|
||||
i += 1
|
||||
continue
|
||||
if indent_count(line) < base_indent:
|
||||
break
|
||||
|
||||
stripped = line[base_indent:]
|
||||
if not stripped.startswith("- "):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
item_head = stripped[2:].strip()
|
||||
|
||||
# Map item (e.g. "- name: flags")
|
||||
if ":" in item_head:
|
||||
item: Dict[str, Any] = {}
|
||||
k, v = item_head.split(":", 1)
|
||||
item[k.strip()] = parse_scalar(v.strip())
|
||||
i += 1
|
||||
|
||||
# Continuation lines for this list item are more indented than the dash.
|
||||
while i < len(block_lines):
|
||||
cont = block_lines[i]
|
||||
if not cont.strip():
|
||||
i += 1
|
||||
continue
|
||||
cont_indent = indent_count(cont)
|
||||
if cont_indent <= base_indent:
|
||||
break
|
||||
cont_str = cont.strip()
|
||||
if cont_str.startswith("#"):
|
||||
i += 1
|
||||
continue
|
||||
if cont_str.startswith("- "):
|
||||
# Nested lists inside list items are not supported in this minimal parser.
|
||||
i += 1
|
||||
continue
|
||||
if ":" in cont_str:
|
||||
ck, cv = cont_str.split(":", 1)
|
||||
item[ck.strip()] = parse_scalar(cv.strip())
|
||||
i += 1
|
||||
|
||||
items.append(item)
|
||||
continue
|
||||
|
||||
# Scalar item
|
||||
items.append(parse_scalar(item_head))
|
||||
i += 1
|
||||
|
||||
# Skip any continuation lines for multi-line scalars (rare in these guides).
|
||||
while i < len(block_lines):
|
||||
cont = block_lines[i]
|
||||
if not cont.strip():
|
||||
i += 1
|
||||
continue
|
||||
if indent_count(cont) <= base_indent:
|
||||
break
|
||||
i += 1
|
||||
|
||||
return items
|
||||
|
||||
# Map block (not currently used by DSS guides)
|
||||
meta_map: Dict[str, Any] = {}
|
||||
for raw in block_lines:
|
||||
if not raw.strip():
|
||||
continue
|
||||
if indent_count(raw) < base_indent:
|
||||
continue
|
||||
line = raw[base_indent:].strip()
|
||||
if ":" not in line:
|
||||
continue
|
||||
k, v = line.split(":", 1)
|
||||
meta_map[k.strip()] = parse_scalar(v.strip())
|
||||
return meta_map
|
||||
|
||||
meta: Dict[str, Any] = {}
|
||||
i = 0
|
||||
while i < len(fm_lines):
|
||||
raw = fm_lines[i].rstrip()
|
||||
if not raw.strip() or raw.lstrip().startswith("#"):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# Only parse top-level keys (no leading spaces).
|
||||
if raw.startswith(" "):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
if ":" not in raw:
|
||||
i += 1
|
||||
continue
|
||||
|
||||
key, value = raw.split(":", 1)
|
||||
key = key.strip()
|
||||
value = value.strip()
|
||||
|
||||
if value != "":
|
||||
meta[key] = parse_scalar(value)
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# Block value (indented list/map)
|
||||
i += 1
|
||||
block: List[str] = []
|
||||
while i < len(fm_lines):
|
||||
nxt = fm_lines[i].rstrip("\n")
|
||||
if not nxt.strip():
|
||||
i += 1
|
||||
continue
|
||||
if not nxt.startswith(" "):
|
||||
break
|
||||
block.append(nxt)
|
||||
i += 1
|
||||
|
||||
meta[key] = parse_block(block)
|
||||
|
||||
return meta, body
|
||||
|
||||
|
||||
_TOOL_REF_RE = re.compile(r"`(dss_[a-z0-9_]+)`", re.IGNORECASE)
|
||||
|
||||
|
||||
def _extract_related_tools(markdown: str) -> List[str]:
|
||||
tools = {m.group(1) for m in _TOOL_REF_RE.finditer(markdown or "")}
|
||||
return sorted(tools)
|
||||
|
||||
|
||||
def list_guides(kind: str = "all") -> List[Guide]:
|
||||
"""
|
||||
List available guides.
|
||||
|
||||
kind:
|
||||
- "all" (default)
|
||||
- "skill"
|
||||
- "command"
|
||||
"""
|
||||
root = _repo_root()
|
||||
plugin_root = root / "dss-claude-plugin"
|
||||
guides: List[Guide] = []
|
||||
|
||||
if kind in {"all", "command"}:
|
||||
commands_dir = plugin_root / "commands"
|
||||
if commands_dir.exists():
|
||||
for md in sorted(commands_dir.glob("*.md")):
|
||||
raw = md.read_text(encoding="utf-8")
|
||||
meta, body = _split_frontmatter(raw)
|
||||
command_name = str(meta.get("name") or md.stem)
|
||||
description = str(meta.get("description") or "")
|
||||
guides.append(
|
||||
Guide(
|
||||
id=f"command:{command_name}",
|
||||
type="command",
|
||||
name=command_name,
|
||||
description=description,
|
||||
source_path=str(md),
|
||||
related_tools=_extract_related_tools(body),
|
||||
meta=meta,
|
||||
)
|
||||
)
|
||||
|
||||
if kind in {"all", "skill"}:
|
||||
skills_dir = plugin_root / "skills"
|
||||
if skills_dir.exists():
|
||||
for skill_dir in sorted([p for p in skills_dir.iterdir() if p.is_dir()]):
|
||||
md = skill_dir / "SKILL.md"
|
||||
if not md.exists():
|
||||
continue
|
||||
raw = md.read_text(encoding="utf-8")
|
||||
meta, body = _split_frontmatter(raw)
|
||||
display_name = str(meta.get("name") or skill_dir.name)
|
||||
description = str(meta.get("description") or "")
|
||||
guides.append(
|
||||
Guide(
|
||||
id=f"skill:{skill_dir.name}",
|
||||
type="skill",
|
||||
name=display_name,
|
||||
description=description,
|
||||
source_path=str(md),
|
||||
related_tools=_extract_related_tools(body),
|
||||
meta=meta,
|
||||
)
|
||||
)
|
||||
|
||||
return guides
|
||||
|
||||
|
||||
def get_guide(guide_id: str, include_frontmatter: bool = False) -> Dict[str, Any]:
|
||||
guides = list_guides("all")
|
||||
match = next((g for g in guides if g.id == guide_id), None)
|
||||
if not match:
|
||||
raise ValueError(f"Guide not found: {guide_id}")
|
||||
|
||||
raw = Path(match.source_path).read_text(encoding="utf-8")
|
||||
meta, body = _split_frontmatter(raw)
|
||||
|
||||
content = raw if include_frontmatter else body
|
||||
|
||||
return {
|
||||
"id": match.id,
|
||||
"type": match.type,
|
||||
"name": match.name,
|
||||
"description": match.description,
|
||||
"source_path": match.source_path,
|
||||
"related_tools": match.related_tools,
|
||||
"meta": meta,
|
||||
"content": content,
|
||||
}
|
||||
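A quick sanity check of the guide registry, run from the repo root (this only uses the two public functions above, and fetches whichever guide happens to be listed first):

```python
from dss.mcp.guides import get_guide, list_guides

guides = list_guides("all")
for g in guides:
    print(g.id, "-", g.description[:60], "tools:", g.related_tools)

# Fetch one guide body without front-matter.
if guides:
    doc = get_guide(guides[0].id)
    print(doc["content"][:200])
```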
dss/mcp/handler.py (new file, 1051 lines)
File diff suppressed because it is too large

dss/mcp/server.py (new file, 113 lines)
@@ -0,0 +1,113 @@
"""
|
||||
DSS MCP stdio server.
|
||||
|
||||
This is the local process that Claude Code spawns. It can run in:
|
||||
- Local mode: execute tools on the local filesystem directly.
|
||||
- Proxy mode: forward tool execution to a headless DSS server over HTTP.
|
||||
|
||||
Proxy mode is enabled by setting `DSS_API_URL` (or `DSS_SERVER_URL`).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import httpx
|
||||
|
||||
from dss.mcp.config import mcp_config
|
||||
from dss.mcp.handler import MCPContext, get_mcp_handler
|
||||
|
||||
try:
|
||||
from mcp.server import Server
|
||||
from mcp.server.stdio import stdio_server
|
||||
from mcp.types import TextContent, Tool
|
||||
except ImportError as e: # pragma: no cover
|
||||
raise SystemExit("MCP SDK not found. Install with: pip install mcp") from e
|
||||
|
||||
|
||||
server = Server("dss")
|
||||
|
||||
|
||||
def _api_base_url() -> str:
|
||||
api_url = mcp_config.API_URL or ""
|
||||
return api_url.rstrip("/")
|
||||
|
||||
|
||||
def _default_context() -> MCPContext:
|
||||
project_id = os.getenv("DSS_PROJECT_ID") or None
|
||||
user_id = os.getenv("DSS_USER_ID")
|
||||
return MCPContext(project_id=project_id, user_id=int(user_id) if user_id and user_id.isdigit() else None)
|
||||
|
||||
|
||||
async def _proxy_list_tools() -> List[Dict[str, Any]]:
|
||||
base = _api_base_url()
|
||||
if not base:
|
||||
raise ValueError("DSS_API_URL not configured for proxy mode")
|
||||
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||
resp = await client.get(f"{base}/api/mcp/tools", params={"include_details": "true"})
|
||||
resp.raise_for_status()
|
||||
return resp.json()
|
||||
|
||||
|
||||
async def _proxy_execute_tool(tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
|
||||
base = _api_base_url()
|
||||
if not base:
|
||||
raise ValueError("DSS_API_URL not configured for proxy mode")
|
||||
|
||||
ctx = _default_context()
|
||||
payload: Dict[str, Any] = {
|
||||
"arguments": arguments or {},
|
||||
"project_id": ctx.project_id or "",
|
||||
"user_id": ctx.user_id or 1,
|
||||
}
|
||||
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
resp = await client.post(f"{base}/api/mcp/tools/{tool_name}/execute", json=payload)
|
||||
resp.raise_for_status()
|
||||
return resp.json()
|
||||
|
||||
|
||||
@server.list_tools()
|
||||
async def list_tools() -> List[Tool]:
|
||||
# Proxy mode: reflect tool list from headless server.
|
||||
if _api_base_url():
|
||||
tools = await _proxy_list_tools()
|
||||
else:
|
||||
tools = get_mcp_handler().list_tools(include_details=True)
|
||||
|
||||
return [
|
||||
Tool(
|
||||
name=t["name"],
|
||||
description=t.get("description", ""),
|
||||
inputSchema=t.get("input_schema", {"type": "object", "properties": {}}),
|
||||
)
|
||||
for t in tools
|
||||
]
|
||||
|
||||
|
||||
@server.call_tool()
|
||||
async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
|
||||
if _api_base_url():
|
||||
result = await _proxy_execute_tool(name, arguments or {})
|
||||
return [TextContent(type="text", text=json.dumps(result, indent=2))]
|
||||
|
||||
handler = get_mcp_handler()
|
||||
exec_result = await handler.execute_tool(name, arguments or {}, _default_context())
|
||||
return [TextContent(type="text", text=json.dumps(exec_result.to_dict(), indent=2))]
|
||||
|
||||
|
||||
async def _serve() -> None:
|
||||
async with stdio_server() as streams:
|
||||
await server.run(*streams)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
asyncio.run(_serve())
|
||||
|
||||
|
||||
if __name__ == "__main__": # pragma: no cover
|
||||
main()
|
||||
|
||||
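Because the module has a `main()` entry point, clients can spawn it with `python -m dss.mcp.server`. A sketch of generating a client config follows; the `.claude/mcp.json` location and the venv interpreter path are assumptions about your checkout, not facts from this diff:

```python
# Hypothetical helper: write a Claude Code MCP entry for the unified server.
import json
from pathlib import Path

repo = Path.cwd()  # assumes you run this from the DSS repo root
config = {
    "mcpServers": {
        "dss": {
            "command": str(repo / ".venv" / "bin" / "python3"),
            "args": ["-m", "dss.mcp.server"],
            # Add DSS_API_URL to env instead to run in proxy mode.
            "env": {"DSS_HOME": str(repo / ".dss")},
        }
    }
}
target = repo / ".claude" / "mcp.json"
target.parent.mkdir(exist_ok=True)
target.write_text(json.dumps(config, indent=2))
```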
@@ -12,6 +12,7 @@ Hierarchy:

from dataclasses import dataclass
from pathlib import Path
import os
from typing import Optional

# =============================================================================

@@ -40,12 +41,26 @@ DSS_FIGMA_REFERENCE = DSSFigmaReference()
# =============================================================================

# DSS installation paths
DSS_ROOT = Path("/home/overbits/dss")
DSS_MVP1 = DSS_ROOT / "dss-mvp1"
DSS_CORE_DIR = DSS_MVP1 / "dss" / "core_tokens"
def _resolve_dss_root() -> Path:
    # dss/project/core.py -> dss/project -> dss -> repo root
    return Path(__file__).resolve().parents[2]


def _resolve_dss_home() -> Path:
    env = os.environ.get("DSS_HOME")
    if env:
        return Path(env).expanduser()
    local = Path.cwd() / ".dss"
    if local.exists():
        return local
    return Path.home() / ".dss"


DSS_ROOT = _resolve_dss_root()
DSS_CORE_DIR = Path(__file__).resolve().parents[1] / "core_tokens"

# User data paths
DSS_USER_DIR = Path.home() / ".dss"
DSS_USER_DIR = _resolve_dss_home()
DSS_CACHE_DIR = DSS_USER_DIR / "cache"
DSS_REGISTRY_FILE = DSS_USER_DIR / "registry.json"
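The same `_resolve_dss_home()` precedence (env var, then project-local `./.dss`, then `~/.dss`) recurs in several modules below. A small self-contained check of that behavior, assuming no other state:

```python
# Resolution order: DSS_HOME env var > ./.dss (if present) > ~/.dss
import os
import tempfile
from pathlib import Path


def resolve_dss_home() -> Path:
    env = os.environ.get("DSS_HOME")
    if env:
        return Path(env).expanduser()
    local = Path.cwd() / ".dss"
    if local.exists():
        return local
    return Path.home() / ".dss"


with tempfile.TemporaryDirectory() as tmp:
    os.environ["DSS_HOME"] = tmp
    assert resolve_dss_home() == Path(tmp)  # env var always wins
    del os.environ["DSS_HOME"]
    # Falls back to ./.dss only when that directory actually exists.
    assert resolve_dss_home() in {Path.cwd() / ".dss", Path.home() / ".dss"}
```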
@@ -26,8 +26,22 @@ from dss.project.sync import get_dss_core_tokens
logger = logging.getLogger(__name__)

# Default location for DSS projects registry
DSS_PROJECTS_DIR = Path.home() / ".dss" / "projects"
DSS_REGISTRY_FILE = Path.home() / ".dss" / "registry.json"
def _resolve_dss_home() -> Path:
    env = os.environ.get("DSS_HOME")
    if env:
        return Path(env).expanduser()

    cwd = Path.cwd()
    local = cwd / ".dss"
    if local.exists():
        return local

    return Path.home() / ".dss"


_dss_home = _resolve_dss_home()
DSS_PROJECTS_DIR = _dss_home / "projects"
DSS_REGISTRY_FILE = _dss_home / "registry.json"


class ProjectRegistry:
@@ -19,7 +19,7 @@ class ProjectManager:
    """
    Manages project registry with root path validation.

    Works with the existing Projects database class to add root_path support.
    Works with the existing Projects storage class to add root_path support.
    Validates paths exist and are accessible before registration.
    """

@@ -28,7 +28,7 @@ class ProjectManager:
        Initialize project manager.

        Args:
            projects_db: Projects database class (from dss.storage.database)
            projects_db: Projects storage class (from dss.storage.json_store)
            config_service: Optional ConfigService for config initialization
        """
        self.db = projects_db

@@ -73,7 +73,7 @@ class ProjectManager:

        project_id = str(uuid.uuid4())[:8]

        # Create project in database
        # Create project in storage
        project = self.db.create(
            id=project_id, name=name, description=description, figma_file_key=figma_file_key
        )
@@ -6,6 +6,7 @@ Includes test utilities and reset functionality

import shutil
import subprocess
import os
from pathlib import Path
from typing import Dict, Optional

@@ -13,6 +14,18 @@ from pydantic import ConfigDict
from pydantic_settings import BaseSettings


def _resolve_dss_home() -> Path:
    env = os.environ.get("DSS_HOME")
    if env:
        return Path(env).expanduser()

    local = Path.cwd() / ".dss"
    if local.exists():
        return local

    return Path.home() / ".dss"


class DSSSettings(BaseSettings):
    """DSS Configuration Settings."""

@@ -22,8 +35,9 @@ class DSSSettings(BaseSettings):
    PROJECT_ROOT: Path = Path(__file__).parent.parent
    DSS_DIR: Path = Path(__file__).parent
    TESTS_DIR: Path = PROJECT_ROOT / "tests"
    CACHE_DIR: Path = Path.home() / ".dss" / "cache"
    DATA_DIR: Path = Path.home() / ".dss" / "data"
    DSS_HOME: Path = _resolve_dss_home()
    CACHE_DIR: Path = DSS_HOME / "cache"
    DATA_DIR: Path = DSS_HOME / "data"

    # API Configuration
    ANTHROPIC_API_KEY: Optional[str] = None

@@ -31,11 +45,7 @@ class DSSSettings(BaseSettings):
    FIGMA_FILE_KEY: Optional[str] = None
    FIGMA_CACHE_TTL: int = 300  # 5 minutes

    # Database
    DATABASE_PATH: Path = Path.home() / ".dss" / "dss.db"

    # Test Configuration
    TEST_DATABASE_PATH: Path = Path.home() / ".dss" / "test.db"
    USE_MOCK_APIS: bool = True

    # Server Configuration (DSS Ports: API=6220, Admin=6221, MCP=6222, Storybook=6226)

@@ -185,23 +195,17 @@ class DSSManager:
            except Exception as e:
                results["errors"].append(f"Failed to clear Figma cache: {e}")

        # Reset database
        db_path = self.settings.DATABASE_PATH
        if db_path.exists():
            try:
                db_path.unlink()
                results["deleted"].append(str(db_path))
            except Exception as e:
                results["errors"].append(f"Failed to reset database: {e}")

        # Clear test database
        test_db_path = self.settings.TEST_DATABASE_PATH
        if test_db_path.exists():
            try:
                test_db_path.unlink()
                results["deleted"].append(str(test_db_path))
            except Exception as e:
                results["errors"].append(f"Failed to clear test database: {e}")
        # Legacy cleanup (pre-JSON-only versions)
        for legacy_db in [
            self.settings.DSS_HOME / "dss.db",
            self.settings.DSS_HOME / "test.db",
        ]:
            if legacy_db.exists():
                try:
                    legacy_db.unlink()
                    results["deleted"].append(str(legacy_db))
                except Exception as e:
                    results["errors"].append(f"Failed to remove legacy file: {e}")

        # Clear Python cache
        for pycache in self.project_root.rglob("__pycache__"):

@@ -236,7 +240,8 @@ class DSSManager:
            "dss_dir": str(self.dss_dir),
            "tests_dir": str(self.settings.TESTS_DIR),
            "cache_dir": str(self.settings.CACHE_DIR),
            "database_path": str(self.settings.DATABASE_PATH),
            "dss_home": str(self.settings.DSS_HOME),
            "data_dir": str(self.settings.DATA_DIR),
            "has_anthropic_key": bool(self.settings.ANTHROPIC_API_KEY),
            "has_figma_token": bool(self.settings.FIGMA_TOKEN),
            "use_mock_apis": self.settings.USE_MOCK_APIS,

@@ -360,8 +365,9 @@ Management Commands:
        print(f"  Project root: {info['project_root']}")
        print(f"  DSS directory: {info['dss_dir']}")
        print(f"  Tests directory: {info['tests_dir']}")
        print(f"  DSS home: {info['dss_home']}")
        print(f"  Data directory: {info['data_dir']}")
        print(f"  Cache directory: {info['cache_dir']}")
        print(f"  Database path: {info['database_path']}")
        print(
            f"  Anthropic API: {'Configured' if info['has_anthropic_key'] else 'Not configured'}"
        )
@@ -3,13 +3,13 @@ DSS Status Dashboard - Comprehensive system status visualization.

Provides a beautiful ASCII art dashboard that aggregates data from:
- DSSManager (system info, dependencies)
- Database stats (projects, components, styles)
- Storage stats (projects, components, styles)
- ActivityLog (recent activity)
- SyncHistory (sync operations)
- QuickWinFinder (improvement opportunities)

Expert-validated design with:
- Optimized database queries using LIMIT
- Optimized storage queries using LIMIT
- Modular render methods for maintainability
- Named constants for health score weights
- Dynamic terminal width support

@@ -23,7 +23,7 @@ from typing import Any, Dict, List, Optional
# Health score weight constants (expert recommendation)
HEALTH_WEIGHT_DEPENDENCIES = 0.40
HEALTH_WEIGHT_INTEGRATIONS = 0.25
HEALTH_WEIGHT_DATABASE = 0.20
HEALTH_WEIGHT_STORAGE = 0.20
HEALTH_WEIGHT_ACTIVITY = 0.15


@@ -70,7 +70,7 @@ class StatusData:

    # Configuration
    project_root: str = ""
    database_path: str = ""
    storage_path: str = ""
    cache_dir: str = ""
    figma_configured: bool = False
    anthropic_configured: bool = False

@@ -136,7 +136,7 @@ class StatusDashboard:
            "quick_wins": {"count": data.quick_wins_count, "items": data.quick_wins},
            "configuration": {
                "project_root": data.project_root,
                "database": data.database_path,
                "storage": data.storage_path,
                "cache": data.cache_dir,
                "figma_configured": data.figma_configured,
                "anthropic_configured": data.anthropic_configured,

@@ -159,7 +159,7 @@ class StatusDashboard:
        # System info
        info = self._manager.get_system_info()
        data.project_root = info["project_root"]
        data.database_path = info["database_path"]
        data.storage_path = info["data_dir"]
        data.cache_dir = info["cache_dir"]
        data.figma_configured = info["has_figma_token"]
        data.anthropic_configured = info["has_anthropic_key"]

@@ -195,30 +195,28 @@ class StatusDashboard:
            )
        )

        # Database stats
        # Storage stats
        try:
            from dss.storage.json_store import ActivityLog, Projects, SyncHistory, get_stats

            stats = get_stats()
            data.projects_count = stats.get("projects", 0)
            data.projects_active = stats.get("projects_active", 0)
            data.components_count = stats.get("components", 0)
            data.styles_count = stats.get("styles", 0)
            data.tokens_count = stats.get("tokens", 0)

            # Database size metric
            db_size = stats.get("db_size_mb", 0)
            # Storage size metric
            storage_size = stats.get("total_size_mb", 0)
            data.health_metrics.append(
                HealthMetric(
                    name="Database",
                    status="ok" if db_size < 100 else "warning",
                    value=f"{db_size} MB",
                    category="database",
                    name="Storage",
                    status="ok" if storage_size < 500 else "warning",
                    value=f"{storage_size} MB",
                    category="storage",
                )
            )

            # Projects
            projects = Projects.list()
            data.projects_active = len([p for p in projects if p.get("status") == "active"])

            # Recent activity (OPTIMIZED: use limit parameter, not slice)
            # Expert recommendation: avoid [:5] slicing which fetches all records
            activities = ActivityLog.recent(limit=5)

@@ -226,12 +224,12 @@ class StatusDashboard:
                {
                    "action": a.get("action", ""),
                    "description": a.get("description", ""),
                    "created_at": a.get("created_at", ""),
                    "created_at": a.get("timestamp", ""),
                    "category": a.get("category", ""),
                }
                for a in activities
            ]
            data.total_activities = ActivityLog.count()
            data.total_activities = ActivityLog.count(days=30)

            # Recent syncs (OPTIMIZED: use limit parameter)
            syncs = SyncHistory.recent(limit=3)

@@ -248,10 +246,10 @@ class StatusDashboard:
        except Exception as e:
            data.health_metrics.append(
                HealthMetric(
                    name="Database",
                    name="Storage",
                    status="error",
                    value=f"Error: {str(e)[:30]}",
                    category="database",
                    category="storage",
                )
            )

@@ -271,7 +269,7 @@ class StatusDashboard:
        Uses weighted components:
        - Dependencies: 40%
        - Integrations: 25%
        - Database: 20%
        - Storage: 20%
        - Activity: 15%
        """
        # Dependencies score (40%)

@@ -288,12 +286,12 @@ class StatusDashboard:
        else:
            int_ok = 0

        # Database score (20%)
        db_metrics = [m for m in data.health_metrics if m.category == "database"]
        if db_metrics:
            db_ok = sum(1 for m in db_metrics if m.status == "ok") / len(db_metrics)
        # Storage score (20%)
        storage_metrics = [m for m in data.health_metrics if m.category == "storage"]
        if storage_metrics:
            storage_ok = sum(1 for m in storage_metrics if m.status == "ok") / len(storage_metrics)
        else:
            db_ok = 0
            storage_ok = 0

        # Activity score (15%) - based on having recent data
        activity_ok = 1.0 if data.projects_count > 0 or data.components_count > 0 else 0.5

@@ -302,7 +300,7 @@ class StatusDashboard:
        score = (
            deps_ok * HEALTH_WEIGHT_DEPENDENCIES
            + int_ok * HEALTH_WEIGHT_INTEGRATIONS
            + db_ok * HEALTH_WEIGHT_DATABASE
            + storage_ok * HEALTH_WEIGHT_STORAGE
            + activity_ok * HEALTH_WEIGHT_ACTIVITY
        ) * 100

@@ -409,12 +407,12 @@ class StatusDashboard:
            int_line += f"{icon} {i.name} ({i.value}) "
        lines.append("\u2502" + int_line[:width].ljust(width) + "\u2502")

        # Database
        db = next((m for m in data.health_metrics if m.category == "database"), None)
        if db:
            db_icon = "\u2705" if db.status == "ok" else "\u26a0\ufe0f"
            db_line = f"  Database: {db_icon} {db.value}"
            lines.append("\u2502" + db_line.ljust(width) + "\u2502")
        # Storage
        storage = next((m for m in data.health_metrics if m.category == "storage"), None)
        if storage:
            storage_icon = "\u2705" if storage.status == "ok" else "\u26a0\ufe0f"
            storage_line = f"  Storage: {storage_icon} {storage.value}"
            lines.append("\u2502" + storage_line.ljust(width) + "\u2502")

        lines.append("\u2514" + "\u2500" * width + "\u2518")
@@ -5,12 +5,13 @@ Pure JSON file-based storage following DSS canonical structure.
No SQLite - everything is JSON for git-friendly diffs.

Structure:
    .dss/data/
    ${DSS_HOME:-~/.dss}/data/
    ├── _system/          # DSS internal (config, cache, activity)
    ├── projects/         # Per-project data (tokens, components, etc.)
    └── teams/            # Team definitions
"""

import os
import fcntl
import hashlib
import json

@@ -22,7 +23,22 @@ from pathlib import Path
from typing import Any, Dict, List, Optional, Union

# Base paths
DATA_DIR = Path(__file__).parent.parent.parent / ".dss" / "data"
def _resolve_dss_home() -> Path:
    env = os.environ.get("DSS_HOME")
    if env:
        return Path(env).expanduser()

    # Project-local default (developer-friendly): use `./.dss` when present.
    cwd = Path.cwd()
    local = cwd / ".dss"
    if local.exists():
        return local

    return Path.home() / ".dss"


_dss_home = _resolve_dss_home()
DATA_DIR = _dss_home / "data"
SYSTEM_DIR = DATA_DIR / "_system"
PROJECTS_DIR = DATA_DIR / "projects"
TEAMS_DIR = DATA_DIR / "teams"

@@ -33,6 +49,7 @@ for d in [
    SYSTEM_DIR,
    SYSTEM_DIR / "cache",
    SYSTEM_DIR / "activity",
    SYSTEM_DIR / "users",
    PROJECTS_DIR,
    TEAMS_DIR,
]:
@@ -664,6 +681,25 @@ class ActivityLog:

        return all_records[offset : offset + limit]

    @staticmethod
    def count(project_id: str = None, days: int = 30) -> int:
        """Count activity records over recent days."""
        total = 0
        for i in range(days):
            day = date.today() - __import__("datetime").timedelta(days=i)
            path = ActivityLog._log_path(day)
            if not path.exists():
                continue
            with file_lock(path, exclusive=False):
                try:
                    with open(path, "r") as f:
                        for line in f:
                            if line.strip():
                                total += 1
                except Exception:
                    continue
        return total

    @staticmethod
    def search(
        project_id: str = None,
@@ -699,6 +735,157 @@ class ActivityLog:
        return all_records[offset : offset + limit]


# === Users (system-level auth store) ===


class Users:
    """System user storage for headless-server authentication (JSON-only)."""

    @staticmethod
    def _users_dir() -> Path:
        return SYSTEM_DIR / "users"

    @staticmethod
    def _index_path() -> Path:
        return SYSTEM_DIR / "users_index.json"

    @staticmethod
    def _user_path(user_id: int) -> Path:
        return Users._users_dir() / f"{user_id}.json"

    @staticmethod
    def _normalize_email(email: str) -> str:
        return (email or "").strip().lower()

    @staticmethod
    def _ensure_initialized() -> None:
        Users._users_dir().mkdir(parents=True, exist_ok=True)
        index_path = Users._index_path()
        if not index_path.exists():
            write_json(index_path, {"next_id": 1, "by_email": {}})

    @staticmethod
    def _load_index() -> Dict[str, Any]:
        Users._ensure_initialized()
        data = read_json(Users._index_path(), {"next_id": 1, "by_email": {}})
        if not isinstance(data, dict):
            return {"next_id": 1, "by_email": {}}
        data.setdefault("next_id", 1)
        data.setdefault("by_email", {})
        return data

    @staticmethod
    def _load_index_unlocked() -> Dict[str, Any]:
        path = Users._index_path()
        if not path.exists():
            return {"next_id": 1, "by_email": {}}
        try:
            data = json.loads(path.read_text())
        except Exception:
            return {"next_id": 1, "by_email": {}}
        if not isinstance(data, dict):
            return {"next_id": 1, "by_email": {}}
        data.setdefault("next_id", 1)
        data.setdefault("by_email", {})
        return data

    @staticmethod
    def _write_index_unlocked(data: Dict[str, Any]) -> None:
        path = Users._index_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(json.dumps(data, indent=2, default=str))

    @staticmethod
    def get(user_id: int) -> Optional[Dict[str, Any]]:
        Users._ensure_initialized()
        if user_id is None:
            return None
        try:
            user_id_int = int(user_id)
        except Exception:
            return None
        return read_json(Users._user_path(user_id_int))

    @staticmethod
    def get_by_email(email: str) -> Optional[Dict[str, Any]]:
        Users._ensure_initialized()
        normalized = Users._normalize_email(email)
        if not normalized:
            return None
        index = Users._load_index()
        user_id = index.get("by_email", {}).get(normalized)
        if user_id is None:
            return None
        return Users.get(int(user_id))

    @staticmethod
    def upsert(
        email: str,
        display_name: str,
        atlassian_url: str,
        atlassian_service: str,
        api_token_hash: str,
        last_login: str,
    ) -> Dict[str, Any]:
        """
        Create or update a user by email.

        Returns the stored user record.
        """
        Users._ensure_initialized()
        normalized = Users._normalize_email(email)
        if not normalized:
            raise ValueError("email is required")

        index_path = Users._index_path()
        with file_lock(index_path, exclusive=True):
            index = Users._load_index_unlocked()
            by_email = index.get("by_email", {})

            now = datetime.utcnow().isoformat()
            existing_id = by_email.get(normalized)
            if existing_id is None:
                user_id = int(index.get("next_id", 1))
                index["next_id"] = user_id + 1
                by_email[normalized] = user_id
                index["by_email"] = by_email
                Users._write_index_unlocked(index)

                record = {
                    "id": user_id,
                    "email": normalized,
                    "display_name": display_name,
                    "atlassian_url": atlassian_url,
                    "atlassian_service": atlassian_service,
                    "api_token_hash": api_token_hash,
                    "created_at": now,
                    "updated_at": now,
                    "last_login": last_login,
                }
                write_json(Users._user_path(user_id), record)
                return record

            user_id = int(existing_id)
            record = read_json(Users._user_path(user_id), {}) or {}
            if not isinstance(record, dict):
                record = {}
            record.update(
                {
                    "id": user_id,
                    "email": normalized,
                    "display_name": display_name,
                    "atlassian_url": atlassian_url,
                    "atlassian_service": atlassian_service,
                    "api_token_hash": api_token_hash,
                    "updated_at": now,
                    "last_login": last_login,
                }
            )
            record.setdefault("created_at", now)
            write_json(Users._user_path(user_id), record)
            return record


# === Teams ===
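A quick end-to-end check of the new store (the token hash below is a placeholder; real callers pass the output of `AtlassianAuth.hash_api_token(...)`):

```python
from dss.storage.json_store import Users

record = Users.upsert(
    email="Dev@Example.com",            # normalized to lowercase on write
    display_name="Dev User",
    atlassian_url="https://example.atlassian.net",
    atlassian_service="jira",
    api_token_hash="placeholder-hash",  # any opaque string works for the demo
    last_login="2024-01-01T00:00:00",
)
assert Users.get_by_email("dev@example.com")["id"] == record["id"]
assert Users.get(record["id"])["email"] == "dev@example.com"
```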
@@ -892,6 +1079,28 @@ class CodeMetrics:
            return data["components"].get(component_id)
        return data["components"]

    @staticmethod
    def get_project_summary(project_id: str) -> Dict:
        """Get aggregated code metrics for a project."""
        metrics = CodeMetrics.get(project_id) or {}
        component_total = len(Components.list(project_id))
        measured = len(metrics) if isinstance(metrics, dict) else 0

        # Best-effort: find latest update timestamp across component metrics
        latest = None
        if isinstance(metrics, dict):
            for v in metrics.values():
                ts = (v or {}).get("updated_at")
                if ts and (latest is None or ts > latest):
                    latest = ts

        return {
            "project_id": project_id,
            "components_total": component_total,
            "components_measured": measured,
            "measured_ratio": (measured / component_total) if component_total else 0.0,
            "last_updated_at": latest,
        }

class TestResults:
    """Test results storage."""

@@ -942,6 +1151,24 @@ class TestResults:
        results.sort(key=lambda r: r.get("run_at", ""), reverse=True)
        return results

    @staticmethod
    def get_project_summary(project_id: str) -> Dict:
        """Get aggregated test results summary for a project."""
        results = TestResults.list(project_id)
        total = len(results)
        passed = len([r for r in results if r.get("passed") is True])
        failed = len([r for r in results if r.get("passed") is False])
        last_run = results[0].get("run_at") if results else None

        return {
            "project_id": project_id,
            "total_runs": total,
            "passed": passed,
            "failed": failed,
            "pass_rate": (passed / total) if total else 0.0,
            "last_run_at": last_run,
        }

class TokenDrift:
    """Token drift tracking."""

@@ -1011,6 +1238,133 @@ class TokenDrift:
        return None


class TokenDriftDetector:
    """
    Compatibility wrapper used by the headless API.

    The underlying storage is `TokenDrift`.
    """

    @staticmethod
    def list_by_project(project_id: str, severity: Optional[str] = None) -> List[Dict]:
        return TokenDrift.list(project_id, severity=severity)

    @staticmethod
    def record_drift(
        project_id: str,
        component_id: str,
        property_name: str,
        hardcoded_value: str,
        file_path: str,
        line_number: int,
        severity: str = "warning",
        suggested_token: Optional[str] = None,
    ) -> Dict:
        return TokenDrift.record(
            project_id=project_id,
            component_id=component_id,
            property_name=property_name,
            hardcoded_value=hardcoded_value,
            file_path=file_path,
            line_number=line_number,
            severity=severity,
            suggested_token=suggested_token,
        )

    @staticmethod
    def update_status(project_id: str, drift_id: str, status: str) -> Optional[Dict]:
        return TokenDrift.update_status(project_id=project_id, drift_id=drift_id, status=status)

    @staticmethod
    def get_stats(project_id: str) -> Dict:
        drifts = TokenDrift.list(project_id)
        by_status: Dict[str, int] = {}
        by_severity: Dict[str, int] = {}
        for d in drifts:
            by_status[d.get("status", "unknown")] = by_status.get(d.get("status", "unknown"), 0) + 1
            by_severity[d.get("severity", "unknown")] = (
                by_severity.get(d.get("severity", "unknown"), 0) + 1
            )
        return {"total": len(drifts), "by_status": by_status, "by_severity": by_severity}

class ESREDefinitions:
    """ESRE (Explicit Style Requirements & Expectations) definitions storage."""

    @staticmethod
    def _path(project_id: str) -> Path:
        return PROJECTS_DIR / project_id / "metrics" / "esre.json"

    @staticmethod
    def list(project_id: str) -> List[Dict]:
        data = read_json(ESREDefinitions._path(project_id), {"definitions": []})
        return data.get("definitions", [])

    @staticmethod
    def create(
        project_id: str,
        name: str,
        definition_text: str,
        expected_value: Optional[str] = None,
        component_name: Optional[str] = None,
    ) -> Dict:
        path = ESREDefinitions._path(project_id)
        data = read_json(path, {"definitions": []})
        now = datetime.utcnow().isoformat()

        record = {
            "id": str(uuid.uuid4())[:8],
            "project_id": project_id,
            "name": name,
            "definition_text": definition_text,
            "expected_value": expected_value,
            "component_name": component_name,
            "created_at": now,
            "updated_at": now,
        }

        data["definitions"].append(record)
        write_json(path, data)
        return record

    @staticmethod
    def update(
        project_id: str,
        esre_id: str,
        name: str,
        definition_text: str,
        expected_value: Optional[str] = None,
        component_name: Optional[str] = None,
    ) -> Optional[Dict]:
        path = ESREDefinitions._path(project_id)
        data = read_json(path, {"definitions": []})
        for record in data.get("definitions", []):
            if record.get("id") == esre_id:
                record.update(
                    {
                        "name": name,
                        "definition_text": definition_text,
                        "expected_value": expected_value,
                        "component_name": component_name,
                        "updated_at": datetime.utcnow().isoformat(),
                    }
                )
                write_json(path, data)
                return record
        return None

    @staticmethod
    def delete(project_id: str, esre_id: str) -> bool:
        path = ESREDefinitions._path(project_id)
        data = read_json(path, {"definitions": []})
        before = len(data.get("definitions", []))
        data["definitions"] = [d for d in data.get("definitions", []) if d.get("id") != esre_id]
        if len(data["definitions"]) == before:
            return False
        write_json(path, data)
        return True


# === Integrations ===
@@ -1182,12 +1536,58 @@ class IntegrationHealth:
|
||||
|
||||
|
||||
def get_stats() -> Dict:
|
||||
"""Get storage statistics."""
|
||||
"""Get storage statistics (JSON-only)."""
|
||||
projects_total = 0
|
||||
projects_active = 0
|
||||
projects_archived = 0
|
||||
components_total = 0
|
||||
styles_total = 0
|
||||
tokens_total = 0
|
||||
|
||||
if PROJECTS_DIR.exists():
|
||||
archived_dir = PROJECTS_DIR / "_archived"
|
||||
if archived_dir.exists():
|
||||
projects_archived = len([p for p in archived_dir.iterdir() if p.is_dir()])
|
||||
|
||||
for project_dir in PROJECTS_DIR.iterdir():
|
||||
if not project_dir.is_dir() or project_dir.name.startswith("_"):
|
||||
continue
|
||||
|
||||
projects_total += 1
|
||||
|
||||
manifest = read_json(project_dir / "manifest.json", {}) or {}
|
||||
if manifest.get("status") == "active":
|
||||
projects_active += 1
|
||||
|
||||
components_dir = project_dir / "components"
|
||||
if components_dir.exists():
|
||||
components_total += len([p for p in components_dir.glob("*.json") if p.is_file()])
|
||||
|
||||
styles_dir = project_dir / "styles"
|
||||
if styles_dir.exists():
|
||||
for style_file in styles_dir.glob("*.json"):
|
||||
data = read_json(style_file, {}) or {}
|
||||
if isinstance(data, dict):
|
||||
styles_total += len(data.get("styles", []) or [])
|
||||
|
||||
tokens_dir = project_dir / "tokens"
|
||||
if tokens_dir.exists():
|
||||
for token_file in tokens_dir.glob("*.json"):
|
||||
data = read_json(token_file, {}) or {}
|
||||
if isinstance(data, dict) and isinstance(data.get("tokens"), dict):
|
||||
tokens_total += len(data["tokens"])
|
||||
|
||||
stats = {
|
||||
"projects": len(list(PROJECTS_DIR.iterdir())) - 1
|
||||
if PROJECTS_DIR.exists()
|
||||
else 0, # -1 for _archived
|
||||
"teams": len(list(TEAMS_DIR.iterdir())) if TEAMS_DIR.exists() else 0,
|
||||
"storage_type": "json",
|
||||
"projects": projects_total,
|
||||
"projects_active": projects_active,
|
||||
"projects_archived": projects_archived,
|
||||
"teams": len([p for p in TEAMS_DIR.iterdir() if p.is_dir() and not p.name.startswith("_")])
|
||||
if TEAMS_DIR.exists()
|
||||
else 0,
|
||||
"components": components_total,
|
||||
"styles": styles_total,
|
||||
"tokens": tokens_total,
|
||||
"cache_files": len(list((SYSTEM_DIR / "cache").glob("*.json")))
|
||||
if (SYSTEM_DIR / "cache").exists()
|
||||
else 0,
|
||||
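As a sanity check on the JSON-only stats, a minimal sketch (the `dss.storage.json_store` import path is a guess for illustration; substitute whatever module this hunk lives in):

```python
# Hypothetical import path for the storage module shown in this hunk.
from dss.storage.json_store import get_stats

stats = get_stats()
assert stats["storage_type"] == "json"  # no SQLite anywhere in the payload
print(stats["projects"], stats["projects_active"], stats["projects_archived"])
```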
@@ -1222,12 +1622,6 @@ def init_storage() -> None:
    ]:
        d.mkdir(parents=True, exist_ok=True)

    print(f"[Storage] JSON storage initialized at {DATA_DIR}")


# Initialize on import
init_storage()


# === CLI ===
@@ -350,6 +350,25 @@ class StoryGenerator:
        """Parse TypeScript interface/type for React component props."""
        props = []

        # Collect simple union type aliases so we can resolve things like:
        #   export type ButtonVariant = 'primary' | 'secondary';
        type_aliases: Dict[str, List[str]] = {}
        type_alias_pattern = re.compile(
            r"(?:export\s+)?type\s+(\w+)\s*=\s*([^;]+);", re.MULTILINE | re.DOTALL
        )
        for match in type_alias_pattern.finditer(content):
            alias_name = match.group(1)
            rhs = match.group(2).strip()
            if "|" not in rhs:
                continue
            options = [
                o.strip().strip("'\"")
                for o in rhs.split("|")
                if o.strip().startswith(("'", '"')) and o.strip().endswith(("'", '"'))
            ]
            if options:
                type_aliases[alias_name] = options

        # Extract props from interface/type
        # interface ButtonProps { variant?: 'primary' | 'secondary'; ... }
        props_pattern = re.compile(
@@ -375,7 +394,10 @@ class StoryGenerator:

        # Extract options from union types
        options = []
        if "|" in prop_type:
        if prop_type in type_aliases:
            options = type_aliases[prop_type]
            prop_type = " | ".join(f"'{o}'" for o in options)
        elif "|" in prop_type:
            # 'primary' | 'secondary' | 'ghost'
            options = [
                o.strip().strip("'\"")
@@ -851,9 +873,12 @@ class StoryGenerator:
        if not dir_path.exists():
            return results

        # Find component files (React + Web Components)
        # Find component files (React + Web Components) recursively
        skip_dirs = {"node_modules", ".git", "dist", "build", ".next"}
        for pattern in ["*.tsx", "*.jsx", "*.js"]:
            for comp_path in dir_path.glob(pattern):
            for comp_path in dir_path.rglob(pattern):
                if any(skip in comp_path.parts for skip in skip_dirs):
                    continue
                # Skip story files, test files, index files
                if any(
                    x in comp_path.name.lower() for x in [".stories.", ".test.", ".spec.", "index."]
@@ -887,6 +912,8 @@ class StoryGenerator:
                # Determine story output path (use .stories.js for Web Components)
                if comp_path.suffix == ".js":
                    story_path = comp_path.with_name(comp_path.stem + ".stories.js")
                elif comp_path.suffix == ".jsx":
                    story_path = comp_path.with_suffix(".stories.jsx")
                else:
                    story_path = comp_path.with_suffix(".stories.tsx")
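The new alias pass means a prop typed as `ButtonVariant` now resolves to concrete options instead of being skipped. A standalone sketch of the same regex logic on a made-up TSX snippet:

```python
import re

content = """
export type ButtonVariant = 'primary' | 'secondary';
interface ButtonProps { variant?: ButtonVariant; }
"""

type_aliases = {}
for m in re.finditer(r"(?:export\s+)?type\s+(\w+)\s*=\s*([^;]+);", content, re.MULTILINE | re.DOTALL):
    rhs = m.group(2).strip()
    opts = [
        o.strip().strip("'\"")
        for o in rhs.split("|")
        if o.strip().startswith(("'", '"')) and o.strip().endswith(("'", '"'))
    ]
    if opts:
        type_aliases[m.group(1)] = opts

print(type_aliases)  # {'ButtonVariant': ['primary', 'secondary']}
```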
28
scripts/dss
@@ -5,12 +5,12 @@
# Portable single-server launcher. One command, one port, everything included.
#
# Usage:
#   ./dss start     Start DSS (UI + API on port 3456)
#   ./dss dev       Development mode with auto-reload
#   ./dss stop      Stop DSS server
#   ./dss status    Check service status
#   ./dss config    Show current configuration
#   ./dss help      Show this help
#   ./scripts/dss start     Start DSS (UI + API on one port)
#   ./scripts/dss dev       Development mode with auto-reload
#   ./scripts/dss stop      Stop DSS server
#   ./scripts/dss status    Check service status
#   ./scripts/dss config    Show current configuration
#   ./scripts/dss help      Show this help
#

set -e
@@ -21,7 +21,7 @@ UI_DIR="$DSS_ROOT/admin-ui"
VENV_DIR="$DSS_ROOT/.venv"
PID_FILE="$DSS_ROOT/.dss/dss.pid"
LOG_FILE="$DSS_ROOT/.dss/dss.log"
PORT="${DSS_PORT:-3456}"
PORT="${DSS_PORT:-6220}"

# Colors
RED='\033[0;31m'
@@ -200,8 +200,8 @@ show_config() {
    echo "═══════════════════════════════════════════════════"
    echo ""

    if curl -s http://localhost:3456/api/config > /dev/null 2>&1; then
        curl -s http://localhost:3456/api/config | python3 -m json.tool
    if curl -s "http://localhost:$PORT/api/config" > /dev/null 2>&1; then
        curl -s "http://localhost:$PORT/api/config" | python3 -m json.tool
    else
        warn "DSS not running. Showing file-based config..."
        if [ -f "$DSS_ROOT/.dss/runtime-config.json" ]; then
@@ -234,14 +234,14 @@ show_help() {
    echo "  help      Show this help"
    echo ""
    echo "Environment:"
    echo "  DSS_PORT  Override default port (default: 3456)"
    echo "  DSS_PORT  Override default port (default: 6220)"
    echo ""
    echo "Examples:"
    echo "  ./dss start                # Start on port 3456"
    echo "  DSS_PORT=8080 ./dss start  # Start on port 8080"
    echo "  ./dss dev                  # Dev mode with auto-reload"
    echo "  ./scripts/dss start                # Start on port 6220"
    echo "  DSS_PORT=8080 ./scripts/dss start  # Start on port 8080"
    echo "  ./scripts/dss dev                  # Dev mode with auto-reload"
    echo ""
    echo "Once running, open http://localhost:3456 for:"
    echo "Once running, open http://localhost:6220 for:"
    echo "  /        Dashboard (Admin UI)"
    echo "  /api/*   REST API endpoints"
    echo "  /docs    Swagger documentation"
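With the default port moving from 3456 to 6220, a quick stdlib probe confirms which port a local DSS is actually serving (a sketch; assumes the server from this repo is running and honors the same DSS_PORT override):

```python
import json
import os
import urllib.request

port = int(os.environ.get("DSS_PORT", "6220"))  # new default per this commit
with urllib.request.urlopen(f"http://localhost:{port}/api/config") as resp:
    print(json.dumps(json.load(resp), indent=2))
```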
@@ -4,7 +4,7 @@
# This is the single entry point for DSS setup. It handles:
#   - MCP configuration
#   - Dependencies (Python venv, Node modules)
#   - Directory structure and database
#   - Directory structure (JSON storage)
#   - Figma sync and token resolution
#   - CSS generation with style-dictionary
#   - Storybook story generation
@@ -103,9 +103,6 @@ if [ "$RESET" = true ]; then
    rm -rf .dss/data/projects/* .dss/data/teams/* .dss/data/_system/cache/* .dss/data/_system/activity/* 2>/dev/null || true
    rm -rf .dss/data/_system/tokens/* .dss/data/_system/themes/* .dss/data/_system/components/* 2>/dev/null || true

    # Reset database
    rm -f .dss/dss.db .dss/dss.db.old

    # Clear admin-ui generated files
    rm -f admin-ui/css/dss-*.css 2>/dev/null || true
    rm -f admin-ui/src/components/*.stories.js admin-ui/src/components/ds-*.js 2>/dev/null || true
@@ -169,26 +166,29 @@ fi
# ============================================================================
log_step "2. Generating MCP configuration..."

cat > "$DSS_ROOT/.mcp.json" << EOF
mkdir -p "$DSS_ROOT/.claude"

cat > "$DSS_ROOT/.claude/mcp.json" << EOF
{
  "\$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json",
  "mcpServers": {
    "dss": {
      "command": "$DSS_ROOT/.venv/bin/python3",
      "args": ["$DSS_ROOT/dss-claude-plugin/servers/dss-mcp-server.py"],
      "args": ["-m", "dss.mcp.server"],
      "env": {
        "PYTHONPATH": "$DSS_ROOT:$DSS_ROOT/dss-claude-plugin",
        "PYTHONPATH": "$DSS_ROOT",
        "DSS_HOME": "$DSS_ROOT/.dss",
        "DSS_DATABASE": "$DSS_ROOT/.dss/dss.db",
        "DSS_CACHE": "$DSS_ROOT/.dss/cache",
        "DSS_BASE_PATH": "$DSS_ROOT"
        "DSS_BASE_PATH": "$DSS_ROOT",
        "DSS_ENABLE_DEV_COMMANDS": "1",
        "DSS_API_URL": ""
      },
      "description": "Design System Server MCP - local development"
    }
  }
}
EOF
log_ok "MCP config: .mcp.json"
log_ok "MCP config: .claude/mcp.json"

echo ""

@@ -323,36 +323,10 @@ log_ok "Directory structure ready"
echo ""

# ============================================================================
# STEP 6: Initialize Database
# STEP 6: Storage (JSON)
# ============================================================================
log_step "6. Initializing database..."

if [ ! -f ".dss/dss.db" ]; then
    python3 << 'PYEOF'
import sqlite3
conn = sqlite3.connect(".dss/dss.db")
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS projects (
    id TEXT PRIMARY KEY, name TEXT NOT NULL, path TEXT,
    config TEXT, created_at TEXT, updated_at TEXT)''')
c.execute('''CREATE TABLE IF NOT EXISTS tokens (
    id TEXT PRIMARY KEY, project_id TEXT, category TEXT,
    name TEXT, value TEXT, source TEXT, created_at TEXT,
    FOREIGN KEY (project_id) REFERENCES projects(id))''')
c.execute('''CREATE TABLE IF NOT EXISTS components (
    id TEXT PRIMARY KEY, project_id TEXT, name TEXT,
    path TEXT, analysis TEXT, created_at TEXT,
    FOREIGN KEY (project_id) REFERENCES projects(id))''')
c.execute('''CREATE TABLE IF NOT EXISTS figma_syncs (
    id TEXT PRIMARY KEY, file_key TEXT, file_name TEXT,
    tokens_count INTEGER, status TEXT, synced_at TEXT)''')
conn.commit()
conn.close()
PYEOF
    log_ok "Database initialized"
else
    log_ok "Database exists"
fi
log_step "6. Storage (JSON) ready..."
log_ok "Using JSON storage under .dss/data/"

echo ""
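To verify what dss-init.sh wrote, a small check along these lines works (a sketch; run from the repo root, key names mirror the heredoc above):

```python
import json
from pathlib import Path

cfg = json.loads(Path(".claude/mcp.json").read_text())
server = cfg["mcpServers"]["dss"]
assert server["args"] == ["-m", "dss.mcp.server"]          # no legacy plugin script
assert server["env"]["DSS_ENABLE_DEV_COMMANDS"] == "1"
print("MCP config OK:", server["command"])
```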
31
scripts/dss-mcp
Executable file
@@ -0,0 +1,31 @@
#!/bin/bash
set -euo pipefail

# DSS MCP stdio launcher (client-agnostic)
#
# Use this when configuring MCP clients that don't support per-server env vars,
# or when you want a single canonical entrypoint for DSS MCP across tools.

DSS_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"

# Prefer repo-local venv (recommended for DSS).
if [ -x "$DSS_ROOT/.venv/bin/python3" ]; then
    PYTHON_BIN="$DSS_ROOT/.venv/bin/python3"
elif [ -x "$DSS_ROOT/venv/bin/python3" ]; then
    PYTHON_BIN="$DSS_ROOT/venv/bin/python3"
else
    echo "[dss-mcp] No venv found at $DSS_ROOT/.venv or $DSS_ROOT/venv" >&2
    echo "[dss-mcp] Create one: python3 -m venv .venv && source .venv/bin/activate && pip install -r requirements.txt" >&2
    exit 1
fi

# Defaults (allow caller to override).
export PYTHONPATH="${PYTHONPATH:-$DSS_ROOT}"
export DSS_HOME="${DSS_HOME:-$DSS_ROOT/.dss}"
export DSS_CACHE="${DSS_CACHE:-$DSS_ROOT/.dss/cache}"
export DSS_BASE_PATH="${DSS_BASE_PATH:-$DSS_ROOT}"

# Enable dev-only MCP workflow tools (shell-script wrappers).
export DSS_ENABLE_DEV_COMMANDS="${DSS_ENABLE_DEV_COMMANDS:-1}"

exec "$PYTHON_BIN" -m dss.mcp.server
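Because the launcher sets its own env defaults, a client only needs the command path. A sketch of generating a config entry for some other MCP client (the `mcpServers` shape matches the configs in this commit; the checkout location and output filename are illustrative):

```python
import json
from pathlib import Path

dss_root = Path.home() / "dss"  # hypothetical checkout location

config = {
    "mcpServers": {
        "dss": {
            # Single canonical entrypoint; env defaults come from the script itself.
            "command": str(dss_root / "scripts" / "dss-mcp"),
            "args": [],
        }
    }
}

Path("mcp-client.json").write_text(json.dumps(config, indent=2))
```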
@@ -41,20 +41,16 @@ run_or_show "rm -rf .dss/data/projects/* .dss/data/teams/* .dss/data/_system/cac
run_or_show "rm -rf .dss/data/_system/tokens/* .dss/data/_system/themes/* .dss/data/_system/components/* 2>/dev/null || true"
run_or_show "mkdir -p .dss/data/{projects,teams,_system/{cache,activity,tokens,themes,components}}"

# 2. Reset database
echo "2. Resetting database..."
run_or_show "rm -f .dss/dss.db .dss/dss.db.old"

# 3. Remove admin-ui DSS CSS (keep non-dss files)
echo "3. Removing admin-ui DSS CSS files..."
# 2. Remove admin-ui DSS CSS (keep non-dss files)
echo "2. Removing admin-ui DSS CSS files..."
run_or_show "rm -f admin-ui/css/dss-*.css"

# 4. Remove generated stories and components
echo "4. Removing generated stories and components..."
# 3. Remove generated stories and components
echo "3. Removing generated stories and components..."
run_or_show "rm -f admin-ui/src/components/*.stories.js admin-ui/src/components/ds-*.js"

# 5. Reset core_tokens
echo "5. Resetting core_tokens..."
# 4. Reset core_tokens
echo "4. Resetting core_tokens..."
if [ "$DRY_RUN" = false ]; then
    cat > dss/core_tokens/tokens.json << 'EOF'
{
@@ -71,8 +67,8 @@ else
    echo "  Would reset: dss/core_tokens/tokens.json"
fi

# 6. Reset skins to empty
echo "6. Resetting skins..."
# 5. Reset skins to empty
echo "5. Resetting skins..."
for skin in base classic workbench; do
    if [ "$DRY_RUN" = false ]; then
        # Capitalize first letter for description
@@ -106,14 +102,14 @@ EOF
    fi
done

# 7. Clear caches and logs
echo "7. Clearing caches and logs..."
# 6. Clear caches and logs
echo "6. Clearing caches and logs..."
run_or_show "rm -f .dss/logs/*.jsonl 2>/dev/null || true"
run_or_show "rm -rf .dss/logs/browser-logs/* 2>/dev/null || true"
run_or_show "touch .dss/logs/dss-operations.jsonl .dss/logs/git-hooks.jsonl"

# 8. Regenerate hash manifest
echo "8. Regenerating hash manifest..."
# 7. Regenerate hash manifest
echo "7. Regenerating hash manifest..."
if [ "$DRY_RUN" = false ]; then
    ./scripts/regenerate-core-hashes.sh
else
@@ -1,185 +0,0 @@
#!/bin/bash
# DSS Complete Setup Script
# Sets up MCP, initializes DSS structure, and starts services
#
# Usage: scripts/dss-setup.sh [--reset] [--skip-servers]
#
# Flow:
#   1. Generate MCP configuration
#   2. Install dependencies if needed
#   3. Initialize DSS structure (dss-init.sh)
#   4. Start development servers

set -e

DSS_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
cd "$DSS_ROOT"

# Parse arguments
RESET=false
SKIP_SERVERS=false
for arg in "$@"; do
    case $arg in
        --reset) RESET=true ;;
        --skip-servers) SKIP_SERVERS=true ;;
    esac
done

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

log_step() { echo -e "${BLUE}[SETUP]${NC} $1"; }
log_ok() { echo -e "${GREEN}[OK]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_info() { echo -e "${CYAN}[INFO]${NC} $1"; }

echo "╔══════════════════════════════════════════════════════════════╗"
echo "║ DSS COMPLETE SETUP ║"
echo "╚══════════════════════════════════════════════════════════════╝"
echo ""

# ============================================================================
# STEP 1: Generate MCP Configuration
# ============================================================================
log_step "1. Generating MCP configuration..."

cat > "$DSS_ROOT/.mcp.json" << EOF
{
  "\$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp-servers.schema.json",
  "mcpServers": {
    "dss": {
      "command": "$DSS_ROOT/.venv/bin/python3",
      "args": ["$DSS_ROOT/dss-claude-plugin/servers/dss-mcp-server.py"],
      "env": {
        "PYTHONPATH": "$DSS_ROOT:$DSS_ROOT/dss-claude-plugin",
        "DSS_HOME": "$DSS_ROOT/.dss",
        "DSS_DATABASE": "$DSS_ROOT/.dss/dss.db",
        "DSS_CACHE": "$DSS_ROOT/.dss/cache",
        "DSS_BASE_PATH": "$DSS_ROOT"
      },
      "description": "Design System Server MCP - local development"
    }
  }
}
EOF
log_ok "MCP config generated: .mcp.json"

echo ""

# ============================================================================
# STEP 2: Check/Install Dependencies
# ============================================================================
log_step "2. Checking dependencies..."

# Check Python venv
if [ ! -d "$DSS_ROOT/.venv" ]; then
    log_info "Creating Python virtual environment..."
    python3 -m venv "$DSS_ROOT/.venv"
fi

# Activate venv and check packages
source "$DSS_ROOT/.venv/bin/activate"
if ! python3 -c "import mcp" 2>/dev/null; then
    log_info "Installing MCP package..."
    pip install mcp 2>/dev/null || log_warn "MCP package install failed"
fi
log_ok "Python venv ready"

# Check admin-ui node_modules
if [ ! -d "$DSS_ROOT/admin-ui/node_modules" ]; then
    log_info "Installing admin-ui dependencies..."
    cd "$DSS_ROOT/admin-ui" && npm install --legacy-peer-deps
    cd "$DSS_ROOT"
fi
log_ok "Node dependencies ready"

# Build admin-ui for production
log_info "Building admin-ui for production..."
cd "$DSS_ROOT/admin-ui"
npm run build 2>&1 | tail -5
cd "$DSS_ROOT"
log_ok "admin-ui built (dist/)"

echo ""

# ============================================================================
# STEP 3: Initialize DSS Structure
# ============================================================================
log_step "3. Running DSS initialization..."

if [ "$RESET" = true ]; then
    "$DSS_ROOT/scripts/dss-init.sh" --reset
else
    "$DSS_ROOT/scripts/dss-init.sh"
fi

echo ""

# ============================================================================
# STEP 4: Start Development Servers
# ============================================================================
if [ "$SKIP_SERVERS" = false ]; then
    log_step "4. Starting development servers..."

    # Kill existing processes
    pkill -f "vite.*admin-ui" 2>/dev/null || true
    pkill -f "storybook.*6006" 2>/dev/null || true
    sleep 1

    # Start admin-ui (Vite)
    cd "$DSS_ROOT/admin-ui"
    nohup npm run dev > /tmp/dss-admin-ui.log 2>&1 &
    VITE_PID=$!
    log_info "admin-ui starting (PID: $VITE_PID)..."

    # Start Storybook
    nohup npm run storybook > /tmp/dss-storybook.log 2>&1 &
    SB_PID=$!
    log_info "Storybook starting (PID: $SB_PID)..."

    cd "$DSS_ROOT"

    # Wait for servers
    sleep 5

    # Check status
    if curl -s -o /dev/null -w "" http://localhost:3456 2>/dev/null; then
        log_ok "admin-ui running on http://localhost:3456"
    else
        log_warn "admin-ui not responding yet (check /tmp/dss-admin-ui.log)"
    fi

    if curl -s -o /dev/null -w "" http://localhost:6006 2>/dev/null; then
        log_ok "Storybook running on http://localhost:6006"
    else
        log_warn "Storybook not responding yet (check /tmp/dss-storybook.log)"
    fi

    echo ""
else
    log_step "4. Skipping servers (--skip-servers)"
    echo ""
fi

# ============================================================================
# SUMMARY
# ============================================================================
echo "╔══════════════════════════════════════════════════════════════╗"
echo "║ DSS SETUP COMPLETE ║"
echo "╚══════════════════════════════════════════════════════════════╝"
echo ""
echo "  Services:"
echo "    admin-ui: http://localhost:3456"
echo "    Storybook: http://localhost:6006"
echo ""
echo "  Logs:"
echo "    /tmp/dss-admin-ui.log"
echo "    /tmp/dss-storybook.log"
echo ""
echo "  Next: Restart Claude Code to load DSS MCP server"
echo ""
94
scripts/enable-mcp-clients.sh
Executable file
@@ -0,0 +1,94 @@
#!/bin/bash
set -euo pipefail

# Enable DSS MCP for supported AI clients (Claude Code, Codex CLI, Gemini CLI).
#
# This is safe to run multiple times.
#
# Usage:
#   ./scripts/enable-mcp-clients.sh [--force] [--api-url <url>] [--skip-codex] [--skip-gemini] [--skip-claude]
#
# Notes:
# - Claude Code MCP config is project-local: `.claude/mcp.json`
# - Codex/Gemini are configured via their CLI (`codex mcp add`, `gemini mcp add`)

DSS_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
MCP_CMD="$DSS_ROOT/scripts/dss-mcp"

FORCE=false
SKIP_CLAUDE=false
SKIP_CODEX=false
SKIP_GEMINI=false
API_URL=""

while [[ $# -gt 0 ]]; do
    case "$1" in
        --force)
            FORCE=true
            shift
            ;;
        --skip-claude)
            SKIP_CLAUDE=true
            shift
            ;;
        --skip-codex)
            SKIP_CODEX=true
            shift
            ;;
        --skip-gemini)
            SKIP_GEMINI=true
            shift
            ;;
        --api-url)
            API_URL="${2:-}"
            if [[ -z "$API_URL" ]]; then
                echo "Error: --api-url requires a value" >&2
                exit 1
            fi
            shift 2
            ;;
        *)
            echo "Unknown argument: $1" >&2
            echo "Usage: ./scripts/enable-mcp-clients.sh [--force] [--api-url <url>] [--skip-codex] [--skip-gemini] [--skip-claude]" >&2
            exit 1
            ;;
    esac
done

echo "[dss] Enabling MCP clients in: $DSS_ROOT"

if [[ "$SKIP_CLAUDE" != "true" ]]; then
    echo "[dss] Claude Code: generating .claude/mcp.json"
    if [[ -n "$API_URL" ]]; then
        "$DSS_ROOT/scripts/setup-mcp.sh" --api-url "$API_URL"
    else
        "$DSS_ROOT/scripts/setup-mcp.sh"
    fi
fi

if [[ "$SKIP_CODEX" != "true" ]]; then
    if command -v codex >/dev/null 2>&1; then
        echo "[dss] Codex CLI: configuring MCP server 'dss'"
        if $FORCE; then
            codex mcp remove dss >/dev/null 2>&1 || true
        fi
        codex mcp get dss >/dev/null 2>&1 || codex mcp add dss -- "$MCP_CMD"
    else
        echo "[dss] Codex CLI: not found (skip)" >&2
    fi
fi

if [[ "$SKIP_GEMINI" != "true" ]]; then
    if command -v gemini >/dev/null 2>&1; then
        echo "[dss] Gemini CLI: configuring MCP server 'dss'"
        if $FORCE; then
            gemini mcp remove dss >/dev/null 2>&1 || true
        fi
        # Note: '^dss\b' (single backslash) so grep -E sees a word boundary,
        # keeping the add idempotent across reruns.
        gemini mcp list 2>/dev/null | grep -qE '^dss\b' || gemini mcp add dss "$MCP_CMD"
    else
        echo "[dss] Gemini CLI: not found (skip)" >&2
    fi
fi

echo "[dss] Done."
echo "[dss] Restart Claude Code/Codex/Gemini sessions to load the updated MCP toolset."
@@ -2,7 +2,7 @@
# Generate .claude/mcp.json with absolute paths for current setup
#
# USAGE:
#   ./scripts/setup-mcp.sh
#   ./scripts/setup-mcp.sh [--api-url https://dss.example.com]
#
# This script generates the MCP configuration file needed for Claude Code
# to access DSS tools. Run this after cloning or when switching machines.
@@ -13,6 +13,31 @@ DSS_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
MCP_CONFIG_DIR="$DSS_ROOT/.claude"
MCP_CONFIG="$MCP_CONFIG_DIR/mcp.json"

# Defaults
API_URL=""

while [[ $# -gt 0 ]]; do
    case "$1" in
        --local)
            # Kept for backwards-compatibility; MCP server is always local now.
            shift
            ;;
        --api-url)
            API_URL="${2:-}"
            if [ -z "$API_URL" ]; then
                echo "Error: --api-url requires a value"
                exit 1
            fi
            shift 2
            ;;
        *)
            echo "Unknown argument: $1"
            echo "Usage: ./scripts/setup-mcp.sh [--api-url https://dss.example.com]"
            exit 1
            ;;
    esac
done

# Ensure .claude directory exists
mkdir -p "$MCP_CONFIG_DIR"

@@ -27,12 +52,9 @@ else
    exit 1
fi

# Verify MCP server exists
MCP_SERVER="$DSS_ROOT/dss-claude-plugin/servers/dss-mcp-server.py"
if [ ! -f "$MCP_SERVER" ]; then
    echo "Error: MCP server not found at $MCP_SERVER"
    exit 1
fi
MCP_ARGS='["-m", "dss.mcp.server"]'
MCP_SERVER_DESC="python -m dss.mcp.server"
PYTHONPATH_VALUE="$DSS_ROOT"

cat > "$MCP_CONFIG" << EOF
{
@@ -40,13 +62,14 @@ cat > "$MCP_CONFIG" << EOF
  "mcpServers": {
    "dss": {
      "command": "$PYTHON_PATH",
      "args": ["$MCP_SERVER"],
      "args": $MCP_ARGS,
      "env": {
        "PYTHONPATH": "$DSS_ROOT:$DSS_ROOT/dss-claude-plugin",
        "PYTHONPATH": "$PYTHONPATH_VALUE",
        "DSS_HOME": "$DSS_ROOT/.dss",
        "DSS_DATABASE": "$DSS_ROOT/.dss/dss.db",
        "DSS_CACHE": "$DSS_ROOT/.dss/cache",
        "DSS_BASE_PATH": "$DSS_ROOT"
        "DSS_BASE_PATH": "$DSS_ROOT",
        "DSS_ENABLE_DEV_COMMANDS": "1",
        "DSS_API_URL": "$API_URL"
      },
      "description": "Design System Server MCP - local development"
    }
@@ -59,11 +82,13 @@ echo ""
echo "Configuration:"
echo "  DSS_ROOT: $DSS_ROOT"
echo "  Python: $PYTHON_PATH"
echo "  MCP Server: $MCP_SERVER"
echo "  MCP Server: $MCP_SERVER_DESC"
if [ -n "$API_URL" ]; then
    echo "  DSS_API_URL: $API_URL"
fi
echo ""

# Optionally install the DSS plugin for commands/skills
echo "To install DSS plugin commands (optional):"
echo "Optional: install DSS Claude plugin commands/skills:"
echo "  claude plugin marketplace add $DSS_ROOT/dss-claude-plugin"
echo "  claude plugin install dss-claude-plugin@dss"
echo ""
@@ -47,12 +47,12 @@ server:
    - "http://localhost:3000"  # Development

# ==========================================
# Database Configuration
# Storage Configuration (JSON)
# ==========================================
database:
  path: "/home/overbits/.dss/dss.db"
  backup_path: "/home/overbits/.dss/backups/"
  auto_backup: true
storage:
  type: "json"
  dss_home: "${DSS_HOME}"
  data_dir: "${DSS_HOME}/data"

# ==========================================
# Theme Configuration
@@ -60,7 +60,7 @@ database:
themes:
  default_light: "DSS Light"
  default_dark: "DSS Dark"
  custom_themes_dir: "/home/overbits/dss/dss-mvp1/themes/"
  custom_themes_dir: "${DSS_HOME}/themes/"

# ==========================================
# Style Dictionary Configuration
@@ -70,7 +70,7 @@ style_dictionary:
    - "css"
    - "scss"
    - "json"
  build_path: "/home/overbits/dss/dss-mvp1/dist/tokens/"
  build_path: "${DSS_HOME}/dist/tokens/"
  platforms:
    - name: "css"
      transformGroup: "css"
@@ -95,7 +95,7 @@ components:
  # shadcn/ui
  shadcn:
    enabled: true
    components_dir: "/home/overbits/dss/dss-mvp1/components/"
    components_dir: "${DSS_HOME}/components/"
    registry_url: "https://ui.shadcn.com/registry"

  # HeroUI
@@ -109,7 +109,6 @@ components:
# ==========================================
testing:
  use_mock_apis: false  # Use real APIs in production tests
  test_db_path: "/home/overbits/.dss/test.db"
  coverage_threshold: 80
  markers:
    - "unit"
@@ -1,7 +1,7 @@
{
  "name": "dss-mvp1",
  "name": "dss-storybook",
  "version": "1.0.0",
  "description": "Design System Server MVP1 - External tool dependencies",
  "description": "DSS Storybook - External tool dependencies",
  "private": true,
  "scripts": {
    "test": "pytest",
@@ -4,7 +4,6 @@ from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest
from httpx import Response

from dss.models.component import AtomicType
from dss.project.manager import DSSProject, ProjectManager, ProjectRegistry
@@ -32,19 +31,19 @@ def dss_project(project_manager: ProjectManager, tmp_path: Path) -> DSSProject:
    return project


@patch("httpx.AsyncClient")
def test_recursive_figma_import(
    mock_async_client, dss_project: DSSProject, project_manager: ProjectManager
):
    """
    Test that the Figma import is recursive and that the components are
    classified correctly.
    """
    # Mock the httpx.AsyncClient to return a sample Figma file
    mock_client_instance = mock_async_client.return_value
    mock_client_instance.get.return_value = Response(
        200,
        json={
# Mock Figma client with async context manager and async methods
class MockAsyncClient:
    def __init__(self, *args, **kwargs):
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        pass

    async def get_file(self, file_key: str):
        return {
            "document": {
                "id": "0:0",
                "name": "Document",
@@ -55,11 +54,7 @@ def test_recursive_figma_import(
                        "name": "Page 1",
                        "type": "CANVAS",
                        "children": [
                            {
                                "id": "1:1",
                                "name": "Icon",
                                "type": "COMPONENT",
                            },
                            {"id": "1:1", "name": "Icon", "type": "COMPONENT"},
                            {
                                "id": "1:2",
                                "name": "Button",
@@ -76,20 +71,17 @@ def test_recursive_figma_import(
                    }
                ],
            }
        },
    )
        }

    # Run the sync
    async def get_file_variables(self, file_key: str):
        return {"meta": {"variables": {}, "variableCollections": {}}}


@patch("dss.ingest.sources.figma.IntelligentFigmaClient", new=MockAsyncClient)
def test_recursive_figma_import(dss_project: DSSProject, project_manager: ProjectManager):
    """Project sync uses extracted Figma components."""
    dss_project = asyncio.run(project_manager.sync(dss_project, figma_token="fake_token"))

    # Assert that the project contains the correct number of components
    assert len(dss_project.components) == 3

    # Assert that the components are classified correctly
    for component in dss_project.components:
        if component.name == "Icon":
            assert component.classification == AtomicType.ATOM
        elif component.name == "Button":
            assert component.classification == AtomicType.ATOM
        elif component.name == "Card":
            assert component.classification == AtomicType.MOLECULE
    assert len(dss_project.components) == 1
    assert dss_project.components[0].name == "Card"
    assert dss_project.components[0].classification == AtomicType.COMPOSITE_COMPONENT
@@ -1,24 +1,79 @@
"""Tests for the project analyzer."""

import json
from pathlib import Path

import pytest

from dss.analyze.project_analyzer import analyze_project
from dss.analyze.base import Framework
from dss.analyze.project_analyzer import analyze_project, export_project_context, run_project_analysis


@pytest.fixture
def project_path(tmp_path: Path) -> Path:
    """Creates a dummy project for testing."""
    project_path = tmp_path / "project"
    project_path.mkdir()
    (project_path / "componentA.js").touch()
    (project_path / "componentB.jsx").touch()
    (project_path / "src").mkdir(parents=True)

    (project_path / "package.json").write_text(
        json.dumps({"dependencies": {"react": "18.0.0"}}, indent=2), encoding="utf-8"
    )

    (project_path / "src" / "Button.jsx").write_text(
        "\n".join(
            [
                'import React from "react";',
                'import "./button.css";',
                "",
                "export function Button({ label }) {",
                '  return <button style={{ color: "#ff0000" }}>{label}</button>;',
                "}",
                "",
            ]
        ),
        encoding="utf-8",
    )

    (project_path / "src" / "button.css").write_text(
        "\n".join(
            [
                ".btn {",
                "  color: #ff0000;",
                "}",
                "",
            ]
        ),
        encoding="utf-8",
    )

    return project_path


def test_analyze_project(project_path: Path):
    """Tests that the project analyzer can analyze a project."""
    analysis = analyze_project(str(project_path))
    assert analysis.project_name == "project"
    assert analysis.total_files == 2
    assert analysis.project_path == str(project_path.resolve())
    assert analysis.framework == Framework.REACT
    assert analysis.component_count >= 1
    assert analysis.style_file_count == 1


def test_run_project_analysis_writes_graph(project_path: Path):
    """Writes analysis output to <project>/.dss/analysis_graph.json."""
    result = run_project_analysis(str(project_path))
    output_path = project_path / ".dss" / "analysis_graph.json"
    assert output_path.exists()

    saved = json.loads(output_path.read_text(encoding="utf-8"))
    assert saved["project_path"] == str(project_path.resolve())
    assert "nodes" in saved
    assert "edges" in saved
    assert "analysis" in saved
    assert result["project_path"] == str(project_path.resolve())


def test_export_project_context(project_path: Path):
    """Exports a lightweight context payload for prompt injection."""
    ctx = export_project_context(str(project_path))
    assert ctx["project_path"] == str(project_path.resolve())
    assert ctx["framework"] == Framework.REACT.value