fix: Address high-severity bandit issues
@@ -1,19 +1,20 @@
 """
-DSS Core Module - Configuration and Context Management
+DSS Core Module - Configuration and Context Management.

 Extended with Context Compiler for design system context resolution.
 """

+from .compiler import EMERGENCY_SKIN, ContextCompiler
 from .config import DSSConfig, DSSMode
 from .context import DSSContext
-from .compiler import ContextCompiler, EMERGENCY_SKIN
 from .mcp_extensions import (
+    COMPILER,
     get_active_context,
+    get_compiler_status,
+    list_skins,
     resolve_token,
     validate_manifest,
-    list_skins,
-    get_compiler_status,
     with_context,
-    COMPILER
 )

 __all__ = [
@@ -28,5 +29,5 @@ __all__ = [
     "list_skins",
     "get_compiler_status",
     "with_context",
-    "COMPILER"
+    "COMPILER",
 ]

@@ -1,16 +1,16 @@
 """
-DSS Context Compiler
+DSS Context Compiler.

 Resolves project context via 3-layer cascade: Base -> Skin -> Project
 Includes Safe Boot Protocol and Debug Provenance.
 """

-import json
-import os
 import copy
+import json
 import logging
 from datetime import datetime, timezone
-from typing import Dict, Any, Optional, List, Union
 from pathlib import Path
+from typing import Any, Dict, List

 # Setup logging
 logging.basicConfig(level=logging.INFO)
@@ -21,25 +21,26 @@ logger = logging.getLogger("DSSCompiler")
 EMERGENCY_SKIN = {
     "meta": {"id": "emergency", "version": "1.0.0"},
     "tokens": {
-        "colors": {
-            "primary": "#FF0000",
-            "background": "#FFFFFF",
-            "text": "#000000"
-        },
-        "spacing": {"base": "4px"}
+        "colors": {"primary": "#FF0000", "background": "#FFFFFF", "text": "#000000"},
+        "spacing": {"base": "4px"},
     },
-    "status": "emergency_mode"
+    "status": "emergency_mode",
 }


 class ContextCompiler:
     def __init__(self, skins_dir: str = "./skins"):
         self.skins_dir = Path(skins_dir)
         self.cache: Dict[str, Any] = {}
         self._manifest_mtimes: Dict[str, float] = {}  # Track file modification times

-    def compile(self, manifest_path: str, debug: bool = False, force_refresh: bool = False) -> Dict[str, Any]:
+    def compile(
+        self, manifest_path: str, debug: bool = False, force_refresh: bool = False
+    ) -> Dict[str, Any]:
         """
-        Main entry point. Compiles context by merging:
+        Main entry point.
+
+        Compiles context by merging:
         1. Base Skin (Implicit or Explicit)
         2. Extended Skin (defined in manifest)
         3. Project Overrides (defined in manifest)
@@ -83,17 +84,17 @@ class ContextCompiler:

         # Merge Result + Project Overrides
         # Need to wrap project overrides in same structure as skins
-        project_overrides_wrapped = {
-            "tokens": manifest.get("overrides", {}).get("tokens", {})
-        }
-        final_context = self._deep_merge(context, project_overrides_wrapped, path="skin->project", debug=debug)
+        project_overrides_wrapped = {"tokens": manifest.get("overrides", {}).get("tokens", {})}
+        final_context = self._deep_merge(
+            context, project_overrides_wrapped, path="skin->project", debug=debug
+        )

         # Inject Metadata
         final_context["_meta"] = {
             "project_id": manifest["project"]["id"],
             "compiled_at": datetime.now(timezone.utc).isoformat(),
             "debug_enabled": debug,
-            "compiler_config": manifest.get("compiler", {})
+            "compiler_config": manifest.get("compiler", {}),
         }

         if debug:
@@ -138,19 +139,28 @@ class ContextCompiler:
         return data

     def _load_json(self, path: str) -> Dict[str, Any]:
-        with open(path, 'r') as f:
+        with open(path, "r") as f:
             return json.load(f)

-    def _deep_merge(self, base: Dict, override: Dict, path: str = "", debug: bool = False, provenance: List[Dict] = None) -> Dict:
+    def _deep_merge(
+        self,
+        base: Dict,
+        override: Dict,
+        path: str = "",
+        debug: bool = False,
+        provenance: List[Dict] = None,
+    ) -> Dict:
         """
-        Deep merge dictionaries. Replaces arrays.
+        Deep merge dictionaries.
+
+        Replaces arrays.
         Populates provenance list if debug is True (thread-safe).
         """
         # Thread-safe: use method parameter instead of instance variable
         if provenance is None and debug:
             provenance = []
             # Store reference on first call for later retrieval
-            if not hasattr(self, 'provenance_log'):
+            if not hasattr(self, "provenance_log"):
                 self.provenance_log = provenance

         result = copy.deepcopy(base)
@@ -158,16 +168,20 @@ class ContextCompiler:
         for key, value in override.items():
             if isinstance(value, dict) and key in result and isinstance(result[key], dict):
                 # Recursive merge - pass provenance down
-                result[key] = self._deep_merge(result[key], value, path=f"{path}.{key}", debug=debug, provenance=provenance)
+                result[key] = self._deep_merge(
+                    result[key], value, path=f"{path}.{key}", debug=debug, provenance=provenance
+                )
             else:
                 # Direct replacement (Primitive or Array)
                 if debug and provenance is not None:
-                    provenance.append({
-                        "key": key,
-                        "action": "override",
-                        "layer": path,
-                        "value_type": type(value).__name__
-                    })
+                    provenance.append(
+                        {
+                            "key": key,
+                            "action": "override",
+                            "layer": path,
+                            "value_type": type(value).__name__,
+                        }
+                    )
                 result[key] = copy.deepcopy(value)

         return result

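The merge rule above (nested dicts merge recursively, primitives and arrays are replaced, later layers win) can be exercised standalone. A minimal sketch of the cascade semantics, with hypothetical token values:

import copy

def deep_merge(base: dict, override: dict) -> dict:
    # Same rule as _deep_merge above: nested dicts merge, everything else is replaced.
    result = copy.deepcopy(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = deep_merge(result[key], value)
        else:
            result[key] = copy.deepcopy(value)
    return result

base = {"tokens": {"colors": {"primary": "#0000FF", "text": "#000000"}}}           # Base skin
skin = {"tokens": {"colors": {"primary": "#00AA00"}, "spacing": {"base": "4px"}}}  # Extended skin
project = {"tokens": {"colors": {"primary": "#FF00FF"}}}                           # Project overrides

resolved = deep_merge(deep_merge(base, skin), project)
assert resolved["tokens"]["colors"] == {"primary": "#FF00FF", "text": "#000000"}
assert resolved["tokens"]["spacing"] == {"base": "4px"}
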
@@ -7,17 +7,15 @@ Supports local/remote mode detection, persistent configuration storage, and
 environment variable overrides.
 """

-import os
-import json
-import uuid
 import asyncio
 import logging
+import os
+import uuid
 from enum import Enum
 from pathlib import Path
-from typing import Optional, Union, Any

 import aiohttp
-from pydantic import BaseModel, Field, HttpUrl, ValidationError
+from pydantic import BaseModel, Field, ValidationError

 # Configure module-level logger
 logger = logging.getLogger(__name__)
@@ -30,6 +28,7 @@ DEFAULT_LOCAL_URL = "http://localhost:6006"

 class DSSMode(str, Enum):
     """Operation modes for the DSS plugin."""
+
     LOCAL = "local"
     REMOTE = "remote"
     AUTO = "auto"
@@ -45,10 +44,13 @@ class DSSConfig(BaseModel):
         local_url (str): URL for the local DSS API (usually localhost).
         session_id (str): Unique identifier for this client instance.
     """
+
     mode: DSSMode = Field(default=DSSMode.AUTO, description="Operation mode preference")
     remote_url: str = Field(default=DEFAULT_REMOTE_URL, description="Remote API endpoint")
     local_url: str = Field(default=DEFAULT_LOCAL_URL, description="Local API endpoint")
-    session_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID")
+    session_id: str = Field(
+        default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID"
+    )

     class Config:
         validate_assignment = True
@@ -58,6 +60,7 @@ class DSSConfig(BaseModel):
     def load(cls) -> "DSSConfig":
         """
         Load configuration from ~/.dss/config.json.
+
         Returns a default instance if the file does not exist or is invalid.
         """
         if not CONFIG_FILE.exists():
@@ -79,6 +82,7 @@ class DSSConfig(BaseModel):
     def save(self) -> None:
         """
         Save the current configuration to ~/.dss/config.json.
+
         Creates the directory if it does not exist.
         """
         try:
@@ -153,9 +157,7 @@ class DSSConfig(BaseModel):
             return False

     def get_api_url(self, active_mode: DSSMode) -> str:
-        """
-        Helper to get the correct API URL for the determined mode.
-        """
+        """Helper to get the correct API URL for the determined mode."""
         if active_mode == DSSMode.LOCAL:
             return self.local_url
         return self.remote_url

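A usage sketch for the configuration API above; the import path is illustrative, and the URL shown is the module's default:

from dss.core.config import DSSConfig, DSSMode  # illustrative import path

config = DSSConfig.load()    # returns defaults if ~/.dss/config.json is missing or invalid
config.mode = DSSMode.LOCAL  # validate_assignment=True re-validates on assignment
config.save()                # creates ~/.dss/ if needed

print(config.get_api_url(DSSMode.LOCAL))  # "http://localhost:6006" by default
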
@@ -8,7 +8,7 @@ Handles configuration loading, mode detection, and strategy instantiation.

 import asyncio
 import logging
-from typing import Optional, Dict, Any
+from typing import Any, Dict, Optional

 from .config import DSSConfig, DSSMode

@@ -26,12 +26,15 @@ class DSSContext:
     Handles configuration loading, mode detection (Local/Remote),
     and strategy instantiation.
     """
-    _instance: Optional['DSSContext'] = None
+
+    _instance: Optional["DSSContext"] = None
     _lock: asyncio.Lock = asyncio.Lock()

     def __init__(self) -> None:
         """
-        Private initializer. Use get_instance() instead.
+        Private initializer.
+
+        Use get_instance() instead.
         """
         if DSSContext._instance is not None:
             raise RuntimeError("DSSContext is a singleton. Use get_instance() to access it.")
@@ -43,9 +46,10 @@ class DSSContext:
         self.session_id: Optional[str] = None

     @classmethod
-    async def get_instance(cls) -> 'DSSContext':
+    async def get_instance(cls) -> "DSSContext":
         """
         Async factory method to get the singleton instance.
+
         Ensures config is loaded and mode is detected before returning.
         """
         if not cls._instance:
@@ -61,13 +65,16 @@ class DSSContext:
     @classmethod
     def reset(cls) -> None:
         """
-        Resets the singleton instance. Useful for testing.
+        Resets the singleton instance.
+
+        Useful for testing.
         """
         cls._instance = None

     async def _initialize(self) -> None:
         """
         Internal initialization logic:
+
         1. Load Config
         2. Detect Mode
         3. Cache Capabilities
@@ -80,7 +87,9 @@ class DSSContext:
         # 2. Detect Mode (Async check)
         self.active_mode = await self.config.get_active_mode()

-        logger.info(f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}")
+        logger.info(
+            f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}"
+        )

         # 3. Cache Capabilities
         self._cache_capabilities()
@@ -92,15 +101,13 @@ class DSSContext:
             self._capabilities = {"limited": True}

     def _cache_capabilities(self) -> None:
-        """
-        Determines what the plugin can do based on the active mode.
-        """
+        """Determines what the plugin can do based on the active mode."""
         # Base capabilities
         caps = {
             "can_read_files": False,
             "can_execute_browser": False,
             "can_screenshot": False,
-            "can_connect_remote": True
+            "can_connect_remote": True,
         }

         if self.active_mode == DSSMode.LOCAL:
@@ -111,8 +118,10 @@ class DSSContext:
         elif self.active_mode == DSSMode.REMOTE:
             # Remote mode relies on API capabilities
             # Depending on remote configuration, these might differ
-            caps["can_execute_browser"] = False  # Typically restricted in pure remote unless via API
-            caps["can_read_files"] = False  # Security restriction
+            caps[
+                "can_execute_browser"
+            ] = False  # Typically restricted in pure remote unless via API
+            caps["can_read_files"] = False  # Security restriction

         self._capabilities = caps

@@ -151,18 +160,22 @@ class DSSContext:
             # Will be implemented in Phase 2 & 3
             if self.active_mode == DSSMode.LOCAL:
                 from ..strategies.local.browser import LocalBrowserStrategy
+
                 strategy_instance = LocalBrowserStrategy(self)
             else:
                 from ..strategies.remote.browser import RemoteBrowserStrategy
+
                 strategy_instance = RemoteBrowserStrategy(self)

         elif strategy_type == "filesystem":
             # Will be implemented in Phase 2
             if self.active_mode == DSSMode.LOCAL:
                 from ..strategies.local.filesystem import LocalFilesystemStrategy
+
                 strategy_instance = LocalFilesystemStrategy(self)
             else:
                 from ..strategies.remote.filesystem import RemoteFilesystemStrategy
+
                 strategy_instance = RemoteFilesystemStrategy(self)

         elif strategy_type == "screenshot":

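A sketch of the intended singleton lifecycle (import path illustrative):

import asyncio
from dss.core.context import DSSContext  # illustrative import path

async def demo():
    ctx = await DSSContext.get_instance()  # loads config, detects mode, caches capabilities
    print(ctx.active_mode, ctx.session_id)

    try:
        DSSContext()  # direct construction is rejected once the singleton exists
    except RuntimeError as exc:
        print(exc)

    DSSContext.reset()  # intended for tests

asyncio.run(demo())
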
@@ -1,13 +1,15 @@
 """
-MCP Extensions for Context Awareness
+MCP Extensions for Context Awareness.

 Implements the Factory Pattern to wrap existing tools with context
 and defines 5 new tools for the Context Compiler.
 """

-from typing import Any, Dict, List, Callable
 import functools
 import json
 import os
+from typing import Callable

 from .compiler import ContextCompiler

 # Singleton compiler instance
@@ -15,19 +17,22 @@ COMPILER = ContextCompiler(skins_dir=os.path.join(os.path.dirname(__file__), "sk

 # --- FACTORY PATTERN: Context Wrapper ---


 def with_context(default_manifest_path: str = None):
     """
     Decorator that injects the compiled context into the tool's arguments.

     Use this to upgrade existing 'token extractor' tools to be 'context aware'.

     The manifest path is extracted from kwargs['manifest_path'] if present,
     otherwise falls back to the default_manifest_path provided at decoration time.
     """
+
     def decorator(func: Callable):
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
             # 1. Get manifest path (runtime kwarg or decorator default)
-            manifest_path = kwargs.get('manifest_path', default_manifest_path)
+            manifest_path = kwargs.get("manifest_path", default_manifest_path)
             if not manifest_path:
                 raise ValueError("No manifest_path provided to context-aware tool")
@@ -35,33 +40,39 @@ def with_context(default_manifest_path: str = None):
             context = COMPILER.compile(manifest_path)

             # 3. Inject into kwargs
-            kwargs['dss_context'] = context
+            kwargs["dss_context"] = context

             # 4. Execute Tool
             return func(*args, **kwargs)

         return wrapper

     return decorator


 # --- 5 NEW MCP TOOLS ---


 def get_active_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
     """
     [Tool 1] Returns the fully resolved JSON context for the project.

     Set debug=True to see provenance (which layer defined which token).
     Set force_refresh=True to bypass cache (for long-running servers).
     """
     context = COMPILER.compile(manifest_path, debug=debug, force_refresh=force_refresh)
     return json.dumps(context, indent=2)


 def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
     """
-    [Tool 2] Resolves a specific token value (e.g. 'colors.primary')
+    [Tool 2] Resolves a specific token value (e.g. 'colors.primary').
+
     through the cascade.
     Set force_refresh=True to bypass cache (for long-running servers).
     """
     context = COMPILER.compile(manifest_path, force_refresh=force_refresh)
-    keys = token_path.split('.')
+    keys = token_path.split(".")
     current = context.get("tokens", {})

     for k in keys:
@@ -72,10 +83,9 @@ def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = Fal

     return str(current)


 def validate_manifest(manifest_path: str) -> str:
-    """
-    [Tool 3] Validates the ds.config.json against the schema.
-    """
+    """[Tool 3] Validates the ds.config.json against the schema."""
     # In a full implementation, we would use 'jsonschema' library here.
     # For now, we perform a basic structural check via the Compiler's loader.
     try:
@@ -84,10 +94,9 @@ def validate_manifest(manifest_path: str) -> str:
     except Exception as e:
         return f"Invalid: {str(e)}"


 def list_skins() -> str:
-    """
-    [Tool 4] Lists all available skins in the registry.
-    """
+    """[Tool 4] Lists all available skins in the registry."""
     skins_path = COMPILER.skins_dir
     if not skins_path.exists():
         return "No skins directory found."
@@ -95,18 +104,18 @@ def list_skins() -> str:
     skins = [f.stem for f in skins_path.glob("*.json")]
     return json.dumps(skins)


 def get_compiler_status() -> str:
-    """
-    [Tool 5] Returns the health and configuration of the Context Compiler.
-    """
+    """[Tool 5] Returns the health and configuration of the Context Compiler."""
     status = {
         "status": "active",
         "skins_directory": str(COMPILER.skins_dir),
         "cached_skins": list(COMPILER.cache.keys()),
-        "safe_boot_ready": True
+        "safe_boot_ready": True,
     }
     return json.dumps(status, indent=2)


 # Instructions for Main Server File:
 # 1. Import these tools
 # 2. Register them with the MCP server instance

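A sketch of upgrading a plain tool with the with_context factory above; the tool name and manifest paths are hypothetical:

from dss.core.mcp_extensions import with_context  # illustrative import path

@with_context(default_manifest_path="ds.config.json")
def extract_tokens(component: str, dss_context: dict = None, **kwargs) -> str:
    # dss_context is injected by the wrapper before the tool body runs;
    # **kwargs absorbs the manifest_path kwarg the wrapper passes through.
    primary = dss_context["tokens"]["colors"]["primary"]
    return f"{component} uses primary color {primary}"

extract_tokens("Button")                                        # decoration-time default manifest
extract_tokens("Button", manifest_path="other/ds.config.json")  # per-call override
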
@@ -1,23 +1,21 @@
 """
-MCP Integration Layer for DSS Context Compiler
+MCP Integration Layer for DSS Context Compiler.

 Provides MCP-compliant tool wrappers for the 5 new context tools.
 """

-from typing import Dict, Any
 import json
-from . import (
-    get_active_context,
-    resolve_token,
-    validate_manifest,
-    list_skins,
-    get_compiler_status
-)

+from . import get_active_context, get_compiler_status, list_skins, resolve_token, validate_manifest

 # MCP Tool Definitions


-def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
+def mcp_get_resolved_context(
+    manifest_path: str, debug: bool = False, force_refresh: bool = False
+) -> str:
     """
-    MCP Tool: Get Active Context
+    MCP Tool: Get Active Context.

     Returns the fully resolved JSON context for a project.
     Set debug=True to see provenance (which layer defined which token).
@@ -39,7 +37,7 @@ def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refr

 def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
     """
-    MCP Tool: Resolve Token
+    MCP Tool: Resolve Token.

     Resolves a specific token value (e.g. 'colors.primary') through the cascade.
     Set force_refresh=True to bypass cache (for long-running servers).
@@ -60,7 +58,7 @@ def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool =

 def mcp_validate_manifest(manifest_path: str) -> str:
     """
-    MCP Tool: Validate Manifest
+    MCP Tool: Validate Manifest.

     Validates the ds.config.json against the schema.

@@ -78,7 +76,7 @@ def mcp_validate_manifest(manifest_path: str) -> str:

 def mcp_list_skins() -> str:
     """
-    MCP Tool: List Skins
+    MCP Tool: List Skins.

     Lists all available skins in the registry.

@@ -93,7 +91,7 @@ def mcp_list_skins() -> str:

 def mcp_get_compiler_status() -> str:
     """
-    MCP Tool: Get Compiler Status
+    MCP Tool: Get Compiler Status.

     Returns the health and configuration of the Context Compiler.

@@ -117,15 +115,15 @@ MCP_TOOLS = {
             "manifest_path": {
                 "type": "string",
                 "description": "Path to ds.config.json",
-                "required": True
+                "required": True,
             },
             "debug": {
                 "type": "boolean",
                 "description": "Enable debug provenance tracking",
                 "required": False,
-                "default": False
-            }
-        }
+                "default": False,
+            },
+        },
     },
     "dss_resolve_token": {
         "function": mcp_resolve_token,
@@ -134,14 +132,14 @@ MCP_TOOLS = {
             "manifest_path": {
                 "type": "string",
                 "description": "Path to ds.config.json",
-                "required": True
+                "required": True,
             },
             "token_path": {
                 "type": "string",
                 "description": "Dot-notation path to token (e.g. 'colors.primary')",
-                "required": True
-            }
-        }
+                "required": True,
+            },
+        },
     },
     "dss_validate_manifest": {
         "function": mcp_validate_manifest,
@@ -150,18 +148,18 @@ MCP_TOOLS = {
             "manifest_path": {
                 "type": "string",
                 "description": "Path to ds.config.json",
-                "required": True
-            }
-        }
+                "required": True,
+            },
+        },
     },
     "dss_list_skins": {
         "function": mcp_list_skins,
         "description": "List all available design system skins",
-        "parameters": {}
+        "parameters": {},
     },
     "dss_get_compiler_status": {
         "function": mcp_get_compiler_status,
         "description": "Get Context Compiler health and configuration",
-        "parameters": {}
-    }
+        "parameters": {},
+    },
 }

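A sketch of consuming the MCP_TOOLS registry above; DemoServer is a stand-in, since the actual server registration API is outside this diff:

from dss.core.mcp_integration import MCP_TOOLS  # illustrative import path

class DemoServer:
    """Stand-in for the real MCP server instance."""
    def __init__(self):
        self.tools = {}

    def register_tool(self, name, handler, description, parameters):
        self.tools[name] = {"handler": handler, "description": description, "parameters": parameters}

server = DemoServer()
for name, spec in MCP_TOOLS.items():
    server.register_tool(name, spec["function"], spec["description"], spec["parameters"])

print(sorted(server.tools))  # dss_get_compiler_status, dss_get_resolved_context, ...
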
@@ -1,5 +1,5 @@
 """
-DSS Runtime - Dependency Injection & Boundary Enforcement
+DSS Runtime - Dependency Injection & Boundary Enforcement.

 This module provides a bounded runtime environment for DSS MCP tools.
 All external API access (Figma, Browser, HTTP) MUST go through this runtime.
@@ -16,20 +16,24 @@ Usage:
     browser = runtime.get_browser()  # Sandboxed
 """

-import logging
 import json
-from pathlib import Path
-from typing import Optional, Dict, Any, List
+import logging
 from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional

 import yaml

 # Setup logging
 logger = logging.getLogger("dss.runtime")


 class BoundaryViolationError(Exception):
-    """Raised when an operation violates DSS boundaries"""
+    """Raised when an operation violates DSS boundaries."""
+
     pass


 class DSSRuntime:
     """
     Bounded runtime environment for DSS operations.
@@ -52,7 +56,11 @@ class DSSRuntime:
         self.config = self._load_config()
         self.enforcement_mode = self.config.get("enforcement", {}).get("mode", "strict")
         self.log_violations = self.config.get("enforcement", {}).get("log_violations", True)
-        self.violation_log_path = Path(self.config.get("enforcement", {}).get("violation_log", ".dss/logs/boundary-violations.jsonl"))
+        self.violation_log_path = Path(
+            self.config.get("enforcement", {}).get(
+                "violation_log", ".dss/logs/boundary-violations.jsonl"
+            )
+        )

         # Client caches (lazy initialization)
         self._figma_client = None
@@ -62,7 +70,7 @@ class DSSRuntime:
         logger.info(f"DSSRuntime initialized with enforcement mode: {self.enforcement_mode}")

     def _load_config(self) -> Dict[str, Any]:
-        """Load boundary configuration from YAML"""
+        """Load boundary configuration from YAML."""
         if not self.config_path.exists():
             logger.warning(f"Boundary config not found: {self.config_path}, using defaults")
             return self._default_config()
@@ -75,7 +83,7 @@ class DSSRuntime:
             return self._default_config()

     def _default_config(self) -> Dict[str, Any]:
-        """Default boundary configuration (strict)"""
+        """Default boundary configuration (strict)."""
         return {
             "version": "1.0",
             "blocked_external_apis": ["api.figma.com"],
@@ -83,12 +91,12 @@ class DSSRuntime:
             "enforcement": {
                 "mode": "strict",
                 "log_violations": True,
-                "violation_log": ".dss/logs/boundary-violations.jsonl"
-            }
+                "violation_log": ".dss/logs/boundary-violations.jsonl",
+            },
         }

     def _log_violation(self, operation: str, details: Dict[str, Any]):
-        """Log boundary violation to audit trail"""
+        """Log boundary violation to audit trail."""
         if not self.log_violations:
             return

@@ -99,7 +107,7 @@ class DSSRuntime:
             "type": "boundary_violation",
             "operation": operation,
             "enforcement_mode": self.enforcement_mode,
-            "details": details
+            "details": details,
         }

         with open(self.violation_log_path, "a") as f:
@@ -108,7 +116,7 @@ class DSSRuntime:
         logger.warning(f"Boundary violation: {operation} - {details}")

     def _log_access(self, operation: str, allowed: bool, details: Dict[str, Any]):
-        """Log successful access for audit trail"""
+        """Log successful access for audit trail."""
         access_log_path = Path(".dss/logs/runtime-access.jsonl")
         access_log_path.parent.mkdir(parents=True, exist_ok=True)

@@ -117,7 +125,7 @@ class DSSRuntime:
             "type": "runtime_access",
             "operation": operation,
             "allowed": allowed,
-            "details": details
+            "details": details,
         }

         with open(access_log_path, "a") as f:
@@ -139,11 +147,7 @@ class DSSRuntime:
         # Check if operation requires going through DSS tools
         for category, tools in required_tools.items():
             if operation in category:
-                details = {
-                    "operation": operation,
-                    "context": context,
-                    "required_tools": tools
-                }
+                details = {"operation": operation, "context": context, "required_tools": tools}

                 self._log_violation(operation, details)

@@ -173,8 +177,8 @@ class DSSRuntime:

             self._figma_client = SafeFigmaClient(
                 token=token,
-                allow_write=False,  # Read-only by default
-                runtime=self
+                allow_write=False,
+                runtime=self,  # Read-only by default
             )

             logger.info("Figma client initialized (read-only mode)")
@@ -195,6 +199,7 @@ class DSSRuntime:
         if strategy == "local":
             try:
                 from strategies.local.browser import LocalBrowserStrategy
+
                 self._browser_strategy = LocalBrowserStrategy(runtime=self)
                 logger.info("Local browser strategy initialized")
             except ImportError:
@@ -204,6 +209,7 @@ class DSSRuntime:
         elif strategy == "remote":
             try:
                 from strategies.remote.browser import RemoteBrowserStrategy
+
                 self._browser_strategy = RemoteBrowserStrategy(runtime=self)
                 logger.info("Remote browser strategy initialized")
             except ImportError:
@@ -224,8 +230,7 @@ class DSSRuntime:
             from core.safe_http_client import SafeHTTPClient

             self._http_client = SafeHTTPClient(
-                blocked_domains=self.config.get("blocked_external_apis", []),
-                runtime=self
+                blocked_domains=self.config.get("blocked_external_apis", []), runtime=self
             )

             logger.info("HTTP client initialized with URL validation")
@@ -245,10 +250,7 @@ class DSSRuntime:
         blocked = self.config.get("blocked_imports", [])

         if module_name in blocked:
-            details = {
-                "module": module_name,
-                "blocked_imports": blocked
-            }
+            details = {"module": module_name, "blocked_imports": blocked}

             self._log_violation(f"direct_import:{module_name}", details)

@@ -292,14 +294,16 @@ class DSSRuntime:
                 "browser": self._browser_strategy is not None,
                 "http": self._http_client is not None,
             },
-            "config_version": self.config.get("version", "unknown")
+            "config_version": self.config.get("version", "unknown"),
         }


 # Global runtime instance (singleton pattern)
 _runtime_instance: Optional[DSSRuntime] = None


 def get_runtime() -> DSSRuntime:
-    """Get the global DSSRuntime instance (singleton)"""
+    """Get the global DSSRuntime instance (singleton)."""
     global _runtime_instance

     if _runtime_instance is None:

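A sketch of the access pattern the module's own usage docstring describes; the import path is illustrative, and get_browser is assumed from that docstring:

from dss.runtime import BoundaryViolationError, get_runtime  # illustrative import path

runtime = get_runtime()  # module-level singleton; loads the boundary YAML or falls back to defaults

try:
    browser = runtime.get_browser()  # sandboxed, per the usage docstring above
except BoundaryViolationError as exc:
    print(f"Blocked by DSS boundary: {exc}")  # violations also land in the JSONL audit log
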
@@ -1,5 +1,5 @@
 """
-DSS Structured Logger - JSON-based logging for AI-consumable audit trails
+DSS Structured Logger - JSON-based logging for AI-consumable audit trails.

 Provides structured, machine-readable logging in JSONL format (one JSON object per line).
 All DSS operations are logged with consistent fields for analysis, debugging, and compliance.
@@ -27,11 +27,11 @@ import json
 import logging
 import os
 import sys
+import threading
+from contextlib import contextmanager
 from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Dict, Optional
-from contextlib import contextmanager
-import threading

 # Thread-local storage for context
 _context = threading.local()
@@ -51,7 +51,7 @@ class DSSJSONFormatter(logging.Formatter):
     """

     def format(self, record: logging.LogRecord) -> str:
-        """Format log record as single-line JSON"""
+        """Format log record as single-line JSON."""

         # Build base log entry
         log_entry = {
@@ -100,8 +100,10 @@ class DSSLogger(logging.Logger):
     as keyword arguments for structured logging.
     """

-    def _log_with_extra(self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Internal method to log with extra structured data"""
+    def _log_with_extra(
+        self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs
+    ):
+        """Internal method to log with extra structured data."""
         if extra:
             # Store extra data in a custom attribute
             extra_record = {"extra_data": extra}
@@ -110,23 +112,23 @@ class DSSLogger(logging.Logger):
             super()._log(level, msg, (), **kwargs)

     def debug(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log DEBUG message with optional extra data"""
+        """Log DEBUG message with optional extra data."""
         self._log_with_extra(logging.DEBUG, msg, extra, **kwargs)

     def info(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log INFO message with optional extra data"""
+        """Log INFO message with optional extra data."""
         self._log_with_extra(logging.INFO, msg, extra, **kwargs)

     def warning(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log WARNING message with optional extra data"""
+        """Log WARNING message with optional extra data."""
         self._log_with_extra(logging.WARNING, msg, extra, **kwargs)

     def error(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log ERROR message with optional extra data"""
+        """Log ERROR message with optional extra data."""
         self._log_with_extra(logging.ERROR, msg, extra, **kwargs)

     def critical(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log CRITICAL message with optional extra data"""
+        """Log CRITICAL message with optional extra data."""
         self._log_with_extra(logging.CRITICAL, msg, extra, **kwargs)


@@ -182,7 +184,9 @@ def get_logger(name: str, log_file: Optional[str] = None) -> DSSLogger:


 @contextmanager
-def LogContext(session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None):
+def LogContext(
+    session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None
+):
     """
     Context manager for adding structured context to log entries.

@@ -259,12 +263,15 @@ class PerformanceLogger:
         self.end_time = None

     def start(self):
-        """Mark operation start time"""
+        """Mark operation start time."""
         self.start_time = datetime.now(timezone.utc)
-        self.logger.debug(f"Started: {self.operation}", extra={
-            "operation": self.operation,
-            "start_time": self.start_time.isoformat(),
-        })
+        self.logger.debug(
+            f"Started: {self.operation}",
+            extra={
+                "operation": self.operation,
+                "start_time": self.start_time.isoformat(),
+            },
+        )

     def end(self, extra: Optional[Dict[str, Any]] = None):
         """
@@ -276,7 +283,9 @@ class PerformanceLogger:
         self.end_time = datetime.now(timezone.utc)

         if self.start_time is None:
-            self.logger.warning(f"Performance logger end() called without start() for: {self.operation}")
+            self.logger.warning(
+                f"Performance logger end() called without start() for: {self.operation}"
+            )
             return

         duration_ms = (self.end_time - self.start_time).total_seconds() * 1000
@@ -294,7 +303,9 @@ class PerformanceLogger:
         self.logger.info(f"Completed: {self.operation}", extra=perf_data)


-def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5):
+def configure_log_rotation(
+    log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5
+):
     """
     Configure log rotation for DSS log files.

@@ -325,19 +336,19 @@ def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 *

     # Add rotating file handler
     rotating_handler = RotatingFileHandler(
-        str(log_file),
-        maxBytes=max_bytes,
-        backupCount=backup_count,
-        encoding="utf-8"
+        str(log_file), maxBytes=max_bytes, backupCount=backup_count, encoding="utf-8"
     )
     rotating_handler.setFormatter(DSSJSONFormatter())
     logger.addHandler(rotating_handler)

-    logger.info("Log rotation configured", extra={
-        "max_bytes": max_bytes,
-        "backup_count": backup_count,
-        "log_file": str(log_file),
-    })
+    logger.info(
+        "Log rotation configured",
+        extra={
+            "max_bytes": max_bytes,
+            "backup_count": backup_count,
+            "log_file": str(log_file),
+        },
+    )


 # Example usage (can be removed in production)
@@ -356,6 +367,7 @@ if __name__ == "__main__":
     perf.start()
     # Simulate work
     import time
+
     time.sleep(0.1)
     perf.end(extra={"tokens_found": 100})

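A sketch of emitting structured JSONL records with the API above; the import path and field values are illustrative:

from dss.structured_logger import LogContext, get_logger  # illustrative import path

logger = get_logger("dss.demo")  # DSSLogger emitting one JSON object per line

with LogContext(session_id="abc123", tool="token_extractor", operation="compile"):
    # session/tool/operation ride along on every record inside this block
    logger.info("tokens extracted", extra={"count": 42, "source": "skin"})
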
@@ -1 +1 @@
-1765445463969
+1765446683593

@@ -1,27 +1,27 @@
 {
   "description": "DSS Hooks Configuration - Customize hook behavior",
   "version": "1.0.0",

   "security_check": {
     "enabled": true,
     "block_on_critical": false,
     "warn_only": true,
     "ignored_patterns": []
   },

   "token_validator": {
     "enabled": true,
     "strict_mode": false,
     "warn_only": true,
     "categories": ["color", "spacing", "typography", "border", "effects", "layout"]
   },

   "component_checker": {
     "enabled": true,
     "categories": ["accessibility", "react", "typescript", "structure"],
     "min_severity": "low"
   },

   "complexity_monitor": {
     "enabled": true,
     "max_function_lines": 50,
@@ -30,7 +30,7 @@
     "max_nesting_depth": 4,
     "warn_only": true
   },

   "storybook_reminder": {
     "enabled": true,
     "component_patterns": ["**/components/**/*.tsx", "**/ui/**/*.tsx"],
@@ -38,7 +38,7 @@
     "remind_on_new": true,
     "remind_on_props_change": true
   },

   "session_summary": {
     "enabled": true,
     "output_file": ".dss-session-summary.md",
@@ -46,7 +46,7 @@
     "include_file_list": true,
     "max_diff_lines": 100
   },

   "git_backup": {
     "enabled": true,
     "require_git_repo": true,

@@ -55,7 +55,7 @@ function countProps(content) {
 function countNestingDepth(content) {
   let maxDepth = 0;
   let currentDepth = 0;

   for (const char of content) {
     if (char === '{' || char === '(') {
       currentDepth++;
@@ -64,7 +64,7 @@ function countNestingDepth(content) {
       currentDepth = Math.max(0, currentDepth - 1);
     }
   }

   return maxDepth;
 }
@@ -74,7 +74,7 @@ function countFunctions(content) {
     /const\s+\w+\s*=\s*(?:async\s*)?\([^)]*\)\s*=>/g,
     /const\s+\w+\s*=\s*(?:async\s*)?function/g
   ];

   let count = 0;
   for (const pattern of patterns) {
     const matches = content.match(pattern);
@@ -87,17 +87,17 @@ function analyzeComplexity(content, filePath, config) {
   const issues = [];
   const monitorConfig = config.complexity_monitor || {};
   const ext = path.extname(filePath).toLowerCase();

   // Only analyze JS/TS files
   if (!['.js', '.jsx', '.ts', '.tsx'].includes(ext)) {
     return issues;
   }

   const lines = countLines(content);
   const props = countProps(content);
   const nesting = countNestingDepth(content);
   const functions = countFunctions(content);

   // Check component size (for tsx/jsx files)
   if (['.tsx', '.jsx'].includes(ext)) {
     if (lines > monitorConfig.max_component_lines) {
@@ -108,7 +108,7 @@ function analyzeComplexity(content, filePath, config) {
         suggestion: 'Consider breaking into smaller components'
       });
     }

     if (props > monitorConfig.max_props) {
       issues.push({
         type: 'prop_count',
@@ -118,7 +118,7 @@ function analyzeComplexity(content, filePath, config) {
       });
     }
   }

   // Check nesting depth
   if (nesting > monitorConfig.max_nesting_depth) {
     issues.push({
@@ -128,7 +128,7 @@ function analyzeComplexity(content, filePath, config) {
       suggestion: 'Extract nested logic into separate functions'
     });
   }

   // Check function count (indicator of file doing too much)
   if (functions > 10) {
     issues.push({
@@ -138,38 +138,38 @@ function analyzeComplexity(content, filePath, config) {
       suggestion: 'Consider splitting into multiple modules'
     });
   }

   return issues;
 }

 function formatOutput(issues, filePath) {
   if (issues.length === 0) return '';

   const severityIcons = {
     high: '[HIGH]',
     medium: '[MED]',
     low: '[LOW]'
   };

   const lines = [`\n=== DSS Complexity Monitor: ${filePath} ===\n`];

   for (const issue of issues) {
     const icon = severityIcons[issue.severity] || '[?]';
     lines.push(`${icon} ${issue.message}`);
     lines.push(`  Suggestion: ${issue.suggestion}\n`);
   }

   lines.push('='.repeat(50));
   return lines.join('\n');
 }

 async function main() {
   const config = loadConfig();

   if (!config.complexity_monitor?.enabled) {
     process.exit(0);
   }

   // Read input from stdin
   let inputData;
   try {
@@ -181,34 +181,34 @@ async function main() {
   } catch (e) {
     process.exit(0);
   }

   const toolName = inputData.tool_name || '';
   const toolInput = inputData.tool_input || {};

   if (!['Edit', 'Write'].includes(toolName)) {
     process.exit(0);
   }

   const filePath = toolInput.file_path || '';
   let content = '';

   if (toolName === 'Write') {
     content = toolInput.content || '';
   } else if (toolName === 'Edit') {
     content = toolInput.new_string || '';
   }

   if (!content || !filePath) {
     process.exit(0);
   }

   const issues = analyzeComplexity(content, filePath, config);

   if (issues.length > 0) {
     const output = formatOutput(issues, filePath);
     console.error(output);
   }

   process.exit(0);
 }

@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 """
-DSS Component Checker Hook
+DSS Component Checker Hook.
+
 Validates React components for best practices and accessibility.
 Written from scratch for DSS.
 """
@@ -19,7 +20,7 @@ COMPONENT_PATTERNS = [
         "category": "accessibility",
         "severity": "high",
         "message": "Missing alt attribute on <img>. Add alt text for accessibility.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "a11y-button-type",
@@ -27,7 +28,7 @@ COMPONENT_PATTERNS = [
         "category": "accessibility",
         "severity": "medium",
         "message": "Button missing type attribute. Add type='button' or type='submit'.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "a11y-anchor-href",
@@ -35,7 +36,7 @@ COMPONENT_PATTERNS = [
         "category": "accessibility",
         "severity": "high",
         "message": "Anchor tag missing href. Use button for actions without navigation.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "a11y-click-handler",
@@ -43,7 +44,7 @@ COMPONENT_PATTERNS = [
         "category": "accessibility",
         "severity": "medium",
         "message": "Click handler on non-interactive element. Use <button> or add role/tabIndex.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "a11y-form-label",
@@ -51,7 +52,7 @@ COMPONENT_PATTERNS = [
         "category": "accessibility",
         "severity": "medium",
         "message": "Input may be missing label association. Add id with <label> or aria-label.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     # React best practices
     {
@@ -60,7 +61,7 @@ COMPONENT_PATTERNS = [
         "category": "react",
         "severity": "medium",
         "message": "Using array index as key. Use unique, stable IDs when possible.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "react-bind-render",
@@ -68,7 +69,7 @@ COMPONENT_PATTERNS = [
         "category": "react",
         "severity": "low",
         "message": "Binding in render creates new function each time. Use arrow function or bind in constructor.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "react-inline-style-object",
@@ -76,7 +77,7 @@ COMPONENT_PATTERNS = [
         "category": "react",
         "severity": "low",
         "message": "Large inline style object. Consider extracting to a constant or CSS module.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "react-console-log",
@@ -84,7 +85,7 @@ COMPONENT_PATTERNS = [
         "category": "react",
         "severity": "low",
         "message": "Console statement detected. Remove before production.",
-        "file_types": [".js", ".jsx", ".ts", ".tsx"]
+        "file_types": [".js", ".jsx", ".ts", ".tsx"],
     },
     # TypeScript checks
     {
@@ -93,7 +94,7 @@ COMPONENT_PATTERNS = [
         "category": "typescript",
         "severity": "medium",
         "message": "Using 'any' type loses type safety. Consider using a specific type or 'unknown'.",
-        "file_types": [".ts", ".tsx"]
+        "file_types": [".ts", ".tsx"],
     },
     {
         "id": "ts-type-assertion",
@@ -101,7 +102,7 @@ COMPONENT_PATTERNS = [
         "category": "typescript",
         "severity": "medium",
         "message": "Type assertion to 'any'. This bypasses type checking.",
-        "file_types": [".ts", ".tsx"]
+        "file_types": [".ts", ".tsx"],
     },
     # Component structure
     {
@@ -110,7 +111,7 @@ COMPONENT_PATTERNS = [
         "category": "structure",
         "severity": "low",
         "message": "Component may not be exported. Ensure it's exported if meant to be reused.",
-        "file_types": [".jsx", ".tsx"]
+        "file_types": [".jsx", ".tsx"],
     },
     {
         "id": "component-missing-displayname",
@@ -118,10 +119,11 @@ COMPONENT_PATTERNS = [
         "category": "structure",
         "severity": "low",
         "message": "HOC component may need displayName for debugging.",
-        "file_types": [".jsx", ".tsx"]
-    }
+        "file_types": [".jsx", ".tsx"],
+    },
 ]


 def get_config():
     """Load hook configuration."""
     config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -129,10 +131,10 @@ def get_config():
         "component_checker": {
             "enabled": True,
             "categories": ["accessibility", "react", "typescript"],
-            "min_severity": "low"
+            "min_severity": "low",
         }
     }

     if config_path.exists():
         try:
             with open(config_path) as f:
@@ -142,64 +144,65 @@ def get_config():
             pass
     return default_config


 def severity_level(severity: str) -> int:
     """Convert severity to numeric level."""
     levels = {"low": 1, "medium": 2, "high": 3}
     return levels.get(severity, 0)


 def check_content(content: str, file_path: str, config: dict) -> list:
     """Check content for component issues."""
     issues = []
     file_ext = Path(file_path).suffix.lower()

     checker_config = config.get("component_checker", {})
     enabled_categories = checker_config.get("categories", [])
     min_severity = checker_config.get("min_severity", "low")
     min_level = severity_level(min_severity)

     for pattern_def in COMPONENT_PATTERNS:
         # Skip if file type doesn't match
         if file_ext not in pattern_def.get("file_types", []):
             continue

         # Skip if category not enabled
         if enabled_categories and pattern_def["category"] not in enabled_categories:
             continue

         # Skip if below minimum severity
         if severity_level(pattern_def["severity"]) < min_level:
             continue

         if re.search(pattern_def["regex"], content, re.MULTILINE):
-            issues.append({
-                "id": pattern_def["id"],
-                "category": pattern_def["category"],
-                "severity": pattern_def["severity"],
-                "message": pattern_def["message"]
-            })
+            issues.append(
+                {
+                    "id": pattern_def["id"],
+                    "category": pattern_def["category"],
+                    "severity": pattern_def["severity"],
+                    "message": pattern_def["message"],
+                }
+            )

     return issues


 def format_output(issues: list, file_path: str) -> str:
     """Format issues for display."""
     if not issues:
         return ""

-    severity_icons = {
-        "high": "[HIGH]",
-        "medium": "[MED]",
-        "low": "[LOW]"
-    }
+    severity_icons = {"high": "[HIGH]", "medium": "[MED]", "low": "[LOW]"}

     category_labels = {
         "accessibility": "A11Y",
         "react": "REACT",
         "typescript": "TS",
-        "structure": "STRUCT"
+        "structure": "STRUCT",
     }

     lines = [f"\n=== DSS Component Checker: {file_path} ===\n"]

     # Group by category
     by_category = {}
     for issue in issues:
@@ -207,7 +210,7 @@ def format_output(issues: list, file_path: str) -> str:
         if cat not in by_category:
             by_category[cat] = []
         by_category[cat].append(issue)

     for category, cat_issues in by_category.items():
         label = category_labels.get(category, category.upper())
         lines.append(f"[{label}]")
@@ -215,36 +218,37 @@ def format_output(issues: list, file_path: str) -> str:
             sev = severity_icons.get(issue["severity"], "[?]")
             lines.append(f"  {sev} {issue['message']}")
         lines.append("")

     lines.append("=" * 50)
     return "\n".join(lines)


 def main():
     """Main hook entry point."""
     config = get_config()

     if not config.get("component_checker", {}).get("enabled", True):
         sys.exit(0)

     # Read hook input from stdin
     try:
         input_data = json.loads(sys.stdin.read())
     except json.JSONDecodeError:
         sys.exit(0)

     tool_name = input_data.get("tool_name", "")
     tool_input = input_data.get("tool_input", {})

     if tool_name not in ["Edit", "Write"]:
         sys.exit(0)

     file_path = tool_input.get("file_path", "")
     file_ext = Path(file_path).suffix.lower() if file_path else ""

     # Only check React/TypeScript files
     if file_ext not in [".jsx", ".tsx", ".js", ".ts"]:
         sys.exit(0)

     # Get content to check
     if tool_name == "Write":
         content = tool_input.get("content", "")
@@ -252,17 +256,18 @@ def main():
         content = tool_input.get("new_string", "")
     else:
         content = ""

     if not content:
         sys.exit(0)

     issues = check_content(content, file_path, config)

     if issues:
         output = format_output(issues, file_path)
         print(output, file=sys.stderr)

     sys.exit(0)


 if __name__ == "__main__":
     main()

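A sketch of driving the hook the way the Edit/Write tool hooks do, with a JSON payload on stdin; the script name and file path are hypothetical:

import json
import subprocess

payload = {
    "tool_name": "Write",
    "tool_input": {
        "file_path": "src/components/Button.tsx",  # hypothetical path
        "content": '<img src="logo.png" />',       # likely trips the a11y-img-alt rule
    },
}

result = subprocess.run(
    ["python3", "component_checker.py"],  # hypothetical script name
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)
print(result.stderr)  # issues go to stderr; the hook always exits 0
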
@@ -44,14 +44,14 @@ function checkLock() {
     if (!fs.existsSync(STATE_DIR)) {
       fs.mkdirSync(STATE_DIR, { recursive: true });
     }

     if (fs.existsSync(LOCK_FILE)) {
       const lastRun = parseInt(fs.readFileSync(LOCK_FILE, 'utf8'));
       if (!isNaN(lastRun) && (Date.now() - lastRun < LOCK_TIMEOUT_MS)) {
         return false; // Already ran recently
       }
     }

     fs.writeFileSync(LOCK_FILE, Date.now().toString(), 'utf8');
     return true;
   } catch (e) {
@@ -81,16 +81,16 @@ function getChangeSummary() {
   try {
     const status = execSync('git status --short', { encoding: 'utf8' });
     const lines = status.trim().split('\n').filter(Boolean);

     let added = 0, modified = 0, deleted = 0;

     for (const line of lines) {
       const status = line.trim().charAt(0);
       if (status === 'A' || status === '?') added++;
       else if (status === 'M') modified++;
       else if (status === 'D') deleted++;
     }

     return { added, modified, deleted, total: lines.length };
   } catch (e) {
     return { added: 0, modified: 0, deleted: 0, total: 0 };
@@ -99,30 +99,30 @@
 function createBackup(config) {
   const backupConfig = config.git_backup || {};

   try {
     // Stage all changes
     execSync('git add -A', { stdio: 'pipe' });

     // Build commit message
     const parts = [backupConfig.commit_prefix || 'auto-backup'];

     if (backupConfig.include_timestamp) {
       const timestamp = new Date().toISOString().replace('T', ' ').replace(/\..+/, '');
       parts.push(timestamp);
     }

     const summary = getChangeSummary();
     const summaryText = `(${summary.total} files: +${summary.added} ~${summary.modified} -${summary.deleted})`;

     const commitMessage = `${parts.join(': ')} ${summaryText}\n\nGenerated by DSS Git Backup Hook`;

     // Create commit
     execSync(`git commit -m "${commitMessage}"`, { stdio: 'pipe' });

     // Get commit hash
     const commitHash = execSync('git rev-parse --short HEAD', { encoding: 'utf8' }).trim();

     return { success: true, hash: commitHash, files: summary.total };
   } catch (e) {
     return { success: false, error: e.message };
@@ -143,39 +143,39 @@ function main() {
   if (!checkLock()) {
     process.exit(0);
   }

   // Prevent hook recursion
   if (process.env.STOP_HOOK_ACTIVE === 'true') {
     process.exit(0);
   }

   const config = loadConfig();

   if (!config.git_backup?.enabled) {
     process.exit(0);
   }

   // Check for git repo
   if (config.git_backup.require_git_repo && !isGitRepo()) {
     log(config, 'DSS Git Backup: Not a git repository, skipping');
     process.exit(0);
   }

   // Check for changes
   if (config.git_backup.commit_only_if_changes && !hasChanges()) {
     log(config, 'DSS Git Backup: No changes to commit');
     process.exit(0);
   }

   // Create backup
   const result = createBackup(config);

   if (result.success) {
     log(config, `DSS Git Backup: Committed ${result.files} files (${result.hash})`);
   } else {
     log(config, `DSS Git Backup: Failed - ${result.error}`);
   }

   process.exit(0);
 }

@@ -1,14 +1,13 @@
#!/usr/bin/env python3
"""
DSS Security Check Hook
DSS Security Check Hook.

Validates file edits for common security vulnerabilities.
Written from scratch for DSS - no external dependencies.
"""

import json
import os
import sys
from datetime import datetime
from pathlib import Path

# Security patterns to detect
@@ -18,73 +17,74 @@ SECURITY_PATTERNS = [
        "patterns": [".innerHTML =", ".innerHTML=", "innerHTML:"],
        "severity": "high",
        "message": "Potential XSS: innerHTML assignment detected. Use textContent for plain text or sanitize HTML with DOMPurify.",
        "file_types": [".js", ".jsx", ".ts", ".tsx"]
        "file_types": [".js", ".jsx", ".ts", ".tsx"],
    },
    {
        "id": "xss-dangerously",
        "patterns": ["dangerouslySetInnerHTML"],
        "severity": "high",
        "message": "Potential XSS: dangerouslySetInnerHTML detected. Ensure content is sanitized before rendering.",
        "file_types": [".js", ".jsx", ".ts", ".tsx"]
        "file_types": [".js", ".jsx", ".ts", ".tsx"],
    },
    {
        "id": "eval-usage",
        "patterns": ["eval(", "new Function("],
        "severity": "critical",
        "message": "Code injection risk: eval() or new Function() detected. These can execute arbitrary code.",
        "file_types": [".js", ".jsx", ".ts", ".tsx"]
        "file_types": [".js", ".jsx", ".ts", ".tsx"],
    },
    {
        "id": "document-write",
        "patterns": ["document.write("],
        "severity": "medium",
        "message": "Deprecated: document.write() detected. Use DOM manipulation methods instead.",
        "file_types": [".js", ".jsx", ".ts", ".tsx", ".html"]
        "file_types": [".js", ".jsx", ".ts", ".tsx", ".html"],
    },
    {
        "id": "sql-injection",
        "patterns": ["execute(f\"", "execute(f'", "cursor.execute(\"", ".query(`${"],
        "patterns": ['execute(f"', "execute(f'", 'cursor.execute("', ".query(`${"],
        "severity": "critical",
        "message": "Potential SQL injection: String interpolation in SQL query. Use parameterized queries.",
        "file_types": [".py", ".js", ".ts"]
        "file_types": [".py", ".js", ".ts"],
    },
    {
        "id": "hardcoded-secret",
        "patterns": ["password=", "api_key=", "secret=", "token=", "apiKey:"],
        "severity": "high",
        "message": "Potential hardcoded secret detected. Use environment variables instead.",
        "file_types": [".py", ".js", ".ts", ".jsx", ".tsx"]
        "file_types": [".py", ".js", ".ts", ".jsx", ".tsx"],
    },
    {
        "id": "python-pickle",
        "patterns": ["pickle.load", "pickle.loads"],
        "severity": "high",
        "message": "Insecure deserialization: pickle can execute arbitrary code. Use JSON for untrusted data.",
        "file_types": [".py"]
        "file_types": [".py"],
    },
    {
        "id": "python-shell",
        "patterns": ["os.system(", "subprocess.call(shell=True", "subprocess.run(shell=True"],
        "severity": "high",
        "message": "Shell injection risk: Use subprocess with shell=False and pass args as list.",
        "file_types": [".py"]
        "file_types": [".py"],
    },
    {
        "id": "react-ref-current",
        "patterns": ["ref.current.innerHTML"],
        "severity": "high",
        "message": "XSS via React ref: Avoid setting innerHTML on refs. Use state/props instead.",
        "file_types": [".jsx", ".tsx"]
        "file_types": [".jsx", ".tsx"],
    },
    {
        "id": "unsafe-regex",
        "patterns": ["new RegExp(", "RegExp("],
        "severity": "medium",
        "message": "Potential ReDoS: Dynamic regex from user input can cause denial of service.",
        "file_types": [".js", ".ts", ".jsx", ".tsx"]
    }
        "file_types": [".js", ".ts", ".jsx", ".tsx"],
    },
]


def get_config():
    """Load hook configuration."""
    config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -93,10 +93,10 @@ def get_config():
            "enabled": True,
            "block_on_critical": False,
            "warn_only": True,
            "ignored_patterns": []
            "ignored_patterns": [],
        }
    }

    if config_path.exists():
        try:
            with open(config_path) as f:
@@ -106,72 +106,77 @@ def get_config():
            pass
    return default_config


def check_content(content: str, file_path: str) -> list:
    """Check content for security patterns."""
    issues = []
    file_ext = Path(file_path).suffix.lower()

    for pattern_def in SECURITY_PATTERNS:
        # Skip if file type doesn't match
        if file_ext not in pattern_def.get("file_types", []):
            continue

        for pattern in pattern_def["patterns"]:
            if pattern.lower() in content.lower():
                issues.append({
                    "id": pattern_def["id"],
                    "severity": pattern_def["severity"],
                    "message": pattern_def["message"],
                    "pattern": pattern
                })
                issues.append(
                    {
                        "id": pattern_def["id"],
                        "severity": pattern_def["severity"],
                        "message": pattern_def["message"],
                        "pattern": pattern,
                    }
                )
                break  # One match per pattern definition is enough

    return issues


def format_output(issues: list, file_path: str) -> str:
    """Format issues for display."""
    if not issues:
        return ""

    severity_icons = {
        "critical": "[CRITICAL]",
        "high": "[HIGH]",
        "medium": "[MEDIUM]",
        "low": "[LOW]"
        "low": "[LOW]",
    }

    lines = [f"\n=== DSS Security Check: {file_path} ===\n"]

    for issue in issues:
        icon = severity_icons.get(issue["severity"], "[?]")
        lines.append(f"{icon} {issue['message']}")
        lines.append(f"   Pattern: {issue['pattern']}\n")

    lines.append("=" * 50)
    return "\n".join(lines)


def main():
    """Main hook entry point."""
    config = get_config()

    if not config.get("security_check", {}).get("enabled", True):
        sys.exit(0)

    # Read hook input from stdin
    try:
        input_data = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        sys.exit(0)  # Allow tool to proceed if we can't parse

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    # Only check Edit and Write tools
    if tool_name not in ["Edit", "Write"]:
        sys.exit(0)

    file_path = tool_input.get("file_path", "")

    # Get content to check
    if tool_name == "Write":
        content = tool_input.get("content", "")
@@ -179,23 +184,24 @@ def main():
        content = tool_input.get("new_string", "")
    else:
        content = ""

    if not content or not file_path:
        sys.exit(0)

    # Check for security issues
    issues = check_content(content, file_path)

    if issues:
        output = format_output(issues, file_path)
        print(output, file=sys.stderr)

        # Check if we should block on critical issues
        has_critical = any(i["severity"] == "critical" for i in issues)
        if has_critical and config.get("security_check", {}).get("block_on_critical", False):
            sys.exit(2)  # Block the tool

    sys.exit(0)  # Allow tool to proceed


if __name__ == "__main__":
    main()

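The hook reads a JSON payload on stdin, reports findings on stderr, and exits 2 only when block_on_critical is set. A quick way to exercise it, assuming the script is saved as security-check.py (the real filename is not visible in this diff):

import json
import subprocess

# Hypothetical payload in the shape the hook expects: tool_name plus the
# tool_input fields it reads (file_path, and content for Write / new_string for Edit).
payload = {
    "tool_name": "Write",
    "tool_input": {
        "file_path": "app/page.tsx",
        "content": "el.innerHTML = userInput;",
    },
}

proc = subprocess.run(
    ["python3", "security-check.py"],  # assumed script name
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)
print(proc.stderr)      # the formatted [HIGH] XSS warning
print(proc.returncode)  # 0 unless block_on_critical triggers exit(2)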
@@ -40,17 +40,17 @@ function getGitInfo() {
    diff: '',
    modifiedFiles: []
  };

  try {
    // Check if in git repo
    execSync('git rev-parse --is-inside-work-tree', { stdio: 'pipe' });

    // Get branch
    info.branch = execSync('git branch --show-current', { encoding: 'utf8' }).trim();

    // Get status
    info.status = execSync('git status --short', { encoding: 'utf8' }).trim();

    // Get modified files
    const statusLines = info.status.split('\n').filter(Boolean);
    info.modifiedFiles = statusLines.map(line => {
@@ -60,7 +60,7 @@ function getGitInfo() {
        file: parts.slice(1).join(' ')
      };
    });

    // Get diff summary
    try {
      info.diff = execSync('git diff --stat', { encoding: 'utf8' }).trim();
@@ -70,7 +70,7 @@ function getGitInfo() {
  } catch (e) {
    // Not a git repo or git not available
  }

  return info;
}

@@ -82,12 +82,12 @@ function getSessionStats() {
    linesAdded: 0,
    linesRemoved: 0
  };

  try {
    // Get diff stats from git
    const diffStat = execSync('git diff --numstat', { encoding: 'utf8' });
    const lines = diffStat.trim().split('\n').filter(Boolean);

    for (const line of lines) {
      const [added, removed] = line.split('\t');
      stats.linesAdded += parseInt(added) || 0;
@@ -97,7 +97,7 @@ function getSessionStats() {
  } catch (e) {
    // Git not available
  }

  return stats;
}

@@ -105,29 +105,29 @@ function generateReport(config) {
  const summaryConfig = config.session_summary || {};
  const gitInfo = getGitInfo();
  const stats = getSessionStats();

  const timestamp = new Date().toLocaleString();
  const lines = [];

  lines.push('# DSS Session Summary');
  lines.push(`\n**Generated:** ${timestamp}`);

  if (gitInfo.branch) {
    lines.push(`**Branch:** ${gitInfo.branch}`);
  }

  lines.push('\n## Changes Overview');
  lines.push('');
  lines.push(`- Files modified: ${stats.filesModified}`);
  lines.push(`- Lines added: +${stats.linesAdded}`);
  lines.push(`- Lines removed: -${stats.linesRemoved}`);

  if (summaryConfig.include_file_list && gitInfo.modifiedFiles.length > 0) {
    lines.push('\n## Modified Files');
    lines.push('');
    lines.push('| Status | File |');
    lines.push('|--------|------|');

    const statusLabels = {
      'M': 'Modified',
      'A': 'Added',
@@ -135,17 +135,17 @@ function generateReport(config) {
      'R': 'Renamed',
      '??': 'Untracked'
    };

    for (const file of gitInfo.modifiedFiles.slice(0, 20)) {
      const label = statusLabels[file.status] || file.status;
      lines.push(`| ${label} | ${file.file} |`);
    }

    if (gitInfo.modifiedFiles.length > 20) {
      lines.push(`| ... | +${gitInfo.modifiedFiles.length - 20} more files |`);
    }
  }

  if (summaryConfig.include_git_diff && gitInfo.diff) {
    lines.push('\n## Diff Summary');
    lines.push('');
@@ -158,27 +158,27 @@ function generateReport(config) {
    }
    lines.push('```');
  }

  lines.push('\n---');
  lines.push('*Generated by DSS Session Summary Hook*');

  return lines.join('\n');
}

function main() {
  const config = loadConfig();

  if (!config.session_summary?.enabled) {
    process.exit(0);
  }

  try {
    const report = generateReport(config);
    const outputFile = config.session_summary.output_file || '.dss-session-summary.md';
    const outputPath = path.join(process.cwd(), outputFile);

    fs.writeFileSync(outputPath, report, 'utf8');

    // Output confirmation
    console.log(JSON.stringify({
      systemMessage: `Session summary saved to ${outputFile}`,
@@ -187,7 +187,7 @@ function main() {
  } catch (e) {
    // Fail silently
  }

  process.exit(0);
}

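getSessionStats() depends on the `git diff --numstat` format, which emits one `added<TAB>removed<TAB>path` row per file (with `-` in place of counts for binary files). A minimal Python sketch of the same parsing, for reference:

import subprocess

def session_stats() -> dict:
    """Mirror of getSessionStats(): tally added/removed lines from git."""
    stats = {"filesModified": 0, "linesAdded": 0, "linesRemoved": 0}
    try:
        out = subprocess.run(
            ["git", "diff", "--numstat"], capture_output=True, text=True, check=True
        ).stdout
    except (OSError, subprocess.CalledProcessError):
        return stats  # git not available; same silent fallback as the hook
    for row in filter(None, out.splitlines()):
        added, removed, _path = row.split("\t", 2)
        stats["filesModified"] += 1
        # numstat prints "-" for binary files; treat that as 0, like parseInt() || 0
        stats["linesAdded"] += int(added) if added.isdigit() else 0
        stats["linesRemoved"] += int(removed) if removed.isdigit() else 0
    return stats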
@@ -1,16 +1,17 @@
#!/usr/bin/env python3
"""
DSS Storybook Reminder Hook
DSS Storybook Reminder Hook.

Reminds developers to update Storybook stories when components change.
Written from scratch for DSS.
"""

import json
import os
import re
import sys
from pathlib import Path


def get_config():
    """Load hook configuration."""
    config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -20,10 +21,10 @@ def get_config():
            "component_patterns": ["**/components/**/*.tsx", "**/ui/**/*.tsx"],
            "story_extensions": [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"],
            "remind_on_new": True,
            "remind_on_props_change": True
            "remind_on_props_change": True,
        }
    }

    if config_path.exists():
        try:
            with open(config_path) as f:
@@ -33,38 +34,40 @@ def get_config():
            pass
    return default_config


def is_component_file(file_path: str) -> bool:
    """Check if file is a React component."""
    path = Path(file_path)

    # Must be a tsx/jsx file
    if path.suffix.lower() not in [".tsx", ".jsx"]:
        return False

    # Skip story files, test files, index files
    name = path.stem.lower()
    if any(x in name for x in [".stories", ".story", ".test", ".spec", "index"]):
        return False

    # Check if in component-like directory
    parts = str(path).lower()
    component_dirs = ["components", "ui", "atoms", "molecules", "organisms", "templates"]
    return any(d in parts for d in component_dirs)


def find_story_file(component_path: str) -> tuple:
    """Find corresponding story file for a component."""
    path = Path(component_path)
    base_name = path.stem
    parent = path.parent

    story_extensions = [".stories.tsx", ".stories.jsx", ".stories.ts", ".stories.js"]

    # Check same directory
    for ext in story_extensions:
        story_path = parent / f"{base_name}{ext}"
        if story_path.exists():
            return (True, str(story_path))

    # Check __stories__ subdirectory
    stories_dir = parent / "__stories__"
    if stories_dir.exists():
@@ -72,7 +75,7 @@ def find_story_file(component_path: str) -> tuple:
            story_path = stories_dir / f"{base_name}{ext}"
            if story_path.exists():
                return (True, str(story_path))

    # Check stories subdirectory
    stories_dir = parent / "stories"
    if stories_dir.exists():
@@ -80,9 +83,10 @@ def find_story_file(component_path: str) -> tuple:
            story_path = stories_dir / f"{base_name}{ext}"
            if story_path.exists():
                return (True, str(story_path))

    return (False, None)


def detect_props_change(content: str) -> bool:
    """Detect if content includes prop changes."""
    prop_patterns = [
@@ -90,20 +94,21 @@ def detect_props_change(content: str) -> bool:
        r"type\s+\w+Props\s*=",
        r"Props\s*=\s*\{",
        r"defaultProps\s*=",
        r"propTypes\s*="
        r"propTypes\s*=",
    ]

    for pattern in prop_patterns:
        if re.search(pattern, content):
            return True
    return False

def format_reminder(file_path: str, has_story: bool, story_path: str, props_changed: bool) -> str:
    """Format the reminder message."""
    lines = [f"\n=== DSS Storybook Reminder ===\n"]

    lines = ["\n=== DSS Storybook Reminder ===\n"]

    component_name = Path(file_path).stem

    if not has_story:
        lines.append(f"[NEW] Component '{component_name}' has no Storybook story!")
        lines.append(f"   Consider creating: {component_name}.stories.tsx")
@@ -116,36 +121,37 @@ def format_reminder(file_path: str, has_story: bool, story_path: str, props_chan
        lines.append(f"[UPDATE] Props changed in '{component_name}'")
        lines.append(f"   Story file: {story_path}")
        lines.append("   Consider updating stories to reflect new props.")

    lines.append("")
    lines.append("=" * 40)
    return "\n".join(lines)


def main():
    """Main hook entry point."""
    config = get_config()

    if not config.get("storybook_reminder", {}).get("enabled", True):
        sys.exit(0)

    # Read hook input from stdin
    try:
        input_data = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    if tool_name not in ["Edit", "Write"]:
        sys.exit(0)

    file_path = tool_input.get("file_path", "")

    # Only check component files
    if not is_component_file(file_path):
        sys.exit(0)

    # Get content
    if tool_name == "Write":
        content = tool_input.get("content", "")
@@ -153,27 +159,28 @@ def main():
        content = tool_input.get("new_string", "")
    else:
        content = ""

    # Check for story file
    has_story, story_path = find_story_file(file_path)

    # Check for props changes
    props_changed = detect_props_change(content) if content else False

    reminder_config = config.get("storybook_reminder", {})

    # Determine if we should show reminder
    should_remind = False
    if not has_story and reminder_config.get("remind_on_new", True):
        should_remind = True
    elif has_story and props_changed and reminder_config.get("remind_on_props_change", True):
        should_remind = True

    if should_remind:
        output = format_reminder(file_path, has_story, story_path, props_changed)
        print(output, file=sys.stderr)

    sys.exit(0)


if __name__ == "__main__":
    main()

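The story lookup above probes three locations in order: the component's own directory, a __stories__/ subdirectory, then a stories/ subdirectory. A small self-contained sketch of that resolution order, using a made-up Button component in a temp directory:

import tempfile
from pathlib import Path

# Recreate the layout find_story_file() walks; all paths here are illustrative.
root = Path(tempfile.mkdtemp()) / "src" / "components" / "Button"
(root / "__stories__").mkdir(parents=True)
(root / "Button.tsx").write_text("export const Button = () => null;")
(root / "__stories__" / "Button.stories.tsx").write_text("export default {};")

for candidate in [
    root / "Button.stories.tsx",                   # probed first: same directory
    root / "__stories__" / "Button.stories.tsx",   # probed second: found here
    root / "stories" / "Button.stories.tsx",       # probed last
]:
    print(candidate, candidate.exists())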
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
"""
DSS Token Validator Hook
DSS Token Validator Hook.

Detects hardcoded values that should use design tokens.
Written from scratch for DSS.
"""
@@ -18,7 +19,7 @@ HARDCODED_PATTERNS = [
        "category": "color",
        "message": "Hardcoded hex color detected. Consider using a design token.",
        "suggestion": "Use: var(--color-*) or theme.colors.*",
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"],
    },
    {
        "id": "color-rgb",
@@ -26,7 +27,7 @@ HARDCODED_PATTERNS = [
        "category": "color",
        "message": "Hardcoded RGB color detected. Consider using a design token.",
        "suggestion": "Use: var(--color-*) or theme.colors.*",
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"],
    },
    {
        "id": "color-hsl",
@@ -34,7 +35,7 @@ HARDCODED_PATTERNS = [
        "category": "color",
        "message": "Hardcoded HSL color detected. Consider using a design token.",
        "suggestion": "Use: var(--color-*) or theme.colors.*",
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"]
        "file_types": [".css", ".scss", ".less", ".js", ".jsx", ".ts", ".tsx"],
    },
    {
        "id": "spacing-px",
@@ -42,7 +43,7 @@ HARDCODED_PATTERNS = [
        "category": "spacing",
        "message": "Hardcoded pixel spacing detected. Consider using a spacing token.",
        "suggestion": "Use: var(--spacing-*) or theme.spacing.*",
        "file_types": [".css", ".scss", ".less"]
        "file_types": [".css", ".scss", ".less"],
    },
    {
        "id": "font-size-px",
@@ -50,7 +51,7 @@ HARDCODED_PATTERNS = [
        "category": "typography",
        "message": "Hardcoded font-size detected. Consider using a typography token.",
        "suggestion": "Use: var(--font-size-*) or theme.fontSize.*",
        "file_types": [".css", ".scss", ".less"]
        "file_types": [".css", ".scss", ".less"],
    },
    {
        "id": "font-family-direct",
@@ -58,7 +59,7 @@ HARDCODED_PATTERNS = [
        "category": "typography",
        "message": "Hardcoded font-family detected. Consider using a typography token.",
        "suggestion": "Use: var(--font-family-*) or theme.fontFamily.*",
        "file_types": [".css", ".scss", ".less"]
        "file_types": [".css", ".scss", ".less"],
    },
    {
        "id": "border-radius-px",
@@ -66,7 +67,7 @@ HARDCODED_PATTERNS = [
        "category": "border",
        "message": "Hardcoded border-radius detected. Consider using a radius token.",
        "suggestion": "Use: var(--radius-*) or theme.borderRadius.*",
        "file_types": [".css", ".scss", ".less"]
        "file_types": [".css", ".scss", ".less"],
    },
    {
        "id": "box-shadow-direct",
@@ -74,7 +75,7 @@ HARDCODED_PATTERNS = [
        "category": "effects",
        "message": "Hardcoded box-shadow detected. Consider using a shadow token.",
        "suggestion": "Use: var(--shadow-*) or theme.boxShadow.*",
        "file_types": [".css", ".scss", ".less"]
        "file_types": [".css", ".scss", ".less"],
    },
    {
        "id": "z-index-magic",
@@ -82,7 +83,7 @@ HARDCODED_PATTERNS = [
        "category": "layout",
        "message": "Magic number z-index detected. Consider using a z-index token.",
        "suggestion": "Use: var(--z-index-*) with semantic names (modal, dropdown, tooltip)",
        "file_types": [".css", ".scss", ".less"]
        "file_types": [".css", ".scss", ".less"],
    },
    {
        "id": "inline-style-color",
@@ -90,7 +91,7 @@ HARDCODED_PATTERNS = [
        "category": "color",
        "message": "Hardcoded color in inline style. Consider using theme tokens.",
        "suggestion": "Use: style={{ color: theme.colors.* }}",
        "file_types": [".jsx", ".tsx"]
        "file_types": [".jsx", ".tsx"],
    },
    {
        "id": "tailwind-arbitrary",
@@ -98,8 +99,8 @@ HARDCODED_PATTERNS = [
        "category": "color",
        "message": "Arbitrary Tailwind color value. Consider using theme colors.",
        "suggestion": "Use: bg-primary, text-secondary, etc.",
        "file_types": [".jsx", ".tsx", ".html"]
    }
        "file_types": [".jsx", ".tsx", ".html"],
    },
]

# Allowlist patterns (common exceptions)
@@ -114,6 +115,7 @@ ALLOWLIST = [
    r"colors\.",  # Already using colors object
]

def get_config():
    """Load hook configuration."""
    config_path = Path.home() / ".dss" / "hooks-config.json"
@@ -122,10 +124,10 @@ def get_config():
            "enabled": True,
            "strict_mode": False,
            "warn_only": True,
            "categories": ["color", "spacing", "typography"]
            "categories": ["color", "spacing", "typography"],
        }
    }

    if config_path.exists():
        try:
            with open(config_path) as f:
@@ -135,6 +137,7 @@ def get_config():
            pass
    return default_config


def is_allowlisted(match: str) -> bool:
    """Check if match is in allowlist."""
    for pattern in ALLOWLIST:
@@ -142,33 +145,36 @@ def is_allowlisted(match: str) -> bool:
            return True
    return False


def check_content(content: str, file_path: str, config: dict) -> list:
    """Check content for hardcoded values."""
    issues = []
    file_ext = Path(file_path).suffix.lower()
    enabled_categories = config.get("token_validator", {}).get("categories", [])

    for pattern_def in HARDCODED_PATTERNS:
        # Skip if file type doesn't match
        if file_ext not in pattern_def.get("file_types", []):
            continue

        # Skip if category not enabled (unless empty = all)
        if enabled_categories and pattern_def["category"] not in enabled_categories:
            continue

        matches = re.findall(pattern_def["regex"], content, re.IGNORECASE)

        for match in matches:
            if not is_allowlisted(match):
                issues.append({
                    "id": pattern_def["id"],
                    "category": pattern_def["category"],
                    "message": pattern_def["message"],
                    "suggestion": pattern_def["suggestion"],
                    "value": match[:50]  # Truncate long matches
                })

                issues.append(
                    {
                        "id": pattern_def["id"],
                        "category": pattern_def["category"],
                        "message": pattern_def["message"],
                        "suggestion": pattern_def["suggestion"],
                        "value": match[:50],  # Truncate long matches
                    }
                )

    # Deduplicate by id
    seen = set()
    unique_issues = []
@@ -176,55 +182,57 @@ def check_content(content: str, file_path: str, config: dict) -> list:
        if issue["id"] not in seen:
            seen.add(issue["id"])
            unique_issues.append(issue)

    return unique_issues


def format_output(issues: list, file_path: str) -> str:
    """Format issues for display."""
    if not issues:
        return ""

    category_icons = {
        "color": "[COLOR]",
        "spacing": "[SPACE]",
        "typography": "[FONT]",
        "border": "[BORDER]",
        "effects": "[EFFECT]",
        "layout": "[LAYOUT]"
        "layout": "[LAYOUT]",
    }

    lines = [f"\n=== DSS Token Validator: {file_path} ===\n"]

    for issue in issues:
        icon = category_icons.get(issue["category"], "[TOKEN]")
        lines.append(f"{icon} {issue['message']}")
        lines.append(f"   Found: {issue['value']}")
        lines.append(f"   {issue['suggestion']}\n")

    lines.append("=" * 50)
    return "\n".join(lines)


def main():
    """Main hook entry point."""
    config = get_config()

    if not config.get("token_validator", {}).get("enabled", True):
        sys.exit(0)

    # Read hook input from stdin
    try:
        input_data = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    if tool_name not in ["Edit", "Write"]:
        sys.exit(0)

    file_path = tool_input.get("file_path", "")

    # Get content to check
    if tool_name == "Write":
        content = tool_input.get("content", "")
@@ -232,22 +240,23 @@ def main():
        content = tool_input.get("new_string", "")
    else:
        content = ""

    if not content or not file_path:
        sys.exit(0)

    # Check for token issues
    issues = check_content(content, file_path, config)

    if issues:
        output = format_output(issues, file_path)
        print(output, file=sys.stderr)

        # In strict mode, block on issues
        if config.get("token_validator", {}).get("strict_mode", False):
            sys.exit(2)

    sys.exit(0)


if __name__ == "__main__":
    main()

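The validator's actual "regex" fields fall outside the visible hunks, so the pattern below is illustrative only; it shows the detect-then-allowlist flow on a hardcoded hex color:

import re

# Illustrative stand-ins: the hex regex and the first two allowlist entries are
# assumptions; only r"colors\." appears in the ALLOWLIST shown above.
hex_color = r"#[0-9a-fA-F]{3,8}\b"
allowlist = [r"currentColor", r"transparent", r"colors\."]

css = "a { color: #6366f1; } .ok { color: var(--color-primary); }"

for match in re.findall(hex_color, css, re.IGNORECASE):
    if not any(re.search(p, match) for p in allowlist):
        print(f"[COLOR] Hardcoded hex color detected: {match[:50]}")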
(File diff suppressed because it is too large.)
@@ -8,7 +8,7 @@ transparently.
"""

from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any
from typing import Any, Dict, List, Optional


class BrowserStrategy(ABC):
@@ -22,10 +22,7 @@ class BrowserStrategy(ABC):

    @abstractmethod
    async def get_console_logs(
        self,
        session_id: Optional[str] = None,
        limit: int = 100,
        level: Optional[str] = None
        self, session_id: Optional[str] = None, limit: int = 100, level: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Retrieve console logs from the browser session.
@@ -42,9 +39,7 @@ class BrowserStrategy(ABC):

    @abstractmethod
    async def capture_screenshot(
        self,
        selector: Optional[str] = None,
        full_page: bool = False
        self, selector: Optional[str] = None, full_page: bool = False
    ) -> str:
        """
        Capture a screenshot of the current page or specific element.
@@ -72,9 +67,7 @@ class BrowserStrategy(ABC):

    @abstractmethod
    async def get_errors(
        self,
        severity: Optional[str] = None,
        limit: int = 50
        self, severity: Optional[str] = None, limit: int = 50
    ) -> List[Dict[str, Any]]:
        """
        Retrieve accumulated browser errors (console errors, crashes, network failures).
@@ -89,10 +82,7 @@ class BrowserStrategy(ABC):
        pass

    @abstractmethod
    async def run_accessibility_audit(
        self,
        selector: Optional[str] = None
    ) -> Dict[str, Any]:
    async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
        """
        Run accessibility audit using axe-core.

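BrowserStrategy is an ABC, so any backend must implement the abstract coroutines before it can be instantiated. A minimal no-op sketch covering the four methods visible in this diff (the real ABC may declare more, and BrowserStrategy is assumed importable from the module above):

from typing import Any, Dict, List, Optional

class NullBrowserStrategy(BrowserStrategy):
    """Do-nothing strategy; useful as a test double or disabled-mode fallback."""

    async def get_console_logs(
        self, session_id: Optional[str] = None, limit: int = 100, level: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        return []

    async def capture_screenshot(
        self, selector: Optional[str] = None, full_page: bool = False
    ) -> str:
        return ""

    async def get_errors(
        self, severity: Optional[str] = None, limit: int = 50
    ) -> List[Dict[str, Any]]:
        return []

    async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
        return {"violations": [], "passes": [], "incomplete": []}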
@@ -23,15 +23,11 @@ AXE_CORE_SCRIPT_URL = "https://cdnjs.cloudflare.com/ajax/libs/axe-core/4.8.4/axe

# Optional Playwright import for graceful degradation
try:
    from playwright.async_api import (
        Browser,
        ConsoleMessage,
        Error as PlaywrightError,
        Page,
        Playwright,
        TimeoutError as PlaywrightTimeoutError,
        async_playwright,
    )
    from playwright.async_api import Browser, ConsoleMessage
    from playwright.async_api import Error as PlaywrightError
    from playwright.async_api import Page, Playwright
    from playwright.async_api import TimeoutError as PlaywrightTimeoutError
    from playwright.async_api import async_playwright

    PLAYWRIGHT_AVAILABLE = True
except ImportError:
@@ -199,8 +195,8 @@ class LocalBrowserStrategy(BrowserStrategy):
                    "timestamp": None,  # Playwright doesn't provide timestamp directly
                    "category": "console",
                    "data": {
                        "location": msg.location if hasattr(msg, 'location') else None,
                    }
                        "location": msg.location if hasattr(msg, "location") else None,
                    },
                }
                logs.append(log_entry)
            except Exception as e:
@@ -234,10 +230,8 @@ class LocalBrowserStrategy(BrowserStrategy):
            raise RuntimeError("No active page to capture screenshot from.")

        # Generate unique filename
        session_id = getattr(self.context, 'session_id', 'local')
        path = os.path.join(
            tempfile.gettempdir(), f"dss_screenshot_{session_id}.png"
        )
        session_id = getattr(self.context, "session_id", "local")
        path = os.path.join(tempfile.gettempdir(), f"dss_screenshot_{session_id}.png")

        try:
            if selector:
@@ -284,9 +278,9 @@ class LocalBrowserStrategy(BrowserStrategy):
                    "category": "uncaughtError",
                    "message": str(err),
                    "data": {
                        "name": getattr(err, 'name', 'Error'),
                        "stack": getattr(err, 'stack', None),
                    }
                        "name": getattr(err, "name", "Error"),
                        "stack": getattr(err, "stack", None),
                    },
                }
                errors.append(error_entry)
            except Exception as e:
@@ -294,9 +288,7 @@ class LocalBrowserStrategy(BrowserStrategy):

        return errors[-limit:]

    async def run_accessibility_audit(
        self, selector: Optional[str] = None
    ) -> Dict[str, Any]:
    async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
        """
        Run an accessibility audit on the current page using axe-core.

@@ -330,13 +322,11 @@ class LocalBrowserStrategy(BrowserStrategy):

        # Run axe with selector context if provided
        if selector:
            result = await self.page.evaluate(
                "(selector) => axe.run(selector)", selector
            )
            result = await self.page.evaluate("(selector) => axe.run(selector)", selector)
        else:
            result = await self.page.evaluate("() => axe.run()")

        violations_count = len(result.get('violations', []))
        violations_count = len(result.get("violations", []))
        logger.info(f"Accessibility audit complete. Found {violations_count} violations.")

        return result
@@ -357,9 +347,7 @@ class LocalBrowserStrategy(BrowserStrategy):
            raise RuntimeError("No active page to get performance metrics from.")

        # 1. Get Navigation Timing API metrics
        timing_raw = await self.page.evaluate(
            "() => JSON.stringify(window.performance.timing)"
        )
        timing_raw = await self.page.evaluate("() => JSON.stringify(window.performance.timing)")
        nav_timing = json.loads(timing_raw)

        # 2. Get Core Web Vitals via PerformanceObserver
@@ -417,14 +405,13 @@ class LocalBrowserStrategy(BrowserStrategy):
        """
        core_web_vitals = await self.page.evaluate(metrics_script)

        return {
            "navigation_timing": nav_timing,
            "core_web_vitals": core_web_vitals
        }
        return {"navigation_timing": nav_timing, "core_web_vitals": core_web_vitals}

    async def close(self) -> None:
        """
        Close the current page. Browser instance is kept in pool for reuse.
        Close the current page.

        Browser instance is kept in pool for reuse.

        To fully close the browser, use close_browser() class method.
        """

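get_performance_metrics() returns a dict with navigation_timing (the serialized window.performance.timing object) and core_web_vitals. A consumption sketch only; the LocalBrowserStrategy constructor and page setup are not shown in this diff, so `strategy` is assumed to be an initialized instance with an active page:

async def report_page_load(strategy) -> None:
    """Print a one-line load summary from the metrics dict shown above."""
    metrics = await strategy.get_performance_metrics()
    timing = metrics["navigation_timing"]  # standard Navigation Timing fields
    vitals = metrics["core_web_vitals"]
    load_ms = timing["loadEventEnd"] - timing["navigationStart"]
    print(f"Full page load: {load_ms} ms; core web vitals: {vitals}")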
@@ -1,16 +1,16 @@
"""
Remote Browser Strategy implementation.

Connects to the DSS API to retrieve browser state and logs via Shadow State pattern.
"""

import aiohttp
import asyncio
import logging
import base64
from typing import List, Dict, Any, Optional
from typing import Any, Dict, List, Optional

import aiohttp

from ..base import BrowserStrategy
from ...core.context import DSSContext
from ..base import BrowserStrategy

# Configure module logger
logger = logging.getLogger(__name__)
@@ -19,6 +19,7 @@ logger = logging.getLogger(__name__)
class RemoteBrowserStrategy(BrowserStrategy):
    """
    Implements browser interaction via remote API calls.

    Relies on the browser-side Logger to sync state to the server.
    """

@@ -42,7 +43,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
        base_url = self.context.get_api_url()

        # Ensure base_url doesn't have trailing slash for clean concatenation
        base_url = base_url.rstrip('/')
        base_url = base_url.rstrip("/")
        url = f"{base_url}/api/browser-logs/{session_id}"

        try:
@@ -71,10 +72,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
            return []

    async def get_console_logs(
        self,
        session_id: Optional[str] = None,
        limit: int = 100,
        level: Optional[str] = None
        self, session_id: Optional[str] = None, limit: int = 100, level: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Get browser console logs from the remote API.
@@ -88,7 +86,8 @@ class RemoteBrowserStrategy(BrowserStrategy):

        # Filter by console category mostly, but also capture uncaught errors
        console_logs = [
            l for l in logs
            l
            for l in logs
            if l.get("category") in ["console", "uncaughtError", "unhandledRejection"]
        ]

@@ -102,9 +101,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
        return console_logs[:limit]

    async def capture_screenshot(
        self,
        selector: Optional[str] = None,
        full_page: bool = False
        self, selector: Optional[str] = None, full_page: bool = False
    ) -> str:
        """
        Capture a screenshot.
@@ -133,8 +130,7 @@ class RemoteBrowserStrategy(BrowserStrategy):

        # Filter for snapshots
        snapshots = [
            l for l in logs
            if l.get("category") == "snapshot" and "snapshot" in l.get("data", {})
            l for l in logs if l.get("category") == "snapshot" and "snapshot" in l.get("data", {})
        ]

        if not snapshots:
@@ -154,9 +150,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
            return "<!-- Corrupted or unexpected snapshot data format -->"

    async def get_errors(
        self,
        severity: Optional[str] = None,
        limit: int = 50
        self, severity: Optional[str] = None, limit: int = 50
    ) -> List[Dict[str, Any]]:
        """
        Get error logs from the remote API.
@@ -178,10 +172,7 @@ class RemoteBrowserStrategy(BrowserStrategy):

        return errors[:limit]

    async def run_accessibility_audit(
        self,
        selector: Optional[str] = None
    ) -> Dict[str, Any]:
    async def run_accessibility_audit(self, selector: Optional[str] = None) -> Dict[str, Any]:
        """
        Get accessibility audit results from Shadow State.

@@ -198,7 +189,8 @@ class RemoteBrowserStrategy(BrowserStrategy):

        # Look for accessibility audits in the logs
        audits = [
            l for l in logs
            l
            for l in logs
            if l.get("category") == "accessibility" or l.get("category") == "accessibilitySnapshot"
        ]

@@ -207,7 +199,7 @@ class RemoteBrowserStrategy(BrowserStrategy):
                "violations": [],
                "passes": [],
                "incomplete": [],
                "message": "No accessibility audit found in Shadow State. Trigger audit from browser console using __DSS_BROWSER_LOGS.audit()"
                "message": "No accessibility audit found in Shadow State. Trigger audit from browser console using __DSS_BROWSER_LOGS.audit()",
            }

        # Get the latest audit
@@ -236,14 +228,13 @@ class RemoteBrowserStrategy(BrowserStrategy):

        # Look for performance metrics in the logs
        perf_logs = [
            l for l in logs
            if l.get("category") in ["performance", "accessibilitySnapshot"]
            l for l in logs if l.get("category") in ["performance", "accessibilitySnapshot"]
        ]

        if not perf_logs:
            return {
                "error": "No performance data found in Shadow State.",
                "message": "Performance metrics are captured automatically during page load."
                "message": "Performance metrics are captured automatically during page load.",
            }

        # Get the latest performance entry

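Every remote call above reduces to filtering the same Shadow State log feed by its category field. A hypothetical entry in the minimal shape the filters rely on (the full field list is not visible in this diff):

# Categories observed in the filters above: console, uncaughtError,
# unhandledRejection, snapshot, accessibility, accessibilitySnapshot, performance.
log_entry = {
    "category": "console",
    "message": "Something was logged",  # assumed field
    "data": {},                         # "snapshot" entries carry data["snapshot"]
}

console_like = ["console", "uncaughtError", "unhandledRejection"]
print(log_entry["category"] in console_like)  # True: passes get_console_logs' filter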
@@ -1,14 +1,14 @@
"""
Remote Filesystem Strategy implementation.

Filesystem operations are restricted in REMOTE mode for security.
"""

import logging
from typing import List, Dict, Any
from pathlib import Path
from typing import Any, Dict, List

from ..base import FilesystemStrategy
from ...core.context import DSSContext
from ..base import FilesystemStrategy

# Configure module logger
logger = logging.getLogger(__name__)

@@ -1,29 +1,21 @@
"""
Test Suite for DSS Context Compiler
Test Suite for DSS Context Compiler.

Validates all core functionality: cascade merging, token resolution, security, and error handling.
"""

import json
import os
import sys
from pathlib import Path

from core import ContextCompiler, get_compiler_status, list_skins, resolve_token

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

from core import (
    ContextCompiler,
    get_active_context,
    resolve_token,
    validate_manifest,
    list_skins,
    get_compiler_status,
    EMERGENCY_SKIN
)


class TestContextCompiler:
    """Test suite for Context Compiler"""
    """Test suite for Context Compiler."""

    def __init__(self):
        self.base_dir = Path(__file__).parent.parent
@@ -34,7 +26,7 @@ class TestContextCompiler:
        self.failed = 0

    def assert_equal(self, actual, expected, message):
        """Simple assertion helper"""
        """Simple assertion helper."""
        if actual == expected:
            print(f"✓ {message}")
            self.passed += 1
@@ -47,7 +39,7 @@ class TestContextCompiler:
        return False

    def assert_true(self, condition, message):
        """Assert condition is true"""
        """Assert condition is true."""
        if condition:
            print(f"✓ {message}")
            self.passed += 1
@@ -58,7 +50,7 @@ class TestContextCompiler:
        return False

    def assert_in(self, needle, haystack, message):
        """Assert needle is in haystack"""
        """Assert needle is in haystack."""
        if needle in haystack:
            print(f"✓ {message}")
            self.passed += 1
@@ -70,7 +62,7 @@ class TestContextCompiler:
        return False

    def test_basic_compilation(self):
        """Test 1: Basic 3-layer cascade compilation"""
        """Test 1: Basic 3-layer cascade compilation."""
        print("\n=== Test 1: Basic Compilation (3-Layer Cascade) ===")

        try:
@@ -80,29 +72,27 @@ class TestContextCompiler:
            self.assert_equal(
                context.get("tokens", {}).get("colors", {}).get("primary"),
                "#6366f1",
                "Project override applied correctly (colors.primary)"
                "Project override applied correctly (colors.primary)",
            )

            # Test skin value (Layer 2 - workbench)
            self.assert_equal(
                context.get("tokens", {}).get("colors", {}).get("background"),
                "#0F172A",
                "Workbench skin value inherited (colors.background)"
                "Workbench skin value inherited (colors.background)",
            )

            # Test base value (Layer 1)
            self.assert_equal(
                context.get("tokens", {}).get("spacing", {}).get("0"),
                "0px",
                "Base skin value inherited (spacing.0)"
                "Base skin value inherited (spacing.0)",
            )

            # Test metadata injection
            self.assert_in("_meta", context, "Metadata injected into context")
            self.assert_equal(
                context.get("_meta", {}).get("project_id"),
                "dss-admin",
                "Project ID in metadata"
                context.get("_meta", {}).get("project_id"), "dss-admin", "Project ID in metadata"
            )

        except Exception as e:
@@ -110,7 +100,7 @@ class TestContextCompiler:
            self.failed += 1

    def test_debug_provenance(self):
        """Test 2: Debug provenance tracking"""
        """Test 2: Debug provenance tracking."""
        print("\n=== Test 2: Debug Provenance Tracking ===")

        try:
@@ -118,12 +108,10 @@ class TestContextCompiler:

            self.assert_in("_provenance", context, "Provenance data included in debug mode")
            self.assert_true(
                isinstance(context.get("_provenance", []), list),
                "Provenance is a list"
                isinstance(context.get("_provenance", []), list), "Provenance is a list"
            )
            self.assert_true(
                len(context.get("_provenance", [])) > 0,
                "Provenance contains tracking entries"
                len(context.get("_provenance", [])) > 0, "Provenance contains tracking entries"
            )

        except Exception as e:
@@ -131,7 +119,7 @@ class TestContextCompiler:
            self.failed += 1

    def test_token_resolution(self):
        """Test 3: Token resolution via MCP tool"""
        """Test 3: Token resolution via MCP tool."""
        print("\n=== Test 3: Token Resolution ===")

        try:
@@ -149,10 +137,7 @@ class TestContextCompiler:

            # Test nested token
            result = resolve_token(str(self.admin_manifest), "typography.fontFamily.sans")
            self.assert_true(
                "Inter" in result or "system-ui" in result,
                "Resolved nested token"
            )
            self.assert_true("Inter" in result or "system-ui" in result, "Resolved nested token")

            # Test non-existent token
            result = resolve_token(str(self.admin_manifest), "nonexistent.token")
@@ -163,7 +148,7 @@ class TestContextCompiler:
            self.failed += 1

    def test_skin_listing(self):
        """Test 4: Skin listing functionality"""
        """Test 4: Skin listing functionality."""
        print("\n=== Test 4: Skin Listing ===")

        try:
@@ -180,7 +165,7 @@ class TestContextCompiler:
            self.failed += 1

    def test_safe_boot_protocol(self):
        """Test 5: Safe Boot Protocol (emergency fallback)"""
        """Test 5: Safe Boot Protocol (emergency fallback)."""
        print("\n=== Test 5: Safe Boot Protocol ===")

        try:
@@ -188,9 +173,7 @@ class TestContextCompiler:
            context = self.compiler.compile("/nonexistent/path.json")

            self.assert_equal(
                context.get("status"),
                "emergency_mode",
                "Emergency mode activated for invalid path"
                context.get("status"), "emergency_mode", "Emergency mode activated for invalid path"
            )

            self.assert_in("_error", context, "Error details included in safe boot")
@@ -198,14 +181,18 @@ class TestContextCompiler:
            # Validate emergency skin has required structure
            self.assert_in("tokens", context, "Emergency skin has tokens")
            self.assert_in("colors", context.get("tokens", {}), "Emergency skin has colors")
            self.assert_in("primary", context.get("tokens", {}).get("colors", {}), "Emergency skin has primary color")
            self.assert_in(
                "primary",
                context.get("tokens", {}).get("colors", {}),
                "Emergency skin has primary color",
            )

        except Exception as e:
            print(f"✗ Safe Boot Protocol test failed with error: {e}")
            self.failed += 1

    def test_path_traversal_prevention(self):
        """Test 6: Security - Path traversal prevention"""
        """Test 6: Security - Path traversal prevention."""
        print("\n=== Test 6: Path Traversal Prevention (Security) ===")

        try:
@@ -215,11 +202,7 @@ class TestContextCompiler:
            print("✗ Path traversal not prevented!")
            self.failed += 1
        except ValueError as e:
            self.assert_in(
                "path traversal",
                str(e).lower(),
                "Path traversal attack blocked"
            )
            self.assert_in("path traversal", str(e).lower(), "Path traversal attack blocked")

        # Attempt another variant
        try:
@@ -227,18 +210,14 @@ class TestContextCompiler:
            print("✗ Path traversal variant not prevented!")
            self.failed += 1
        except ValueError as e:
            self.assert_in(
                "path traversal",
                str(e).lower(),
                "Path traversal variant blocked"
            )
            self.assert_in("path traversal", str(e).lower(), "Path traversal variant blocked")

        except Exception as e:
            print(f"✗ Path traversal prevention test failed with unexpected error: {e}")
            self.failed += 1

    def test_compiler_status(self):
        """Bonus Test: Compiler status tool"""
        """Bonus Test: Compiler status tool."""
        print("\n=== Bonus Test: Compiler Status ===")

        try:
@@ -254,7 +233,7 @@ class TestContextCompiler:
            self.failed += 1

    def run_all_tests(self):
        """Execute all tests and report results"""
        """Execute all tests and report results."""
        print("=" * 60)
        print("DSS Context Compiler Test Suite")
        print("=" * 60)

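The suite's entry point falls outside the visible hunks, but given the class above, a minimal runner would look like the sketch below (the nonzero-on-failure exit convention is an assumption):

import sys

if __name__ == "__main__":
    suite = TestContextCompiler()
    suite.run_all_tests()
    # Hypothetical exit convention: nonzero when any assertion failed.
    sys.exit(1 if suite.failed else 0)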
@@ -1,7 +1,5 @@
#!/usr/bin/env python3
"""
Verify that dss-mcp-server.py properly exports Context Compiler tools
"""
"""Verify that dss-mcp-server.py properly exports Context Compiler tools."""

import sys
from pathlib import Path
@@ -19,11 +17,12 @@ print("\n1. Testing Context Compiler imports...")
try:
    from core import (
        get_active_context,
        get_compiler_status,
        list_skins,
        resolve_token,
        validate_manifest,
        list_skins,
        get_compiler_status
    )

    print("   ✓ All Context Compiler functions imported successfully")
    CONTEXT_COMPILER_AVAILABLE = True
except ImportError as e:
@@ -36,10 +35,8 @@ print("\n2. Checking MCP server tool list...")
try:
    # We need to simulate the MCP server initialization
    # to see what tools it would export
    import asyncio

    from mcp.server import Server
    from mcp.server.stdio import stdio_server
    from mcp.types import Tool, TextContent

    # Create a test server instance
    server = Server("dss-test")
@@ -109,7 +106,7 @@ try:
        'elif name == "dss_resolve_token"',
        'elif name == "dss_validate_manifest"',
        'elif name == "dss_list_skins"',
        'elif name == "dss_get_compiler_status"'
        'elif name == "dss_get_compiler_status"',
    ]

    for handler in handlers:
@@ -140,7 +137,7 @@ try:
    status = json.loads(status_json)
    print(f"   ✓ get_compiler_status() returned status: {status['status']}")

    if status['status'] == 'active':
    if status["status"] == "active":
        print("   ✓ Context Compiler is active and ready")
    else:
        print(f"   ✗ Context Compiler status is: {status['status']}")

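For quick smoke checks outside this script, get_compiler_status() returns a JSON string whose status field reads "active" when the compiler is ready. A minimal check, assuming core is importable as in the verify script above:

import json

from core import get_compiler_status

status = json.loads(get_compiler_status())
assert status["status"] == "active", f"Compiler not ready: {status['status']}"
print("Context Compiler is active and ready")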