fix: Address high-severity bandit issues
@@ -1,19 +1,20 @@
 """
-DSS Core Module - Configuration and Context Management
+DSS Core Module - Configuration and Context Management.
+
 Extended with Context Compiler for design system context resolution.
 """
 
+from .compiler import EMERGENCY_SKIN, ContextCompiler
 from .config import DSSConfig, DSSMode
 from .context import DSSContext
-from .compiler import ContextCompiler, EMERGENCY_SKIN
 from .mcp_extensions import (
+    COMPILER,
     get_active_context,
+    get_compiler_status,
+    list_skins,
     resolve_token,
     validate_manifest,
-    list_skins,
-    get_compiler_status,
     with_context,
-    COMPILER
 )
 
 __all__ = [
@@ -28,5 +29,5 @@ __all__ = [
     "list_skins",
     "get_compiler_status",
     "with_context",
-    "COMPILER"
+    "COMPILER",
 ]

@@ -1,16 +1,16 @@
 """
-DSS Context Compiler
+DSS Context Compiler.
+
 Resolves project context via 3-layer cascade: Base -> Skin -> Project
 Includes Safe Boot Protocol and Debug Provenance.
 """
 
-import json
-import os
 import copy
+import json
 import logging
 from datetime import datetime, timezone
-from typing import Dict, Any, Optional, List, Union
 from pathlib import Path
+from typing import Any, Dict, List
 
 # Setup logging
 logging.basicConfig(level=logging.INFO)
@@ -21,25 +21,26 @@ logger = logging.getLogger("DSSCompiler")
 EMERGENCY_SKIN = {
     "meta": {"id": "emergency", "version": "1.0.0"},
     "tokens": {
-        "colors": {
-            "primary": "#FF0000",
-            "background": "#FFFFFF",
-            "text": "#000000"
-        },
-        "spacing": {"base": "4px"}
+        "colors": {"primary": "#FF0000", "background": "#FFFFFF", "text": "#000000"},
+        "spacing": {"base": "4px"},
     },
-    "status": "emergency_mode"
+    "status": "emergency_mode",
 }
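
EMERGENCY_SKIN is the Safe Boot fallback: when skin resolution fails, the compiler can serve this minimal token set instead of crashing. A minimal sketch of that pattern, assuming the try/except wiring shown here is illustrative rather than the actual compile() body:

import copy

def compile_with_safe_boot(compiler, manifest_path):
    # Hypothetical wrapper: fall back to the emergency skin on any failure.
    try:
        return compiler.compile(manifest_path)
    except Exception:
        # Serve a deep copy so callers cannot mutate the shared constant.
        return copy.deepcopy(EMERGENCY_SKIN)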
 
 
 class ContextCompiler:
     def __init__(self, skins_dir: str = "./skins"):
         self.skins_dir = Path(skins_dir)
         self.cache: Dict[str, Any] = {}
         self._manifest_mtimes: Dict[str, float] = {}  # Track file modification times
 
-    def compile(self, manifest_path: str, debug: bool = False, force_refresh: bool = False) -> Dict[str, Any]:
+    def compile(
+        self, manifest_path: str, debug: bool = False, force_refresh: bool = False
+    ) -> Dict[str, Any]:
         """
-        Main entry point. Compiles context by merging:
+        Main entry point.
+
+        Compiles context by merging:
         1. Base Skin (Implicit or Explicit)
         2. Extended Skin (defined in manifest)
         3. Project Overrides (defined in manifest)

@@ -83,17 +84,17 @@ class ContextCompiler:
 
         # Merge Result + Project Overrides
         # Need to wrap project overrides in same structure as skins
-        project_overrides_wrapped = {
-            "tokens": manifest.get("overrides", {}).get("tokens", {})
-        }
-        final_context = self._deep_merge(context, project_overrides_wrapped, path="skin->project", debug=debug)
+        project_overrides_wrapped = {"tokens": manifest.get("overrides", {}).get("tokens", {})}
+        final_context = self._deep_merge(
+            context, project_overrides_wrapped, path="skin->project", debug=debug
+        )
 
         # Inject Metadata
         final_context["_meta"] = {
             "project_id": manifest["project"]["id"],
             "compiled_at": datetime.now(timezone.utc).isoformat(),
             "debug_enabled": debug,
-            "compiler_config": manifest.get("compiler", {})
+            "compiler_config": manifest.get("compiler", {}),
         }
 
         if debug:
@@ -138,19 +139,28 @@ class ContextCompiler:
         return data
 
     def _load_json(self, path: str) -> Dict[str, Any]:
-        with open(path, 'r') as f:
+        with open(path, "r") as f:
             return json.load(f)
 
-    def _deep_merge(self, base: Dict, override: Dict, path: str = "", debug: bool = False, provenance: List[Dict] = None) -> Dict:
+    def _deep_merge(
+        self,
+        base: Dict,
+        override: Dict,
+        path: str = "",
+        debug: bool = False,
+        provenance: List[Dict] = None,
+    ) -> Dict:
         """
-        Deep merge dictionaries. Replaces arrays.
+        Deep merge dictionaries.
+
+        Replaces arrays.
         Populates provenance list if debug is True (thread-safe).
         """
         # Thread-safe: use method parameter instead of instance variable
         if provenance is None and debug:
             provenance = []
             # Store reference on first call for later retrieval
-            if not hasattr(self, 'provenance_log'):
+            if not hasattr(self, "provenance_log"):
                 self.provenance_log = provenance
 
         result = copy.deepcopy(base)
@@ -158,16 +168,20 @@ class ContextCompiler:
         for key, value in override.items():
             if isinstance(value, dict) and key in result and isinstance(result[key], dict):
                 # Recursive merge - pass provenance down
-                result[key] = self._deep_merge(result[key], value, path=f"{path}.{key}", debug=debug, provenance=provenance)
+                result[key] = self._deep_merge(
+                    result[key], value, path=f"{path}.{key}", debug=debug, provenance=provenance
+                )
             else:
                 # Direct replacement (Primitive or Array)
                 if debug and provenance is not None:
-                    provenance.append({
-                        "key": key,
-                        "action": "override",
-                        "layer": path,
-                        "value_type": type(value).__name__
-                    })
+                    provenance.append(
+                        {
+                            "key": key,
+                            "action": "override",
+                            "layer": path,
+                            "value_type": type(value).__name__,
+                        }
+                    )
                 result[key] = copy.deepcopy(value)
 
         return result
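
The merge semantics are worth pinning down with an example: nested dicts merge key-by-key, while lists and primitives are replaced wholesale by the override layer. A standalone re-implementation of the same rule (not the class method itself):

import copy

def deep_merge(base: dict, override: dict) -> dict:
    # Dicts merge recursively; lists and scalars are replaced outright.
    result = copy.deepcopy(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = deep_merge(result[key], value)
        else:
            result[key] = copy.deepcopy(value)
    return result

base = {"colors": {"primary": "#FF0000", "text": "#000000"}, "fonts": ["Inter"]}
skin = {"colors": {"primary": "#0055FF"}, "fonts": ["Roboto", "Arial"]}
merged = deep_merge(base, skin)
# merged["colors"] == {"primary": "#0055FF", "text": "#000000"}  (merged)
# merged["fonts"] == ["Roboto", "Arial"]                         (replaced)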

@@ -7,17 +7,15 @@ Supports local/remote mode detection, persistent configuration storage, and
 environment variable overrides.
 """
 
-import os
-import json
-import uuid
 import asyncio
 import logging
+import os
+import uuid
 from enum import Enum
 from pathlib import Path
-from typing import Optional, Union, Any
 
 import aiohttp
-from pydantic import BaseModel, Field, HttpUrl, ValidationError
+from pydantic import BaseModel, Field, ValidationError
 
 # Configure module-level logger
 logger = logging.getLogger(__name__)
@@ -30,6 +28,7 @@ DEFAULT_LOCAL_URL = "http://localhost:6006"
 
 class DSSMode(str, Enum):
     """Operation modes for the DSS plugin."""
+
     LOCAL = "local"
     REMOTE = "remote"
     AUTO = "auto"
@@ -45,10 +44,13 @@ class DSSConfig(BaseModel):
         local_url (str): URL for the local DSS API (usually localhost).
         session_id (str): Unique identifier for this client instance.
     """
+
     mode: DSSMode = Field(default=DSSMode.AUTO, description="Operation mode preference")
     remote_url: str = Field(default=DEFAULT_REMOTE_URL, description="Remote API endpoint")
     local_url: str = Field(default=DEFAULT_LOCAL_URL, description="Local API endpoint")
-    session_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID")
+    session_id: str = Field(
+        default_factory=lambda: str(uuid.uuid4()), description="Persistent session ID"
+    )
 
     class Config:
         validate_assignment = True
@@ -58,6 +60,7 @@ class DSSConfig(BaseModel):
     def load(cls) -> "DSSConfig":
         """
        Load configuration from ~/.dss/config.json.
+
        Returns a default instance if the file does not exist or is invalid.
         """
         if not CONFIG_FILE.exists():
@@ -79,6 +82,7 @@ class DSSConfig(BaseModel):
     def save(self) -> None:
         """
         Save the current configuration to ~/.dss/config.json.
+
         Creates the directory if it does not exist.
         """
         try:
@@ -153,9 +157,7 @@ class DSSConfig(BaseModel):
             return False
 
     def get_api_url(self, active_mode: DSSMode) -> str:
-        """
-        Helper to get the correct API URL for the determined mode.
-        """
+        """Helper to get the correct API URL for the determined mode."""
         if active_mode == DSSMode.LOCAL:
             return self.local_url
         return self.remote_url
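
Usage is straightforward once a concrete mode has been detected; AUTO is resolved to LOCAL or REMOTE elsewhere in this file before the URL lookup happens. A small sketch, assuming the default URLs defined above:

# Hypothetical caller; DSSConfig and DSSMode are the classes from this diff.
config = DSSConfig.load()

local_url = config.get_api_url(DSSMode.LOCAL)    # -> "http://localhost:6006" by default
remote_url = config.get_api_url(DSSMode.REMOTE)  # -> the configured remote_url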

@@ -8,7 +8,7 @@ Handles configuration loading, mode detection, and strategy instantiation.
 
 import asyncio
 import logging
-from typing import Optional, Dict, Any
+from typing import Any, Dict, Optional
 
 from .config import DSSConfig, DSSMode
 
@@ -26,12 +26,15 @@ class DSSContext:
     Handles configuration loading, mode detection (Local/Remote),
     and strategy instantiation.
     """
-    _instance: Optional['DSSContext'] = None
+
+    _instance: Optional["DSSContext"] = None
     _lock: asyncio.Lock = asyncio.Lock()
 
     def __init__(self) -> None:
         """
-        Private initializer. Use get_instance() instead.
+        Private initializer.
+
+        Use get_instance() instead.
         """
         if DSSContext._instance is not None:
             raise RuntimeError("DSSContext is a singleton. Use get_instance() to access it.")
@@ -43,9 +46,10 @@ class DSSContext:
         self.session_id: Optional[str] = None
 
     @classmethod
-    async def get_instance(cls) -> 'DSSContext':
+    async def get_instance(cls) -> "DSSContext":
         """
         Async factory method to get the singleton instance.
+
         Ensures config is loaded and mode is detected before returning.
         """
         if not cls._instance:
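
Callers never construct DSSContext directly; they await the factory, which lazily initializes the singleton under the class-level asyncio.Lock. A hedged usage sketch:

import asyncio

async def main():
    # Hypothetical caller; DSSContext is the singleton defined in this diff.
    ctx = await DSSContext.get_instance()
    print(ctx.active_mode, ctx.session_id)

asyncio.run(main())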
@@ -61,13 +65,16 @@ class DSSContext:
     @classmethod
     def reset(cls) -> None:
         """
-        Resets the singleton instance. Useful for testing.
+        Resets the singleton instance.
+
+        Useful for testing.
         """
         cls._instance = None
 
     async def _initialize(self) -> None:
         """
         Internal initialization logic:
+
         1. Load Config
         2. Detect Mode
         3. Cache Capabilities
@@ -80,7 +87,9 @@ class DSSContext:
         # 2. Detect Mode (Async check)
         self.active_mode = await self.config.get_active_mode()
 
-        logger.info(f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}")
+        logger.info(
+            f"DSSContext initialized. Mode: {self.active_mode.value}, Session: {self.session_id}"
+        )
 
         # 3. Cache Capabilities
         self._cache_capabilities()
@@ -92,15 +101,13 @@ class DSSContext:
             self._capabilities = {"limited": True}
 
     def _cache_capabilities(self) -> None:
-        """
-        Determines what the plugin can do based on the active mode.
-        """
+        """Determines what the plugin can do based on the active mode."""
         # Base capabilities
         caps = {
             "can_read_files": False,
             "can_execute_browser": False,
             "can_screenshot": False,
-            "can_connect_remote": True
+            "can_connect_remote": True,
         }
 
         if self.active_mode == DSSMode.LOCAL:
@@ -111,8 +118,10 @@ class DSSContext:
         elif self.active_mode == DSSMode.REMOTE:
             # Remote mode relies on API capabilities
             # Depending on remote configuration, these might differ
-            caps["can_execute_browser"] = False  # Typically restricted in pure remote unless via API
-            caps["can_read_files"] = False  # Security restriction
+            caps[
+                "can_execute_browser"
+            ] = False  # Typically restricted in pure remote unless via API
+            caps["can_read_files"] = False  # Security restriction
 
         self._capabilities = caps
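
The capability flags computed here gate what downstream tools may attempt: a consumer checks the cached dict rather than re-probing the environment. A sketch, assuming direct access to the private _capabilities attribute (a public accessor is not shown in this diff; the flag names are the ones set above):

async def choose_strategy():
    ctx = await DSSContext.get_instance()
    # Flag names come from _cache_capabilities above.
    if ctx._capabilities.get("can_read_files"):
        return "local-filesystem"
    return "remote-api"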
@@ -151,18 +160,22 @@ class DSSContext:
             # Will be implemented in Phase 2 & 3
             if self.active_mode == DSSMode.LOCAL:
                 from ..strategies.local.browser import LocalBrowserStrategy
+
                 strategy_instance = LocalBrowserStrategy(self)
             else:
                 from ..strategies.remote.browser import RemoteBrowserStrategy
+
                 strategy_instance = RemoteBrowserStrategy(self)
 
         elif strategy_type == "filesystem":
             # Will be implemented in Phase 2
             if self.active_mode == DSSMode.LOCAL:
                 from ..strategies.local.filesystem import LocalFilesystemStrategy
+
                 strategy_instance = LocalFilesystemStrategy(self)
             else:
                 from ..strategies.remote.filesystem import RemoteFilesystemStrategy
+
                 strategy_instance = RemoteFilesystemStrategy(self)
 
         elif strategy_type == "screenshot":

@@ -1,13 +1,15 @@
 """
-MCP Extensions for Context Awareness
+MCP Extensions for Context Awareness.
+
 Implements the Factory Pattern to wrap existing tools with context
 and defines 5 new tools for the Context Compiler.
 """
 
-from typing import Any, Dict, List, Callable
 import functools
 import json
 import os
+from typing import Callable
 
 from .compiler import ContextCompiler
 
 # Singleton compiler instance
@@ -15,19 +17,22 @@ COMPILER = ContextCompiler(skins_dir=os.path.join(os.path.dirname(__file__), "sk
 
 # --- FACTORY PATTERN: Context Wrapper ---
 
 
 def with_context(default_manifest_path: str = None):
     """
     Decorator that injects the compiled context into the tool's arguments.
 
     Use this to upgrade existing 'token extractor' tools to be 'context aware'.
 
     The manifest path is extracted from kwargs['manifest_path'] if present,
     otherwise falls back to the default_manifest_path provided at decoration time.
     """
 
     def decorator(func: Callable):
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
             # 1. Get manifest path (runtime kwarg or decorator default)
-            manifest_path = kwargs.get('manifest_path', default_manifest_path)
+            manifest_path = kwargs.get("manifest_path", default_manifest_path)
             if not manifest_path:
                 raise ValueError("No manifest_path provided to context-aware tool")
 
@@ -35,33 +40,39 @@ def with_context(default_manifest_path: str = None):
             context = COMPILER.compile(manifest_path)
 
             # 3. Inject into kwargs
-            kwargs['dss_context'] = context
+            kwargs["dss_context"] = context
 
             # 4. Execute Tool
             return func(*args, **kwargs)
 
         return wrapper
 
     return decorator
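
What the factory buys you: an existing extractor gains a dss_context kwarg without touching its call sites. A hedged example; extract_colors and its manifest paths are hypothetical:

@with_context(default_manifest_path="./ds.config.json")
def extract_colors(source_file: str, **kwargs):
    # Injected by the decorator: the fully compiled context.
    context = kwargs["dss_context"]
    primary = context["tokens"]["colors"]["primary"]
    return f"{source_file} uses primary color {primary}"

# Callers may override the manifest per call:
extract_colors("home.tsx", manifest_path="./apps/web/ds.config.json")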
 
 
 # --- 5 NEW MCP TOOLS ---
 
 
 def get_active_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
     """
     [Tool 1] Returns the fully resolved JSON context for the project.
 
     Set debug=True to see provenance (which layer defined which token).
     Set force_refresh=True to bypass cache (for long-running servers).
     """
     context = COMPILER.compile(manifest_path, debug=debug, force_refresh=force_refresh)
     return json.dumps(context, indent=2)
 
 
 def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
     """
-    [Tool 2] Resolves a specific token value (e.g. 'colors.primary')
+    [Tool 2] Resolves a specific token value (e.g. 'colors.primary').
+
     through the cascade.
     Set force_refresh=True to bypass cache (for long-running servers).
     """
     context = COMPILER.compile(manifest_path, force_refresh=force_refresh)
-    keys = token_path.split('.')
+    keys = token_path.split(".")
     current = context.get("tokens", {})
 
     for k in keys:
@@ -72,10 +83,9 @@ def resolve_token(manifest_path: str, token_path: str, force_refresh: bool = Fal
 
     return str(current)
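
The dot-path walk is a plain fold over nested dicts. An illustration against the emergency skin's tokens (standalone sketch, not the tool body; the real tool presumably returns an error string on a miss):

tokens = {"colors": {"primary": "#FF0000"}, "spacing": {"base": "4px"}}

current = tokens
for k in "colors.primary".split("."):
    current = current.get(k)  # a missing key yields None here
    if current is None:
        break
print(current)  # -> #FF0000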
 
 
 def validate_manifest(manifest_path: str) -> str:
-    """
-    [Tool 3] Validates the ds.config.json against the schema.
-    """
+    """[Tool 3] Validates the ds.config.json against the schema."""
     # In a full implementation, we would use 'jsonschema' library here.
     # For now, we perform a basic structural check via the Compiler's loader.
     try:
@@ -84,10 +94,9 @@ def validate_manifest(manifest_path: str) -> str:
     except Exception as e:
         return f"Invalid: {str(e)}"
 
 
 def list_skins() -> str:
-    """
-    [Tool 4] Lists all available skins in the registry.
-    """
+    """[Tool 4] Lists all available skins in the registry."""
     skins_path = COMPILER.skins_dir
     if not skins_path.exists():
         return "No skins directory found."
@@ -95,18 +104,18 @@ def list_skins() -> str:
     skins = [f.stem for f in skins_path.glob("*.json")]
     return json.dumps(skins)
 
 
 def get_compiler_status() -> str:
-    """
-    [Tool 5] Returns the health and configuration of the Context Compiler.
-    """
+    """[Tool 5] Returns the health and configuration of the Context Compiler."""
     status = {
         "status": "active",
         "skins_directory": str(COMPILER.skins_dir),
         "cached_skins": list(COMPILER.cache.keys()),
-        "safe_boot_ready": True
+        "safe_boot_ready": True,
     }
     return json.dumps(status, indent=2)
 
 
 # Instructions for Main Server File:
 # 1. Import these tools
 # 2. Register them with the MCP server instance
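
Following those instructions, registration might look like the sketch below. The server object and its register_tool signature are assumptions; actual MCP server frameworks differ:

def register_dss_tools(server):
    # `server.register_tool` is a placeholder signature, not a known API.
    # The five tools are the ones defined above.
    for tool in (
        get_active_context,
        resolve_token,
        validate_manifest,
        list_skins,
        get_compiler_status,
    ):
        server.register_tool(name=tool.__name__, handler=tool, description=tool.__doc__)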

@@ -1,23 +1,21 @@
 """
-MCP Integration Layer for DSS Context Compiler
+MCP Integration Layer for DSS Context Compiler.
+
 Provides MCP-compliant tool wrappers for the 5 new context tools.
 """
 
-from typing import Dict, Any
 import json
-from . import (
-    get_active_context,
-    resolve_token,
-    validate_manifest,
-    list_skins,
-    get_compiler_status
-)
 
+from . import get_active_context, get_compiler_status, list_skins, resolve_token, validate_manifest
 
 # MCP Tool Definitions
 
-def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refresh: bool = False) -> str:
+
+def mcp_get_resolved_context(
+    manifest_path: str, debug: bool = False, force_refresh: bool = False
+) -> str:
     """
-    MCP Tool: Get Active Context
+    MCP Tool: Get Active Context.
 
     Returns the fully resolved JSON context for a project.
     Set debug=True to see provenance (which layer defined which token).
@@ -39,7 +37,7 @@ def mcp_get_resolved_context(manifest_path: str, debug: bool = False, force_refr
 
 def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool = False) -> str:
     """
-    MCP Tool: Resolve Token
+    MCP Tool: Resolve Token.
 
     Resolves a specific token value (e.g. 'colors.primary') through the cascade.
     Set force_refresh=True to bypass cache (for long-running servers).
@@ -60,7 +58,7 @@ def mcp_resolve_token(manifest_path: str, token_path: str, force_refresh: bool =
 
 def mcp_validate_manifest(manifest_path: str) -> str:
     """
-    MCP Tool: Validate Manifest
+    MCP Tool: Validate Manifest.
 
     Validates the ds.config.json against the schema.
 
@@ -78,7 +76,7 @@ def mcp_validate_manifest(manifest_path: str) -> str:
 
 def mcp_list_skins() -> str:
     """
-    MCP Tool: List Skins
+    MCP Tool: List Skins.
 
     Lists all available skins in the registry.
 
@@ -93,7 +91,7 @@ def mcp_list_skins() -> str:
 
 def mcp_get_compiler_status() -> str:
     """
-    MCP Tool: Get Compiler Status
+    MCP Tool: Get Compiler Status.
 
     Returns the health and configuration of the Context Compiler.
 
@@ -117,15 +115,15 @@ MCP_TOOLS = {
             "manifest_path": {
                 "type": "string",
                 "description": "Path to ds.config.json",
-                "required": True
+                "required": True,
             },
             "debug": {
                 "type": "boolean",
                 "description": "Enable debug provenance tracking",
                 "required": False,
-                "default": False
-            }
-        }
+                "default": False,
+            },
+        },
     },
     "dss_resolve_token": {
         "function": mcp_resolve_token,
@@ -134,14 +132,14 @@ MCP_TOOLS = {
             "manifest_path": {
                 "type": "string",
                 "description": "Path to ds.config.json",
-                "required": True
+                "required": True,
             },
             "token_path": {
                 "type": "string",
                 "description": "Dot-notation path to token (e.g. 'colors.primary')",
-                "required": True
-            }
-        }
+                "required": True,
+            },
+        },
     },
     "dss_validate_manifest": {
         "function": mcp_validate_manifest,
@@ -150,18 +148,18 @@ MCP_TOOLS = {
             "manifest_path": {
                 "type": "string",
                 "description": "Path to ds.config.json",
-                "required": True
-            }
-        }
+                "required": True,
+            },
+        },
     },
     "dss_list_skins": {
         "function": mcp_list_skins,
         "description": "List all available design system skins",
-        "parameters": {}
+        "parameters": {},
     },
     "dss_get_compiler_status": {
         "function": mcp_get_compiler_status,
         "description": "Get Context Compiler health and configuration",
-        "parameters": {}
-    }
+        "parameters": {},
+    },
 }
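
A registry shaped like MCP_TOOLS lends itself to a tiny dispatcher. A sketch of how a server loop might invoke it, using the entry keys defined above:

def dispatch(tool_name: str, arguments: dict) -> str:
    # Look up the tool in the MCP_TOOLS registry and call its handler.
    entry = MCP_TOOLS.get(tool_name)
    if entry is None:
        raise KeyError(f"Unknown tool: {tool_name}")
    return entry["function"](**arguments)

# e.g. dispatch("dss_resolve_token",
#               {"manifest_path": "./ds.config.json", "token_path": "colors.primary"})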

@@ -1,5 +1,5 @@
 """
-DSS Runtime - Dependency Injection & Boundary Enforcement
+DSS Runtime - Dependency Injection & Boundary Enforcement.
 
 This module provides a bounded runtime environment for DSS MCP tools.
 All external API access (Figma, Browser, HTTP) MUST go through this runtime.
@@ -16,20 +16,24 @@ Usage:
     browser = runtime.get_browser()  # Sandboxed
 """
 
-import logging
 import json
-from pathlib import Path
-from typing import Optional, Dict, Any, List
+import logging
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, Optional
 
 import yaml
 
 # Setup logging
 logger = logging.getLogger("dss.runtime")
 
 
 class BoundaryViolationError(Exception):
-    """Raised when an operation violates DSS boundaries"""
+    """Raised when an operation violates DSS boundaries."""
+
     pass
 
 
 class DSSRuntime:
     """
     Bounded runtime environment for DSS operations.
@@ -52,7 +56,11 @@ class DSSRuntime:
         self.config = self._load_config()
         self.enforcement_mode = self.config.get("enforcement", {}).get("mode", "strict")
         self.log_violations = self.config.get("enforcement", {}).get("log_violations", True)
-        self.violation_log_path = Path(self.config.get("enforcement", {}).get("violation_log", ".dss/logs/boundary-violations.jsonl"))
+        self.violation_log_path = Path(
+            self.config.get("enforcement", {}).get(
+                "violation_log", ".dss/logs/boundary-violations.jsonl"
+            )
+        )
 
         # Client caches (lazy initialization)
         self._figma_client = None
@@ -62,7 +70,7 @@ class DSSRuntime:
         logger.info(f"DSSRuntime initialized with enforcement mode: {self.enforcement_mode}")
 
     def _load_config(self) -> Dict[str, Any]:
-        """Load boundary configuration from YAML"""
+        """Load boundary configuration from YAML."""
         if not self.config_path.exists():
             logger.warning(f"Boundary config not found: {self.config_path}, using defaults")
             return self._default_config()
@@ -75,7 +83,7 @@ class DSSRuntime:
             return self._default_config()
 
     def _default_config(self) -> Dict[str, Any]:
-        """Default boundary configuration (strict)"""
+        """Default boundary configuration (strict)."""
         return {
             "version": "1.0",
             "blocked_external_apis": ["api.figma.com"],
@@ -83,12 +91,12 @@ class DSSRuntime:
             "enforcement": {
                 "mode": "strict",
                 "log_violations": True,
-                "violation_log": ".dss/logs/boundary-violations.jsonl"
-            }
+                "violation_log": ".dss/logs/boundary-violations.jsonl",
+            },
         }
 
     def _log_violation(self, operation: str, details: Dict[str, Any]):
-        """Log boundary violation to audit trail"""
+        """Log boundary violation to audit trail."""
         if not self.log_violations:
             return
 
@@ -99,7 +107,7 @@ class DSSRuntime:
             "type": "boundary_violation",
             "operation": operation,
             "enforcement_mode": self.enforcement_mode,
-            "details": details
+            "details": details,
         }
 
         with open(self.violation_log_path, "a") as f:
@@ -108,7 +116,7 @@ class DSSRuntime:
         logger.warning(f"Boundary violation: {operation} - {details}")
 
     def _log_access(self, operation: str, allowed: bool, details: Dict[str, Any]):
-        """Log successful access for audit trail"""
+        """Log successful access for audit trail."""
         access_log_path = Path(".dss/logs/runtime-access.jsonl")
         access_log_path.parent.mkdir(parents=True, exist_ok=True)
 
@@ -117,7 +125,7 @@ class DSSRuntime:
             "type": "runtime_access",
             "operation": operation,
             "allowed": allowed,
-            "details": details
+            "details": details,
         }
 
         with open(access_log_path, "a") as f:
@@ -139,11 +147,7 @@ class DSSRuntime:
         # Check if operation requires going through DSS tools
         for category, tools in required_tools.items():
            if operation in category:
-                details = {
-                    "operation": operation,
-                    "context": context,
-                    "required_tools": tools
-                }
+                details = {"operation": operation, "context": context, "required_tools": tools}
 
                 self._log_violation(operation, details)
 
@@ -173,8 +177,8 @@ class DSSRuntime:
 
         self._figma_client = SafeFigmaClient(
             token=token,
-            allow_write=False,  # Read-only by default
-            runtime=self
+            allow_write=False,
+            runtime=self,  # Read-only by default
         )
 
         logger.info("Figma client initialized (read-only mode)")
@@ -195,6 +199,7 @@ class DSSRuntime:
         if strategy == "local":
             try:
                 from strategies.local.browser import LocalBrowserStrategy
+
                 self._browser_strategy = LocalBrowserStrategy(runtime=self)
                 logger.info("Local browser strategy initialized")
             except ImportError:
@@ -204,6 +209,7 @@ class DSSRuntime:
         elif strategy == "remote":
             try:
                 from strategies.remote.browser import RemoteBrowserStrategy
+
                 self._browser_strategy = RemoteBrowserStrategy(runtime=self)
                 logger.info("Remote browser strategy initialized")
             except ImportError:
@@ -224,8 +230,7 @@ class DSSRuntime:
             from core.safe_http_client import SafeHTTPClient
 
             self._http_client = SafeHTTPClient(
-                blocked_domains=self.config.get("blocked_external_apis", []),
-                runtime=self
+                blocked_domains=self.config.get("blocked_external_apis", []), runtime=self
             )
 
             logger.info("HTTP client initialized with URL validation")
@@ -245,10 +250,7 @@ class DSSRuntime:
         blocked = self.config.get("blocked_imports", [])
 
         if module_name in blocked:
-            details = {
-                "module": module_name,
-                "blocked_imports": blocked
-            }
+            details = {"module": module_name, "blocked_imports": blocked}
 
             self._log_violation(f"direct_import:{module_name}", details)
 
@@ -292,14 +294,16 @@ class DSSRuntime:
                 "browser": self._browser_strategy is not None,
                 "http": self._http_client is not None,
             },
-            "config_version": self.config.get("version", "unknown")
+            "config_version": self.config.get("version", "unknown"),
         }
 
 
 # Global runtime instance (singleton pattern)
 _runtime_instance: Optional[DSSRuntime] = None
 
 
 def get_runtime() -> DSSRuntime:
-    """Get the global DSSRuntime instance (singleton)"""
+    """Get the global DSSRuntime instance (singleton)."""
     global _runtime_instance
 
     if _runtime_instance is None:
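
The hunk cuts off mid-function, but the standard module-level singleton idiom it implements would finish along these lines (a sketch, not the committed body):

def get_runtime() -> DSSRuntime:
    """Get the global DSSRuntime instance (singleton)."""
    global _runtime_instance

    if _runtime_instance is None:
        # Assumed completion: construct once, reuse thereafter.
        _runtime_instance = DSSRuntime()
    return _runtime_instance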

@@ -1,5 +1,5 @@
 """
-DSS Structured Logger - JSON-based logging for AI-consumable audit trails
+DSS Structured Logger - JSON-based logging for AI-consumable audit trails.
 
 Provides structured, machine-readable logging in JSONL format (one JSON object per line).
 All DSS operations are logged with consistent fields for analysis, debugging, and compliance.
@@ -27,11 +27,11 @@ import json
 import logging
 import os
 import sys
+import threading
+from contextlib import contextmanager
 from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Dict, Optional
-from contextlib import contextmanager
-import threading
 
 # Thread-local storage for context
 _context = threading.local()
@@ -51,7 +51,7 @@ class DSSJSONFormatter(logging.Formatter):
     """
 
     def format(self, record: logging.LogRecord) -> str:
-        """Format log record as single-line JSON"""
+        """Format log record as single-line JSON."""
 
         # Build base log entry
         log_entry = {
@@ -100,8 +100,10 @@ class DSSLogger(logging.Logger):
     as keyword arguments for structured logging.
     """
 
-    def _log_with_extra(self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Internal method to log with extra structured data"""
+    def _log_with_extra(
+        self, level: int, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs
+    ):
+        """Internal method to log with extra structured data."""
         if extra:
             # Store extra data in a custom attribute
             extra_record = {"extra_data": extra}
@@ -110,23 +112,23 @@ class DSSLogger(logging.Logger):
             super()._log(level, msg, (), **kwargs)
 
     def debug(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log DEBUG message with optional extra data"""
+        """Log DEBUG message with optional extra data."""
         self._log_with_extra(logging.DEBUG, msg, extra, **kwargs)
 
     def info(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log INFO message with optional extra data"""
+        """Log INFO message with optional extra data."""
         self._log_with_extra(logging.INFO, msg, extra, **kwargs)
 
     def warning(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log WARNING message with optional extra data"""
+        """Log WARNING message with optional extra data."""
         self._log_with_extra(logging.WARNING, msg, extra, **kwargs)
 
     def error(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log ERROR message with optional extra data"""
+        """Log ERROR message with optional extra data."""
         self._log_with_extra(logging.ERROR, msg, extra, **kwargs)
 
     def critical(self, msg: str, extra: Optional[Dict[str, Any]] = None, **kwargs):
-        """Log CRITICAL message with optional extra data"""
+        """Log CRITICAL message with optional extra data."""
         self._log_with_extra(logging.CRITICAL, msg, extra, **kwargs)

@@ -182,7 +184,9 @@ def get_logger(name: str, log_file: Optional[str] = None) -> DSSLogger:
 
 
 @contextmanager
-def LogContext(session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None):
+def LogContext(
+    session_id: Optional[str] = None, tool: Optional[str] = None, operation: Optional[str] = None
+):
     """
     Context manager for adding structured context to log entries.
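
The LogContext docstring is truncated by the hunk, but the intended shape of a context-manager API like this is clear. A hedged usage sketch; the field names echo the parameters above:

# Hypothetical usage of the LogContext context manager:
logger = get_logger("dss.tools")

with LogContext(session_id="abc-123", tool="token_extractor", operation="scan"):
    # Entries emitted here carry session_id/tool/operation fields
    # via the thread-local _context storage.
    logger.info("Scanning project", extra={"files": 42})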

@@ -259,12 +263,15 @@ class PerformanceLogger:
         self.end_time = None
 
     def start(self):
-        """Mark operation start time"""
+        """Mark operation start time."""
         self.start_time = datetime.now(timezone.utc)
-        self.logger.debug(f"Started: {self.operation}", extra={
-            "operation": self.operation,
-            "start_time": self.start_time.isoformat(),
-        })
+        self.logger.debug(
+            f"Started: {self.operation}",
+            extra={
+                "operation": self.operation,
+                "start_time": self.start_time.isoformat(),
+            },
+        )
 
     def end(self, extra: Optional[Dict[str, Any]] = None):
         """
@@ -276,7 +283,9 @@ class PerformanceLogger:
         self.end_time = datetime.now(timezone.utc)
 
         if self.start_time is None:
-            self.logger.warning(f"Performance logger end() called without start() for: {self.operation}")
+            self.logger.warning(
+                f"Performance logger end() called without start() for: {self.operation}"
+            )
             return
 
         duration_ms = (self.end_time - self.start_time).total_seconds() * 1000
@@ -294,7 +303,9 @@ class PerformanceLogger:
         self.logger.info(f"Completed: {self.operation}", extra=perf_data)
 
 
-def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5):
+def configure_log_rotation(
+    log_dir: Optional[Path] = None, max_bytes: int = 10 * 1024 * 1024, backup_count: int = 5
+):
     """
     Configure log rotation for DSS log files.
 
@@ -325,19 +336,19 @@ def configure_log_rotation(log_dir: Optional[Path] = None, max_bytes: int = 10 *
 
     # Add rotating file handler
     rotating_handler = RotatingFileHandler(
-        str(log_file),
-        maxBytes=max_bytes,
-        backupCount=backup_count,
-        encoding="utf-8"
+        str(log_file), maxBytes=max_bytes, backupCount=backup_count, encoding="utf-8"
     )
     rotating_handler.setFormatter(DSSJSONFormatter())
     logger.addHandler(rotating_handler)
 
-    logger.info("Log rotation configured", extra={
-        "max_bytes": max_bytes,
-        "backup_count": backup_count,
-        "log_file": str(log_file),
-    })
+    logger.info(
+        "Log rotation configured",
+        extra={
+            "max_bytes": max_bytes,
+            "backup_count": backup_count,
+            "log_file": str(log_file),
+        },
+    )
 
 
 # Example usage (can be removed in production)
@@ -356,6 +367,7 @@ if __name__ == "__main__":
     perf.start()
     # Simulate work
     import time
+
     time.sleep(0.1)
     perf.end(extra={"tokens_found": 100})