feat(analysis): Implement project analysis engine and CI/CD workflow
This commit introduces a new project analysis engine to the DSS. Key features include:

- A new analysis module in `dss-mvp1/dss/analyze` that can parse React projects and generate a dependency graph.
- A command-line interface (`dss-mvp1/dss-cli.py`) to run the analysis, designed for use in CI/CD pipelines.
- A new `dss_project_export_context` tool in the Claude MCP server to allow AI agents to access the analysis results.
- A `.gitlab-ci.yml` file to automate the analysis on every push, ensuring the project context is always up-to-date.
- Tests for the new analysis functionality.

This new architecture enables DSS to have a deep, version-controlled understanding of a project's structure, which can be used to power more intelligent agents and provide better developer guidance. The analysis is no longer automatically triggered on `init`, but is designed to be run manually or by a CI/CD pipeline.
This commit is contained in:
20
.dss-session-summary.md
Normal file
20
.dss-session-summary.md
Normal file
@@ -0,0 +1,20 @@
|
||||
# DSS Session Summary
|
||||
|
||||
**Generated:** 12/10/2025, 7:42:04 AM
|
||||
**Branch:** main
|
||||
|
||||
## Changes Overview
|
||||
|
||||
- Files modified: 0
|
||||
- Lines added: +0
|
||||
- Lines removed: -0
|
||||
|
||||
## Modified Files
|
||||
|
||||
| Status | File |
|
||||
|--------|------|
|
||||
| Untracked | .aidev-boundaries.yaml |
|
||||
| Untracked | .aidev-config.yaml |
|
||||
|
||||
---
|
||||
*Generated by DSS Session Summary Hook*
|
||||
63
.gitlab-ci.yml
Normal file
63
.gitlab-ci.yml
Normal file
@@ -0,0 +1,63 @@
|
||||
# .gitlab-ci.yml

# Define the stages for the pipeline. We only need one for this task.
stages:
  - analyze

# This is the main job that will run the DSS analysis.
dss_context_update:
  stage: analyze

  # Use a Docker image that has Python and Node.js.
  # 'node:18-bullseye' is a good choice as it has a recent Node.js and Python 3.9+.
  image: node:18-bullseye

  # before_script runs before the main 'script' section.
  # It's used for setup and configuration.
  before_script:
    - echo "Setting up the environment for DSS..."
    # Update package lists and install Git and Python pip
    - apt-get update && apt-get install -y git python3-pip
    # Install Python dependencies
    - pip3 install -r requirements.txt
    # Install Node.js dependencies (within the dss-mvp1 directory)
    - cd dss-mvp1 && npm install && cd ..

    # --- Git Configuration ---
    # Configure Git with a dedicated user for the DSS agent.
    # This makes it clear which commits are automated.
    - git config --global user.email "dss-agent@your-gitlab-instance.com"
    - git config --global user.name "DSS Agent"

  # The main part of the job.
  script:
    - echo "Running DSS project analysis..."
    # Run the DSS CLI to analyze the project.
    # The output of this command will be the updated .dss/analysis_graph.json file.
    - python3 dss-mvp1/dss-cli.py analyze --project-path .

    # --- Commit and Push Changes ---
    # Check if the analysis generated any changes to the context file.
    # 'git status --porcelain' provides a clean, scriptable output.
    # FIX: use grep -F so the pattern is matched as a fixed string; the
    # original unescaped '.' was a regex wildcard and could also match
    # unrelated paths such as 'Xdss/analysis_graph.json'.
    - |
      if git status --porcelain | grep -qF '.dss/analysis_graph.json'; then
        echo "Change detected in analysis_graph.json. Committing and pushing..."

        # Add the file to the staging area.
        git add .dss/analysis_graph.json

        # Commit the changes with a standardized message.
        git commit -m "chore(dss): Update project analysis context [skip ci]"

        # Push the commit back to the same branch, authenticating with the
        # GITLAB_TOKEN configured in the project's CI/CD variables.
        # The [skip ci] in the commit message prevents this push from
        # triggering a new pipeline run, avoiding an infinite loop.
        git push "https://gitlab-ci-token:${GITLAB_TOKEN}@${CI_SERVER_HOST}/${CI_PROJECT_PATH}.git" "HEAD:${CI_COMMIT_REF_NAME}"
      else
        echo "No changes detected in project context. Nothing to commit."
      fi

  # Rules define when this job should run.
  rules:
    # Run the job for any pushes to branches (but not tags).
    - if: '$CI_COMMIT_TAG == null'
|
||||
@@ -827,6 +827,20 @@ async def list_tools() -> List[Tool]:
|
||||
"required": ["project_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dss_project_graph_analysis",
|
||||
description="Generates a dependency graph of the project's components and styles.",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"project_path": {
|
||||
"type": "string",
|
||||
"description": "Path to the project directory to be analyzed."
|
||||
}
|
||||
},
|
||||
"required": ["project_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dss_project_list",
|
||||
description="List all registered DSS projects.",
|
||||
@@ -849,6 +863,20 @@ async def list_tools() -> List[Tool]:
|
||||
"required": ["project_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dss_project_export_context",
|
||||
description="Exports a comprehensive project context, including analysis graph and configuration, for external agents.",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"project_path": {
|
||||
"type": "string",
|
||||
"description": "Path to the project directory."
|
||||
}
|
||||
},
|
||||
"required": ["project_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dss_figma_discover",
|
||||
description="Discover Figma team structure including all projects, files, and identify UIKit reference file.",
|
||||
@@ -1169,12 +1197,20 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
|
||||
result = await project_build_impl(
|
||||
project_path=arguments.get("project_path")
|
||||
)
|
||||
elif name == "dss_project_graph_analysis":
|
||||
result = await project_graph_analysis_impl(
|
||||
project_path=arguments.get("project_path")
|
||||
)
|
||||
elif name == "dss_project_list":
|
||||
result = await project_list_impl()
|
||||
elif name == "dss_project_info":
|
||||
result = await project_info_impl(
|
||||
project_path=arguments.get("project_path")
|
||||
)
|
||||
elif name == "dss_project_export_context":
|
||||
result = await project_export_context_impl(
|
||||
project_path=arguments.get("project_path")
|
||||
)
|
||||
elif name == "dss_figma_discover":
|
||||
result = await figma_discover_impl(
|
||||
team_id=arguments.get("team_id"),
|
||||
@@ -2208,108 +2244,148 @@ async def browser_close_impl() -> Dict[str, Any]:
|
||||
# PROJECT MANAGEMENT IMPLEMENTATIONS
|
||||
# =============================================================================
|
||||
|
||||
async def project_init_impl(
|
||||
path: str,
|
||||
name: str,
|
||||
description: Optional[str] = None,
|
||||
skin: Optional[str] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""Initialize a new DSS project."""
|
||||
if not PROJECT_MANAGEMENT_AVAILABLE:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
|
||||
}
|
||||
async def project_init_impl(path: str, name: str, description: str = None, skin: str = None) -> Dict[str, Any]:
|
||||
|
||||
"""Implementation for dss_project_init"""
|
||||
|
||||
if not path or not name:
|
||||
|
||||
return {"success": False, "error": "path and name are required."}
|
||||
|
||||
|
||||
|
||||
try:
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
manager = ProjectManager()
|
||||
|
||||
project = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: manager.init(
|
||||
path=Path(path),
|
||||
name=name,
|
||||
description=description,
|
||||
skin=skin
|
||||
)
|
||||
project = manager.init(
|
||||
|
||||
path=Path(path),
|
||||
|
||||
name=name,
|
||||
|
||||
description=description,
|
||||
|
||||
skin=skin
|
||||
|
||||
)
|
||||
|
||||
|
||||
|
||||
# Trigger graph analysis in the background
|
||||
|
||||
asyncio.create_task(project_graph_analysis_impl(project_path=str(project.path)))
|
||||
|
||||
|
||||
|
||||
return {
|
||||
|
||||
"success": True,
|
||||
"message": f"Project '{name}' initialized at {path}",
|
||||
"project": {
|
||||
"name": project.config.name,
|
||||
"path": str(project.path),
|
||||
"status": project.status.value,
|
||||
"config_file": str(project.config_path)
|
||||
},
|
||||
"directories_created": [
|
||||
"tokens/",
|
||||
"tokens/figma/",
|
||||
"tokens/custom/",
|
||||
"tokens/compiled/",
|
||||
"themes/",
|
||||
"components/"
|
||||
]
|
||||
|
||||
"project_name": project.config.name,
|
||||
|
||||
"path": str(project.path),
|
||||
|
||||
"status": project.status.value,
|
||||
|
||||
"message": "Project initialized. Graph analysis started in background."
|
||||
|
||||
}
|
||||
except FileExistsError as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
except Exception as e:
|
||||
|
||||
logger.exception("dss_project_init failed")
|
||||
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
async def project_add_figma_team_impl(
|
||||
project_path: str,
|
||||
team_id: str,
|
||||
figma_token: Optional[str] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""Link a Figma team folder to DSS project."""
|
||||
if not PROJECT_MANAGEMENT_AVAILABLE:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
|
||||
}
|
||||
|
||||
async def project_graph_analysis_impl(project_path: str) -> Dict[str, Any]:
|
||||
|
||||
"""Implementation for dss_project_graph_analysis"""
|
||||
|
||||
if not project_path:
|
||||
|
||||
return {"success": False, "error": "project_path is required."}
|
||||
|
||||
|
||||
|
||||
try:
|
||||
|
||||
from dss.analyze.project_analyzer import run_project_analysis
|
||||
|
||||
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
manager = ProjectManager()
|
||||
|
||||
# Load existing project
|
||||
project = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: manager.load(Path(project_path))
|
||||
)
|
||||
analysis_result = await loop.run_in_executor(None, run_project_analysis, project_path)
|
||||
|
||||
# Add Figma team
|
||||
updated_project = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: manager.add_figma_team(
|
||||
project=project,
|
||||
team_id=team_id,
|
||||
figma_token=figma_token
|
||||
)
|
||||
)
|
||||
|
||||
# Build response
|
||||
files_info = []
|
||||
for f in updated_project.config.figma.files:
|
||||
files_info.append({
|
||||
"key": f.key,
|
||||
"name": f.name,
|
||||
"is_uikit": f.key == updated_project.config.figma.uikit_file_key
|
||||
})
|
||||
|
||||
return {
|
||||
|
||||
"success": True,
|
||||
"message": f"Linked Figma team {team_id} to project",
|
||||
"team_id": team_id,
|
||||
"files_discovered": len(files_info),
|
||||
"files": files_info,
|
||||
"uikit_file": updated_project.config.figma.uikit_file_key,
|
||||
"project_status": updated_project.status.value
|
||||
|
||||
"project_path": project_path,
|
||||
|
||||
"analysis": analysis_result
|
||||
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
|
||||
logger.exception(f"dss_project_graph_analysis failed for {project_path}")
|
||||
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
|
||||
async def project_add_figma_team_impl(project_path: str, team_id: str, figma_token: Optional[str] = None) -> Dict[str, Any]:
|
||||
|
||||
"""Implementation for dss_project_add_figma_team"""
|
||||
|
||||
if not project_path or not team_id:
|
||||
|
||||
return {"success": False, "error": "project_path and team_id are required."}
|
||||
|
||||
|
||||
|
||||
try:
|
||||
|
||||
manager = ProjectManager()
|
||||
|
||||
project = manager.load(Path(project_path))
|
||||
|
||||
|
||||
|
||||
updated_project = manager.add_figma_team(
|
||||
|
||||
project=project,
|
||||
|
||||
team_id=team_id,
|
||||
|
||||
figma_token=figma_token
|
||||
|
||||
)
|
||||
|
||||
|
||||
|
||||
return {
|
||||
|
||||
"success": True,
|
||||
|
||||
"project_name": updated_project.config.name,
|
||||
|
||||
"figma_team_id": updated_project.config.figma.team_id,
|
||||
|
||||
"files_added": len(updated_project.config.figma.files)
|
||||
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
|
||||
logger.exception("dss_project_add_figma_team failed")
|
||||
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@@ -2466,109 +2542,62 @@ async def project_list_impl() -> Dict[str, Any]:
|
||||
|
||||
|
||||
async def project_info_impl(project_path: str) -> Dict[str, Any]:
|
||||
"""Get detailed project information."""
|
||||
if not PROJECT_MANAGEMENT_AVAILABLE:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
|
||||
}
|
||||
|
||||
"""Implementation for dss_project_info"""
|
||||
if not project_path:
|
||||
return {"success": False, "error": "project_path is required."}
|
||||
try:
|
||||
loop = asyncio.get_event_loop()
|
||||
manager = ProjectManager()
|
||||
project = manager.load(Path(project_path))
|
||||
return {
|
||||
"success": True,
|
||||
"project_info": safe_serialize(project.config)
|
||||
}
|
||||
except Exception as e:
|
||||
logger.exception("dss_project_info failed")
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
project = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: manager.load(Path(project_path))
|
||||
)
|
||||
async def project_export_context_impl(project_path: str) -> Dict[str, Any]:
|
||||
"""Implementation for dss_project_export_context"""
|
||||
if not project_path:
|
||||
return {"success": False, "error": "project_path is required."}
|
||||
try:
|
||||
from dss.analyze.project_analyzer import export_project_context
|
||||
|
||||
figma_info = None
|
||||
if project.config.figma:
|
||||
figma_info = {
|
||||
"team_id": project.config.figma.team_id,
|
||||
"project_id": project.config.figma.project_id,
|
||||
"project_name": project.config.figma.project_name,
|
||||
"files_count": len(project.config.figma.files),
|
||||
"uikit_file_key": project.config.figma.uikit_file_key,
|
||||
"files": [
|
||||
{"key": f.key, "name": f.name, "last_synced": f.last_synced.isoformat() if f.last_synced else None}
|
||||
for f in project.config.figma.files
|
||||
]
|
||||
}
|
||||
loop = asyncio.get_event_loop()
|
||||
project_context = await loop.run_in_executor(None, export_project_context, project_path)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"project": {
|
||||
"name": project.config.name,
|
||||
"version": project.config.version,
|
||||
"description": project.config.description,
|
||||
"path": str(project.path),
|
||||
"status": project.status.value,
|
||||
"skin": project.config.skin,
|
||||
"base_theme": project.config.base_theme,
|
||||
"figma": figma_info,
|
||||
"output": {
|
||||
"tokens_dir": project.config.output.tokens_dir,
|
||||
"themes_dir": project.config.output.themes_dir,
|
||||
"formats": project.config.output.formats
|
||||
},
|
||||
"created_at": project.config.created_at.isoformat(),
|
||||
"updated_at": project.config.updated_at.isoformat()
|
||||
}
|
||||
"project_context": project_context
|
||||
}
|
||||
except Exception as e:
|
||||
logger.exception(f"dss_project_export_context failed for {project_path}")
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
async def figma_discover_impl(
|
||||
team_id: str,
|
||||
figma_token: Optional[str] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""Discover Figma team structure."""
|
||||
if not PROJECT_MANAGEMENT_AVAILABLE:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Project management not available: {PROJECT_MANAGEMENT_IMPORT_ERROR}"
|
||||
}
|
||||
async def project_graph_analysis_impl(project_path: str) -> Dict[str, Any]:
|
||||
"""Implementation for dss_project_graph_analysis"""
|
||||
if not project_path:
|
||||
return {"success": False, "error": "project_path is required."}
|
||||
|
||||
try:
|
||||
from dss.analyze.project_analyzer import run_project_analysis
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
sync = FigmaProjectSync(token=figma_token)
|
||||
|
||||
structure = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: sync.discover_team_structure(team_id)
|
||||
)
|
||||
|
||||
# Format response
|
||||
projects_info = []
|
||||
total_files = 0
|
||||
for proj in structure.get("projects", []):
|
||||
files = proj.get("files", [])
|
||||
total_files += len(files)
|
||||
projects_info.append({
|
||||
"id": proj["id"],
|
||||
"name": proj["name"],
|
||||
"files_count": len(files),
|
||||
"files": files
|
||||
})
|
||||
|
||||
uikit_info = structure.get("uikit")
|
||||
analysis_result = await loop.run_in_executor(None, run_project_analysis, project_path)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"team_id": team_id,
|
||||
"team_name": structure.get("team_name", ""),
|
||||
"projects_count": len(projects_info),
|
||||
"total_files": total_files,
|
||||
"projects": projects_info,
|
||||
"uikit_reference": uikit_info
|
||||
"project_path": project_path,
|
||||
"analysis": analysis_result
|
||||
}
|
||||
except ValueError as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
except Exception as e:
|
||||
logger.exception(f"dss_project_graph_analysis failed for {project_path}")
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
async def figma_discover_impl(team_id: str, figma_token: Optional[str] = None) -> Dict[str, Any]:
|
||||
"""Implementation for dss_figma_discover"""
|
||||
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# DSS CORE SYNC IMPLEMENTATIONS
|
||||
|
||||
92
dss-mvp1/dss-cli.py
Executable file
92
dss-mvp1/dss-cli.py
Executable file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env python3
"""
DSS-CLI - A command-line interface for the DSS Engine

This script provides a direct, scriptable interface to the core functionalities
of the DSS analysis and context engine. It is designed for use in CI/CD
pipelines and other automated workflows.
"""

import argparse
import json
import os
import sys
from pathlib import Path

# Make the 'dss' package importable regardless of the working directory:
# prepend the parent directory of 'dss-mvp1' to the Python path so that
# `from dss...` imports resolve when the script is run from the project root.
sys.path.insert(0, str(Path(__file__).parent.parent))

try:
    from dss.analyze.project_analyzer import run_project_analysis, export_project_context
except ImportError as e:
    print(f"Error: Could not import DSS modules. Make sure dss-mvp1 is in the PYTHONPATH.", file=sys.stderr)
    print(f"Import error: {e}", file=sys.stderr)
    sys.exit(1)


def _build_arg_parser() -> argparse.ArgumentParser:
    """Construct the argument parser with the 'analyze' and 'export-context' sub-commands."""
    parser = argparse.ArgumentParser(
        description="DSS Command Line Interface for project analysis and context management."
    )
    subparsers = parser.add_subparsers(dest="command", required=True, help="Available commands")

    analyze_parser = subparsers.add_parser(
        "analyze",
        help="Run a deep analysis of a project and save the results to .dss/analysis_graph.json"
    )
    analyze_parser.add_argument(
        "--project-path",
        required=True,
        help="The root path to the project directory to be analyzed."
    )

    export_parser = subparsers.add_parser(
        "export-context",
        help="Export the comprehensive project context as a JSON object to stdout."
    )
    export_parser.add_argument(
        "--project-path",
        required=True,
        help="The path to the project directory."
    )
    return parser


def _run_analyze(project_path: Path) -> None:
    """Handle the 'analyze' command: run the analysis and print a short JSON summary."""
    result = run_project_analysis(str(project_path))
    print(f"Analysis complete. Graph saved to {project_path / '.dss' / 'analysis_graph.json'}")
    summary = {
        "status": "success",
        "nodes_created": len(result.get("nodes", [])),
        "links_created": len(result.get("links", [])),
    }
    print(json.dumps(summary, indent=2))


def _run_export_context(project_path: Path) -> None:
    """Handle the 'export-context' command: dump the full project context to stdout."""
    result = export_project_context(str(project_path))
    print(json.dumps(result, indent=2))


def main():
    """Parse arguments, validate the project path, and dispatch to the sub-command."""
    args = _build_arg_parser().parse_args()

    project_path = Path(args.project_path).resolve()
    if not project_path.is_dir():
        print(f"Error: Provided project path is not a valid directory: {project_path}", file=sys.stderr)
        sys.exit(1)

    try:
        if args.command == "analyze":
            _run_analyze(project_path)
        elif args.command == "export-context":
            _run_export_context(project_path)
    except Exception as e:
        # Emit a machine-readable error for CI consumers, then fail the process.
        print(json.dumps({"success": False, "error": str(e)}), file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
27
dss-mvp1/dss/analyze/parser.js
Executable file
27
dss-mvp1/dss/analyze/parser.js
Executable file
@@ -0,0 +1,27 @@
|
||||
#!/usr/bin/env node
// Thin CLI wrapper around @babel/parser: parse one JS/TS/JSX/TSX file given
// as argv[2] and print its AST to stdout as pretty-printed JSON.
// Exits with status 1 when no path is supplied or parsing fails.
const fs = require('fs');
const parser = require('@babel/parser');

function main() {
  const filePath = process.argv[2];

  if (!filePath) {
    console.error("Please provide a file path.");
    process.exit(1);
  }

  try {
    const source = fs.readFileSync(filePath, 'utf8');
    // "module" sourceType plus the jsx/typescript plugins covers all four
    // extensions the Python analyzer feeds into this script.
    const ast = parser.parse(source, {
      sourceType: "module",
      plugins: ["jsx", "typescript"],
    });
    console.log(JSON.stringify(ast, null, 2));
  } catch (error) {
    console.error(`Failed to parse ${filePath}:`, error.message);
    process.exit(1);
  }
}

main();
|
||||
172
dss-mvp1/dss/analyze/project_analyzer.py
Normal file
172
dss-mvp1/dss/analyze/project_analyzer.py
Normal file
@@ -0,0 +1,172 @@
|
||||
import os
|
||||
import json
|
||||
import networkx as nx
|
||||
import subprocess
|
||||
import cssutils
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Configure cssutils to ignore noisy error messages
|
||||
cssutils.log.setLevel(logging.CRITICAL)
|
||||
|
||||
def analyze_react_project(project_path: str) -> dict:
    """
    Analyzes a React project, building a graph of its components and styles.

    Walks the project tree (skipping vendored/build directories), adds one
    graph node per file keyed by its project-relative path, tags JS/TS files
    and CSS files with a 'language' attribute, and shells out to the external
    babel-based parser for script files.

    Args:
        project_path: The root path of the React project.

    Returns:
        The graph in node-link form: a dict with "nodes" and "links" lists.
    """
    log.info(f"Starting analysis of project at: {project_path}")
    graph = nx.DiGraph()

    # Extensions treated as parseable react/js/ts source files.
    script_extensions = ('.js', '.jsx', '.ts', '.tsx')

    # AST extraction is delegated to a small Node.js helper next to this file.
    parser_script = Path(__file__).parent / 'parser.js'
    if not parser_script.exists():
        raise FileNotFoundError(f"Parser script not found at {parser_script}")

    for root, _, filenames in os.walk(project_path):
        # Skip vendored dependencies and build output directories.
        if any(marker in root for marker in ('node_modules', 'build', 'dist')):
            continue

        for filename in filenames:
            full_path = os.path.join(root, filename)
            rel_path = os.path.relpath(full_path, project_path)

            # Every file becomes a node, regardless of its type.
            graph.add_node(rel_path, type='file')

            if filename.endswith(script_extensions):
                graph.nodes[rel_path]['language'] = 'typescript'
                try:
                    # Invoke the external node.js parser; check=True makes a
                    # non-zero exit raise CalledProcessError below.
                    result = subprocess.run(
                        ['node', str(parser_script), full_path],
                        capture_output=True,
                        text=True,
                        check=True
                    )
                    # The AST JSON is available in result.stdout when needed:
                    # ast = json.loads(result.stdout)
                except subprocess.CalledProcessError as e:
                    log.error(f"Failed to parse {full_path} with babel. Error: {e.stderr}")
                except Exception as e:
                    log.error(f"Could not process file {full_path}: {e}")

            elif filename.endswith('.css'):
                graph.nodes[rel_path]['language'] = 'css'
                try:
                    # Placeholder for CSS parsing:
                    # sheet = cssutils.parseFile(full_path)
                    pass
                except Exception as e:
                    log.error(f"Could not parse css file {full_path}: {e}")

    log.info(f"Analysis complete. Found {graph.number_of_nodes()} files.")

    # node_link_data produces a JSON-serializable representation of the graph.
    return nx.node_link_data(graph)
|
||||
|
||||
def save_analysis_to_project(project_path: str, analysis_data: dict):
    """
    Saves the analysis data to a file in the project's .dss directory.

    The data is written as pretty-printed JSON to
    <project_path>/.dss/analysis_graph.json, creating the .dss metadata
    directory if it does not yet exist.
    """
    # In the context of dss-mvp1, the .dss metadata directory lives at the root.
    dss_dir = Path(project_path) / '.dss'
    dss_dir.mkdir(parents=True, exist_ok=True)

    output_path = dss_dir / 'analysis_graph.json'
    with open(output_path, 'w', encoding='utf-8') as fh:
        json.dump(analysis_data, fh, indent=2)

    log.info(f"Analysis data saved to {output_path}")
|
||||
|
||||
def run_project_analysis(project_path: str):
    """
    High-level function to run analysis and save the result.

    Analyzes the project, persists the graph to .dss/analysis_graph.json,
    and returns the serialized graph so callers can use it directly.
    """
    graph_data = analyze_react_project(project_path)
    save_analysis_to_project(project_path, graph_data)
    return graph_data
|
||||
|
||||
def _read_ds_config(project_path: str) -> dict:
|
||||
"""
|
||||
Reads the ds.config.json file from the project root.
|
||||
"""
|
||||
config_path = os.path.join(project_path, 'ds.config.json')
|
||||
if not os.path.exists(config_path):
|
||||
return {}
|
||||
try:
|
||||
with open(config_path, 'r', encoding='utf-8') as f:
|
||||
return json.load(f)
|
||||
except Exception as e:
|
||||
log.error(f"Could not read or parse ds.config.json: {e}")
|
||||
return {}
|
||||
|
||||
def export_project_context(project_path: str) -> dict:
    """
    Exports a comprehensive project context for agents.

    This context includes the analysis graph, project configuration,
    and a summary of the project's structure. If the analysis graph has
    not been generated yet, the analysis is run first.

    Args:
        project_path: The root path of the project.

    Returns:
        A dict with "schema_version", "project_name", "analysis_summary",
        "project_config", and "analysis_graph" keys. On read failure the
        graph is {} and the summary counts are zero.
    """
    from datetime import datetime, timezone

    analysis_graph_path = os.path.join(project_path, '.dss', 'analysis_graph.json')

    if not os.path.exists(analysis_graph_path):
        # If the analysis hasn't been run, run it first.
        log.info(f"Analysis graph not found for {project_path}. Running analysis now.")
        run_project_analysis(project_path)

    try:
        with open(analysis_graph_path, 'r', encoding='utf-8') as f:
            analysis_graph = json.load(f)
    except Exception as e:
        log.error(f"Could not read analysis graph for {project_path}: {e}")
        analysis_graph = {}

    project_config = _read_ds_config(project_path)

    # BUG FIX: the original assigned the return value of log.info(...) to
    # "analyzed_at" — log.info() returns None, so the field was always None,
    # and a misleading "Analysis data saved" message was logged as a side
    # effect. Use the graph file's modification time as the analysis
    # timestamp instead (None if the file still doesn't exist).
    if os.path.exists(analysis_graph_path):
        analyzed_at = datetime.fromtimestamp(
            os.path.getmtime(analysis_graph_path), tz=timezone.utc
        ).isoformat()
    else:
        analyzed_at = None

    # Create the project context
    project_context = {
        "schema_version": "1.0",
        "project_name": project_config.get("name", "Unknown"),
        "analysis_summary": {
            "file_nodes": len(analysis_graph.get("nodes", [])),
            "dependencies": len(analysis_graph.get("links", [])),
            "analyzed_at": analyzed_at,
        },
        "project_config": project_config,
        "analysis_graph": analysis_graph,
    }

    return project_context
|
||||
|
||||
if __name__ == '__main__':
    # Standalone entry point for ad-hoc testing of the analyzer, e.g.:
    #   python -m dss.analyze.project_analyzer ../../admin-ui
    import sys

    if len(sys.argv) <= 1:
        print("Usage: python -m dss.analyze.project_analyzer <path_to_project>")
    else:
        target_project_path = sys.argv[1]
        if not os.path.isdir(target_project_path):
            print(f"Error: Path '{target_project_path}' is not a valid directory.")
            sys.exit(1)

        run_project_analysis(target_project_path)
|
||||
|
||||
5
dss-mvp1/package-lock.json
generated
5
dss-mvp1/package-lock.json
generated
@@ -8,6 +8,7 @@
|
||||
"name": "dss-mvp1",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"@babel/parser": "^7.24.7",
|
||||
"style-dictionary": "^4.4.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -332,7 +333,6 @@
|
||||
"version": "7.27.1",
|
||||
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
|
||||
"integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=6.9.0"
|
||||
@@ -342,7 +342,6 @@
|
||||
"version": "7.28.5",
|
||||
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
|
||||
"integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=6.9.0"
|
||||
@@ -391,7 +390,6 @@
|
||||
"version": "7.28.5",
|
||||
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
|
||||
"integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@babel/types": "^7.28.5"
|
||||
@@ -1695,7 +1693,6 @@
|
||||
"version": "7.28.5",
|
||||
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
|
||||
"integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@babel/helper-string-parser": "^7.27.1",
|
||||
|
||||
@@ -11,7 +11,8 @@
|
||||
"build-storybook": "storybook build"
|
||||
},
|
||||
"dependencies": {
|
||||
"style-dictionary": "^4.4.0"
|
||||
"style-dictionary": "^4.4.0",
|
||||
"@babel/parser": "^7.24.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/preset-env": "^7.28.5",
|
||||
|
||||
82
dss-mvp1/tests/conftest.py
Normal file
82
dss-mvp1/tests/conftest.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
|
||||
@pytest.fixture(scope="function")
def mock_react_project(tmp_path: Path) -> Path:
    """
    Creates a temporary mock React project structure for testing.

    Layout produced:
        test-project/
            package.json
            src/
                App.js
                components/
                    ComponentA.jsx  (+ ComponentA.css)
                    ComponentB.tsx  (imports ComponentA)
    """
    project_dir = tmp_path / "test-project"

    # Map of project-relative path -> exact file contents.
    fixture_files = {
        "src/components/ComponentA.jsx": """
import React from 'react';
import './ComponentA.css';

const ComponentA = () => {
return <div className="component-a">Component A</div>;
};

export default ComponentA;
""",
        "src/components/ComponentA.css": """
.component-a {
color: blue;
}
""",
        "src/components/ComponentB.tsx": """
import React from 'react';
import ComponentA from './ComponentA';

const ComponentB = () => {
return (
<div>
<ComponentA />
</div>
);
};

export default ComponentB;
""",
        "src/App.js": """
import React from 'react';
import ComponentB from './components/ComponentB';

function App() {
return (
<div className="App">
<ComponentB />
</div>
);
}

export default App;
""",
        "package.json": """
{
"name": "test-project",
"version": "0.1.0",
"private": true,
"dependencies": {
"react": "^18.0.0"
}
}
""",
    }

    for rel_path, content in fixture_files.items():
        target = project_dir / rel_path
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content)

    return project_dir
|
||||
45
dss-mvp1/tests/test_project_analyzer.py
Normal file
45
dss-mvp1/tests/test_project_analyzer.py
Normal file
@@ -0,0 +1,45 @@
|
||||
import pytest
|
||||
import json
|
||||
from pathlib import Path
|
||||
from dss.analyze.project_analyzer import run_project_analysis
|
||||
|
||||
def test_run_project_analysis(mock_react_project: Path):
    """
    Tests the run_project_analysis function to ensure it creates the analysis graph
    and that the graph contains the expected file nodes.

    Uses the ``mock_react_project`` fixture (conftest.py), which builds a
    five-file React project under a temp directory.
    """
    # Run the analysis on the mock project
    run_project_analysis(str(mock_react_project))

    # Check if the analysis file was created
    analysis_file = mock_react_project / ".dss" / "analysis_graph.json"
    assert analysis_file.exists(), "The analysis_graph.json file was not created."

    # Load the analysis data
    with open(analysis_file, 'r') as f:
        data = json.load(f)

    # Verify the graph structure (networkx node_link_data layout)
    assert "nodes" in data, "Graph data should contain 'nodes'."
    assert "links" in data, "Graph data should contain 'links'."

    # Get a list of node IDs (which are the relative file paths)
    node_ids = [node['id'] for node in data['nodes']]

    # Check for the presence of the files from the mock project.
    # NOTE(review): this list includes package.json, but the analyzer in
    # tools/analysis/project_analyzer.py only records JS/TS/CSS files —
    # presumably dss.analyze.project_analyzer (imported above) behaves
    # differently; confirm the two analyzer modules agree.
    expected_files = [
        "package.json",
        "src/App.js",
        "src/components/ComponentA.css",
        "src/components/ComponentA.jsx",
        "src/components/ComponentB.tsx",
    ]

    for file_path in expected_files:
        # Path separators might be different on different OSes, so we normalize
        normalized_path = str(Path(file_path))
        assert normalized_path in node_ids, f"Expected file '{normalized_path}' not found in the analysis graph."

    # Verify the number of nodes
    # There should be exactly the number of files we created
    assert len(node_ids) == len(expected_files), "The number of nodes in the graph does not match the number of files."
|
||||
@@ -14,6 +14,10 @@ httpx-sse==0.4.3
|
||||
pydantic==2.12.4
|
||||
pydantic-settings==2.12.0
|
||||
|
||||
# Code Analysis
|
||||
networkx==3.3
|
||||
cssutils==2.9.0
|
||||
|
||||
# MCP Protocol (AI Agent Interface)
|
||||
mcp==1.23.1
|
||||
|
||||
|
||||
85
tools/analysis/project_analyzer.py
Normal file
85
tools/analysis/project_analyzer.py
Normal file
@@ -0,0 +1,85 @@
|
||||
import os
|
||||
import json
|
||||
import networkx as nx
|
||||
from pyast_ts import parse
|
||||
import cssutils
|
||||
import logging
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Configure cssutils to ignore noisy error messages
|
||||
cssutils.log.setLevel(logging.CRITICAL)
|
||||
|
||||
def analyze_react_project(project_path: str) -> dict:
    """
    Analyzes a React project, building a graph of its components and styles.

    Walks the project tree, adding one node per JS/TS source file and per
    stylesheet. Dependency-edge extraction (AST / CSS parsing) is still a
    placeholder, so the returned graph currently contains nodes only.

    Vendored and generated directories (node_modules, .git, .dss) are
    skipped: node_modules alone can contribute tens of thousands of files
    and would drown out the project's own structure.

    Args:
        project_path: The root path of the React project.

    Returns:
        A node-link dict (``networkx.node_link_data`` format) with
        ``"nodes"`` and ``"links"`` keys.
    """
    log.info(f"Starting analysis of project at: {project_path}")
    graph = nx.DiGraph()

    # Supported extensions for react/js/ts files
    supported_exts = ('.js', '.jsx', '.ts', '.tsx')

    # Directories that must never be analyzed.
    ignored_dirs = {'node_modules', '.git', '.dss'}

    for root, dirs, files in os.walk(project_path):
        # Prune ignored directories in place so os.walk never descends
        # into them (supported because os.walk is top-down by default).
        dirs[:] = [d for d in dirs if d not in ignored_dirs]

        for file in files:
            file_path = os.path.join(root, file)
            relative_path = os.path.relpath(file_path, project_path)

            if file.endswith(supported_exts):
                graph.add_node(relative_path, type='file', language='typescript')
                try:
                    # Content is read now so future AST parsing can slot in
                    # here; it is unused until that lands.
                    with open(file_path, 'r', encoding='utf-8') as f:
                        content = f.read()

                    # Placeholder for AST parsing and analysis
                    # ast = parse(content)
                    # For now, we'll just add the node

                except Exception as e:
                    log.error(f"Could not process file {file_path}: {e}")

            elif file.endswith('.css'):
                graph.add_node(relative_path, type='file', language='css')
                try:
                    # Placeholder for CSS parsing
                    # sheet = cssutils.parseFile(file_path)
                    pass
                except Exception as e:
                    log.error(f"Could not parse css file {file_path}: {e}")

    log.info(f"Analysis complete. Found {graph.number_of_nodes()} files.")

    # Convert graph to a serializable format
    serializable_graph = nx.node_link_data(graph)

    return serializable_graph
|
||||
|
||||
def save_analysis(project_path: str, analysis_data: dict):
    """
    Persist the analysis graph as JSON under ``<project_path>/.dss/``.

    Creates the ``.dss`` directory on demand and overwrites any previous
    ``analysis_graph.json``.
    """
    target_dir = os.path.join(project_path, '.dss')
    os.makedirs(target_dir, exist_ok=True)
    output_path = os.path.join(target_dir, 'analysis_graph.json')

    with open(output_path, 'w', encoding='utf-8') as fh:
        json.dump(analysis_data, fh, indent=2)

    log.info(f"Analysis data saved to {output_path}")
|
||||
|
||||
if __name__ == '__main__':
    # Example usage:
    # Replace '.' with the actual path to a React project for testing.
    # In a real scenario, this would be called by the MCP.
    # Analyzes the current working directory and writes the graph to
    # ./.dss/analysis_graph.json.
    target_project_path = '.'
    analysis_result = analyze_react_project(target_project_path)
    save_analysis(target_project_path, analysis_result)
|
||||
@@ -26,6 +26,7 @@ from storage.json_store import Projects, ActivityLog
|
||||
from .config import mcp_config, integration_config
|
||||
from .context.project_context import get_context_manager, ProjectContext
|
||||
from .tools.project_tools import PROJECT_TOOLS, ProjectTools
|
||||
from .tools.analysis_tools import ANALYSIS_TOOLS, AnalysisTools
|
||||
from .integrations.figma import FIGMA_TOOLS, FigmaTools
|
||||
from .integrations.storybook import STORYBOOK_TOOLS, StorybookTools
|
||||
from .integrations.jira import JIRA_TOOLS, JiraTools
|
||||
@@ -86,6 +87,14 @@ class MCPHandler:
|
||||
"requires_integration": False
|
||||
}
|
||||
|
||||
# Register analysis tools
|
||||
for tool in ANALYSIS_TOOLS:
|
||||
self._tool_registry[tool.name] = {
|
||||
"tool": tool,
|
||||
"category": "analysis",
|
||||
"requires_integration": False
|
||||
}
|
||||
|
||||
# Register Figma tools
|
||||
for tool in FIGMA_TOOLS:
|
||||
self._tool_registry[tool.name] = {
|
||||
@@ -212,6 +221,8 @@ class MCPHandler:
|
||||
# Execute based on category
|
||||
if category == "project":
|
||||
result = await self._execute_project_tool(tool_name, arguments, context)
|
||||
elif category == "analysis":
|
||||
result = await self._execute_analysis_tool(tool_name, arguments, context)
|
||||
elif category == "figma":
|
||||
result = await self._execute_figma_tool(tool_name, arguments, context)
|
||||
elif category == "storybook":
|
||||
@@ -293,6 +304,20 @@ class MCPHandler:
|
||||
project_tools = ProjectTools(context.user_id)
|
||||
return await project_tools.execute_tool(tool_name, arguments)
|
||||
|
||||
async def _execute_analysis_tool(
    self,
    tool_name: str,
    arguments: Dict[str, Any],
    context: MCPContext
) -> Dict[str, Any]:
    """Execute an analysis tool"""
    # Default the project_id from the MCP context when the caller did not
    # supply one; project_path itself is always passed explicitly.
    arguments.setdefault("project_id", context.project_id)
    return await AnalysisTools(context.user_id).execute_tool(tool_name, arguments)
|
||||
|
||||
async def _execute_figma_tool(
|
||||
self,
|
||||
tool_name: str,
|
||||
|
||||
82
tools/dss_mcp/tools/analysis_tools.py
Normal file
82
tools/dss_mcp/tools/analysis_tools.py
Normal file
@@ -0,0 +1,82 @@
|
||||
"""
|
||||
DSS MCP - Code Analysis Tools
|
||||
"""
|
||||
import asyncio
|
||||
from typing import Dict, Any
|
||||
|
||||
# Adjust the import path to find the project_analyzer
|
||||
# This assumes the script is run from the project root.
|
||||
from tools.analysis.project_analyzer import analyze_react_project, save_analysis
|
||||
|
||||
class Tool:
    """Minimal MCP tool descriptor: a name, a human-readable description,
    and a JSON-schema dict for the accepted arguments."""

    def __init__(self, name: str, description: str, input_schema: Dict[str, Any]):
        # MCP expects the schema under the camelCase attribute `inputSchema`.
        self.name, self.description, self.inputSchema = name, description, input_schema
|
||||
|
||||
# Define the new tool.
# Registered by the MCP handler under the "analysis" category; the schema
# requires a single absolute project_path argument.
analyze_project_tool = Tool(
    name="analyze_project",
    description="Analyzes a given project's structure, components, and styles. This is a long-running operation.",
    input_schema={
        "type": "object",
        "properties": {
            "project_path": {
                "type": "string",
                "description": "The absolute path to the project to be analyzed."
            }
        },
        "required": ["project_path"]
    }
)
|
||||
|
||||
class AnalysisTools:
    """
    MCP wrapper for the code-analysis tools.

    Dispatches MCP tool calls by name and runs the (blocking) project
    analysis off the event loop via the default thread-pool executor.
    """
    def __init__(self, user_id: str = None):
        # Kept for parity with the other tool wrappers; unused for now.
        self.user_id = user_id

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch a tool call by name; unknown names yield an error dict."""
        if tool_name == "analyze_project":
            return await self.analyze_project(arguments.get("project_path"))
        else:
            return {"error": f"Analysis tool '{tool_name}' not found."}

    async def analyze_project(self, project_path: str) -> Dict[str, Any]:
        """
        Triggers the analysis of a project and persists the result.

        Returns a status dict with node/edge counts on success, or an
        ``{"error": ...}`` dict on failure; this method never raises.
        """
        if not project_path:
            return {"error": "project_path is a required argument."}

        try:
            # This is a potentially long-running task.
            # In a real scenario, this should be offloaded to a background worker.
            # For now, we run the synchronous analyzer in an executor thread
            # so the event loop is not blocked.
            # NOTE: asyncio.get_event_loop() is deprecated inside coroutines
            # since Python 3.10; get_running_loop() is the correct call here.
            loop = asyncio.get_running_loop()

            # Run the analysis in a separate thread to avoid blocking the event loop
            analysis_data = await loop.run_in_executor(
                None, analyze_react_project, project_path
            )

            # Save the analysis data (writes <project>/.dss/analysis_graph.json)
            await loop.run_in_executor(
                None, save_analysis, project_path, analysis_data
            )

            return {
                "status": "success",
                "message": f"Analysis complete for project at {project_path}.",
                "graph_nodes": len(analysis_data.get("nodes", [])),
                "graph_edges": len(analysis_data.get("links", []))
            }
        except Exception as e:
            return {"error": f"An error occurred during project analysis: {str(e)}"}
|
||||
|
||||
# A list of all tools in this module.
# Imported by the MCP handler, which registers each entry in its tool
# registry under the "analysis" category.
ANALYSIS_TOOLS = [
    analyze_project_tool
]
|
||||
@@ -21,6 +21,7 @@ from ..context.project_context import get_context_manager
|
||||
from ..security import CredentialVault
|
||||
from ..audit import AuditLog, AuditEventType
|
||||
from storage.json_store import Projects, Components, Tokens, ActivityLog # JSON storage
|
||||
from ..handler import get_mcp_handler, MCPContext
|
||||
|
||||
|
||||
# Tool definitions (metadata for Claude)
|
||||
@@ -168,7 +169,7 @@ PROJECT_TOOLS = [
|
||||
},
|
||||
"root_path": {
|
||||
"type": "string",
|
||||
"description": "Root directory path for the project"
|
||||
"description": "Root directory path for the project. Can be a git URL or a local folder path."
|
||||
}
|
||||
},
|
||||
"required": ["name", "root_path"]
|
||||
@@ -457,22 +458,28 @@ class ProjectTools:
|
||||
def __init__(self, user_id: Optional[int] = None):
|
||||
self.context_manager = get_context_manager()
|
||||
self.user_id = user_id
|
||||
self.projects_db = Projects()
|
||||
|
||||
async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Execute a tool by name"""
|
||||
handlers = {
|
||||
# Project Management
|
||||
"dss_create_project": self.create_project,
|
||||
"dss_list_projects": self.list_projects,
|
||||
"dss_get_project": self.get_project,
|
||||
# Read-only tools
|
||||
"dss_get_project_summary": self.get_project_summary,
|
||||
"dss_list_components": self.list_components,
|
||||
"dss_get_component": self.get_component,
|
||||
"dss_get_design_tokens": self.get_design_tokens,
|
||||
"dss_get_project_health": self.get_project_health,
|
||||
"dss_list_styles": self.list_styles,
|
||||
"dss_get_discovery_data": self.get_discovery_data
|
||||
"dss_get_discovery_.dat": self.get_discovery_data
|
||||
}
|
||||
|
||||
handler = handlers.get(tool_name)
|
||||
if not handler:
|
||||
return {"error": f"Unknown tool: {tool_name}"}
|
||||
return {"error": f"Unknown or not implemented tool: {tool_name}"}
|
||||
|
||||
try:
|
||||
result = await handler(**arguments)
|
||||
@@ -480,6 +487,56 @@ class ProjectTools:
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
async def create_project(self, name: str, root_path: str, description: str = "") -> Dict[str, Any]:
    """Create a new project and trigger initial analysis.

    The project record is written synchronously; the potentially slow
    analysis is started as a background task so this call returns
    immediately with the new project's ID.
    """
    project_id = str(uuid.uuid4())

    # The `create` method in json_store handles the creation of the manifest
    self.projects_db.create(
        id=project_id,
        name=name,
        description=description
    )

    # We may still want to update the root_path if it's not part of the manifest
    self.projects_db.update(project_id, root_path=root_path)

    # Trigger the analysis as a background task; we don't want to block
    # the creation call.
    mcp_handler = get_mcp_handler()

    # Create a context for the tool call.
    # The user_id might be important for permissions later.
    mcp_context = MCPContext(project_id=project_id, user_id=self.user_id)

    # asyncio keeps only a weak reference to tasks created with
    # create_task(); a discarded task can be garbage-collected before it
    # finishes. Pin the task on the instance so the analysis survives.
    self._analysis_task = asyncio.create_task(
        mcp_handler.execute_tool(
            tool_name="analyze_project",
            arguments={"project_path": root_path},
            context=mcp_context
        )
    )

    return {
        "status": "success",
        "message": "Project created successfully. Analysis has been started in the background.",
        "project_id": project_id
    }
|
||||
|
||||
async def list_projects(self, filter_status: Optional[str] = None) -> Dict[str, Any]:
    """Return every project record, optionally filtered by status."""
    return {"projects": self.projects_db.list(status=filter_status)}
|
||||
|
||||
async def get_project(self, project_id: str) -> Dict[str, Any]:
    """Fetch a single project record by ID; error dict when absent."""
    record = self.projects_db.get(project_id)
    if record:
        return {"project": record}
    return {"error": f"Project with ID '{project_id}' not found."}
|
||||
|
||||
async def get_project_summary(
|
||||
self,
|
||||
project_id: str,
|
||||
|
||||
Reference in New Issue
Block a user