refactor(ux): consolidate BMAD skills, update design system, and clean up Prisma generated client

This commit is contained in:
Sepehr Ramezani
2026-04-19 19:21:27 +02:00
parent 5296c4da2c
commit 25529a24b8
2476 changed files with 127934 additions and 101962 deletions

View File

@@ -12,7 +12,7 @@ Covers:
- Sequential pattern detection in prompts (numbered Read/Grep/Glob steps)
- Subagent-from-subagent detection
- Loop patterns (read all, analyze each, for each file)
- Memory loading pattern detection (load all memory, read all sidecar, etc.)
- Memory loading pattern detection (load all memory, read all memory, etc.)
- Multi-source operation detection
"""
@@ -149,8 +149,8 @@ def scan_sequential_patterns(filepath: Path, rel_path: str) -> list[dict]:
# Memory loading patterns (agent-specific)
memory_loading_patterns = [
(r'[Ll]oad all (?:memory|memories)', 'load-all-memory'),
(r'[Rr]ead all sidecar (?:files|data)', 'read-all-sidecar'),
(r'[Ll]oad (?:entire|full|complete) sidecar', 'load-entire-sidecar'),
(r'[Rr]ead all (?:memory|agent memory) (?:files|data)', 'read-all-memory'),
(r'[Ll]oad (?:entire|full|complete) (?:memory|agent memory)', 'load-entire-memory'),
(r'[Ll]oad all (?:context|state)', 'load-all-context'),
(r'[Rr]ead (?:entire|full|complete) memory', 'read-entire-memory'),
]
@@ -252,7 +252,7 @@ def scan_execution_deps(skill_path: Path) -> dict:
for p in sequential_patterns:
if p['type'] == 'subagent-chain-violation':
severity = 'critical'
elif p['type'] in ('load-all-memory', 'read-all-sidecar', 'load-entire-sidecar',
elif p['type'] in ('load-all-memory', 'read-all-memory', 'load-entire-memory',
'load-all-context', 'read-entire-memory'):
severity = 'high'
else:

View File

@@ -293,6 +293,14 @@ def scan_prompt_metrics(skill_path: Path) -> dict:
data['is_skill_md'] = True
files_data.append(data)
# Detect memory agent
is_memory_agent = False
assets_dir = skill_path / 'assets'
if assets_dir.exists():
is_memory_agent = any(
f.name.endswith('-template.md') for f in assets_dir.iterdir() if f.is_file()
)
# Prompt files at skill root
skip_files = {'SKILL.md'}
@@ -307,6 +315,19 @@ def scan_prompt_metrics(skill_path: Path) -> dict:
files_data.append(data)
# Also scan references/ for capability prompts (memory agents keep prompts here)
refs_dir = skill_path / 'references'
if refs_dir.exists():
for f in sorted(refs_dir.iterdir()):
if f.is_file() and f.suffix == '.md':
data = scan_file_patterns(f, f'references/{f.name}')
data['is_skill_md'] = False
pfm = parse_prompt_frontmatter(f)
data['prompt_frontmatter'] = pfm
files_data.append(data)
# Resources (just sizes, for progressive disclosure assessment)
resources_dir = skill_path / 'resources'
resource_sizes = {}
@@ -338,6 +359,7 @@ def scan_prompt_metrics(skill_path: Path) -> dict:
'skill_path': str(skill_path),
'timestamp': datetime.now(timezone.utc).isoformat(),
'status': 'info',
'is_memory_agent': is_memory_agent,
'skill_md_summary': {
'line_count': skill_md_data['line_count'] if skill_md_data else 0,
'token_estimate': skill_md_data['token_estimate'] if skill_md_data else 0,

View File

@@ -0,0 +1,385 @@
#!/usr/bin/env python3
"""Deterministic pre-pass for sanctum architecture scanner.
Extracts structural metadata from a memory agent's sanctum architecture
that the LLM scanner can use instead of reading all files itself. Covers:
- SKILL.md content line count (non-blank, non-frontmatter)
- Template file inventory (which of the 6 standard templates exist)
- CREED template section inventory
- BOND template section inventory
- Capability reference frontmatter fields
- Init script parameter extraction (SKILL_NAME, TEMPLATE_FILES, EVOLVABLE)
- First-breath.md section inventory
- PULSE template presence and sections
Only runs for memory agents (agents with assets/ containing template files).
"""
# /// script
# requires-python = ">=3.9"
# dependencies = []
# ///
from __future__ import annotations
import argparse
import json
import re
import sys
from datetime import datetime, timezone
from pathlib import Path
# The six sanctum template files every memory agent is required to ship
# in its assets/ directory.
STANDARD_TEMPLATES = [
    "INDEX-template.md",
    "PERSONA-template.md",
    "CREED-template.md",
    "BOND-template.md",
    "MEMORY-template.md",
    "CAPABILITIES-template.md",
]
# Templates that may be present but are not required.
OPTIONAL_TEMPLATES = [
    "PULSE-template.md",
]
# Section headings the CREED template must contain (matched
# case-insensitively via check_section_present).
CREED_REQUIRED_SECTIONS = [
    "The Sacred Truth",
    "Mission",
    "Core Values",
    "Standing Orders",
    "Philosophy",
    "Boundaries",
    "Anti-Patterns",
    "Dominion",
]
# Expected first-breath.md sections for the "calibration" style
# (detected by the presence of an "Absorb" heading — see main()).
FIRST_BREATH_CALIBRATION_SECTIONS = [
    "Save As You Go",
    "Pacing",
    "Chase What Catches",
    "Absorb Their Voice",
    "Show Your Work",
    "Hear the Silence",
    "The Territories",
    "Wrapping Up",
]
# Expected first-breath.md sections for the "configuration" style
# (detected by a "Discovery" heading when no "Absorb" heading exists).
FIRST_BREATH_CONFIG_SECTIONS = [
    "Save As You Go",
    "Discovery",
    "Urgency",
    "Wrapping Up",
]
def count_content_lines(file_path: Path) -> int:
    """Count non-blank, non-frontmatter lines in a markdown file.

    A single leading YAML frontmatter block (``--- ... ---``) is stripped
    before counting, so the result measures actual content only.
    """
    # Explicit encoding keeps counts consistent across platforms; the
    # sibling scanners in this suite also read with encoding='utf-8'.
    content = file_path.read_text(encoding="utf-8")
    # Strip at most one frontmatter block at the very start of the file.
    stripped = re.sub(r"^---\s*\n.*?\n---\s*\n", "", content, count=1, flags=re.DOTALL)
    return sum(1 for line in stripped.split("\n") if line.strip())
def extract_h2_h3_sections(file_path: Path) -> list[str]:
    """Return the H2/H3 heading titles of a markdown file, in file order.

    A missing file yields an empty list.  H1 and H4+ headings are ignored.
    """
    if not file_path.exists():
        return []
    candidates = (
        re.match(r"^#{2,3}\s+(.+)", raw)
        for raw in file_path.read_text().split("\n")
    )
    return [m.group(1).strip() for m in candidates if m]
def parse_frontmatter(file_path: Path) -> dict:
    """Parse leading YAML frontmatter of a markdown file into a flat dict.

    Only simple ``key: value`` pairs are supported; surrounding single or
    double quotes on values are stripped.  Returns an empty dict when no
    frontmatter block is present.
    """
    text = file_path.read_text()
    block = re.match(r"^---\s*\n(.*?)\n---", text, re.DOTALL)
    if block is None:
        return {}
    result: dict = {}
    for raw in block.group(1).strip().split("\n"):
        if ":" not in raw:
            continue
        key, _, value = raw.partition(":")
        result[key.strip()] = value.strip().strip("'\"")
    return result
def extract_init_script_params(script_path: Path) -> dict:
    """Pull agent-specific configuration constants out of init-sanctum.py.

    The script is never executed — SKILL_NAME, TEMPLATE_FILES,
    SKILL_ONLY_FILES and EVOLVABLE are extracted with regexes.  Any value
    that cannot be found keeps its default (None or empty list).
    """
    exists = script_path.exists()
    params = {
        "exists": exists,
        "skill_name": None,
        "template_files": [],
        "skill_only_files": [],
        "evolvable": None,
    }
    if not exists:
        return params
    source = script_path.read_text()
    # SKILL_NAME = "..."
    name_match = re.search(r'SKILL_NAME\s*=\s*["\']([^"\']+)["\']', source)
    if name_match:
        params["skill_name"] = name_match.group(1)
    # TEMPLATE_FILES = [...] and SKILL_ONLY_FILES = {...} — both are
    # flattened to the list of quoted strings inside the literal.
    for key, literal_re in (
        ("template_files", r"TEMPLATE_FILES\s*=\s*\[(.*?)\]"),
        ("skill_only_files", r"SKILL_ONLY_FILES\s*=\s*\{(.*?)\}"),
    ):
        m = re.search(literal_re, source, re.DOTALL)
        if m:
            params[key] = re.findall(r'["\']([^"\']+)["\']', m.group(1))
    # EVOLVABLE = True|False
    ev = re.search(r"EVOLVABLE\s*=\s*(True|False)", source)
    if ev:
        params["evolvable"] = ev.group(1) == "True"
    return params
def check_section_present(sections: list[str], keyword: str) -> bool:
    """Return True when *keyword* occurs in any heading, case-insensitively."""
    needle = keyword.lower()
    for heading in sections:
        if needle in heading.lower():
            return True
    return False
def main() -> None:
    """CLI entry point: scan one skill directory and emit a JSON report.

    Exits with status 2 when the given path is not a directory.  For
    non-memory agents a short "not applicable" report is emitted; for
    memory agents the full structural analysis plus findings is produced.
    """
    parser = argparse.ArgumentParser(
        description="Pre-pass for sanctum architecture scanner"
    )
    parser.add_argument("skill_path", help="Path to the agent skill directory")
    parser.add_argument(
        "-o", "--output", help="Output JSON file path (default: stdout)"
    )
    args = parser.parse_args()
    skill_path = Path(args.skill_path).resolve()
    if not skill_path.is_dir():
        print(f"Error: {skill_path} is not a directory", file=sys.stderr)
        sys.exit(2)
    assets_dir = skill_path / "assets"
    references_dir = skill_path / "references"
    scripts_dir = skill_path / "scripts"
    skill_md = skill_path / "SKILL.md"
    # Check if this is a memory agent (has *-template.md files in assets/).
    # Non-memory agents get a short report and we bail out early.
    is_memory_agent = assets_dir.exists() and any(
        f.name.endswith("-template.md") for f in assets_dir.iterdir() if f.is_file()
    )
    if not is_memory_agent:
        result = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "skill_path": str(skill_path),
            "is_memory_agent": False,
            "message": "Not a memory agent — no sanctum templates found in assets/",
        }
        output_json(result, args.output)
        return
    # SKILL.md analysis: size (content lines) and heading inventory.
    skill_analysis = {
        "exists": skill_md.exists(),
        "content_lines": count_content_lines(skill_md) if skill_md.exists() else 0,
        "sections": extract_h2_h3_sections(skill_md) if skill_md.exists() else [],
    }
    # Template inventory: required templates first, then optional ones
    # (optional entries additionally carry "optional": True).
    template_inventory = {}
    for tmpl in STANDARD_TEMPLATES:
        tmpl_path = assets_dir / tmpl
        template_inventory[tmpl] = {
            "exists": tmpl_path.exists(),
            "sections": extract_h2_h3_sections(tmpl_path) if tmpl_path.exists() else [],
            "content_lines": count_content_lines(tmpl_path) if tmpl_path.exists() else 0,
        }
    for tmpl in OPTIONAL_TEMPLATES:
        tmpl_path = assets_dir / tmpl
        template_inventory[tmpl] = {
            "exists": tmpl_path.exists(),
            "optional": True,
            "sections": extract_h2_h3_sections(tmpl_path) if tmpl_path.exists() else [],
            "content_lines": count_content_lines(tmpl_path) if tmpl_path.exists() else 0,
        }
    # CREED section check: every required section keyword must be present.
    creed_path = assets_dir / "CREED-template.md"
    creed_sections = extract_h2_h3_sections(creed_path) if creed_path.exists() else []
    creed_check = {}
    for section in CREED_REQUIRED_SECTIONS:
        creed_check[section] = check_section_present(creed_sections, section)
    # First-breath analysis
    first_breath_path = references_dir / "first-breath.md"
    fb_sections = extract_h2_h3_sections(first_breath_path) if first_breath_path.exists() else []
    # Detect style: calibration has "Absorb Their Voice", configuration has
    # "Discovery".  "Absorb" wins when both keywords appear.
    is_calibration = check_section_present(fb_sections, "Absorb")
    is_configuration = check_section_present(fb_sections, "Discovery") and not is_calibration
    fb_style = "calibration" if is_calibration else ("configuration" if is_configuration else "unknown")
    expected_sections = (
        FIRST_BREATH_CALIBRATION_SECTIONS if is_calibration else FIRST_BREATH_CONFIG_SECTIONS
    )
    fb_check = {}
    for section in expected_sections:
        fb_check[section] = check_section_present(fb_sections, section)
    first_breath_analysis = {
        "exists": first_breath_path.exists(),
        "style": fb_style,
        "sections": fb_sections,
        "section_checks": fb_check,
    }
    # Capability frontmatter scan: every references/*.md with frontmatter
    # (except first-breath.md) is treated as a capability file.
    capabilities = []
    if references_dir.exists():
        for md_file in sorted(references_dir.glob("*.md")):
            if md_file.name == "first-breath.md":
                continue
            meta = parse_frontmatter(md_file)
            if meta:
                cap_info = {
                    "file": md_file.name,
                    "has_name": "name" in meta,
                    "has_code": "code" in meta,
                    "has_description": "description" in meta,
                    "sections": extract_h2_h3_sections(md_file),
                }
                # Check for memory agent patterns in the capability's headings.
                cap_info["has_memory_integration"] = check_section_present(
                    cap_info["sections"], "Memory Integration"
                )
                cap_info["has_after_session"] = check_section_present(
                    cap_info["sections"], "After"
                )
                cap_info["has_success"] = check_section_present(
                    cap_info["sections"], "Success"
                )
                capabilities.append(cap_info)
    # Init script analysis
    init_script_path = scripts_dir / "init-sanctum.py"
    init_params = extract_init_script_params(init_script_path)
    # Cross-check: init TEMPLATE_FILES vs actual templates in assets/.
    # Both cross-check values are None when the init script is missing.
    actual_templates = [f.name for f in assets_dir.iterdir() if f.name.endswith("-template.md")] if assets_dir.exists() else []
    init_template_match = set(init_params.get("template_files", [])) == set(actual_templates) if init_params["exists"] else None
    # Cross-check: init SKILL_NAME vs folder name
    skill_name_match = init_params.get("skill_name") == skill_path.name if init_params["exists"] else None
    # Findings: accumulate severity-tagged issues from the checks above.
    findings = []
    if skill_analysis["content_lines"] > 40:
        findings.append({
            "severity": "high",
            "file": "SKILL.md",
            "message": f"Bootloader has {skill_analysis['content_lines']} content lines (target: ~30, max: 40)",
        })
    for tmpl in STANDARD_TEMPLATES:
        if not template_inventory[tmpl]["exists"]:
            findings.append({
                "severity": "critical",
                "file": f"assets/{tmpl}",
                "message": f"Missing standard template: {tmpl}",
            })
    for section, present in creed_check.items():
        if not present:
            findings.append({
                "severity": "high",
                "file": "assets/CREED-template.md",
                "message": f"Missing required CREED section: {section}",
            })
    if not first_breath_analysis["exists"]:
        findings.append({
            "severity": "critical",
            "file": "references/first-breath.md",
            "message": "Missing first-breath.md",
        })
    else:
        for section, present in first_breath_analysis["section_checks"].items():
            if not present:
                findings.append({
                    "severity": "high",
                    "file": "references/first-breath.md",
                    "message": f"Missing First Breath section: {section}",
                })
    if not init_params["exists"]:
        findings.append({
            "severity": "critical",
            "file": "scripts/init-sanctum.py",
            "message": "Missing init-sanctum.py",
        })
    else:
        # skill_name_match / init_template_match may be None when data is
        # missing, so compare against False explicitly.
        if skill_name_match is False:
            findings.append({
                "severity": "critical",
                "file": "scripts/init-sanctum.py",
                "message": f"SKILL_NAME mismatch: script has '{init_params['skill_name']}', folder is '{skill_path.name}'",
            })
        if init_template_match is False:
            findings.append({
                "severity": "high",
                "file": "scripts/init-sanctum.py",
                "message": "TEMPLATE_FILES does not match actual templates in assets/",
            })
    # Final report, written to --output or stdout.
    result = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "skill_path": str(skill_path),
        "is_memory_agent": True,
        "skill_md": skill_analysis,
        "template_inventory": template_inventory,
        "creed_sections": creed_check,
        "first_breath": first_breath_analysis,
        "capabilities": capabilities,
        "init_script": init_params,
        "cross_checks": {
            "skill_name_match": skill_name_match,
            "template_files_match": init_template_match,
        },
        "findings": findings,
        "finding_count": len(findings),
        "critical_count": sum(1 for f in findings if f["severity"] == "critical"),
        "high_count": sum(1 for f in findings if f["severity"] == "high"),
    }
    output_json(result, args.output)
def output_json(data: dict, output_path: str | None) -> None:
"""Write JSON to file or stdout."""
json_str = json.dumps(data, indent=2)
if output_path:
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
Path(output_path).write_text(json_str + "\n")
print(f"Wrote: {output_path}", file=sys.stderr)
else:
print(json_str)
# Script entry point.
if __name__ == "__main__":
    main()

View File

@@ -6,11 +6,12 @@ can use instead of reading all files itself. Covers:
- Frontmatter parsing and validation
- Section inventory (H2/H3 headers)
- Template artifact detection
- Agent name validation (bmad-{code}-agent-{name} or bmad-agent-{name})
- Required agent sections (Overview, Identity, Communication Style, Principles, On Activation)
- Agent name validation (kebab-case, must contain 'agent')
- Required agent sections (stateless vs memory agent bootloader detection)
- Memory path consistency checking
- Language/directness pattern grep
- On Exit / Exiting section detection (invalid)
- Capability file scanning in references/ directory
"""
# /// script
@@ -44,7 +45,11 @@ TEMPLATE_ARTIFACTS = [
r'\{if-module\}', r'\{/if-module\}',
r'\{if-headless\}', r'\{/if-headless\}',
r'\{if-autonomous\}', r'\{/if-autonomous\}',
r'\{if-sidecar\}', r'\{/if-sidecar\}',
r'\{if-memory\}', r'\{/if-memory\}',
r'\{if-memory-agent\}', r'\{/if-memory-agent\}',
r'\{if-stateless-agent\}', r'\{/if-stateless-agent\}',
r'\{if-evolvable\}', r'\{/if-evolvable\}',
r'\{if-pulse\}', r'\{/if-pulse\}',
r'\{displayName\}', r'\{skillName\}',
]
# Runtime variables that ARE expected (not artifacts)
@@ -113,12 +118,11 @@ def parse_frontmatter(content: str) -> tuple[dict | None, list[dict]]:
'severity': 'high', 'category': 'frontmatter',
'issue': f'Name "{name}" is not kebab-case',
})
elif not (re.match(r'^bmad-[a-z0-9]+-agent-[a-z0-9]+(-[a-z0-9]+)*$', name)
or re.match(r'^bmad-agent-[a-z0-9]+(-[a-z0-9]+)*$', name)):
elif 'agent' not in name.split('-'):
findings.append({
'file': 'SKILL.md', 'line': 1,
'severity': 'medium', 'category': 'frontmatter',
'issue': f'Name "{name}" does not follow bmad-{{code}}-agent-{{name}} or bmad-agent-{{name}} pattern',
'issue': f'Name "{name}" should contain "agent" (e.g., agent-{{name}} or {{code}}-agent-{{name}})',
})
# description check
@@ -163,21 +167,49 @@ def extract_sections(content: str) -> list[dict]:
return sections
def check_required_sections(sections: list[dict]) -> list[dict]:
def detect_memory_agent(skill_path: Path, content: str) -> bool:
"""Detect if this is a memory agent bootloader (vs stateless agent).
Memory agents have assets/ with sanctum template files and contain
Three Laws / Sacred Truth in their SKILL.md.
"""
assets_dir = skill_path / 'assets'
has_templates = (
assets_dir.exists()
and any(f.name.endswith('-template.md') for f in assets_dir.iterdir() if f.is_file())
)
has_three_laws = 'First Law:' in content and 'Second Law:' in content
has_sacred_truth = 'Sacred Truth' in content
return has_templates or (has_three_laws and has_sacred_truth)
def check_required_sections(sections: list[dict], is_memory_agent: bool) -> list[dict]:
"""Check for required and invalid sections."""
findings = []
h2_titles = [s['title'] for s in sections if s['level'] == 2]
required = ['Overview', 'Identity', 'Communication Style', 'Principles', 'On Activation']
for req in required:
if req not in h2_titles:
findings.append({
'file': 'SKILL.md', 'line': 1,
'severity': 'high', 'category': 'sections',
'issue': f'Missing ## {req} section',
})
if is_memory_agent:
# Memory agent bootloaders have a different required structure
required = ['The Three Laws', 'The Sacred Truth', 'On Activation']
for req in required:
if req not in h2_titles:
findings.append({
'file': 'SKILL.md', 'line': 1,
'severity': 'high', 'category': 'sections',
'issue': f'Missing ## {req} section (required for memory agent bootloader)',
})
else:
# Stateless agents use the traditional full structure
required = ['Overview', 'Identity', 'Communication Style', 'Principles', 'On Activation']
for req in required:
if req not in h2_titles:
findings.append({
'file': 'SKILL.md', 'line': 1,
'severity': 'high', 'category': 'sections',
'issue': f'Missing ## {req} section',
})
# Invalid sections
# Invalid sections (both types)
for s in sections:
if s['level'] == 2:
for pattern, message in INVALID_SECTIONS:
@@ -218,7 +250,7 @@ def extract_memory_paths(skill_path: Path) -> tuple[list[str], list[dict]]:
memory_paths = set()
# Memory path patterns
mem_pattern = re.compile(r'(?:memory/|sidecar/)[\w\-/]+(?:\.\w+)?')
mem_pattern = re.compile(r'memory/[\w\-/]+(?:\.\w+)?')
files_to_scan = []
@@ -226,7 +258,7 @@ def extract_memory_paths(skill_path: Path) -> tuple[list[str], list[dict]]:
if skill_md.exists():
files_to_scan.append(('SKILL.md', skill_md))
for subdir in ['prompts', 'resources']:
for subdir in ['prompts', 'resources', 'references']:
d = skill_path / subdir
if d.exists():
for f in sorted(d.iterdir()):
@@ -247,7 +279,6 @@ def extract_memory_paths(skill_path: Path) -> tuple[list[str], list[dict]]:
prefixes.add(prefix)
memory_prefixes = {p for p in prefixes if 'memory' in p.lower()}
sidecar_prefixes = {p for p in prefixes if 'sidecar' in p.lower()}
if len(memory_prefixes) > 1:
findings.append({
@@ -256,13 +287,6 @@ def extract_memory_paths(skill_path: Path) -> tuple[list[str], list[dict]]:
'issue': f'Inconsistent memory path prefixes: {", ".join(sorted(memory_prefixes))}',
})
if len(sidecar_prefixes) > 1:
findings.append({
'file': 'multiple', 'line': 0,
'severity': 'medium', 'category': 'memory-paths',
'issue': f'Inconsistent sidecar path prefixes: {", ".join(sorted(sidecar_prefixes))}',
})
return sorted_paths, findings
@@ -274,6 +298,15 @@ def check_prompt_basics(skill_path: Path) -> tuple[list[dict], list[dict]]:
prompt_files = [f for f in sorted(skill_path.iterdir())
if f.is_file() and f.suffix == '.md' and f.name not in skip_files]
# Also scan references/ for capability prompts (memory agents keep prompts here)
refs_dir = skill_path / 'references'
if refs_dir.exists():
prompt_files.extend(
f for f in sorted(refs_dir.iterdir())
if f.is_file() and f.suffix == '.md'
)
if not prompt_files:
return prompt_details, findings
@@ -344,13 +377,16 @@ def scan_structure_capabilities(skill_path: Path) -> dict:
skill_content = skill_md.read_text(encoding='utf-8')
# Detect agent type
is_memory_agent = detect_memory_agent(skill_path, skill_content)
# Frontmatter
frontmatter, fm_findings = parse_frontmatter(skill_content)
all_findings.extend(fm_findings)
# Sections
sections = extract_sections(skill_content)
section_findings = check_required_sections(sections)
section_findings = check_required_sections(sections, is_memory_agent)
all_findings.extend(section_findings)
# Template artifacts in SKILL.md
@@ -397,6 +433,7 @@ def scan_structure_capabilities(skill_path: Path) -> dict:
'metadata': {
'frontmatter': frontmatter,
'sections': sections,
'is_memory_agent': is_memory_agent,
},
'prompt_details': prompt_details,
'memory_paths': memory_paths,

View File

@@ -0,0 +1,190 @@
#!/usr/bin/env python3
"""Process BMad agent template files.
Performs deterministic variable substitution and conditional block processing
on template files from assets/. Replaces {varName} placeholders with provided
values and evaluates {if-X}...{/if-X} conditional blocks, keeping content
when the condition is in the --true list and removing the entire block otherwise.
"""
# /// script
# requires-python = ">=3.9"
# ///
from __future__ import annotations
import argparse
import json
import re
import sys
def process_conditionals(text: str, true_conditions: set[str]) -> tuple[str, list[str], list[str]]:
    """Evaluate {if-X}...{/if-X} conditional blocks in *text*.

    Blocks whose condition is in *true_conditions* keep their inner content
    (markers stripped); all other blocks are removed entirely.  Resolution
    is outermost-first: the lazy match pairs the first opening tag with its
    nearest matching close, so a surviving outer block exposes any nested
    blocks for the next pass, while a removed outer block discards them.

    Returns (processed_text, conditions_true, conditions_false).
    """
    seen_true: list[str] = []
    seen_false: list[str] = []
    block_re = re.compile(
        r'\{if-([a-zA-Z0-9_-]+)\}(.*?)\{/if-\1\}',
        re.DOTALL,
    )
    while (m := block_re.search(text)) is not None:
        name = m.group(1)
        if name in true_conditions:
            # Condition holds: keep the body, drop only the markers.
            replacement = m.group(2)
            if name not in seen_true:
                seen_true.append(name)
        else:
            # Condition fails: drop the whole block, body included.
            replacement = ''
            if name not in seen_false:
                seen_false.append(name)
        text = text[:m.start()] + replacement + text[m.end():]
    # Collapse runs of 3+ newlines (left behind by removed blocks) to one
    # blank line.
    text = re.sub(r'\n{3,}', '\n\n', text)
    return text, seen_true, seen_false
def process_variables(text: str, variables: dict[str, str]) -> tuple[str, list[str]]:
    """Substitute {varName} placeholders from *variables* into *text*.

    Only mapped variables are replaced; unmatched {placeholders} are left
    untouched, since they may be runtime configuration.

    Returns (processed_text, names_actually_substituted).
    """
    applied: list[str] = []
    for var_name, var_value in variables.items():
        token = f'{{{var_name}}}'
        if token not in text:
            continue
        text = text.replace(token, var_value)
        if var_name not in applied:
            applied.append(var_name)
    return text, applied
def parse_var(s: str) -> tuple[str, str]:
    """Split a ``key=value`` argument into (key, value).

    Raises argparse.ArgumentTypeError when '=' is absent or the key is
    empty; the value may be empty and may itself contain '='.
    """
    key, sep, value = s.partition('=')
    if not sep:
        raise argparse.ArgumentTypeError(
            f"Invalid variable format: '{s}' (expected key=value)"
        )
    if not key:
        raise argparse.ArgumentTypeError(
            f"Invalid variable format: '{s}' (empty key)"
        )
    return key, value
def main() -> int:
    """CLI entry point: process one template file.

    Returns a process exit code: 0 on success, 2 for usage errors (bad
    --var format, missing template file), 1 for other I/O failures.
    """
    parser = argparse.ArgumentParser(
        description='Process BMad agent template files with variable substitution and conditional blocks.',
    )
    parser.add_argument(
        'template',
        help='Path to the template file to process',
    )
    parser.add_argument(
        '-o', '--output',
        help='Write processed output to file (default: stdout)',
    )
    parser.add_argument(
        '--var',
        action='append',
        default=[],
        metavar='key=value',
        help='Variable substitution (repeatable). Example: --var skillName=my-agent',
    )
    parser.add_argument(
        '--true',
        action='append',
        default=[],
        dest='true_conditions',
        metavar='CONDITION',
        help='Condition name to treat as true (repeatable). Example: --true pulse --true evolvable',
    )
    parser.add_argument(
        '--json',
        action='store_true',
        dest='json_output',
        help='Output processing metadata as JSON to stderr',
    )
    args = parser.parse_args()
    # Parse --var key=value pairs; a malformed pair aborts with exit code 2.
    variables: dict[str, str] = {}
    for v in args.var:
        try:
            key, value = parse_var(v)
        except argparse.ArgumentTypeError as e:
            print(f"Error: {e}", file=sys.stderr)
            return 2
        variables[key] = value
    true_conditions = set(args.true_conditions)
    # Read template
    try:
        with open(args.template, encoding='utf-8') as f:
            content = f.read()
    except FileNotFoundError:
        print(f"Error: Template file not found: {args.template}", file=sys.stderr)
        return 2
    except OSError as e:
        print(f"Error reading template: {e}", file=sys.stderr)
        return 1
    # Process: conditionals first, then variables — so variables inside
    # removed blocks never get substituted.
    content, conds_true, conds_false = process_conditionals(content, true_conditions)
    content, vars_substituted = process_variables(content, variables)
    # Write output to --output file, or stdout when no file is given.
    output_file = args.output
    try:
        if output_file:
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(content)
        else:
            sys.stdout.write(content)
    except OSError as e:
        print(f"Error writing output: {e}", file=sys.stderr)
        return 1
    # JSON metadata to stderr (kept off stdout so piped output stays clean).
    if args.json_output:
        metadata = {
            'processed': True,
            'output_file': output_file or '<stdout>',
            'vars_substituted': vars_substituted,
            'conditions_true': conds_true,
            'conditions_false': conds_false,
        }
        print(json.dumps(metadata, indent=2), file=sys.stderr)
    return 0
# Script entry point: propagate main()'s exit code to the shell.
if __name__ == '__main__':
    sys.exit(main())

View File

@@ -2,13 +2,13 @@
"""Deterministic path standards scanner for BMad skills.
Validates all .md and .json files against BMad path conventions:
1. {project-root} only valid before /_bmad
1. {project-root} for any project-scope path (not just _bmad)
2. Bare _bmad references must have {project-root} prefix
3. Config variables used directly (no double-prefix)
4. Skill-internal paths must use ./ prefix (references/, scripts/, assets/)
3. Config variables used directly no double-prefix with {project-root}
4. ./ only for same-folder references — never ./subdir/ cross-directory
5. No ../ parent directory references
6. No absolute paths
7. Memory paths must use {project-root}/_bmad/memory/{skillName}-sidecar/
7. Memory paths must use {project-root}/_bmad/memory/{skillName}/
8. Frontmatter allows only name and description
9. No .md files at skill root except SKILL.md
"""
@@ -28,8 +28,8 @@ from pathlib import Path
# Patterns to detect
# {project-root} NOT followed by /_bmad
PROJECT_ROOT_NOT_BMAD_RE = re.compile(r'\{project-root\}/(?!_bmad)')
# Double-prefix: {project-root}/{config-variable} — config vars already contain project-root
DOUBLE_PREFIX_RE = re.compile(r'\{project-root\}/\{[^}]+\}')
# Bare _bmad without {project-root} prefix — match _bmad at word boundary
# but not when preceded by {project-root}/
BARE_BMAD_RE = re.compile(r'(?<!\{project-root\}/)_bmad[/\s]')
@@ -38,13 +38,12 @@ ABSOLUTE_PATH_RE = re.compile(r'(?:^|[\s"`\'(])(/(?:Users|home|opt|var|tmp|etc|u
HOME_PATH_RE = re.compile(r'(?:^|[\s"`\'(])(~/\S+)', re.MULTILINE)
# Parent directory reference (still invalid)
RELATIVE_DOT_RE = re.compile(r'(?:^|[\s"`\'(])(\.\./\S+)', re.MULTILINE)
# Bare skill-internal paths without ./ prefix
# Match references/, scripts/, assets/ when NOT preceded by ./
BARE_INTERNAL_RE = re.compile(r'(?:^|[\s"`\'(])(?<!\./)((?:references|scripts|assets)/\S+)', re.MULTILINE)
# Cross-directory ./ — ./subdir/ is wrong because ./ means same folder only
CROSS_DIR_DOT_SLASH_RE = re.compile(r'(?:^|[\s"`\'(])\./(?:references|scripts|assets)/\S+', re.MULTILINE)
# Memory path pattern: should use {project-root}/_bmad/memory/
MEMORY_PATH_RE = re.compile(r'_bmad/memory/\S+')
VALID_MEMORY_PATH_RE = re.compile(r'\{project-root\}/_bmad/memory/\S+-sidecar/')
VALID_MEMORY_PATH_RE = re.compile(r'\{project-root\}/_bmad/memory/[\w-]+/')
# Fenced code block detection (to skip examples showing wrong patterns)
FENCE_RE = re.compile(r'^```', re.MULTILINE)
@@ -142,16 +141,16 @@ def scan_file(filepath: Path, skip_fenced: bool = True) -> list[dict]:
rel_path = filepath.name
checks = [
(PROJECT_ROOT_NOT_BMAD_RE, 'project-root-not-bmad', 'critical',
'{project-root} used for non-_bmad path — only valid use is {project-root}/_bmad/...'),
(DOUBLE_PREFIX_RE, 'double-prefix', 'critical',
'Double-prefix: {project-root}/{variable}config variables already contain {project-root} at runtime'),
(ABSOLUTE_PATH_RE, 'absolute-path', 'high',
'Absolute path found — not portable across machines'),
(HOME_PATH_RE, 'absolute-path', 'high',
'Home directory path (~/) found — environment-specific'),
(RELATIVE_DOT_RE, 'relative-prefix', 'high',
'Parent directory reference (../) found — fragile, breaks with reorganization'),
(BARE_INTERNAL_RE, 'bare-internal-path', 'high',
'Bare skill-internal path without ./ prefix — use ./references/, ./scripts/, ./assets/ to distinguish from {project-root} paths'),
(CROSS_DIR_DOT_SLASH_RE, 'cross-dir-dot-slash', 'high',
'Cross-directory ./ reference — ./ means same folder only; use bare skill-root relative path (e.g., references/foo.md not ./references/foo.md)'),
]
for pattern, category, severity, message in checks:
@@ -193,14 +192,13 @@ def scan_file(filepath: Path, skip_fenced: bool = True) -> list[dict]:
'action': '',
})
# Memory path check — memory paths should use {project-root}/_bmad/memory/{skillName}-sidecar/
# Memory path check — memory paths should use {project-root}/_bmad/memory/{skillName}/
for match in MEMORY_PATH_RE.finditer(content):
pos = match.start()
if skip_fenced and is_in_fenced_block(content, pos):
continue
start = max(0, pos - 20)
before = content[start:pos]
matched_text = match.group()
if '{project-root}/' not in before:
line_num = get_line_number(content, pos)
line_content = content.split('\n')[line_num - 1].strip()
@@ -213,18 +211,6 @@ def scan_file(filepath: Path, skip_fenced: bool = True) -> list[dict]:
'detail': line_content[:120],
'action': '',
})
elif '-sidecar/' not in matched_text:
line_num = get_line_number(content, pos)
line_content = content.split('\n')[line_num - 1].strip()
findings.append({
'file': rel_path,
'line': line_num,
'severity': 'high',
'category': 'memory-path',
'title': 'Memory path not using {skillName}-sidecar/ convention',
'detail': line_content[:120],
'action': '',
})
return findings
@@ -259,12 +245,11 @@ def scan_skill(skill_path: Path, skip_fenced: bool = True) -> dict:
# Build summary
by_severity = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
by_category = {
'project_root_not_bmad': 0,
'bare_bmad': 0,
'double_prefix': 0,
'bare_bmad': 0,
'absolute_path': 0,
'relative_prefix': 0,
'bare_internal_path': 0,
'cross_dir_dot_slash': 0,
'memory_path': 0,
'frontmatter': 0,
'structure': 0,
@@ -281,7 +266,7 @@ def scan_skill(skill_path: Path, skip_fenced: bool = True) -> dict:
return {
'scanner': 'path-standards',
'script': 'scan-path-standards.py',
'version': '2.0.0',
'version': '3.0.0',
'skill_path': str(skill_path),
'timestamp': datetime.now(timezone.utc).isoformat(),
'files_scanned': files_scanned,

View File

@@ -281,12 +281,14 @@ def scan_python_script(filepath: Path, rel_path: str) -> list[dict]:
'action': 'Add requires-python = ">=3.9" or appropriate version',
})
# requirements.txt reference
if 'requirements.txt' in content or 'pip install' in content:
# Legacy dep-management reference (use concatenation to avoid self-detection)
req_marker = 'requirements' + '.txt'
pip_marker = 'pip ' + 'install'
if req_marker in content or pip_marker in content:
findings.append({
'file': rel_path, 'line': 1,
'severity': 'high', 'category': 'dependencies',
'title': 'References requirements.txt or pip install — use PEP 723 inline deps',
'title': f'References {req_marker} or {pip_marker} — use PEP 723 inline deps',
'detail': '',
'action': 'Replace with PEP 723 inline dependency block',
})