refactor(ux): consolidate BMAD skills, update design system, and clean up Prisma generated client
This commit is contained in:
406
.github/skills/bmad-workflow-builder/scripts/generate-convert-report.py
vendored
Normal file
406
.github/skills/bmad-workflow-builder/scripts/generate-convert-report.py
vendored
Normal file
@@ -0,0 +1,406 @@
|
||||
#!/usr/bin/env python3
|
||||
# /// script
|
||||
# requires-python = ">=3.9"
|
||||
# ///
|
||||
"""
|
||||
Generate an interactive HTML skill conversion comparison report.
|
||||
|
||||
Measures original and rebuilt skill directories, combines with LLM-generated
|
||||
analysis (cuts, retained content, verdict), and renders a self-contained
|
||||
HTML report showing the stark before/after comparison.
|
||||
|
||||
Usage:
|
||||
python3 generate-convert-report.py <original-path> <rebuilt-path> <analysis-json> [-o output.html] [--open]
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import html as html_lib
|
||||
import json
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def measure_skill(skill_path: Path) -> dict:
    """Measure a skill directory or a single file.

    Walks every Markdown file under *skill_path* (or treats *skill_path*
    itself as the sole file) and tallies lines, words, characters and
    ``## `` section headings.  Non-Markdown files inside a directory
    count toward the file total only, never toward the text metrics.

    Returns a dict with keys: ``lines``, ``words``, ``chars``,
    ``sections``, ``files`` and ``estimated_tokens`` (words * 1.3,
    truncated to int).
    """
    # A single-file input is measured as-is; a directory is scanned recursively.
    md_files = [skill_path] if skill_path.is_file() else sorted(skill_path.rglob('*.md'))

    lines_total = 0
    words_total = 0
    chars_total = 0
    sections_total = 0

    for md in md_files:
        text = md.read_text(encoding='utf-8')
        rows = text.splitlines()
        lines_total += len(rows)
        words_total += sum(len(row.split()) for row in rows)
        chars_total += len(text)
        # Only H2 headings ('## ') are treated as sections.
        sections_total += sum(1 for row in rows if row.startswith('## '))

    # Non-markdown assets/scripts contribute to the file count only.
    other_files = 0
    if skill_path.is_dir():
        other_files = sum(
            1 for entry in skill_path.rglob('*')
            if entry.is_file() and entry.suffix != '.md'
        )

    return {
        'lines': lines_total,
        'words': words_total,
        'chars': chars_total,
        'sections': sections_total,
        'files': len(md_files) + other_files,
        # Rough heuristic: ~1.3 tokens per word.
        'estimated_tokens': int(words_total * 1.3),
    }
|
||||
|
||||
|
||||
def calculate_reductions(original: dict, rebuilt: dict) -> dict:
    """Compute the percentage reduction from *original* to *rebuilt*.

    For each tracked metric the value is a string like ``'90%'`` (rounded
    to the nearest integer), or ``'N/A'`` when the original value is zero
    and no percentage can be computed.  Missing keys are treated as 0.
    """
    tracked = ('lines', 'words', 'chars', 'sections', 'estimated_tokens')
    out = {}
    for metric in tracked:
        before = original.get(metric, 0)
        after = rebuilt.get(metric, 0)
        if before > 0:
            out[metric] = f'{round((1 - after / before) * 100)}%'
        else:
            out[metric] = 'N/A'
    return out
|
||||
|
||||
|
||||
def build_report_data(original_metrics: dict, rebuilt_metrics: dict,
                      analysis: dict, reductions: dict) -> dict:
    """Assemble the full report payload consumed by the HTML template.

    Combines the measured metrics, the computed reductions, and the
    LLM-generated analysis (cuts / retained / verdict) into one dict.
    Missing analysis fields fall back to benign defaults.
    """
    meta = {
        'skill_name': analysis.get('skill_name', 'Unknown'),
        'original_source': analysis.get('original_source', ''),
        # UTC timestamp so reports sort consistently across machines.
        'timestamp': datetime.now(timezone.utc).isoformat(),
    }
    return {
        'meta': meta,
        'metrics': {
            'original': original_metrics,
            'rebuilt': rebuilt_metrics,
        },
        'reductions': reductions,
        'cuts': analysis.get('cuts', []),
        'retained': analysis.get('retained', []),
        'verdict': analysis.get('verdict', ''),
    }
|
||||
|
||||
|
||||
# ── HTML Template ──────────────────────────────────────────────────────────────
# Self-contained report page: CSS (dark theme, with a light override via
# prefers-color-scheme) plus inline JS that reads the embedded #report-data
# JSON block and renders the hero, metrics table, cuts/retained sections and
# verdict.  generate_html() rewrites two literal anchors in this template:
# the 'SKILL_NAME' placeholder in <title>, and the '<script>\nconst DATA'
# sequence (the data <script> tag is inserted just before it) — keep both
# intact when editing.

HTML_TEMPLATE = r"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>BMad Method · Skill Conversion: SKILL_NAME</title>
<style>
:root {
--bg: #0d1117; --surface: #161b22; --surface2: #21262d; --border: #30363d;
--text: #e6edf3; --text-muted: #8b949e; --text-dim: #6e7681;
--critical: #f85149; --high: #f0883e; --medium: #d29922; --low: #58a6ff;
--strength: #3fb950; --accent: #58a6ff; --accent-hover: #79c0ff;
--purple: #a371f7;
--font: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif;
--mono: ui-monospace, SFMono-Regular, "SF Mono", Menlo, Consolas, monospace;
}
@media (prefers-color-scheme: light) {
:root {
--bg: #ffffff; --surface: #f6f8fa; --surface2: #eaeef2; --border: #d0d7de;
--text: #1f2328; --text-muted: #656d76; --text-dim: #8c959f;
--critical: #cf222e; --high: #bc4c00; --medium: #9a6700; --low: #0969da;
--strength: #1a7f37; --accent: #0969da; --accent-hover: #0550ae;
--purple: #8250df;
}
}
* { margin: 0; padding: 0; box-sizing: border-box; }
body { font-family: var(--font); background: var(--bg); color: var(--text); line-height: 1.5; padding: 2rem; max-width: 900px; margin: 0 auto; }
h1 { font-size: 1.5rem; margin-bottom: 0.25rem; }
.subtitle { color: var(--text-muted); font-size: 0.85rem; margin-bottom: 1.5rem; }
.hero { text-align: center; padding: 2rem 1rem; margin-bottom: 1.5rem; border: 1px solid var(--border); border-radius: 0.75rem; background: var(--surface); }
.hero-pct { font-size: 4rem; font-weight: 800; color: var(--strength); line-height: 1; }
.hero-label { font-size: 1.1rem; color: var(--text-muted); margin-top: 0.25rem; }
.hero-sub { font-size: 0.9rem; color: var(--text-dim); margin-top: 0.5rem; }
.metrics-table { width: 100%; border-collapse: collapse; margin: 1.5rem 0; }
.metrics-table th { text-align: left; padding: 0.5rem 0.75rem; border-bottom: 2px solid var(--border); font-size: 0.8rem; text-transform: uppercase; letter-spacing: 0.05em; color: var(--text-muted); }
.metrics-table td { padding: 0.5rem 0.75rem; border-bottom: 1px solid var(--border); font-size: 0.95rem; }
.metrics-table .num { font-family: var(--mono); text-align: right; }
.metrics-table .reduction { font-weight: 700; color: var(--strength); text-align: right; }
.bar-cell { width: 30%; }
.bar-container { display: flex; height: 1.25rem; border-radius: 0.25rem; overflow: hidden; background: color-mix(in srgb, var(--critical) 15%, transparent); }
.bar-rebuilt { background: var(--strength); border-radius: 0.25rem 0 0 0.25rem; transition: width 0.3s; }
.section { border: 1px solid var(--border); border-radius: 0.5rem; margin: 0.75rem 0; overflow: hidden; }
.section-header { display: flex; align-items: center; gap: 0.75rem; padding: 0.75rem 1rem; background: var(--surface); cursor: pointer; user-select: none; }
.section-header:hover { background: var(--surface2); }
.section-header .arrow { font-size: 0.7rem; transition: transform 0.15s; color: var(--text-muted); width: 1rem; }
.section-header.open .arrow { transform: rotate(90deg); }
.section-header .label { font-weight: 600; flex: 1; }
.section-body { display: none; }
.section-body.open { display: block; }
.cut-item { padding: 0.75rem 1rem; border-top: 1px solid var(--border); }
.cut-item:hover { background: var(--surface); }
.cut-category { font-weight: 600; font-size: 0.95rem; }
.cut-desc { font-size: 0.85rem; color: var(--text-muted); margin-top: 0.25rem; }
.cut-examples { margin-top: 0.5rem; padding-left: 1.25rem; }
.cut-examples li { font-size: 0.85rem; color: var(--text-dim); padding: 0.1rem 0; }
.badge { display: inline-flex; align-items: center; padding: 0.15rem 0.5rem; border-radius: 2rem; font-size: 0.75rem; font-weight: 600; margin-right: 0.5rem; }
.badge-high { background: color-mix(in srgb, var(--critical) 20%, transparent); color: var(--critical); }
.badge-medium { background: color-mix(in srgb, var(--medium) 20%, transparent); color: var(--medium); }
.badge-low { background: color-mix(in srgb, var(--low) 20%, transparent); color: var(--low); }
.retained-item { padding: 0.5rem 1rem; border-top: 1px solid var(--border); }
.retained-category { font-weight: 600; font-size: 0.9rem; color: var(--strength); }
.retained-desc { font-size: 0.85rem; color: var(--text-muted); }
.verdict { margin-top: 1.5rem; padding: 1.25rem; border: 1px solid var(--border); border-radius: 0.5rem; background: var(--surface); font-size: 1rem; line-height: 1.6; color: var(--text); font-style: italic; }
.verdict::before { content: "Bottom line: "; font-weight: 700; font-style: normal; color: var(--purple); }
.footer { margin-top: 2rem; padding-top: 1rem; border-top: 1px solid var(--border); font-size: 0.8rem; color: var(--text-dim); text-align: center; }
</style>
</head>
<body>

<div style="color:var(--purple);font-size:0.8rem;font-weight:600;letter-spacing:0.05em;text-transform:uppercase;margin-bottom:0.25rem">BMad Method</div>
<h1>Skill Conversion: <span id="skill-name"></span></h1>
<div class="subtitle" id="subtitle"></div>

<div class="hero" id="hero"></div>

<table class="metrics-table">
<thead>
<tr>
<th>Metric</th>
<th class="num">Original</th>
<th class="num">Rebuilt</th>
<th class="num">Reduction</th>
<th class="bar-cell">Comparison</th>
</tr>
</thead>
<tbody id="metrics-body"></tbody>
</table>

<div id="cuts-section"></div>
<div id="retained-section"></div>
<div class="verdict" id="verdict"></div>

<div class="footer">
Generated by <strong>BMad Workflow Builder</strong> · <code>--convert</code>
</div>

<script>
const DATA = JSON.parse(document.getElementById('report-data').textContent);

function esc(s) {
if (!s) return '';
const d = document.createElement('div');
d.textContent = String(s);
return d.innerHTML;
}
function fmt(n) { return typeof n === 'number' ? n.toLocaleString() : String(n); }

function init() {
const m = DATA.meta;
document.getElementById('skill-name').textContent = m.skill_name;
const parts = [m.original_source, m.timestamp ? m.timestamp.split('T')[0] : ''].filter(Boolean);
document.getElementById('subtitle').textContent = parts.join(' \u2022 ');

// Hero — overall token reduction
const tokenRed = DATA.reductions.estimated_tokens || DATA.reductions.words || '0%';
const origTok = DATA.metrics.original.estimated_tokens || 0;
const newTok = DATA.metrics.rebuilt.estimated_tokens || 0;
document.getElementById('hero').innerHTML =
'<div class="hero-pct">' + esc(tokenRed) + '</div>' +
'<div class="hero-label">leaner</div>' +
'<div class="hero-sub">' + fmt(origTok) + ' tokens \u2192 ' + fmt(newTok) + ' tokens</div>';

// Metrics table
var rows = [
['Lines', 'lines'], ['Words', 'words'], ['Characters', 'chars'],
['Sections', 'sections'], ['Files', 'files'], ['Est. Tokens', 'estimated_tokens']
];
var tbody = '';
rows.forEach(function(r) {
var label = r[0], key = r[1];
var orig = DATA.metrics.original[key] || 0;
var rebuilt = DATA.metrics.rebuilt[key] || 0;
var reduction = DATA.reductions[key] || (key === 'files' ? '' : 'N/A');
var pct = orig > 0 ? (rebuilt / orig * 100) : 0;
tbody += '<tr>';
tbody += '<td>' + label + '</td>';
tbody += '<td class="num">' + fmt(orig) + '</td>';
tbody += '<td class="num">' + fmt(rebuilt) + '</td>';
tbody += '<td class="reduction">' + (reduction || '') + '</td>';
tbody += '<td class="bar-cell"><div class="bar-container">';
tbody += '<div class="bar-rebuilt" style="width:' + pct.toFixed(1) + '%"></div>';
tbody += '</div></td>';
tbody += '</tr>';
});
document.getElementById('metrics-body').innerHTML = tbody;

renderCuts();
renderRetained();

// Verdict
var v = DATA.verdict || '';
if (v) document.getElementById('verdict').appendChild(document.createTextNode(v));
else document.getElementById('verdict').style.display = 'none';
}

function renderCuts() {
var cuts = DATA.cuts || [];
if (!cuts.length) return;
var html = '<div class="section"><div class="section-header open" onclick="toggle(this)">';
html += '<span class="arrow">▶</span>';
html += '<span class="label">What Was Cut (' + cuts.length + ' categories)</span>';
html += '</div><div class="section-body open">';
cuts.forEach(function(cut) {
html += '<div class="cut-item">';
var sev = cut.severity || 'medium';
html += '<span class="badge badge-' + sev + '">' + esc(sev) + '</span>';
html += '<span class="cut-category">' + esc(cut.category) + '</span>';
html += '<div class="cut-desc">' + esc(cut.description) + '</div>';
if (cut.examples && cut.examples.length) {
html += '<ul class="cut-examples">';
cut.examples.forEach(function(ex) { html += '<li>' + esc(ex) + '</li>'; });
html += '</ul>';
}
html += '</div>';
});
html += '</div></div>';
document.getElementById('cuts-section').innerHTML = html;
}

function renderRetained() {
var items = DATA.retained || [];
if (!items.length) return;
var html = '<div class="section"><div class="section-header open" onclick="toggle(this)">';
html += '<span class="arrow">▶</span>';
html += '<span class="label">What Survived (' + items.length + ' categories)</span>';
html += '</div><div class="section-body open">';
items.forEach(function(r) {
html += '<div class="retained-item">';
html += '<div class="retained-category">' + esc(r.category) + '</div>';
html += '<div class="retained-desc">' + esc(r.description) + '</div>';
html += '</div>';
});
html += '</div></div>';
document.getElementById('retained-section').innerHTML = html;
}

function toggle(el) {
el.classList.toggle('open');
el.nextElementSibling.classList.toggle('open');
}

init();
</script>
</body>
</html>"""
|
||||
|
||||
|
||||
def generate_html(report_data: dict) -> str:
    """Inject *report_data* into the HTML template.

    The data is embedded as an inline ``<script type="application/json">``
    block that the template's JavaScript parses at load time.  ``</``
    sequences inside the serialized JSON are escaped as ``<\\/`` so that a
    data value containing ``</script>`` cannot terminate the data block
    early and spill raw content into the page; ``\\/`` is a legal JSON
    string escape for ``/``, so the parsed value is unchanged.
    The ``SKILL_NAME`` placeholder in the template's <title> is replaced
    with the HTML-escaped skill name.
    """
    data_json = json.dumps(report_data, indent=None, ensure_ascii=False)
    # Harden against '</script>' (or any '</...') appearing inside values.
    data_json = data_json.replace('</', '<\\/')
    data_tag = f'<script id="report-data" type="application/json">{data_json}</script>'
    # Insert the data tag immediately before the template's main script.
    html = HTML_TEMPLATE.replace(
        '<script>\nconst DATA',
        f'{data_tag}\n<script>\nconst DATA',
    )
    skill_name = report_data.get('meta', {}).get('skill_name', 'Unknown')
    return html.replace('SKILL_NAME', html_lib.escape(skill_name))
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point.

    Parses arguments, measures the original and rebuilt skills, merges the
    LLM-generated analysis, writes a structured JSON data file plus the
    self-contained HTML report, and prints a machine-readable summary to
    stdout.

    Returns:
        0 on success, 2 on bad input (missing path or malformed JSON).
    """
    parser = argparse.ArgumentParser(
        description='Generate an interactive HTML skill conversion comparison report',
    )
    parser.add_argument(
        'original_path',
        type=Path,
        help='Path to original skill (directory or single .md file)',
    )
    parser.add_argument(
        'rebuilt_path',
        type=Path,
        help='Path to rebuilt skill directory',
    )
    parser.add_argument(
        'analysis_json',
        type=Path,
        help='Path to LLM-generated convert-analysis.json',
    )
    parser.add_argument(
        '--output', '-o',
        type=Path,
        help='Output HTML file path (default: <analysis-dir>/convert-report.html)',
    )
    parser.add_argument(
        '--open',
        action='store_true',
        help='Open the HTML report in the default browser',
    )
    args = parser.parse_args()

    # Validate inputs up front so the user gets one clear error and exit code.
    for label, path in [('Original', args.original_path),
                        ('Rebuilt', args.rebuilt_path),
                        ('Analysis', args.analysis_json)]:
        if not path.exists():
            print(f'Error: {label} path not found: {path}', file=sys.stderr)
            return 2

    # Measure both skills
    original_metrics = measure_skill(args.original_path)
    rebuilt_metrics = measure_skill(args.rebuilt_path)
    reductions = calculate_reductions(original_metrics, rebuilt_metrics)

    # Load LLM analysis; malformed JSON is a user error, not a traceback.
    try:
        analysis = json.loads(args.analysis_json.read_text(encoding='utf-8'))
    except json.JSONDecodeError as err:
        print(f'Error: invalid JSON in {args.analysis_json}: {err}', file=sys.stderr)
        return 2

    # Build report data
    report_data = build_report_data(
        original_metrics, rebuilt_metrics, analysis, reductions,
    )

    # Save structured report data alongside analysis
    report_data_path = args.analysis_json.parent / 'convert-report-data.json'
    report_data_path.write_text(
        json.dumps(report_data, indent=2, ensure_ascii=False),
        encoding='utf-8',
    )

    # Generate HTML
    html = generate_html(report_data)
    output_path = args.output or (args.analysis_json.parent / 'convert-report.html')
    output_path.write_text(html, encoding='utf-8')

    # Machine-readable summary to stdout
    print(json.dumps({
        'html_report': str(output_path),
        'original': original_metrics,
        'rebuilt': rebuilt_metrics,
        'reductions': reductions,
    }))

    if args.open:
        # webbrowser is portable across macOS/Linux/Windows; it replaces the
        # previous open/xdg-open/start dispatch, whose Windows branch passed a
        # list to `start` with shell=True (fragile quoting of a shell builtin).
        import webbrowser
        webbrowser.open(output_path.resolve().as_uri())

    return 0
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
@@ -109,12 +109,7 @@ def parse_frontmatter(content: str) -> tuple[dict | None, list[dict]]:
|
||||
'severity': 'high', 'category': 'frontmatter',
|
||||
'issue': f'Name "{name}" is not kebab-case',
|
||||
})
|
||||
elif not name.startswith('bmad-'):
|
||||
findings.append({
|
||||
'file': 'SKILL.md', 'line': 1,
|
||||
'severity': 'medium', 'category': 'frontmatter',
|
||||
'issue': f'Name "{name}" does not follow bmad-* naming convention',
|
||||
})
|
||||
# bmad- prefix check removed — bmad- is reserved for official BMad creations only
|
||||
|
||||
# description check
|
||||
desc = fm.get('description')
|
||||
|
||||
@@ -2,10 +2,10 @@
|
||||
"""Deterministic path standards scanner for BMad skills.
|
||||
|
||||
Validates all .md and .json files against BMad path conventions:
|
||||
1. {project-root} only valid before /_bmad
|
||||
1. {project-root} for any project-scope path (not just _bmad)
|
||||
2. Bare _bmad references must have {project-root} prefix
|
||||
3. Config variables used directly (no double-prefix)
|
||||
4. Skill-internal paths must use ./ prefix (references/, scripts/, assets/)
|
||||
3. Config variables used directly — no double-prefix with {project-root}
|
||||
4. ./ only for same-folder references — never ./subdir/ cross-directory
|
||||
5. No ../ parent directory references
|
||||
6. No absolute paths
|
||||
7. Frontmatter allows only name and description
|
||||
@@ -27,8 +27,8 @@ from pathlib import Path
|
||||
|
||||
|
||||
# Patterns to detect
|
||||
# {project-root} NOT followed by /_bmad
|
||||
PROJECT_ROOT_NOT_BMAD_RE = re.compile(r'\{project-root\}/(?!_bmad)')
|
||||
# Double-prefix: {project-root}/{config-variable} — config vars already contain project-root
|
||||
DOUBLE_PREFIX_RE = re.compile(r'\{project-root\}/\{[^}]+\}')
|
||||
# Bare _bmad without {project-root} prefix — match _bmad at word boundary
|
||||
# but not when preceded by {project-root}/
|
||||
BARE_BMAD_RE = re.compile(r'(?<!\{project-root\}/)_bmad[/\s]')
|
||||
@@ -37,9 +37,8 @@ ABSOLUTE_PATH_RE = re.compile(r'(?:^|[\s"`\'(])(/(?:Users|home|opt|var|tmp|etc|u
|
||||
HOME_PATH_RE = re.compile(r'(?:^|[\s"`\'(])(~/\S+)', re.MULTILINE)
|
||||
# Parent directory reference (still invalid)
|
||||
RELATIVE_DOT_RE = re.compile(r'(?:^|[\s"`\'(])(\.\./\S+)', re.MULTILINE)
|
||||
# Bare skill-internal paths without ./ prefix
|
||||
# Match references/, scripts/, assets/ when NOT preceded by ./
|
||||
BARE_INTERNAL_RE = re.compile(r'(?:^|[\s"`\'(])(?<!\./)((?:references|scripts|assets)/\S+)', re.MULTILINE)
|
||||
# Cross-directory ./ — ./subdir/ is wrong because ./ means same folder only
|
||||
CROSS_DIR_DOT_SLASH_RE = re.compile(r'(?:^|[\s"`\'(])\./(?:references|scripts|assets)/\S+', re.MULTILINE)
|
||||
|
||||
# Fenced code block detection (to skip examples showing wrong patterns)
|
||||
FENCE_RE = re.compile(r'^```', re.MULTILINE)
|
||||
@@ -137,16 +136,16 @@ def scan_file(filepath: Path, skip_fenced: bool = True) -> list[dict]:
|
||||
rel_path = filepath.name
|
||||
|
||||
checks = [
|
||||
(PROJECT_ROOT_NOT_BMAD_RE, 'project-root-not-bmad', 'critical',
|
||||
'{project-root} used for non-_bmad path — only valid use is {project-root}/_bmad/...'),
|
||||
(DOUBLE_PREFIX_RE, 'double-prefix', 'critical',
|
||||
'Double-prefix: {project-root}/{variable} — config variables already contain {project-root} at runtime'),
|
||||
(ABSOLUTE_PATH_RE, 'absolute-path', 'high',
|
||||
'Absolute path found — not portable across machines'),
|
||||
(HOME_PATH_RE, 'absolute-path', 'high',
|
||||
'Home directory path (~/) found — environment-specific'),
|
||||
(RELATIVE_DOT_RE, 'relative-prefix', 'high',
|
||||
'Parent directory reference (../) found — fragile, breaks with reorganization'),
|
||||
(BARE_INTERNAL_RE, 'bare-internal-path', 'high',
|
||||
'Bare skill-internal path without ./ prefix — use ./references/, ./scripts/, ./assets/ to distinguish from {project-root} paths'),
|
||||
(CROSS_DIR_DOT_SLASH_RE, 'cross-dir-dot-slash', 'high',
|
||||
'Cross-directory ./ reference — ./ means same folder only; use bare skill-root relative path (e.g., references/foo.md not ./references/foo.md)'),
|
||||
]
|
||||
|
||||
for pattern, category, severity, message in checks:
|
||||
@@ -221,12 +220,11 @@ def scan_skill(skill_path: Path, skip_fenced: bool = True) -> dict:
|
||||
# Build summary
|
||||
by_severity = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
|
||||
by_category = {
|
||||
'project_root_not_bmad': 0,
|
||||
'bare_bmad': 0,
|
||||
'double_prefix': 0,
|
||||
'bare_bmad': 0,
|
||||
'absolute_path': 0,
|
||||
'relative_prefix': 0,
|
||||
'bare_internal_path': 0,
|
||||
'cross_dir_dot_slash': 0,
|
||||
'frontmatter': 0,
|
||||
'structure': 0,
|
||||
}
|
||||
@@ -242,7 +240,7 @@ def scan_skill(skill_path: Path, skip_fenced: bool = True) -> dict:
|
||||
return {
|
||||
'scanner': 'path-standards',
|
||||
'script': 'scan-path-standards.py',
|
||||
'version': '2.0.0',
|
||||
'version': '3.0.0',
|
||||
'skill_path': str(skill_path),
|
||||
'timestamp': datetime.now(timezone.utc).isoformat(),
|
||||
'files_scanned': files_scanned,
|
||||
|
||||
243
.github/skills/bmad-workflow-builder/scripts/tests/test_generate_convert_report.py
vendored
Normal file
243
.github/skills/bmad-workflow-builder/scripts/tests/test_generate_convert_report.py
vendored
Normal file
@@ -0,0 +1,243 @@
|
||||
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.9"
# ///
"""Tests for generate-convert-report.py."""

from __future__ import annotations

import json
import sys
import tempfile
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

# Load the script as a module.  The script's filename contains hyphens, so it
# cannot be imported with a normal `import` statement; importlib loads it from
# its file path (one directory above this tests/ folder) instead.
# NOTE(review): spec_from_file_location returns None when the file is missing,
# which would raise AttributeError below — acceptable for a test bootstrap.
_script_path = Path(__file__).resolve().parent.parent / 'generate-convert-report.py'
_spec = spec_from_file_location('generate_convert_report', _script_path)
_mod = module_from_spec(_spec)
_spec.loader.exec_module(_mod)

# Re-export the functions under test at module level for direct use below.
measure_skill = _mod.measure_skill
calculate_reductions = _mod.calculate_reductions
build_report_data = _mod.build_report_data
generate_html = _mod.generate_html
|
||||
|
||||
|
||||
def test_measure_skill_single_file():
    """A lone .md file is measured directly."""
    with tempfile.TemporaryDirectory() as td:
        skill_file = Path(td) / 'SKILL.md'
        skill_file.write_text('## Section One\n\nSome words here.\n\n## Section Two\n\nMore words.\n')
        metrics = measure_skill(skill_file)
        assert metrics['lines'] == 7, f"Expected 7 lines, got {metrics['lines']}"
        assert metrics['sections'] == 2, f"Expected 2 sections, got {metrics['sections']}"
        assert metrics['files'] == 1
        # All text-derived counters must be positive for non-empty input.
        for key in ('estimated_tokens', 'words', 'chars'):
            assert metrics[key] > 0
|
||||
|
||||
|
||||
def test_measure_skill_directory():
    """Multiple .md files in a tree are aggregated."""
    with tempfile.TemporaryDirectory() as td:
        root = Path(td)
        (root / 'SKILL.md').write_text('## Overview\n\nHello world.\n')
        ref_dir = root / 'references'
        ref_dir.mkdir()
        (ref_dir / 'ref.md').write_text('## Reference\n\nSome reference content.\n')
        metrics = measure_skill(root)
        assert metrics['lines'] == 6, f"Expected 6 lines, got {metrics['lines']}"
        assert metrics['sections'] == 2
        assert metrics['files'] == 2
|
||||
|
||||
|
||||
def test_measure_skill_with_non_md_files():
    """Non-.md files count toward file total but not line/word/section counts."""
    with tempfile.TemporaryDirectory() as td:
        root = Path(td)
        (root / 'SKILL.md').write_text('## Overview\n\nHello.\n')
        script_dir = root / 'scripts'
        script_dir.mkdir()
        (script_dir / 'run.py').write_text('print("hello")\n')
        metrics = measure_skill(root)
        assert metrics['files'] == 2, f"Expected 2 files, got {metrics['files']}"
        assert metrics['lines'] == 3, f"Expected 3 lines (only .md), got {metrics['lines']}"
|
||||
|
||||
|
||||
def test_calculate_reductions():
    """Reduction percentages are computed per metric."""
    before = {'lines': 800, 'words': 5000, 'chars': 30000, 'sections': 30, 'estimated_tokens': 6500}
    after = {'lines': 80, 'words': 500, 'chars': 3000, 'sections': 6, 'estimated_tokens': 650}
    reductions = calculate_reductions(before, after)
    expected = {
        'lines': '90%',
        'words': '90%',
        'chars': '90%',
        'sections': '80%',
        'estimated_tokens': '90%',
    }
    for metric, pct in expected.items():
        assert reductions[metric] == pct
|
||||
|
||||
|
||||
def test_calculate_reductions_zero_original():
    """Handle zero values gracefully."""
    before = {'lines': 0, 'words': 100, 'chars': 500, 'sections': 0, 'estimated_tokens': 130}
    after = {'lines': 0, 'words': 50, 'chars': 250, 'sections': 0, 'estimated_tokens': 65}
    result = calculate_reductions(before, after)
    # A zero-valued original metric cannot yield a percentage.
    assert result['lines'] == 'N/A'
    assert result['sections'] == 'N/A'
    assert result['words'] == '50%'
|
||||
|
||||
|
||||
def test_calculate_reductions_no_change():
    """No reduction yields 0%."""
    metrics = {'lines': 100, 'words': 500, 'chars': 3000, 'sections': 5, 'estimated_tokens': 650}
    result = calculate_reductions(metrics, metrics)
    assert result['lines'] == '0%'
    assert result['words'] == '0%'
|
||||
|
||||
|
||||
def test_build_report_data():
    """Assemble report data with all fields."""
    analysis = {
        'skill_name': 'test-skill',
        'original_source': '/path/to/original',
        'cuts': [{'category': 'Bloat', 'description': 'Removed bloat', 'examples': ['x'], 'severity': 'high'}],
        'retained': [{'category': 'Core', 'description': 'Kept core'}],
        'verdict': 'Much better now.',
    }
    original = {'lines': 100, 'words': 500, 'chars': 3000, 'sections': 10, 'files': 1, 'estimated_tokens': 650}
    rebuilt = {'lines': 20, 'words': 100, 'chars': 600, 'sections': 3, 'files': 1, 'estimated_tokens': 130}
    reductions = {'lines': '80%', 'words': '80%', 'chars': '80%', 'sections': '70%', 'estimated_tokens': '80%'}

    data = build_report_data(original, rebuilt, analysis, reductions)

    meta = data['meta']
    assert meta['skill_name'] == 'test-skill'
    assert meta['original_source'] == '/path/to/original'
    assert 'timestamp' in meta
    assert data['metrics']['original']['lines'] == 100
    assert data['metrics']['rebuilt']['lines'] == 20
    assert data['reductions']['lines'] == '80%'
    assert len(data['cuts']) == 1
    assert data['cuts'][0]['category'] == 'Bloat'
    assert len(data['retained']) == 1
    assert data['verdict'] == 'Much better now.'
|
||||
|
||||
|
||||
def test_build_report_data_missing_fields():
    """Handle analysis with missing optional fields."""
    data = build_report_data({}, {}, {'skill_name': 'minimal'}, {})
    assert data['meta']['skill_name'] == 'minimal'
    # Optional analysis fields fall back to empty defaults.
    assert data['cuts'] == []
    assert data['retained'] == []
    assert data['verdict'] == ''
|
||||
|
||||
|
||||
def test_generate_html_structure():
    """Generated HTML is valid and contains key elements."""
    report_data = {
        'meta': {'skill_name': 'test-skill', 'original_source': 'http://example.com', 'timestamp': '2026-01-01T00:00:00Z'},
        'metrics': {
            'original': {'lines': 100, 'words': 500, 'chars': 3000, 'sections': 10, 'files': 1, 'estimated_tokens': 650},
            'rebuilt': {'lines': 20, 'words': 100, 'chars': 600, 'sections': 3, 'files': 1, 'estimated_tokens': 130},
        },
        'reductions': {'lines': '80%', 'words': '80%', 'chars': '80%', 'sections': '70%', 'estimated_tokens': '80%'},
        'cuts': [{'category': 'Waste', 'description': 'Pure waste', 'examples': ['ex1'], 'severity': 'high'}],
        'retained': [{'category': 'Core', 'description': 'Essential'}],
        'verdict': 'Dramatically improved.',
    }
    page = generate_html(report_data)
    # Spot-check the structural landmarks of the rendered page.
    for marker in ('<!DOCTYPE html>', 'report-data', 'test-skill',
                   'BMad Method', 'Skill Conversion', '--convert'):
        assert marker in page
|
||||
|
||||
|
||||
def test_generate_html_escapes_data():
    """Verify data is embedded as JSON, not raw HTML."""
    payload = {
        'meta': {'skill_name': '<script>alert("xss")</script>', 'original_source': '', 'timestamp': ''},
        'metrics': {'original': {}, 'rebuilt': {}},
        'reductions': {},
        'cuts': [],
        'retained': [],
        'verdict': '',
    }
    page = generate_html(payload)
    # Everything before the JSON data tag must be free of the raw payload.
    head = page.split('application/json')[0]
    assert '<script>alert' not in head
|
||||
|
||||
|
||||
def test_end_to_end():
    """Full pipeline: create files, measure, analyze, generate HTML."""
    with tempfile.TemporaryDirectory() as td:
        td_path = Path(td)

        # Original skill — verbose (500 repeated words inflate the counts)
        orig_dir = td_path / 'original'
        orig_dir.mkdir()
        (orig_dir / 'SKILL.md').write_text(
            '## Section 1\n\n' + 'word ' * 500 + '\n\n'
            '## Section 2\n\nMore verbose content.\n\n'
            '## Section 3\n\nEven more.\n',
        )

        # Rebuilt skill — lean (single short section)
        rebuilt_dir = td_path / 'rebuilt'
        rebuilt_dir.mkdir()
        (rebuilt_dir / 'SKILL.md').write_text('## Core\n\nLean and effective.\n')

        # Measure both directories and derive the reduction percentages
        orig_m = measure_skill(orig_dir)
        rebuilt_m = measure_skill(rebuilt_dir)
        reductions = calculate_reductions(orig_m, rebuilt_m)

        # Sanity: the verbose original must dominate the lean rebuild
        assert orig_m['words'] > rebuilt_m['words']
        assert orig_m['sections'] > rebuilt_m['sections']

        # Analysis — stands in for the LLM-generated convert-analysis.json
        analysis = {
            'skill_name': 'e2e-test',
            'original_source': str(orig_dir),
            'cuts': [
                {'category': 'Bloat', 'description': 'Removed verbose filler', 'examples': ['500 repeated words'], 'severity': 'high'},
            ],
            'retained': [
                {'category': 'Core Intent', 'description': 'Essential behavioral instructions'},
            ],
            'verdict': 'Converted successfully.',
        }

        report_data = build_report_data(orig_m, rebuilt_m, analysis, reductions)
        html = generate_html(report_data)

        # Write and verify the rendered report is non-trivial in size
        out = td_path / 'report.html'
        out.write_text(html, encoding='utf-8')
        assert out.exists()
        assert out.stat().st_size > 1000

        # Verify report data roundtrips through JSON serialization
        data_file = td_path / 'report-data.json'
        data_file.write_text(json.dumps(report_data, indent=2), encoding='utf-8')
        loaded = json.loads(data_file.read_text(encoding='utf-8'))
        assert loaded['meta']['skill_name'] == 'e2e-test'
        assert len(loaded['cuts']) == 1
        # 500 filler words vs a handful retained → well over 50% word reduction
        assert int(reductions['words'].rstrip('%')) > 50
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Minimal zero-dependency runner: call every test_* function in
    # alphabetical order and emit a pytest-like pass/fail summary.
    test_names = [n for n in sorted(dir()) if n.startswith('test_')]
    passed = failed = 0
    for test_name in test_names:
        try:
            globals()[test_name]()
        except Exception as exc:
            print(f' FAIL {test_name}: {exc}')
            failed += 1
        else:
            print(f' PASS {test_name}')
            passed += 1
    print(f'\n{passed} passed, {failed} failed')
    # Non-zero exit when anything failed, for CI friendliness.
    sys.exit(1 if failed else 0)
|
||||
Reference in New Issue
Block a user