#!/usr/bin/env python3

from __future__ import annotations

import argparse
import json
from dataclasses import asdict
from datetime import date
from pathlib import Path

from inire.tests.example_scenarios import PERFORMANCE_SCENARIO_SNAPSHOTS, SCENARIO_SNAPSHOTS

SUMMARY_METRICS = (
|
|
"route_iterations",
|
|
"nets_routed",
|
|
"nodes_expanded",
|
|
"ray_cast_calls",
|
|
"moves_generated",
|
|
"moves_added",
|
|
"dynamic_tree_rebuilds",
|
|
"visibility_builds",
|
|
"congestion_check_calls",
|
|
"verify_path_report_calls",
|
|
)
|
|
|
|
|
|
def _snapshot_registry(include_performance_only: bool) -> tuple[tuple[str, object], ...]:
|
|
if not include_performance_only:
|
|
return SCENARIO_SNAPSHOTS
|
|
return SCENARIO_SNAPSHOTS + PERFORMANCE_SCENARIO_SNAPSHOTS
|
|
|
|
|
|
def _build_payload(
|
|
selected_scenarios: tuple[str, ...] | None = None,
|
|
*,
|
|
include_performance_only: bool = False,
|
|
) -> dict[str, object]:
|
|
allowed = None if selected_scenarios is None else set(selected_scenarios)
|
|
snapshots = []
|
|
for name, run in _snapshot_registry(include_performance_only):
|
|
if allowed is not None and name not in allowed:
|
|
continue
|
|
snapshots.append(run())
|
|
return {
|
|
"generated_on": date.today().isoformat(),
|
|
"generator": "scripts/record_performance_baseline.py",
|
|
"scenarios": [asdict(snapshot) for snapshot in snapshots],
|
|
}
|
|
|
|
|
|
def _render_markdown(payload: dict[str, object]) -> str:
|
|
rows = payload["scenarios"]
|
|
lines = [
|
|
"# Performance Baseline",
|
|
"",
|
|
f"Generated on {payload['generated_on']} by `{payload['generator']}`.",
|
|
"",
|
|
"The full machine-readable snapshot lives in `docs/performance_baseline.json`.",
|
|
"Use `scripts/diff_performance_baseline.py` to compare a fresh run against that snapshot.",
|
|
"",
|
|
"| Scenario | Duration (s) | Total | Valid | Reached | Iter | Nets Routed | Nodes | Ray Casts | Moves Gen | Moves Added | Dyn Tree | Visibility Builds | Congestion Checks | Verify Calls |",
|
|
"| :-- | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: |",
|
|
]
|
|
for row in rows:
|
|
metrics = row["metrics"]
|
|
lines.append(
|
|
"| "
|
|
f"{row['name']} | "
|
|
f"{row['duration_s']:.4f} | "
|
|
f"{row['total_results']} | "
|
|
f"{row['valid_results']} | "
|
|
f"{row['reached_targets']} | "
|
|
f"{metrics['route_iterations']} | "
|
|
f"{metrics['nets_routed']} | "
|
|
f"{metrics['nodes_expanded']} | "
|
|
f"{metrics['ray_cast_calls']} | "
|
|
f"{metrics['moves_generated']} | "
|
|
f"{metrics['moves_added']} | "
|
|
f"{metrics['dynamic_tree_rebuilds']} | "
|
|
f"{metrics['visibility_builds']} | "
|
|
f"{metrics['congestion_check_calls']} | "
|
|
f"{metrics['verify_path_report_calls']} |"
|
|
)
|
|
|
|
lines.extend(
|
|
[
|
|
"",
|
|
"## Full Counter Set",
|
|
"",
|
|
"Each scenario entry in `docs/performance_baseline.json` records the full `RouteMetrics` snapshot, including cache, index, congestion, and verification counters.",
|
|
"These counters are currently observational only and are not enforced as CI regression gates.",
|
|
"",
|
|
"Tracked metric keys:",
|
|
"",
|
|
", ".join(rows[0]["metrics"].keys()) if rows else "",
|
|
]
|
|
)
|
|
return "\n".join(lines) + "\n"
|
|
|
|
|
|
def main() -> None:
|
|
parser = argparse.ArgumentParser(description="Record the example-scenario performance baseline.")
|
|
parser.add_argument(
|
|
"--output-dir",
|
|
type=Path,
|
|
default=None,
|
|
help="Directory to write performance_baseline.json and performance.md into. Defaults to <repo>/docs.",
|
|
)
|
|
parser.add_argument(
|
|
"--scenario",
|
|
action="append",
|
|
dest="scenarios",
|
|
default=[],
|
|
help="Optional scenario name to include. May be passed more than once.",
|
|
)
|
|
parser.add_argument(
|
|
"--include-performance-only",
|
|
action="store_true",
|
|
help="Include performance-only snapshot scenarios that are excluded from the default baseline corpus.",
|
|
)
|
|
args = parser.parse_args()
|
|
|
|
repo_root = Path(__file__).resolve().parents[1]
|
|
docs_dir = repo_root / "docs" if args.output_dir is None else args.output_dir.resolve()
|
|
docs_dir.mkdir(exist_ok=True)
|
|
|
|
selected = tuple(args.scenarios) if args.scenarios else None
|
|
payload = _build_payload(selected, include_performance_only=args.include_performance_only)
|
|
json_path = docs_dir / "performance_baseline.json"
|
|
markdown_path = docs_dir / "performance.md"
|
|
|
|
json_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n")
|
|
markdown_path.write_text(_render_markdown(payload))
|
|
|
|
if json_path.is_relative_to(repo_root):
|
|
print(f"Wrote {json_path.relative_to(repo_root)}")
|
|
else:
|
|
print(f"Wrote {json_path}")
|
|
if markdown_path.is_relative_to(repo_root):
|
|
print(f"Wrote {markdown_path.relative_to(repo_root)}")
|
|
else:
|
|
print(f"Wrote {markdown_path}")
|
|
|
|
|
|
if __name__ == "__main__":
|
|
main()
|