more perf counters

This commit is contained in:
Jan Petykiewicz 2026-03-31 17:41:15 -07:00
commit e77fd6e69f
15 changed files with 643 additions and 54 deletions

View file

@ -0,0 +1,113 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
from dataclasses import asdict
from pathlib import Path
from inire.tests.example_scenarios import SCENARIO_SNAPSHOTS
# Metric keys rendered in the diff report, in table order.
# NOTE: "duration_s" is stored at the top level of each scenario snapshot;
# every other key is looked up inside the snapshot's nested "metrics"
# mapping (see _metric_value).
SUMMARY_KEYS = (
    "duration_s",
    "route_iterations",
    "nets_routed",
    "nodes_expanded",
    "ray_cast_calls",
    "moves_generated",
    "moves_added",
    "congestion_check_calls",
    "verify_path_report_calls",
)
def _current_snapshots(selected_scenarios: tuple[str, ...] | None) -> dict[str, dict[str, object]]:
    """Run every (selected) scenario and capture its result as a plain dict.

    Args:
        selected_scenarios: Scenario names to include, or ``None`` for all.

    Returns:
        Mapping of scenario name to the ``asdict``-converted run result.
    """
    wanted = set(selected_scenarios) if selected_scenarios is not None else None
    return {
        name: asdict(run())
        for name, run in SCENARIO_SNAPSHOTS
        if wanted is None or name in wanted
    }
def _load_baseline(path: Path, selected_scenarios: tuple[str, ...] | None) -> dict[str, dict[str, object]]:
payload = json.loads(path.read_text())
allowed = None if selected_scenarios is None else set(selected_scenarios)
return {
entry["name"]: entry
for entry in payload["scenarios"]
if allowed is None or entry["name"] in allowed
}
def _metric_value(snapshot: dict[str, object], key: str) -> float:
if key == "duration_s":
return float(snapshot["duration_s"])
return float(snapshot["metrics"][key])
def _render_report(baseline: dict[str, dict[str, object]], current: dict[str, dict[str, object]]) -> str:
    """Build a Markdown table comparing baseline and current scenario metrics.

    Scenarios present only in ``current`` are flagged ``added``; scenarios
    present only in ``baseline`` are flagged ``missing``.  Matched scenarios
    get one row per key in ``SUMMARY_KEYS`` with the numeric delta.
    """
    header = [
        "# Performance Baseline Diff",
        "",
        "| Scenario | Metric | Baseline | Current | Delta |",
        "| :-- | :-- | --: | --: | --: |",
    ]
    rows: list[str] = []
    for name in sorted(set(baseline) | set(current)):
        base = baseline.get(name)
        curr = current.get(name)
        if base is None:
            rows.append(f"| {name} | added | - | - | - |")
        elif curr is None:
            rows.append(f"| {name} | missing | - | - | - |")
        else:
            for key in SUMMARY_KEYS:
                before = _metric_value(base, key)
                after = _metric_value(curr, key)
                rows.append(
                    f"| {name} | {key} | {before:.4f} | {after:.4f} | {after - before:+.4f} |"
                )
    return "\n".join(header + rows) + "\n"
def main() -> None:
    """CLI entry point: diff a fresh scenario run against the saved baseline.

    Writes the Markdown report to ``--output`` when given, otherwise prints
    it to stdout.
    """
    parser = argparse.ArgumentParser(description="Diff the committed performance baseline against a fresh run.")
    parser.add_argument(
        "--baseline",
        type=Path,
        default=Path("docs/performance_baseline.json"),
        help="Baseline JSON to compare against.",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Optional file to write the report to. Defaults to stdout.",
    )
    parser.add_argument(
        "--scenario",
        action="append",
        dest="scenarios",
        default=[],
        help="Optional scenario name to include. May be passed more than once.",
    )
    args = parser.parse_args()

    # An empty --scenario list means "all scenarios" for both data sources.
    wanted = tuple(args.scenarios) if args.scenarios else None
    report = _render_report(
        _load_baseline(args.baseline, wanted),
        _current_snapshots(wanted),
    )

    if args.output is not None:
        args.output.write_text(report)
        print(f"Wrote {args.output}")
    else:
        # Report already ends with a newline; avoid doubling it.
        print(report, end="")


if __name__ == "__main__":
    main()

View file

@ -46,6 +46,7 @@ def _render_markdown(payload: dict[str, object]) -> str:
f"Generated on {payload['generated_on']} by `{payload['generator']}`.",
"",
"The full machine-readable snapshot lives in `docs/performance_baseline.json`.",
"Use `scripts/diff_performance_baseline.py` to compare a fresh run against that snapshot.",
"",
"| Scenario | Duration (s) | Total | Valid | Reached | Iter | Nets Routed | Nodes | Ray Casts | Moves Gen | Moves Added | Dyn Tree | Visibility Builds | Congestion Checks | Verify Calls |",
"| :-- | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: | --: |",
@ -77,6 +78,7 @@ def _render_markdown(payload: dict[str, object]) -> str:
"## Full Counter Set",
"",
"Each scenario entry in `docs/performance_baseline.json` records the full `RouteMetrics` snapshot, including cache, index, congestion, and verification counters.",
"These counters are currently observational only and are not enforced as CI regression gates.",
"",
"Tracked metric keys:",
"",