#!/usr/bin/env python3
"""Record iteration-trace artifacts (JSON + Markdown) for selected trace scenarios.

Runs the chosen scenarios from the trace registries, then writes
``iteration_trace.json`` and ``iteration_trace.md`` into the output directory.
"""
from __future__ import annotations

import argparse
import json
from collections import Counter
from dataclasses import asdict
from datetime import datetime
from pathlib import Path

from inire.tests.example_scenarios import TRACE_PERFORMANCE_SCENARIO_RUNS, TRACE_SCENARIO_RUNS


def _trace_registry(include_performance_only: bool) -> tuple[tuple[str, object], ...]:
    """Return the standard scenario registry, optionally extended with performance-only runs."""
    if include_performance_only:
        return TRACE_SCENARIO_RUNS + TRACE_PERFORMANCE_SCENARIO_RUNS
    return TRACE_SCENARIO_RUNS


def _selected_runs(
    selected_scenarios: tuple[str, ...] | None,
    *,
    include_performance_only: bool,
) -> tuple[tuple[str, object], ...]:
    """Resolve scenario names to ``(name, run)`` pairs.

    When ``selected_scenarios`` is None, a fixed default pair of large-scale
    performance runs is returned (``include_performance_only`` is not consulted
    for that default). An unknown name aborts with ``SystemExit`` listing the
    valid choices.
    """
    if selected_scenarios is None:
        # Default selection: the two no-warm-start large-scale routing runs.
        perf_registry = dict(TRACE_PERFORMANCE_SCENARIO_RUNS)
        return (
            (
                "example_07_large_scale_routing_no_warm_start",
                perf_registry["example_07_large_scale_routing_no_warm_start"],
            ),
            (
                "example_07_large_scale_routing_no_warm_start_seed43",
                perf_registry["example_07_large_scale_routing_no_warm_start_seed43"],
            ),
        )
    registry = dict(TRACE_SCENARIO_RUNS + TRACE_PERFORMANCE_SCENARIO_RUNS)
    # Names are first matched against the flag-filtered standard registry,
    # then against the full registry, so explicitly-named scenarios always resolve.
    allowed_standard = dict(_trace_registry(include_performance_only))
    runs = []
    for name in selected_scenarios:
        if name in allowed_standard:
            runs.append((name, allowed_standard[name]))
            continue
        if name in registry:
            runs.append((name, registry[name]))
            continue
        valid = ", ".join(sorted(registry))
        raise SystemExit(f"Unknown iteration-trace scenario: {name}. Valid scenarios: {valid}")
    return tuple(runs)


def _build_payload(
    selected_scenarios: tuple[str, ...] | None,
    *,
    include_performance_only: bool,
) -> dict[str, object]:
    """Execute each selected scenario run and assemble the JSON-serializable payload."""
    scenarios = []
    for name, run in _selected_runs(selected_scenarios, include_performance_only=include_performance_only):
        result = run()
        scenarios.append(
            {
                "name": name,
                "summary": {
                    "total_results": len(result.results_by_net),
                    "valid_results": sum(1 for entry in result.results_by_net.values() if entry.is_valid),
                    "reached_targets": sum(1 for entry in result.results_by_net.values() if entry.reached_target),
                },
                "metrics": asdict(result.metrics),
                "iteration_trace": [asdict(entry) for entry in result.iteration_trace],
            }
        )
    return {
        # Timezone-aware local timestamp, second precision.
        "generated_at": datetime.now().astimezone().isoformat(timespec="seconds"),
        "generator": "scripts/record_iteration_trace.py",
        "scenarios": scenarios,
    }


def _render_markdown(payload: dict[str, object]) -> str:
    """Render the payload as a Markdown report with per-iteration tables and top-net rollups."""
    lines = [
        "# Iteration Trace",
        "",
        f"Generated at {payload['generated_at']} by `{payload['generator']}`.",
        "",
    ]
    for scenario in payload["scenarios"]:
        lines.extend(
            [
                f"## {scenario['name']}",
                "",
                f"Results: {scenario['summary']['valid_results']} valid / "
                f"{scenario['summary']['reached_targets']} reached / "
                f"{scenario['summary']['total_results']} total.",
                "",
                "| Iteration | Penalty | Routed Nets | Completed | Conflict Edges | Dynamic Collisions | Nodes | Congestion Checks | Candidate Ids | Exact Pairs |",
                "| --: | --: | --: | --: | --: | --: | --: | --: | --: | --: |",
            ]
        )
        # Per-scenario rollups of cost attributed to each net across iterations.
        net_node_counts: Counter[str] = Counter()
        net_check_counts: Counter[str] = Counter()
        for entry in scenario["iteration_trace"]:
            lines.append(
                "| "
                f"{entry['iteration']} | "
                f"{entry['congestion_penalty']:.1f} | "
                f"{len(entry['routed_net_ids'])} | "
                f"{entry['completed_nets']} | "
                f"{entry['conflict_edges']} | "
                f"{entry['total_dynamic_collisions']} | "
                f"{entry['nodes_expanded']} | "
                f"{entry['congestion_check_calls']} | "
                f"{entry['congestion_candidate_ids']} | "
                f"{entry['congestion_exact_pair_checks']} |"
            )
            for attempt in entry["net_attempts"]:
                net_node_counts[attempt["net_id"]] += attempt["nodes_expanded"]
                net_check_counts[attempt["net_id"]] += attempt["congestion_check_calls"]
        lines.extend(["", "Top nets by iteration-attributed nodes expanded:", ""])
        if net_node_counts:
            for net_id, count in net_node_counts.most_common(10):
                lines.append(f"- `{net_id}`: {count}")
        else:
            lines.append("- None")
        lines.extend(["", "Top nets by iteration-attributed congestion checks:", ""])
        if net_check_counts:
            for net_id, count in net_check_counts.most_common(10):
                lines.append(f"- `{net_id}`: {count}")
        else:
            lines.append("- None")
        lines.append("")
    return "\n".join(lines)


def main() -> None:
    """CLI entry point: parse arguments, run scenarios, and write the artifacts."""
    parser = argparse.ArgumentParser(description="Record iteration-trace artifacts for selected trace scenarios.")
    parser.add_argument(
        "--scenario",
        action="append",
        dest="scenarios",
        default=[],
        help="Optional trace scenario name to include. May be passed more than once.",
    )
    parser.add_argument(
        "--include-performance-only",
        action="store_true",
        help="Include performance-only trace scenarios when selecting from the standard registry.",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=None,
        # Fixed misleading help text: the default is docs/ under the repo root, not an absolute /docs.
        help="Directory to write iteration_trace.json and iteration_trace.md into. Defaults to docs/ under the repository root.",
    )
    args = parser.parse_args()
    # Script lives in scripts/, so the repo root is one directory up.
    repo_root = Path(__file__).resolve().parents[1]
    output_dir = repo_root / "docs" if args.output_dir is None else args.output_dir.resolve()
    # parents=True so a nested --output-dir (e.g. out/a/b) does not raise FileNotFoundError.
    output_dir.mkdir(parents=True, exist_ok=True)
    selected = tuple(args.scenarios) if args.scenarios else None
    payload = _build_payload(selected, include_performance_only=args.include_performance_only)
    json_path = output_dir / "iteration_trace.json"
    markdown_path = output_dir / "iteration_trace.md"
    # Explicit UTF-8 so the artifacts are stable regardless of the platform's locale encoding.
    json_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8")
    markdown_path.write_text(_render_markdown(payload) + "\n", encoding="utf-8")
    # Report repo-relative paths when possible for readable output.
    for written in (json_path, markdown_path):
        if written.is_relative_to(repo_root):
            print(f"Wrote {written.relative_to(repo_root)}")
        else:
            print(f"Wrote {written}")


if __name__ == "__main__":
    main()