228 lines
8.2 KiB
Python
228 lines
8.2 KiB
Python
|
|
#!/usr/bin/env python3
|
||
|
|
from __future__ import annotations
|
||
|
|
|
||
|
|
import argparse
|
||
|
|
import json
|
||
|
|
from collections import Counter
|
||
|
|
from dataclasses import asdict
|
||
|
|
from datetime import datetime
|
||
|
|
from pathlib import Path
|
||
|
|
|
||
|
|
from inire.results import RoutingRunResult
|
||
|
|
from inire.tests.example_scenarios import TRACE_PERFORMANCE_SCENARIO_RUNS, TRACE_SCENARIO_RUNS
|
||
|
|
|
||
|
|
|
||
|
|
def _trace_registry(include_performance_only: bool) -> tuple[tuple[str, object], ...]:
    """Return the registered trace scenarios.

    The standard scenarios are always present; the performance-only ones are
    appended when *include_performance_only* is true.
    """
    if not include_performance_only:
        return TRACE_SCENARIO_RUNS
    return TRACE_SCENARIO_RUNS + TRACE_PERFORMANCE_SCENARIO_RUNS
|
||
|
|
|
||
|
|
|
||
|
|
def _selected_runs(
    selected_scenarios: tuple[str, ...] | None,
    *,
    include_performance_only: bool,
) -> tuple[tuple[str, object], ...]:
    """Resolve scenario names to ``(name, run)`` pairs.

    When *selected_scenarios* is ``None``, falls back to the single default
    large-scale performance scenario. Explicitly named scenarios are resolved
    against the full registry (standard + performance-only), matching the
    original behavior where an explicit name always won regardless of the
    *include_performance_only* flag.

    Raises:
        SystemExit: if a requested scenario name is unknown.
    """
    if selected_scenarios is None:
        default_name = "example_07_large_scale_routing_no_warm_start"
        return ((default_name, dict(TRACE_PERFORMANCE_SCENARIO_RUNS)[default_name]),)

    # NOTE: the original code first consulted _trace_registry(include_performance_only)
    # and then fell back to the full registry with identical values, so the first
    # lookup was dead code; explicit names always resolve against the full registry.
    # The keyword-only flag is retained for interface compatibility.
    registry = dict(TRACE_SCENARIO_RUNS + TRACE_PERFORMANCE_SCENARIO_RUNS)
    runs = []
    for name in selected_scenarios:
        if name not in registry:
            valid = ", ".join(sorted(registry))
            raise SystemExit(f"Unknown trace scenario: {name}. Valid scenarios: {valid}")
        runs.append((name, registry[name]))
    return tuple(runs)
|
||
|
|
|
||
|
|
|
||
|
|
def _result_summary(run: RoutingRunResult) -> dict[str, object]:
    """Summarize a routing run: aggregate counts plus a per-net breakdown."""
    results = run.results_by_net

    # Per-net details: outcome, whether the target was reached, and the full report.
    per_net: dict[str, object] = {}
    for net_id, result in results.items():
        per_net[net_id] = {
            "outcome": result.outcome,
            "reached_target": result.reached_target,
            "report": asdict(result.report),
        }

    return {
        "total_results": len(results),
        "valid_results": sum(1 for result in results.values() if result.is_valid),
        "reached_targets": sum(1 for result in results.values() if result.reached_target),
        "results_by_net": per_net,
    }
|
||
|
|
|
||
|
|
|
||
|
|
def _build_payload(
    selected_scenarios: tuple[str, ...] | None,
    *,
    include_performance_only: bool,
) -> dict[str, object]:
    """Execute the selected scenario runs and assemble the JSON-serializable payload."""
    scenario_entries = []
    runs = _selected_runs(selected_scenarios, include_performance_only=include_performance_only)
    for name, run in runs:
        outcome = run()
        entry = {
            "name": name,
            "summary": _result_summary(outcome),
            "metrics": asdict(outcome.metrics),
            "conflict_trace": [asdict(item) for item in outcome.conflict_trace],
        }
        scenario_entries.append(entry)

    # Local-timezone-aware timestamp so artifacts record when they were generated.
    timestamp = datetime.now().astimezone().isoformat(timespec="seconds")
    return {
        "generated_at": timestamp,
        "generator": "scripts/record_conflict_trace.py",
        "scenarios": scenario_entries,
    }
|
||
|
|
|
||
|
|
|
||
|
|
def _count_stage_nets(entry: dict[str, object]) -> int:
|
||
|
|
return sum(
|
||
|
|
1
|
||
|
|
for net in entry["nets"]
|
||
|
|
if net["report"]["dynamic_collision_count"] > 0
|
||
|
|
)
|
||
|
|
|
||
|
|
|
||
|
|
def _canonical_component_pair(
|
||
|
|
net_id: str,
|
||
|
|
self_component_index: int,
|
||
|
|
other_net_id: str,
|
||
|
|
other_component_index: int,
|
||
|
|
) -> tuple[tuple[str, int], tuple[str, int]]:
|
||
|
|
left = (net_id, self_component_index)
|
||
|
|
right = (other_net_id, other_component_index)
|
||
|
|
if left <= right:
|
||
|
|
return (left, right)
|
||
|
|
return (right, left)
|
||
|
|
|
||
|
|
|
||
|
|
def _render_markdown(payload: dict[str, object]) -> str:
    """Render the conflict-trace payload as a Markdown report.

    For each scenario emits a stage table (one row per trace entry) followed by
    "top offender" lists: nets ranked by how many traced stages recorded dynamic
    collisions, net pairs by conflict-edge frequency, and component pairs by the
    number of trace entries in which they appear.

    Args:
        payload: The dict produced by ``_build_payload``.

    Returns:
        The full Markdown document as a single string.
    """
    # Document header with provenance from the payload.
    lines = [
        "# Conflict Trace",
        "",
        f"Generated at {payload['generated_at']} by `{payload['generator']}`.",
        "",
    ]

    for scenario in payload["scenarios"]:
        # Scenario heading, summary line, and the stage-table header row.
        lines.extend(
            [
                f"## {scenario['name']}",
                "",
                f"Results: {scenario['summary']['valid_results']} valid / "
                f"{scenario['summary']['reached_targets']} reached / "
                f"{scenario['summary']['total_results']} total.",
                "",
                "| Stage | Iteration | Conflicting Nets | Conflict Edges | Completed Nets |",
                "| :-- | --: | --: | --: | --: |",
            ]
        )

        # Per-scenario accumulators for the ranked lists below the table.
        net_stage_counts: Counter[str] = Counter()
        edge_counts: Counter[tuple[str, str]] = Counter()
        component_pair_counts: Counter[tuple[tuple[str, int], tuple[str, int]]] = Counter()
        trace_entries = scenario["conflict_trace"]
        for entry in trace_entries:
            # One table row per trace entry; a None iteration renders as blank.
            lines.append(
                "| "
                f"{entry['stage']} | "
                f"{'' if entry['iteration'] is None else entry['iteration']} | "
                f"{_count_stage_nets(entry)} | "
                f"{len(entry['conflict_edges'])} | "
                f"{len(entry['completed_net_ids'])} |"
            )
            # Dedupe within this entry so each component pair is counted at
            # most once per trace entry (counts entries, not raw conflicts).
            seen_component_pairs: set[tuple[tuple[str, int], tuple[str, int]]] = set()
            for edge in entry["conflict_edges"]:
                edge_counts[tuple(edge)] += 1
            for net in entry["nets"]:
                if net["report"]["dynamic_collision_count"] > 0:
                    net_stage_counts[net["net_id"]] += 1
                for component_conflict in net["component_conflicts"]:
                    # Canonical ordering so (a, b) and (b, a) share one key.
                    pair = _canonical_component_pair(
                        net["net_id"],
                        component_conflict["self_component_index"],
                        component_conflict["other_net_id"],
                        component_conflict["other_component_index"],
                    )
                    seen_component_pairs.add(pair)
            for pair in seen_component_pairs:
                component_pair_counts[pair] += 1

        # Top-10 nets by number of traced stages with dynamic collisions.
        lines.extend(["", "Top nets by traced dynamic-collision stages:", ""])
        if net_stage_counts:
            for net_id, count in net_stage_counts.most_common(10):
                lines.append(f"- `{net_id}`: {count}")
        else:
            lines.append("- None")

        # Top-10 conflicting net pairs by conflict-edge frequency.
        lines.extend(["", "Top net pairs by frequency:", ""])
        if edge_counts:
            for (left, right), count in edge_counts.most_common(10):
                lines.append(f"- `{left}` <-> `{right}`: {count}")
        else:
            lines.append("- None")

        # Top-10 component pairs by number of trace entries they appear in.
        lines.extend(["", "Top component pairs by frequency:", ""])
        if component_pair_counts:
            for pair, count in component_pair_counts.most_common(10):
                (left_net, left_index), (right_net, right_index) = pair
                lines.append(f"- `{left_net}[{left_index}]` <-> `{right_net}[{right_index}]`: {count}")
        else:
            lines.append("- None")

        # Blank line separating scenarios.
        lines.append("")

    return "\n".join(lines)
|
||
|
|
|
||
|
|
|
||
|
|
def main() -> None:
    """CLI entry point: record conflict-trace JSON and Markdown artifacts.

    Parses ``--scenario`` (repeatable), ``--include-performance-only``, and
    ``--output-dir``, runs the selected scenarios, and writes
    ``conflict_trace.json`` and ``conflict_trace.md`` into the output directory
    (defaults to ``<repo>/docs``).
    """
    parser = argparse.ArgumentParser(description="Record conflict-trace artifacts for selected trace scenarios.")
    parser.add_argument(
        "--scenario",
        action="append",
        dest="scenarios",
        default=[],
        help="Optional trace scenario name to include. May be passed more than once.",
    )
    parser.add_argument(
        "--include-performance-only",
        action="store_true",
        help="Include performance-only trace scenarios when selecting from the standard registry.",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=None,
        help="Directory to write conflict_trace.json and conflict_trace.md into. Defaults to <repo>/docs.",
    )
    args = parser.parse_args()

    repo_root = Path(__file__).resolve().parents[1]
    output_dir = repo_root / "docs" if args.output_dir is None else args.output_dir.resolve()
    # parents=True: a user-supplied --output-dir may be nested under directories
    # that do not exist yet; plain mkdir(exist_ok=True) would raise FileNotFoundError.
    output_dir.mkdir(parents=True, exist_ok=True)

    # No --scenario flags means "use the default scenario" inside _build_payload.
    selected = tuple(args.scenarios) if args.scenarios else None
    payload = _build_payload(selected, include_performance_only=args.include_performance_only)
    json_path = output_dir / "conflict_trace.json"
    markdown_path = output_dir / "conflict_trace.md"

    json_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n")
    markdown_path.write_text(_render_markdown(payload) + "\n")

    # Report repo-relative paths when possible to keep the log output short.
    for written in (json_path, markdown_path):
        if written.is_relative_to(repo_root):
            print(f"Wrote {written.relative_to(repo_root)}")
        else:
            print(f"Wrote {written}")
|
||
|
|
|
||
|
|
|
||
|
|
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|