# inire/inire/router/_router.py

from __future__ import annotations
import random
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING
from inire.geometry.collision import RoutingWorld
from inire.model import NetOrder, NetSpec, RoutingProblem, resolve_bend_geometry
from inire.results import (
ComponentConflictTrace,
ConflictTraceEntry,
FrontierPruneSample,
NetConflictTrace,
NetFrontierTrace,
RoutingOutcome,
RoutingReport,
RoutingResult,
)
from inire.router._astar_types import AStarContext, AStarMetrics, FrontierTraceCollector, SearchRunConfig
from inire.router._search import route_astar
from inire.router._seed_materialization import materialize_path_seed
from inire.router.cost import CostEvaluator
from inire.router.danger_map import DangerMap
from inire.router.refiner import PathRefiner
if TYPE_CHECKING:
from collections.abc import Callable, Sequence
from shapely.geometry import Polygon
from inire.geometry.collision import PathVerificationDetail
from inire.geometry.components import ComponentResult
@dataclass(slots=True)
class _RoutingState:
    """Mutable bookkeeping shared by a single route_all() run."""

    net_specs: dict[str, NetSpec]  # net_id -> spec for every net in the problem
    ordered_net_ids: list[str]  # current routing order (may be shuffled per iteration)
    results: dict[str, RoutingResult]  # latest result per net
    needs_self_collision_check: set[str]  # nets that previously self-collided
    start_time: float  # monotonic timestamp taken when routing began
    timeout_s: float  # wall-clock budget for the whole run
    initial_paths: dict[str, tuple[ComponentResult, ...]] | None  # warm-start seeds
    accumulated_expanded_nodes: list[tuple[int, int, int]]  # diagnostics: expanded states
    # Best-so-far iteration snapshot, ranked lexicographically:
    # more completed nets, then fewer conflict edges, then fewer collisions.
    best_results: dict[str, RoutingResult]
    best_completed_nets: int
    best_conflict_edges: int
    best_dynamic_collisions: int
    # Plateau detection: conflict-edge signature of the previous iteration.
    last_conflict_signature: tuple[tuple[str, str], ...]
    last_conflict_edge_count: int
    repeated_conflict_count: int
@dataclass(slots=True)
class _IterationReview:
    """Aggregate verdict from re-verifying every routed net after an iteration."""

    conflicting_nets: set[str]  # nets involved in at least one dynamic conflict
    conflict_edges: set[tuple[str, str]]  # sorted (net_a, net_b) conflict pairs
    completed_net_ids: set[str]  # nets whose outcome is "completed"
    total_dynamic_collisions: int  # dynamic collision count summed over nets
@dataclass(frozen=True, slots=True)
class _PairLocalTarget:
    """A pair of mutually conflicting nets targeted by pair-local search."""

    net_ids: tuple[str, str]  # the two conflicting net ids
class PathFinder:
    """Routes every net of a RoutingProblem via iterated, conflict-driven A*."""

    __slots__ = (
        "context",
        "metrics",
        "refiner",
        "accumulated_expanded_nodes",
        "conflict_trace",
        "frontier_trace",
    )
def __init__(
    self,
    context: AStarContext,
    metrics: AStarMetrics | None = None,
) -> None:
    """Bind *context* and fan one metrics object out to all collaborators.

    When *metrics* is None the context's own metrics object is reused;
    either way the same instance is installed on the context, its
    collision engine, and its danger map (when present) so all counters
    aggregate in one place.
    """
    self.context = context
    self.metrics = self.context.metrics if metrics is None else metrics
    self.context.metrics = self.metrics
    self.context.cost_evaluator.collision_engine.metrics = self.metrics
    if self.context.cost_evaluator.danger_map is not None:
        self.context.cost_evaluator.danger_map.metrics = self.metrics
    self.refiner = PathRefiner(self.context)
    # Diagnostics accumulated per route_all() run; reset there.
    self.accumulated_expanded_nodes: list[tuple[int, int, int]] = []
    self.conflict_trace: list[ConflictTraceEntry] = []
    self.frontier_trace: list[NetFrontierTrace] = []
def _install_path(self, net_id: str, path: Sequence[ComponentResult]) -> None:
    """Register *path*'s collision geometry with the engine under *net_id*."""
    geoms: list[Polygon] = []
    dilated: list[Polygon] = []
    owner_index: list[int] = []
    for idx, component in enumerate(path):
        polys = component.collision_geometry
        geoms.extend(polys)
        dilated.extend(component.dilated_collision_geometry)
        # Every polygon remembers which path component produced it.
        owner_index += [idx] * len(polys)
    self.context.cost_evaluator.collision_engine.add_path(
        net_id,
        geoms,
        dilated_geometry=dilated,
        component_indexes=owner_index,
    )
def _routing_order(
self,
net_specs: dict[str, NetSpec],
order: NetOrder,
) -> list[str]:
ordered_net_ids = list(net_specs.keys())
if order == "user":
return ordered_net_ids
ordered_net_ids.sort(
key=lambda net_id: abs(net_specs[net_id].target.x - net_specs[net_id].start.x)
+ abs(net_specs[net_id].target.y - net_specs[net_id].start.y),
reverse=(order == "longest"),
)
return ordered_net_ids
def _build_greedy_warm_start_paths(
    self,
    net_specs: dict[str, NetSpec],
    order: NetOrder,
) -> dict[str, tuple[ComponentResult, ...]]:
    """Route each net once, greedily, to seed the first congestion iteration.

    Nets are routed in the configured order with a tight node budget;
    every successful path is temporarily frozen into the engine as static
    obstacles so subsequent nets route around it.

    Fix: obstacle removal now runs in a ``finally`` block, so a failure
    inside the search no longer leaks temporary obstacles into the main
    engine, and static caches are cleared after removal so no stale
    geometry survives.
    """
    greedy_paths: dict[str, tuple[ComponentResult, ...]] = {}
    temp_obj_ids: list[int] = []
    engine = self.context.cost_evaluator.collision_engine
    # Cap the per-net search so warm start stays cheap even on hard nets.
    greedy_node_limit = min(self.context.options.search.node_limit, 2000)
    try:
        for net_id in self._routing_order(net_specs, order):
            net = net_specs[net_id]
            h_start = self.context.cost_evaluator.h_manhattan(
                net.start,
                net.target,
                min_bend_radius=self.context.min_bend_radius,
            )
            # Generous ceiling: 3x the admissible estimate, floored at 2000.
            max_cost_limit = max(h_start * 3.0, 2000.0)
            run_config = SearchRunConfig.from_options(
                self.context.options,
                skip_congestion=True,
                max_cost=max_cost_limit,
                self_collision_check=True,
                node_limit=greedy_node_limit,
            )
            path = route_astar(
                net.start,
                net.target,
                net.width,
                context=self.context,
                metrics=self.metrics,
                net_id=net_id,
                config=run_config,
            )
            if not path:
                continue
            self.metrics.total_warm_start_paths_built += 1
            greedy_paths[net_id] = tuple(path)
            # Freeze this path so later nets treat it as an obstacle.
            for result in path:
                for polygon in result.physical_geometry:
                    temp_obj_ids.append(engine.add_static_obstacle(polygon))
            self.context.clear_static_caches()
    finally:
        # Always remove the temporary obstacles — even if a search raised —
        # so the engine is left exactly as we found it.
        for obj_id in temp_obj_ids:
            engine.remove_static_obstacle(obj_id)
        self.context.clear_static_caches()
    return greedy_paths
def _prepare_state(self) -> _RoutingState:
    """Build the mutable _RoutingState for a fresh route_all() run.

    Materializes user-supplied initial paths (or greedy warm-start paths
    when enabled and none were given) and applies the configured net order.
    """
    problem = self.context.problem
    congestion = self.context.options.congestion
    initial_paths = self._materialize_problem_initial_paths()
    net_specs = {net.net_id: net for net in problem.nets}
    num_nets = len(net_specs)
    state = _RoutingState(
        net_specs=net_specs,
        ordered_net_ids=list(net_specs.keys()),
        results={},
        needs_self_collision_check=set(),
        start_time=time.monotonic(),
        # Budget scales with problem size; never below one minute.
        timeout_s=max(60.0, 10.0 * num_nets * congestion.max_iterations),
        initial_paths=initial_paths,
        accumulated_expanded_nodes=[],
        best_results={},
        # Sentinels so the first reviewed iteration always becomes "best".
        best_completed_nets=-1,
        best_conflict_edges=10**9,
        best_dynamic_collisions=10**9,
        last_conflict_signature=(),
        last_conflict_edge_count=0,
        repeated_conflict_count=0,
    )
    if state.initial_paths is None and congestion.warm_start_enabled:
        state.initial_paths = self._build_greedy_warm_start_paths(net_specs, congestion.net_order)
        self.context.clear_static_caches()
    if congestion.net_order != "user":
        state.ordered_net_ids = self._routing_order(net_specs, congestion.net_order)
    return state
def _materialize_problem_initial_paths(self) -> dict[str, tuple[ComponentResult, ...]] | None:
    """Convert user-supplied path seeds into component paths; None if absent.

    Raises ValueError when a seed names a net that is not in the problem.
    """
    problem = self.context.problem
    if not problem.initial_paths:
        return None
    search = self.context.options.search
    clearance = self.context.cost_evaluator.collision_engine.clearance
    specs = {net.net_id: net for net in problem.nets}
    materialized: dict[str, tuple[ComponentResult, ...]] = {}
    for net_id, seed in problem.initial_paths.items():
        spec = specs.get(net_id)
        if spec is None:
            raise ValueError(f"Initial path provided for unknown net: {net_id}")
        materialized[net_id] = materialize_path_seed(
            seed,
            start=spec.start,
            net_width=spec.width,
            search=search,
            clearance=clearance,
        )
    return materialized
def _replace_installed_paths(self, state: _RoutingState, results: dict[str, RoutingResult]) -> None:
    """Swap every installed path in the engine for the paths in *results*."""
    engine = self.context.cost_evaluator.collision_engine
    # Remove everything first so partially overlapping paths never coexist.
    for net_id in state.ordered_net_ids:
        engine.remove_path(net_id)
    for net_id in state.ordered_net_ids:
        candidate = results.get(net_id)
        if candidate and candidate.path:
            self._install_path(net_id, candidate.path)
def _update_best_iteration(self, state: _RoutingState, review: _IterationReview) -> bool:
completed_nets = len(review.completed_net_ids)
conflict_edges = len(review.conflict_edges)
dynamic_collisions = review.total_dynamic_collisions
is_better = (
completed_nets > state.best_completed_nets
or (
completed_nets == state.best_completed_nets
and (
conflict_edges < state.best_conflict_edges
or (
conflict_edges == state.best_conflict_edges
and dynamic_collisions < state.best_dynamic_collisions
)
)
)
)
if not is_better:
return False
state.best_results = dict(state.results)
state.best_completed_nets = completed_nets
state.best_conflict_edges = conflict_edges
state.best_dynamic_collisions = dynamic_collisions
return True
def _restore_best_iteration(self, state: _RoutingState) -> None:
if not state.best_results:
return
state.results = dict(state.best_results)
self._replace_installed_paths(state, state.results)
def _capture_conflict_trace_entry(
    self,
    state: _RoutingState,
    *,
    stage: str,
    iteration: int | None,
    results: dict[str, RoutingResult],
    details_by_net: dict[str, PathVerificationDetail],
    review: _IterationReview,
) -> None:
    """Append one ConflictTraceEntry snapshot when conflict tracing is on.

    Records, for every net in routing order, its outcome plus the nets and
    component pairs it conflicts with. *stage* labels where in the
    pipeline the snapshot was taken ("iteration", "final", "restored_best").
    """
    if not self.context.options.diagnostics.capture_conflict_trace:
        return
    nets = []
    for net_id in state.ordered_net_ids:
        result = results.get(net_id)
        if result is None:
            # The net never produced a result; record an empty placeholder.
            result = RoutingResult(net_id=net_id, path=(), reached_target=False)
        detail = details_by_net.get(net_id)
        component_conflicts = ()
        conflicting_net_ids = ()
        if detail is not None:
            conflicting_net_ids = detail.conflicting_net_ids
            component_conflicts = tuple(
                ComponentConflictTrace(
                    other_net_id=other_net_id,
                    self_component_index=self_component_index,
                    other_component_index=other_component_index,
                )
                for self_component_index, other_net_id, other_component_index in detail.component_conflicts
            )
        nets.append(
            NetConflictTrace(
                net_id=net_id,
                outcome=result.outcome,
                reached_target=result.reached_target,
                report=result.report,
                conflicting_net_ids=tuple(conflicting_net_ids),
                component_conflicts=component_conflicts,
            )
        )
    self.conflict_trace.append(
        ConflictTraceEntry(
            stage=stage,  # type: ignore[arg-type]
            iteration=iteration,
            completed_net_ids=tuple(sorted(review.completed_net_ids)),
            conflict_edges=tuple(sorted(review.conflict_edges)),
            nets=tuple(nets),
        )
    )
def _build_frontier_hotspot_bounds(
    self,
    state: _RoutingState,
    net_id: str,
    details_by_net: dict[str, PathVerificationDetail],
) -> tuple[tuple[float, float, float, float], ...]:
    """Return bounding boxes around each conflict region involving *net_id*.

    For every recorded component conflict the true polygon intersection is
    buffered by a clearance-derived margin; when the dilated polygons do
    not actually overlap, the intersection of the components' dilated
    bounding boxes (padded by the same margin) is used as a fallback.
    Duplicate boxes are dropped.
    """
    result = state.results.get(net_id)
    detail = details_by_net.get(net_id)
    if result is None or detail is None or not result.path:
        return ()
    hotspot_bounds: list[tuple[float, float, float, float]] = []
    seen: set[tuple[float, float, float, float]] = set()
    margin = max(5.0, self.context.cost_evaluator.collision_engine.clearance * 2.0)
    for self_component_index, other_net_id, other_component_index in detail.component_conflicts:
        other_result = state.results.get(other_net_id)
        if other_result is None or not other_result.path:
            continue
        # Indices may be stale if a net was rerouted after the detail was
        # captured; skip out-of-range pairs rather than misattribute them.
        if self_component_index >= len(result.path) or other_component_index >= len(other_result.path):
            continue
        left_component = result.path[self_component_index]
        right_component = other_result.path[other_component_index]
        overlap_found = False
        for left_poly in left_component.dilated_physical_geometry:
            for right_poly in right_component.dilated_physical_geometry:
                # Mere boundary contact is not a conflict region.
                if not left_poly.intersects(right_poly) or left_poly.touches(right_poly):
                    continue
                overlap = left_poly.intersection(right_poly)
                if overlap.is_empty:
                    continue
                buffered = overlap.buffer(margin, join_style="mitre").bounds
                if buffered not in seen:
                    seen.add(buffered)
                    hotspot_bounds.append(buffered)
                overlap_found = True
        if overlap_found:
            continue
        # Fallback: pad the overlap of the dilated bounding boxes.
        left_bounds = left_component.total_dilated_bounds
        right_bounds = right_component.total_dilated_bounds
        if (
            left_bounds[0] < right_bounds[2]
            and left_bounds[2] > right_bounds[0]
            and left_bounds[1] < right_bounds[3]
            and left_bounds[3] > right_bounds[1]
        ):
            buffered = (
                max(left_bounds[0], right_bounds[0]) - margin,
                max(left_bounds[1], right_bounds[1]) - margin,
                min(left_bounds[2], right_bounds[2]) + margin,
                min(left_bounds[3], right_bounds[3]) + margin,
            )
            if buffered not in seen:
                seen.add(buffered)
                hotspot_bounds.append(buffered)
    return tuple(hotspot_bounds)
def _analyze_results(
    self,
    ordered_net_ids: Sequence[str],
    results: dict[str, RoutingResult],
    *,
    capture_component_conflicts: bool,
    count_iteration_metrics: bool,
) -> tuple[dict[str, RoutingResult], dict[str, PathVerificationDetail], _IterationReview]:
    """Re-verify every completed path and summarize cross-net conflicts.

    Returns the results with fresh verification reports, the raw per-net
    verification details, and an _IterationReview aggregate. Nets without
    a complete path are passed through untouched.
    """
    if count_iteration_metrics:
        self.metrics.total_iteration_reverify_calls += 1
    conflict_edges: set[tuple[str, str]] = set()
    conflicting_nets: set[str] = set()
    completed_net_ids: set[str] = set()
    total_dynamic_collisions = 0
    analyzed_results = dict(results)
    details_by_net: dict[str, PathVerificationDetail] = {}
    for net_id in ordered_net_ids:
        result = results.get(net_id)
        if not result or not result.path or not result.reached_target:
            continue
        if count_iteration_metrics:
            self.metrics.total_iteration_reverified_nets += 1
        detail = self.context.cost_evaluator.collision_engine.verify_path_details(
            net_id,
            result.path,
            capture_component_conflicts=capture_component_conflicts,
        )
        details_by_net[net_id] = detail
        analyzed_results[net_id] = RoutingResult(
            net_id=net_id,
            path=result.path,
            reached_target=result.reached_target,
            report=detail.report,
        )
        total_dynamic_collisions += detail.report.dynamic_collision_count
        if analyzed_results[net_id].outcome == "completed":
            completed_net_ids.add(net_id)
        if not detail.conflicting_net_ids:
            continue
        conflicting_nets.add(net_id)
        for other_net_id in detail.conflicting_net_ids:
            conflicting_nets.add(other_net_id)
            if other_net_id == net_id:
                # A self-conflict does not form a cross-net edge.
                continue
            conflict_edges.add(tuple(sorted((net_id, other_net_id))))
    if count_iteration_metrics:
        self.metrics.total_iteration_conflicting_nets += len(conflicting_nets)
        self.metrics.total_iteration_conflict_edges += len(conflict_edges)
    return (
        analyzed_results,
        details_by_net,
        _IterationReview(
            conflicting_nets=conflicting_nets,
            conflict_edges=conflict_edges,
            completed_net_ids=completed_net_ids,
            total_dynamic_collisions=total_dynamic_collisions,
        ),
    )
def _capture_frontier_trace(
    self,
    state: _RoutingState,
    final_results: dict[str, RoutingResult],
) -> None:
    """Re-run A* for conflicted nets to record why frontier nodes were pruned.

    For every net that reached its target but still collides, its conflict
    regions become "hotspots"; the search is replayed with a
    FrontierTraceCollector sampling prune events inside those bounds.
    Scratch metrics are swapped in so the diagnostic searches do not
    pollute the real counters, and are always restored on exit.
    """
    if not self.context.options.diagnostics.capture_frontier_trace:
        return
    self.frontier_trace = []
    state.results = dict(final_results)
    state.results, details_by_net, _ = self._analyze_results(
        state.ordered_net_ids,
        state.results,
        capture_component_conflicts=True,
        count_iteration_metrics=False,
    )
    # Remember the real metrics objects so they can be restored afterwards.
    original_metrics = self.metrics
    original_context_metrics = self.context.metrics
    original_engine_metrics = self.context.cost_evaluator.collision_engine.metrics
    original_danger_metrics = None
    if self.context.cost_evaluator.danger_map is not None:
        original_danger_metrics = self.context.cost_evaluator.danger_map.metrics
    try:
        for net_id in state.ordered_net_ids:
            result = state.results.get(net_id)
            detail = details_by_net.get(net_id)
            if result is None or detail is None or not result.reached_target:
                continue
            # Only nets that still collide are worth tracing.
            if detail.report.dynamic_collision_count == 0 or not detail.component_conflicts:
                continue
            hotspot_bounds = self._build_frontier_hotspot_bounds(state, net_id, details_by_net)
            if not hotspot_bounds:
                continue
            # Route into scratch metrics so diagnostics do not skew totals.
            scratch_metrics = AStarMetrics()
            self.context.metrics = scratch_metrics
            self.context.cost_evaluator.collision_engine.metrics = scratch_metrics
            if self.context.cost_evaluator.danger_map is not None:
                self.context.cost_evaluator.danger_map.metrics = scratch_metrics
            guidance_seed = result.as_seed().segments if result.path else None
            guidance_bonus = 0.0
            if guidance_seed:
                guidance_bonus = max(10.0, self.context.options.objective.bend_penalty * 0.25)
            collector = FrontierTraceCollector(hotspot_bounds=hotspot_bounds)
            run_config = SearchRunConfig.from_options(
                self.context.options,
                return_partial=True,
                store_expanded=False,
                guidance_seed=guidance_seed,
                guidance_bonus=guidance_bonus,
                frontier_trace=collector,
                self_collision_check=(net_id in state.needs_self_collision_check),
                node_limit=self.context.options.search.node_limit,
            )
            # Remove the net's own path so the replay sees the same world
            # the original search did; reinstall it afterwards.
            self.context.cost_evaluator.collision_engine.remove_path(net_id)
            try:
                route_astar(
                    state.net_specs[net_id].start,
                    state.net_specs[net_id].target,
                    state.net_specs[net_id].width,
                    context=self.context,
                    metrics=scratch_metrics,
                    net_id=net_id,
                    config=run_config,
                )
            finally:
                if result.path:
                    self._install_path(net_id, result.path)
            self.frontier_trace.append(
                NetFrontierTrace(
                    net_id=net_id,
                    hotspot_bounds=hotspot_bounds,
                    pruned_closed_set=collector.pruned_closed_set,
                    pruned_hard_collision=collector.pruned_hard_collision,
                    pruned_self_collision=collector.pruned_self_collision,
                    pruned_cost=collector.pruned_cost,
                    samples=tuple(
                        FrontierPruneSample(
                            reason=reason,  # type: ignore[arg-type]
                            move_type=move_type,
                            hotspot_index=hotspot_index,
                            parent_state=parent_state,
                            end_state=end_state,
                        )
                        for reason, move_type, hotspot_index, parent_state, end_state in collector.samples
                    ),
                )
            )
    finally:
        # Restore the real metrics wiring no matter what happened above.
        self.metrics = original_metrics
        self.context.metrics = original_context_metrics
        self.context.cost_evaluator.collision_engine.metrics = original_engine_metrics
        if self.context.cost_evaluator.danger_map is not None:
            self.context.cost_evaluator.danger_map.metrics = original_danger_metrics
def _whole_set_is_better(
self,
candidate_results: dict[str, RoutingResult],
candidate_review: _IterationReview,
incumbent_results: dict[str, RoutingResult],
incumbent_review: _IterationReview,
) -> bool:
candidate_completed = len(candidate_review.completed_net_ids)
incumbent_completed = len(incumbent_review.completed_net_ids)
if candidate_completed != incumbent_completed:
return candidate_completed > incumbent_completed
candidate_edges = len(candidate_review.conflict_edges)
incumbent_edges = len(incumbent_review.conflict_edges)
if candidate_edges != incumbent_edges:
return candidate_edges < incumbent_edges
if candidate_review.total_dynamic_collisions != incumbent_review.total_dynamic_collisions:
return candidate_review.total_dynamic_collisions < incumbent_review.total_dynamic_collisions
candidate_length = sum(
result.report.total_length
for result in candidate_results.values()
if result.reached_target
)
incumbent_length = sum(
result.report.total_length
for result in incumbent_results.values()
if result.reached_target
)
if abs(candidate_length - incumbent_length) > 1e-6:
return candidate_length < incumbent_length
return False
def _collect_pair_local_targets(
    self,
    state: _RoutingState,
    results: dict[str, RoutingResult],
    review: _IterationReview,
) -> list[_PairLocalTarget]:
    """Select disjoint conflicting net pairs eligible for pair-local search.

    NOTE(review): if any net participates in more than one conflict edge
    the whole selection is abandoned (returns []), i.e. pair-local search
    only runs when conflict edges form a matching — confirm this is
    intentional rather than skipping just the overlapping edge.
    """
    if not review.conflict_edges:
        return []
    order_index = {net_id: idx for idx, net_id in enumerate(state.ordered_net_ids)}
    seen_net_ids: set[str] = set()
    targets: list[_PairLocalTarget] = []
    for left_net_id, right_net_id in sorted(review.conflict_edges):
        if left_net_id in seen_net_ids or right_net_id in seen_net_ids:
            return []
        left_result = results.get(left_net_id)
        right_result = results.get(right_net_id)
        # Both nets must already have a complete route to be re-paired.
        if (
            left_result is None
            or right_result is None
            or not left_result.reached_target
            or not right_result.reached_target
        ):
            continue
        seen_net_ids.update((left_net_id, right_net_id))
        targets.append(_PairLocalTarget(net_ids=(left_net_id, right_net_id)))
    # Process pairs in the order their earliest member is routed.
    targets.sort(key=lambda target: min(order_index[target.net_ids[0]], order_index[target.net_ids[1]]))
    return targets
def _build_pair_local_context(
    self,
    state: _RoutingState,
    incumbent_results: dict[str, RoutingResult],
    pair_net_ids: tuple[str, str],
) -> AStarContext:
    """Build an isolated AStarContext in which only *pair_net_ids* move.

    Every other net's current path is frozen into a fresh RoutingWorld as
    static obstacles, so the pair can be rerouted without touching the
    main engine's state.
    """
    problem = self.context.problem
    objective = self.context.options.objective
    # NOTE(review): reaches into the engine's private _static_obstacles
    # store; a public accessor would be preferable.
    static_obstacles = tuple(self.context.cost_evaluator.collision_engine._static_obstacles.geometries.values())
    engine = RoutingWorld(
        clearance=self.context.cost_evaluator.collision_engine.clearance,
        safety_zone_radius=self.context.cost_evaluator.collision_engine.safety_zone_radius,
    )
    for obstacle in static_obstacles:
        engine.add_static_obstacle(obstacle)
    # Freeze all non-pair paths as additional obstacles.
    for net_id in state.ordered_net_ids:
        if net_id in pair_net_ids:
            continue
        result = incumbent_results.get(net_id)
        if result is None or not result.path:
            continue
        for component in result.path:
            for polygon in component.physical_geometry:
                engine.add_static_obstacle(polygon)
    # The danger map reflects only the true static obstacles, not the
    # frozen paths.
    danger_map = DangerMap(bounds=problem.bounds)
    danger_map.precompute(list(static_obstacles))
    evaluator = CostEvaluator(
        engine,
        danger_map,
        unit_length_cost=objective.unit_length_cost,
        greedy_h_weight=self.context.cost_evaluator.greedy_h_weight,
        bend_penalty=objective.bend_penalty,
        sbend_penalty=objective.sbend_penalty,
        danger_weight=objective.danger_weight,
    )
    return AStarContext(
        evaluator,
        RoutingProblem(
            bounds=problem.bounds,
            nets=tuple(state.net_specs[net_id] for net_id in state.ordered_net_ids),
            static_obstacles=static_obstacles,
            clearance=problem.clearance,
            safety_zone_radius=problem.safety_zone_radius,
        ),
        self.context.options,
        metrics=AStarMetrics(),
    )
def _run_pair_local_attempt(
    self,
    state: _RoutingState,
    incumbent_results: dict[str, RoutingResult],
    pair_order: tuple[str, str],
) -> tuple[dict[str, RoutingResult], int] | None:
    """Reroute the two nets of *pair_order* in an isolated context.

    Returns the updated result map plus the number of nodes expanded, or
    None as soon as either net fails to produce a complete, valid path.
    The first net's fresh path is frozen as obstacles before the second
    is routed, so the outcome depends on the pair's order.
    """
    local_context = self._build_pair_local_context(state, incumbent_results, pair_order)
    local_results = dict(incumbent_results)
    for net_id in pair_order:
        net = state.net_specs[net_id]
        # Bias the search toward the net's current route, if it has one.
        guidance_result = incumbent_results.get(net_id)
        guidance_seed = None
        guidance_bonus = 0.0
        if guidance_result and guidance_result.reached_target and guidance_result.path:
            guidance_seed = guidance_result.as_seed().segments
            guidance_bonus = max(10.0, self.context.options.objective.bend_penalty * 0.25)
        run_config = SearchRunConfig.from_options(
            self.context.options,
            return_partial=False,
            skip_congestion=True,
            self_collision_check=(net_id in state.needs_self_collision_check),
            guidance_seed=guidance_seed,
            guidance_bonus=guidance_bonus,
            node_limit=self.context.options.search.node_limit,
        )
        path = route_astar(
            net.start,
            net.target,
            net.width,
            context=local_context,
            metrics=local_context.metrics,
            net_id=net_id,
            config=run_config,
        )
        if not path or path[-1].end_port != net.target:
            return None
        report = local_context.cost_evaluator.collision_engine.verify_path_report(net_id, path)
        if not report.is_valid:
            return None
        local_results[net_id] = RoutingResult(
            net_id=net_id,
            path=tuple(path),
            reached_target=True,
            report=report,
        )
        # Freeze this net's new path before routing its partner.
        for component in path:
            for polygon in component.physical_geometry:
                local_context.cost_evaluator.collision_engine.add_static_obstacle(polygon)
        local_context.clear_static_caches()
    return local_results, local_context.metrics.total_nodes_expanded
def _run_pair_local_search(self, state: _RoutingState) -> None:
    """Try to untangle conflicting net pairs by rerouting each pair jointly.

    At most two pairs are attempted; each pair is tried in both orders,
    and a candidate is accepted only if the whole result set strictly
    improves (_whole_set_is_better). The engine is always left holding
    whichever result set was kept.
    """
    state.results, _details_by_net, review = self._analyze_results(
        state.ordered_net_ids,
        state.results,
        capture_component_conflicts=True,
        count_iteration_metrics=False,
    )
    targets = self._collect_pair_local_targets(state, state.results, review)
    if not targets:
        return
    for target in targets[:2]:
        self.metrics.total_pair_local_search_pairs_considered += 1
        incumbent_results = dict(state.results)
        incumbent_review = review
        accepted = False
        for pair_order in (target.net_ids, target.net_ids[::-1]):
            self.metrics.total_pair_local_search_attempts += 1
            candidate = self._run_pair_local_attempt(state, incumbent_results, pair_order)
            if candidate is None:
                continue
            candidate_results, nodes_expanded = candidate
            self.metrics.total_pair_local_search_nodes_expanded += nodes_expanded
            # Install the candidate so it can be verified in the main engine.
            self._replace_installed_paths(state, candidate_results)
            candidate_results, _candidate_details_by_net, candidate_review = self._analyze_results(
                state.ordered_net_ids,
                candidate_results,
                capture_component_conflicts=True,
                count_iteration_metrics=False,
            )
            if self._whole_set_is_better(
                candidate_results,
                candidate_review,
                incumbent_results,
                incumbent_review,
            ):
                self.metrics.total_pair_local_search_accepts += 1
                state.results = candidate_results
                review = candidate_review
                accepted = True
                break
            # Candidate rejected: put the incumbent paths back.
            self._replace_installed_paths(state, incumbent_results)
        if not accepted:
            state.results = incumbent_results
            self._replace_installed_paths(state, incumbent_results)
def _route_net_once(
    self,
    state: _RoutingState,
    iteration: int,
    net_id: str,
) -> RoutingResult:
    """Route a single net for this iteration and install its path.

    On iteration 0 a warm-start path is used when available; otherwise an
    A* search runs, optionally guided by the net's previous route. The
    net's old path is removed from the engine first, and the new one is
    installed (and verified when it reached the target).
    """
    search = self.context.options.search
    congestion = self.context.options.congestion
    diagnostics = self.context.options.diagnostics
    net = state.net_specs[net_id]
    self.metrics.total_nets_routed += 1
    self.context.cost_evaluator.collision_engine.remove_path(net_id)
    if iteration == 0 and state.initial_paths and net_id in state.initial_paths:
        self.metrics.total_warm_start_paths_used += 1
        path: Sequence[ComponentResult] | None = state.initial_paths[net_id]
    else:
        coll_model, _ = resolve_bend_geometry(search)
        skip_congestion = False
        guidance_seed = None
        guidance_bonus = 0.0
        if congestion.use_tiered_strategy and iteration == 0:
            # First tier: fast pass with no congestion cost and a cheaper
            # bend-collision model.
            skip_congestion = True
            if coll_model == "arc":
                coll_model = "clipped_bbox"
        elif iteration > 0:
            # Later iterations: bias the search toward the previous route.
            guidance_result = state.results.get(net_id)
            if guidance_result and guidance_result.reached_target and guidance_result.path:
                guidance_seed = guidance_result.as_seed().segments
                guidance_bonus = max(10.0, self.context.options.objective.bend_penalty * 0.25)
        run_config = SearchRunConfig.from_options(
            self.context.options,
            bend_collision_type=coll_model,
            return_partial=True,
            store_expanded=diagnostics.capture_expanded,
            guidance_seed=guidance_seed,
            guidance_bonus=guidance_bonus,
            skip_congestion=skip_congestion,
            self_collision_check=(net_id in state.needs_self_collision_check),
            node_limit=search.node_limit,
        )
        path = route_astar(
            net.start,
            net.target,
            net.width,
            context=self.context,
            metrics=self.metrics,
            net_id=net_id,
            config=run_config,
        )
        if diagnostics.capture_expanded and self.metrics.last_expanded_nodes:
            state.accumulated_expanded_nodes.extend(self.metrics.last_expanded_nodes)
    if not path:
        return RoutingResult(net_id=net_id, path=(), reached_target=False)
    reached_target = path[-1].end_port == net.target
    if reached_target:
        self.metrics.total_nets_reached_target += 1
    report = None
    self._install_path(net_id, path)
    if reached_target:
        report = self.context.cost_evaluator.collision_engine.verify_path_report(net_id, path)
        if report.self_collision_count > 0:
            # Future searches for this net must guard against self-collision.
            state.needs_self_collision_check.add(net_id)
    return RoutingResult(
        net_id=net_id,
        path=tuple(path),
        reached_target=reached_target,
        report=RoutingReport() if report is None else report,
    )
def _run_iteration(
    self,
    state: _RoutingState,
    iteration: int,
    reroute_net_ids: set[str],
    iteration_callback: Callable[[int, dict[str, RoutingResult]], None] | None,
) -> _IterationReview | None:
    """Route every net in *reroute_net_ids* once; returns None on timeout.

    Optionally reshuffles the net order (seeded per iteration for
    reproducibility) before routing, then re-verifies the whole result
    set and fires *iteration_callback*.
    """
    congestion = self.context.options.congestion
    self.metrics.total_route_iterations += 1
    self.metrics.reset_per_route()
    # Keep iteration 0 unshuffled when warm-start paths fix the order.
    if congestion.shuffle_nets and (iteration > 0 or state.initial_paths is None):
        iteration_seed = (congestion.seed + iteration) if congestion.seed is not None else None
        random.Random(iteration_seed).shuffle(state.ordered_net_ids)
    routed_net_ids = [net_id for net_id in state.ordered_net_ids if net_id in reroute_net_ids]
    self.metrics.total_nets_carried_forward += len(state.ordered_net_ids) - len(routed_net_ids)
    for net_id in routed_net_ids:
        if time.monotonic() - state.start_time > state.timeout_s:
            self.metrics.total_timeout_events += 1
            return None
        result = self._route_net_once(state, iteration, net_id)
        state.results[net_id] = result
    review = self._reverify_iteration_results(state)
    if iteration_callback:
        iteration_callback(iteration, state.results)
    return review
def _reverify_iteration_results(self, state: _RoutingState) -> _IterationReview:
    """Re-verify all routed paths and record a conflict-trace entry."""
    capture = self.context.options.diagnostics.capture_conflict_trace
    analyzed, details_by_net, review = self._analyze_results(
        state.ordered_net_ids,
        state.results,
        capture_component_conflicts=capture,
        count_iteration_metrics=True,
    )
    state.results = analyzed
    self._capture_conflict_trace_entry(
        state,
        stage="iteration",
        iteration=self.metrics.total_route_iterations - 1,
        results=analyzed,
        details_by_net=details_by_net,
        review=review,
    )
    return review
def _run_iterations(
    self,
    state: _RoutingState,
    iteration_callback: Callable[[int, dict[str, RoutingResult]], None] | None,
) -> bool:
    """Run congestion-driven rerouting iterations; True means timed out.

    Stops early when every net routed cleanly or when conflicts plateau.
    The congestion penalty is escalated between iterations.
    """
    congestion = self.context.options.congestion
    for iteration in range(congestion.max_iterations):
        review = self._run_iteration(
            state,
            iteration,
            set(state.ordered_net_ids),
            iteration_callback,
        )
        if review is None:
            return True  # timed out mid-iteration
        self._update_best_iteration(state, review)
        if not any(
            result.outcome in {"colliding", "partial", "unroutable"}
            for result in state.results.values()
        ):
            return False  # everything routed cleanly
        current_signature = tuple(sorted(review.conflict_edges))
        # NOTE(review): a plateau is declared when the edge set repeats OR
        # merely has the same size as last iteration — confirm the
        # size-only comparison is intentional.
        repeated = (
            bool(current_signature)
            and (
                current_signature == state.last_conflict_signature
                or len(current_signature) == state.last_conflict_edge_count
            )
        )
        state.repeated_conflict_count = state.repeated_conflict_count + 1 if repeated else 0
        state.last_conflict_signature = current_signature
        state.last_conflict_edge_count = len(current_signature)
        if state.repeated_conflict_count >= 2:
            return False  # conflicts are not improving; stop iterating
        self.context.congestion_penalty *= congestion.multiplier
    return False
def _refine_results(self, state: _RoutingState) -> None:
    """Run the path refiner over every cleanly routed net."""
    if not self.context.options.refinement.enabled or not state.results:
        return
    engine = self.context.cost_evaluator.collision_engine
    bad_outcomes = {"colliding", "partial", "unroutable"}
    for net_id in state.ordered_net_ids:
        current = state.results.get(net_id)
        if not current or not current.path or current.outcome in bad_outcomes:
            continue
        spec = state.net_specs[net_id]
        self.metrics.total_refine_path_calls += 1
        engine.remove_path(net_id)
        improved = self.refiner.refine_path(net_id, spec.start, spec.width, current.path)
        self._install_path(net_id, improved)
        # Defer full verification until _verify_results() so we do not
        # verify the same refined path twice in one route_all() call.
        state.results[net_id] = RoutingResult(
            net_id=net_id,
            path=tuple(improved),
            reached_target=current.reached_target,
            report=current.report,
        )
def _verify_results(self, state: _RoutingState) -> dict[str, RoutingResult]:
    """Produce the final, fully verified result for every net in the problem.

    Nets without a path get an empty placeholder result. When conflict
    tracing is enabled, a "final" trace entry is also recorded.
    """
    final_results: dict[str, RoutingResult] = {}
    details_by_net: dict[str, PathVerificationDetail] = {}
    for net in self.context.problem.nets:
        result = state.results.get(net.net_id)
        if not result or not result.path:
            final_results[net.net_id] = RoutingResult(net_id=net.net_id, path=(), reached_target=False)
            continue
        detail = self.context.cost_evaluator.collision_engine.verify_path_details(
            net.net_id,
            result.path,
            capture_component_conflicts=self.context.options.diagnostics.capture_conflict_trace,
        )
        details_by_net[net.net_id] = detail
        final_results[net.net_id] = RoutingResult(
            net_id=net.net_id,
            path=result.path,
            reached_target=result.reached_target,
            report=detail.report,
        )
    if self.context.options.diagnostics.capture_conflict_trace:
        _, _, review = self._analyze_results(
            state.ordered_net_ids,
            final_results,
            capture_component_conflicts=True,
            count_iteration_metrics=False,
        )
        self._capture_conflict_trace_entry(
            state,
            stage="final",
            iteration=None,
            results=final_results,
            details_by_net=details_by_net,
            review=review,
        )
    return final_results
def route_all(
    self,
    *,
    iteration_callback: Callable[[int, dict[str, RoutingResult]], None] | None = None,
) -> dict[str, RoutingResult]:
    """Route every net of the problem; returns final results keyed by net id.

    Pipeline: reset diagnostics/metrics -> congestion rerouting iterations
    -> restore the best-seen iteration -> (unless timed out) pair-local
    search and refinement -> final verification plus optional trace
    capture. *iteration_callback* is invoked after each iteration with the
    current result map.
    """
    self.context.congestion_penalty = self.context.options.congestion.base_penalty
    self.accumulated_expanded_nodes = []
    self.conflict_trace = []
    self.frontier_trace = []
    self.metrics.reset_totals()
    self.metrics.reset_per_route()
    state = self._prepare_state()
    timed_out = self._run_iterations(state, iteration_callback)
    self.accumulated_expanded_nodes = list(state.accumulated_expanded_nodes)
    self._restore_best_iteration(state)
    if self.context.options.diagnostics.capture_conflict_trace:
        state.results, details_by_net, review = self._analyze_results(
            state.ordered_net_ids,
            state.results,
            capture_component_conflicts=True,
            count_iteration_metrics=False,
        )
        self._capture_conflict_trace_entry(
            state,
            stage="restored_best",
            iteration=None,
            results=state.results,
            details_by_net=details_by_net,
            review=review,
        )
    if timed_out:
        # Skip improvement passes on timeout; just verify what we have.
        final_results = self._verify_results(state)
        self._capture_frontier_trace(state, final_results)
        return final_results
    self._run_pair_local_search(state)
    self._refine_results(state)
    final_results = self._verify_results(state)
    self._capture_frontier_trace(state, final_results)
    return final_results