# inire/inire/tests/test_example_performance.py
# (file-viewer metadata — "63 lines, 2.8 KiB, Python" — converted to a
# comment so the module parses)

from __future__ import annotations
import os
import statistics
import pytest
from inire.tests.example_scenarios import SCENARIOS, ScenarioDefinition, ScenarioOutcome
# Opt-in gate: the (slow) runtime-regression tests run only when the
# INIRE_RUN_PERFORMANCE environment variable is exactly "1".
RUN_PERFORMANCE = os.environ.get("INIRE_RUN_PERFORMANCE") == "1"
# How many times each scenario is executed; the median timing is compared
# against the baseline to damp one-off scheduler noise.
PERFORMANCE_REPEATS = 3
# A scenario fails when its median runtime exceeds baseline * REGRESSION_FACTOR;
# the 1.5x headroom absorbs run-to-run and host-to-host variance.
REGRESSION_FACTOR = 1.5
# Baselines are measured from the current code path without plotting.
# Median wall-clock seconds per scenario, keyed by scenario name.
# NOTE(review): values appear to come from one reference machine — TODO confirm
# how they are refreshed when the routing code or hardware changes.
BASELINE_SECONDS = {
    "example_01_simple_route": 0.0035,
    "example_02_congestion_resolution": 0.2666,
    "example_03_locked_routes": 0.2304,
    "example_04_sbends_and_radii": 1.8734,
    "example_05_orientation_stress": 0.5630,
    "example_06_bend_collision_models": 5.2382,
    "example_07_large_scale_routing": 1.2081,
    "example_08_custom_bend_geometry": 0.9848,
    "example_09_unroutable_best_effort": 0.0056,
}
# Expected result counters per scenario, checked on every timed repeat so a
# speedup that silently breaks routing correctness still fails the test.
# example_09 is deliberately unroutable: it produces one best-effort result
# that is neither valid nor reaches its target.
EXPECTED_OUTCOMES = {
    "example_01_simple_route": {"total_results": 1, "valid_results": 1, "reached_targets": 1},
    "example_02_congestion_resolution": {"total_results": 3, "valid_results": 3, "reached_targets": 3},
    "example_03_locked_routes": {"total_results": 2, "valid_results": 2, "reached_targets": 2},
    "example_04_sbends_and_radii": {"total_results": 2, "valid_results": 2, "reached_targets": 2},
    "example_05_orientation_stress": {"total_results": 3, "valid_results": 3, "reached_targets": 3},
    "example_06_bend_collision_models": {"total_results": 3, "valid_results": 3, "reached_targets": 3},
    "example_07_large_scale_routing": {"total_results": 10, "valid_results": 10, "reached_targets": 10},
    "example_08_custom_bend_geometry": {"total_results": 2, "valid_results": 2, "reached_targets": 2},
    "example_09_unroutable_best_effort": {"total_results": 1, "valid_results": 0, "reached_targets": 0},
}
def _assert_expected_outcome(name: str, outcome: ScenarioOutcome) -> None:
    """Assert that *outcome* matches the recorded expectations for scenario *name*.

    Args:
        name: Scenario name; must be a key of ``EXPECTED_OUTCOMES``.
        outcome: The outcome produced by running the scenario.

    Raises:
        AssertionError: If any counter differs from the recorded expectation.
        KeyError: If *name* has no entry in ``EXPECTED_OUTCOMES``.
    """
    expected = EXPECTED_OUTCOMES[name]
    # Bare asserts would only show the raw comparison; include the scenario
    # name and field so a failure is diagnosable straight from the report.
    assert outcome.total_results == expected["total_results"], (
        f"{name}: total_results {outcome.total_results} != {expected['total_results']}"
    )
    assert outcome.valid_results == expected["valid_results"], (
        f"{name}: valid_results {outcome.valid_results} != {expected['valid_results']}"
    )
    assert outcome.reached_targets == expected["reached_targets"], (
        f"{name}: reached_targets {outcome.reached_targets} != {expected['reached_targets']}"
    )
@pytest.mark.performance
@pytest.mark.skipif(not RUN_PERFORMANCE, reason="set INIRE_RUN_PERFORMANCE=1 to run runtime regression checks")
@pytest.mark.parametrize("scenario", SCENARIOS, ids=[scenario.name for scenario in SCENARIOS])
def test_example_like_runtime_regression(scenario: ScenarioDefinition) -> None:
    """Run *scenario* several times and fail if its median runtime regresses.

    Each repeat must also reproduce the recorded outcome counters, so a
    speedup that breaks correctness still fails.
    """
    durations: list[float] = []
    for _attempt in range(PERFORMANCE_REPEATS):
        result = scenario.run()
        # Correctness gate on every timed repeat, not just once up front.
        _assert_expected_outcome(scenario.name, result)
        durations.append(result.duration_s)
    # Median damps one-off noise; the factor gives cross-host headroom.
    budget = BASELINE_SECONDS[scenario.name] * REGRESSION_FACTOR
    observed = statistics.median(durations)
    assert observed <= budget, (
        f"{scenario.name} median runtime {observed:.4f}s exceeded "
        f"{REGRESSION_FACTOR:.1f}x baseline {BASELINE_SECONDS[scenario.name]:.4f}s "
        f"from timings {durations!r}"
    )