//! rrt/crates/rrt-cli/src/main.rs — rrt-cli entry point.
//!
//! (Listing metadata from the original capture: 5383 lines, 202 KiB, Rust.)
#![recursion_limit = "256"]
use std::collections::{BTreeMap, BTreeSet};
use std::env;
use std::fs;
use std::io::Read;
use std::path::{Path, PathBuf};
use rrt_fixtures::{
FixtureValidationReport, JsonDiffEntry, compare_expected_state_fragment, diff_json_values,
load_fixture_document, normalize_runtime_state, validate_fixture_document,
};
use rrt_model::{
BINARY_SUMMARY_PATH, CANONICAL_EXE_PATH, CONTROL_LOOP_ATLAS_PATH, FUNCTION_MAP_PATH,
REQUIRED_ATLAS_HEADINGS, REQUIRED_EXPORTS,
finance::{FinanceOutcome, FinanceSnapshot},
load_binary_summary, load_function_map,
};
use rrt_runtime::{
CAMPAIGN_SCENARIO_COUNT, CampaignExeInspectionReport, OBSERVED_CAMPAIGN_SCENARIO_NAMES,
OVERLAY_IMPORT_DOCUMENT_FORMAT_VERSION, Pk4ExtractionReport, Pk4InspectionReport,
RuntimeOverlayImportDocument, RuntimeOverlayImportDocumentSource, RuntimeSaveSliceDocument,
RuntimeSaveSliceDocumentSource, RuntimeSnapshotDocument, RuntimeSnapshotSource, RuntimeSummary,
SAVE_SLICE_DOCUMENT_FORMAT_VERSION, SNAPSHOT_FORMAT_VERSION, SmpClassicPackedProfileBlock,
SmpInspectionReport, SmpLoadedSaveSlice, SmpRt3105PackedProfileBlock, SmpSaveLoadSummary,
WinInspectionReport, execute_step_command, extract_pk4_entry_file, inspect_campaign_exe_file,
inspect_pk4_file, inspect_smp_file, inspect_win_file, load_runtime_snapshot_document,
load_runtime_state_import, load_save_slice_file, project_save_slice_to_runtime_state_import,
save_runtime_overlay_import_document, save_runtime_save_slice_document,
save_runtime_snapshot_document, validate_runtime_snapshot_document,
};
use serde::Serialize;
use serde_json::Value;
use sha2::{Digest, Sha256};
// Byte offset of the special-conditions flag block inside a save payload
// (reverse-engineered offsets; presumably relative to the start of the
// profile/save slice probed by the scan commands below — TODO confirm against
// the rrt-runtime crate).
const SPECIAL_CONDITIONS_OFFSET: usize = 0x0d64;
// Number of special-condition slots, including the trailing hidden sentinel.
const SPECIAL_CONDITION_COUNT: usize = 36;
// Index of the "Hidden sentinel" slot — the last of the 36 labels below.
const SPECIAL_CONDITION_HIDDEN_SENTINEL_SLOT: usize = 35;
// The aligned runtime-rule band is probed as 50 dwords (4 bytes each)
// starting at SPECIAL_CONDITIONS_OFFSET.
const SMP_ALIGNED_RUNTIME_RULE_DWORD_COUNT: usize = 50;
// Of those 50 lanes, 49 correspond to known editor rules.
const SMP_ALIGNED_RUNTIME_RULE_KNOWN_EDITOR_RULE_COUNT: usize = 49;
// One past the end of the rule band: 0x0d64 + 50 * 4 = 0x0e2c.
const SMP_ALIGNED_RUNTIME_RULE_END_OFFSET: usize =
    SPECIAL_CONDITIONS_OFFSET + SMP_ALIGNED_RUNTIME_RULE_DWORD_COUNT * 4;
// Post-special-conditions scalar window: [0x0df4, 0x0f30).
// NOTE(review): 0x0df4 lies inside the rule band that ends at 0x0e2c, so the
// two probed views overlap — looks intentional (two interpretations of the
// same bytes), but confirm against the runtime probing code.
const POST_SPECIAL_CONDITIONS_SCALAR_OFFSET: usize = 0x0df4;
const POST_SPECIAL_CONDITIONS_SCALAR_END_OFFSET: usize = 0x0f30;
// The "tail" region starts exactly where the aligned rule band ends (0x0e2c).
const POST_SPECIAL_CONDITIONS_SCALAR_TAIL_OFFSET: usize = SMP_ALIGNED_RUNTIME_RULE_END_OFFSET;
// Human-readable label for each of the 36 special-condition slots, indexed by
// slot number. The array length is tied to SPECIAL_CONDITION_COUNT at compile
// time; slot 35 (SPECIAL_CONDITION_HIDDEN_SENTINEL_SLOT) is the hidden
// sentinel. Labels appear to mirror the game's scenario-editor rule names —
// inferred from the wording; the exact on-disk bit layout lives elsewhere.
const SPECIAL_CONDITION_LABELS: [&str; SPECIAL_CONDITION_COUNT] = [
    "Disable Stock Buying and Selling",
    "Disable Margin Buying/Short Selling Stock",
    "Disable Company Issue/Buy Back Stock",
    "Disable Issuing/Repaying Bonds",
    "Disable Declaring Bankruptcy",
    "Disable Changing the Dividend Rate",
    "Disable Replacing a Locomotive",
    "Disable Retiring a Train",
    "Disable Changing Cargo Consist On Train",
    "Disable Buying a Train",
    "Disable All Track Building",
    "Disable Unconnected Track Building",
    "Limited Track Building Amount",
    "Disable Building Stations",
    "Disable Building Hotel/Restaurant/Tavern/Post Office",
    "Disable Building Customs House",
    "Disable Building Industry Buildings",
    "Disable Buying Existing Industry Buildings",
    "Disable Being Fired As Chairman",
    "Disable Resigning as Chairman",
    "Disable Chairmanship Takeover",
    "Disable Starting Any Companies",
    "Disable Starting Multiple Companies",
    "Disable Merging Companies",
    "Disable Bulldozing",
    "Show Visited Track",
    "Show Visited Stations",
    "Use Slow Date",
    "Completely Disable Money-Related Things",
    "Use Bio-Accelerator Cars",
    "Disable Cargo Economy",
    "Use Wartime Cargos",
    "Disable Train Crashes",
    "Disable Train Crashes AND Breakdowns",
    "AI Ignore Territories At Startup",
    "Hidden sentinel",
];
/// Every subcommand the CLI understands, decoded from `argv` by
/// `parse_command` and dispatched by `real_main`. Each variant carries
/// exactly the arguments its `run_*` handler needs; the command-line grammar
/// itself (subcommand spellings, arities, guards) lives in `parse_command`.
enum Command {
    // Baseline repository validation (also the default with no arguments).
    Validate {
        repo_root: PathBuf,
    },
    // Finance snapshot evaluation and diffing.
    FinanceEval {
        snapshot_path: PathBuf,
    },
    FinanceDiff {
        left_path: PathBuf,
        right_path: PathBuf,
    },
    // Runtime fixture validation / summarization / state export.
    RuntimeValidateFixture {
        fixture_path: PathBuf,
    },
    RuntimeSummarizeFixture {
        fixture_path: PathBuf,
    },
    RuntimeExportFixtureState {
        fixture_path: PathBuf,
        output_path: PathBuf,
    },
    // Runtime state snapshots: diff, summarize, import.
    RuntimeDiffState {
        left_path: PathBuf,
        right_path: PathBuf,
    },
    RuntimeSummarizeState {
        snapshot_path: PathBuf,
    },
    RuntimeImportState {
        input_path: PathBuf,
        output_path: PathBuf,
    },
    // SMP save-file inspection and save-slice handling.
    RuntimeInspectSmp {
        smp_path: PathBuf,
    },
    RuntimeSummarizeSaveLoad {
        smp_path: PathBuf,
    },
    RuntimeLoadSaveSlice {
        smp_path: PathBuf,
    },
    RuntimeImportSaveState {
        smp_path: PathBuf,
        output_path: PathBuf,
    },
    RuntimeExportSaveSlice {
        smp_path: PathBuf,
        output_path: PathBuf,
    },
    RuntimeExportOverlayImport {
        snapshot_path: PathBuf,
        save_slice_path: PathBuf,
        output_path: PathBuf,
    },
    // Game-asset containers and executables (.pk4 / .win / campaign exe).
    RuntimeInspectPk4 {
        pk4_path: PathBuf,
    },
    RuntimeInspectWin {
        win_path: PathBuf,
    },
    RuntimeExtractPk4Entry {
        pk4_path: PathBuf,
        entry_name: String,
        output_path: PathBuf,
    },
    RuntimeInspectCampaignExe {
        exe_path: PathBuf,
    },
    // N-way comparisons across save files (parse_command requires >= 2 paths).
    RuntimeCompareClassicProfile {
        smp_paths: Vec<PathBuf>,
    },
    RuntimeCompareRt3105Profile {
        smp_paths: Vec<PathBuf>,
    },
    RuntimeCompareCandidateTable {
        smp_paths: Vec<PathBuf>,
    },
    RuntimeCompareRecipeBookLines {
        smp_paths: Vec<PathBuf>,
    },
    RuntimeCompareSetupPayloadCore {
        smp_paths: Vec<PathBuf>,
    },
    RuntimeCompareSetupLaunchPayload {
        smp_paths: Vec<PathBuf>,
    },
    RuntimeComparePostSpecialConditionsScalars {
        smp_paths: Vec<PathBuf>,
    },
    // Recursive directory scans rooted at `root_path`.
    RuntimeScanCandidateTableHeaders {
        root_path: PathBuf,
    },
    RuntimeScanSpecialConditions {
        root_path: PathBuf,
    },
    RuntimeScanAlignedRuntimeRuleBand {
        root_path: PathBuf,
    },
    RuntimeScanPostSpecialConditionsScalars {
        root_path: PathBuf,
    },
    RuntimeScanPostSpecialConditionsTail {
        root_path: PathBuf,
    },
    RuntimeScanRecipeBookLines {
        root_path: PathBuf,
    },
    // Export a decoded profile block from a save file to JSON.
    RuntimeExportProfileBlock {
        smp_path: PathBuf,
        output_path: PathBuf,
    },
}
/// One field-level difference between two finance outcomes, keyed by the
/// JSON path at which the values diverge.
#[derive(Debug, Serialize)]
struct FinanceDiffEntry {
    path: String,
    left: Value,
    right: Value,
}

/// Aggregate result of `finance diff`: `matches` is true iff `differences`
/// is empty.
#[derive(Debug, Serialize)]
struct FinanceDiffReport {
    matches: bool,
    difference_count: usize,
    differences: Vec<FinanceDiffEntry>,
}

/// Output of `runtime summarize-fixture`: the runtime summary after replaying
/// all fixture commands, plus how it compares against the fixture's expected
/// summary and (optional) expected state fragment.
#[derive(Debug, Serialize)]
struct RuntimeFixtureSummaryReport {
    fixture_id: String,
    command_count: usize,
    final_summary: RuntimeSummary,
    expected_summary_matches: bool,
    expected_summary_mismatches: Vec<String>,
    expected_state_fragment_matches: bool,
    expected_state_fragment_mismatches: Vec<String>,
}

/// Output of `runtime summarize-state` for a single snapshot.
#[derive(Debug, Serialize)]
struct RuntimeStateSummaryReport {
    snapshot_id: String,
    summary: RuntimeSummary,
}

/// Output of `runtime diff-state` comparing two snapshots.
#[derive(Debug, Serialize)]
struct RuntimeStateDiffReport {
    matches: bool,
    difference_count: usize,
    differences: Vec<JsonDiffEntry>,
}
// The structs below are thin JSON envelopes: each pairs the inspected file's
// path (and, where relevant, an output path) with a report type produced by
// the rrt-runtime crate, so the printed JSON is self-describing.

/// Envelope for `runtime inspect-smp`.
#[derive(Debug, Serialize)]
struct RuntimeSmpInspectionOutput {
    path: String,
    inspection: SmpInspectionReport,
}

/// Envelope for `runtime summarize-save-load`.
#[derive(Debug, Serialize)]
struct RuntimeSaveLoadSummaryOutput {
    path: String,
    summary: SmpSaveLoadSummary,
}

/// Envelope for `runtime load-save-slice`.
#[derive(Debug, Serialize)]
struct RuntimeLoadedSaveSliceOutput {
    path: String,
    save_slice: SmpLoadedSaveSlice,
}

/// Envelope for `runtime export-save-slice`.
#[derive(Debug, Serialize)]
struct RuntimeSaveSliceExportOutput {
    path: String,
    output_path: String,
    save_slice_id: String,
}

/// Envelope for `runtime export-overlay-import`.
#[derive(Debug, Serialize)]
struct RuntimeOverlayImportExportOutput {
    output_path: String,
    import_id: String,
    base_snapshot_path: String,
    save_slice_path: String,
}

/// Envelope for `runtime inspect-pk4`.
#[derive(Debug, Serialize)]
struct RuntimePk4InspectionOutput {
    path: String,
    inspection: Pk4InspectionReport,
}

/// Envelope for `runtime inspect-win`.
#[derive(Debug, Serialize)]
struct RuntimeWinInspectionOutput {
    path: String,
    inspection: WinInspectionReport,
}

/// Envelope for `runtime extract-pk4-entry`.
#[derive(Debug, Serialize)]
struct RuntimePk4ExtractionOutput {
    path: String,
    output_path: String,
    extraction: Pk4ExtractionReport,
}

/// Envelope for `runtime inspect-campaign-exe`.
#[derive(Debug, Serialize)]
struct RuntimeCampaignExeInspectionOutput {
    path: String,
    inspection: CampaignExeInspectionReport,
}
/// Per-file sample used by `runtime compare-classic-profile`: where the
/// packed profile block was found in the save and its decoded contents.
#[derive(Debug, Clone, Serialize)]
struct RuntimeClassicProfileSample {
    path: String,
    profile_family: String,
    // Offsets of specific progress markers within the file — the hex names
    // (32dc/3714/3715) presumably mirror known file offsets; confirm against
    // the runtime probing code.
    progress_32dc_offset: usize,
    progress_3714_offset: usize,
    progress_3715_offset: usize,
    packed_profile_offset: usize,
    packed_profile_len: usize,
    packed_profile_block: SmpClassicPackedProfileBlock,
}

/// One (file path, value) pair inside a cross-file difference.
#[derive(Debug, Clone, Serialize)]
struct RuntimeClassicProfileDifferenceValue {
    path: String,
    value: Value,
}

/// A field that differs across the compared files, with each file's value.
/// Reused by several comparison reports beyond the classic profile one.
#[derive(Debug, Clone, Serialize)]
struct RuntimeClassicProfileDifference {
    field_path: String,
    values: Vec<RuntimeClassicProfileDifferenceValue>,
}

/// Output of `runtime compare-classic-profile` over two or more saves.
#[derive(Debug, Serialize)]
struct RuntimeClassicProfileComparisonReport {
    file_count: usize,
    matches: bool,
    // Set when every sampled file reports the same profile family.
    common_profile_family: Option<String>,
    samples: Vec<RuntimeClassicProfileSample>,
    difference_count: usize,
    differences: Vec<RuntimeClassicProfileDifference>,
}

/// Per-file sample for `runtime compare-105-profile` (RT 3 v1.05-era packed
/// profile layout — inferred from the type name).
#[derive(Debug, Clone, Serialize)]
struct RuntimeRt3105ProfileSample {
    path: String,
    profile_family: String,
    packed_profile_offset: usize,
    packed_profile_len: usize,
    packed_profile_block: SmpRt3105PackedProfileBlock,
}

/// Output of `runtime compare-105-profile`.
#[derive(Debug, Serialize)]
struct RuntimeRt3105ProfileComparisonReport {
    file_count: usize,
    matches: bool,
    common_profile_family: Option<String>,
    samples: Vec<RuntimeRt3105ProfileSample>,
    difference_count: usize,
    differences: Vec<RuntimeClassicProfileDifference>,
}

/// Per-file sample for `runtime compare-candidate-table`: header words,
/// entry/trailer counts, and per-name availability values.
#[derive(Debug, Serialize)]
struct RuntimeCandidateTableSample {
    path: String,
    profile_family: String,
    source_kind: String,
    semantic_family: String,
    header_word_0_hex: String,
    header_word_1_hex: String,
    header_word_2_hex: String,
    observed_entry_count: usize,
    zero_trailer_entry_count: usize,
    nonzero_trailer_entry_count: usize,
    zero_trailer_entry_names: Vec<String>,
    footer_progress_word_0_hex: String,
    footer_progress_word_1_hex: String,
    availability_by_name: BTreeMap<String, u32>,
}

/// Output of `runtime compare-candidate-table`.
#[derive(Debug, Serialize)]
struct RuntimeCandidateTableComparisonReport {
    file_count: usize,
    matches: bool,
    common_profile_family: Option<String>,
    common_semantic_family: Option<String>,
    samples: Vec<RuntimeCandidateTableSample>,
    difference_count: usize,
    differences: Vec<RuntimeClassicProfileDifference>,
}
/// Per-file sample for `runtime compare-recipe-book-lines`: the recipe-book
/// layout (counts and strides) plus per-book / per-line field values, keyed
/// by index or path strings for stable JSON ordering (BTreeMap).
#[derive(Debug, Clone, Serialize)]
struct RuntimeRecipeBookLineSample {
    path: String,
    profile_family: String,
    source_kind: String,
    book_count: usize,
    book_stride_hex: String,
    line_count: usize,
    line_stride_hex: String,
    book_head_kind_by_index: BTreeMap<String, String>,
    book_line_area_kind_by_index: BTreeMap<String, String>,
    max_annual_production_word_hex_by_book: BTreeMap<String, String>,
    line_kind_by_path: BTreeMap<String, String>,
    mode_word_hex_by_path: BTreeMap<String, String>,
    annual_amount_word_hex_by_path: BTreeMap<String, String>,
    supplied_cargo_token_word_hex_by_path: BTreeMap<String, String>,
    demanded_cargo_token_word_hex_by_path: BTreeMap<String, String>,
}

/// Output of `runtime compare-recipe-book-lines`. Tracks structural
/// differences and content differences separately.
#[derive(Debug, Serialize)]
struct RuntimeRecipeBookLineComparisonReport {
    file_count: usize,
    matches: bool,
    content_matches: bool,
    common_profile_family: Option<String>,
    samples: Vec<RuntimeRecipeBookLineSample>,
    difference_count: usize,
    differences: Vec<RuntimeClassicProfileDifference>,
    content_difference_count: usize,
    content_differences: Vec<RuntimeClassicProfileDifference>,
}

/// Internal (not serialized) per-file sample collected by
/// `runtime scan-recipe-book-lines` before aggregation into family summaries.
#[derive(Debug, Clone)]
struct RuntimeRecipeBookLineScanSample {
    path: String,
    profile_family: String,
    source_kind: String,
    nonzero_mode_paths: BTreeMap<String, String>,
    nonzero_supplied_token_paths: BTreeMap<String, String>,
    nonzero_demanded_token_paths: BTreeMap<String, String>,
}

/// Aggregated statistics for one recipe-book line field across scanned files.
#[derive(Debug, Serialize)]
struct RuntimeRecipeBookLineFieldSummary {
    line_path: String,
    file_count_present: usize,
    distinct_value_count: usize,
    sample_value_hexes: Vec<String>,
}

/// Per-profile-family aggregation produced by `runtime scan-recipe-book-lines`.
#[derive(Debug, Serialize)]
struct RuntimeRecipeBookLineFamilySummary {
    profile_family: String,
    source_kinds: Vec<String>,
    file_count: usize,
    files_with_any_nonzero_modes_count: usize,
    files_with_any_nonzero_supplied_tokens_count: usize,
    files_with_any_nonzero_demanded_tokens_count: usize,
    // "stable" paths are nonzero in every file of the family (inferred from
    // the naming convention shared with the other scan reports).
    stable_nonzero_mode_paths: Vec<String>,
    stable_nonzero_supplied_token_paths: Vec<String>,
    stable_nonzero_demanded_token_paths: Vec<String>,
    mode_summaries: Vec<RuntimeRecipeBookLineFieldSummary>,
    supplied_token_summaries: Vec<RuntimeRecipeBookLineFieldSummary>,
    demanded_token_summaries: Vec<RuntimeRecipeBookLineFieldSummary>,
    sample_paths: Vec<String>,
}

/// Top-level output of `runtime scan-recipe-book-lines`.
#[derive(Debug, Serialize)]
struct RuntimeRecipeBookLineScanReport {
    root_path: String,
    file_count: usize,
    files_with_probe_count: usize,
    files_with_any_nonzero_modes_count: usize,
    files_with_any_nonzero_supplied_tokens_count: usize,
    files_with_any_nonzero_demanded_tokens_count: usize,
    skipped_file_count: usize,
    family_summaries: Vec<RuntimeRecipeBookLineFamilySummary>,
}
/// Per-file sample for `runtime compare-setup-payload-core`: raw scalar
/// fields pulled from fixed offsets of a setup payload. Each field carries
/// both the numeric value and a hex rendering for readable JSON diffs; the
/// offsets in the field names (0x14, 0x20, ...) are the reverse-engineered
/// source offsets.
#[derive(Debug, Serialize)]
struct RuntimeSetupPayloadCoreSample {
    path: String,
    file_extension: String,
    inferred_profile_family: String,
    payload_word_0x14: u16,
    payload_word_0x14_hex: String,
    payload_byte_0x20: u8,
    payload_byte_0x20_hex: String,
    marker_bytes_0x2c9_0x2d0_hex: String,
    row_category_byte_0x31a: u8,
    row_category_byte_0x31a_hex: String,
    row_visibility_byte_0x31b: u8,
    row_visibility_byte_0x31b_hex: String,
    row_visibility_byte_0x31c: u8,
    row_visibility_byte_0x31c_hex: String,
    row_count_word_0x3ae: u16,
    row_count_word_0x3ae_hex: String,
    payload_word_0x3b2: u16,
    payload_word_0x3b2_hex: String,
    payload_word_0x3ba: u16,
    payload_word_0x3ba_hex: String,
    candidate_header_word_0_hex: Option<String>,
    candidate_header_word_1_hex: Option<String>,
}

/// Output of `runtime compare-setup-payload-core`.
#[derive(Debug, Serialize)]
struct RuntimeSetupPayloadCoreComparisonReport {
    file_count: usize,
    matches: bool,
    samples: Vec<RuntimeSetupPayloadCoreSample>,
    difference_count: usize,
    differences: Vec<RuntimeClassicProfileDifference>,
}

/// Per-file sample for `runtime compare-setup-launch-payload`: launch flags,
/// selector bytes, and campaign-progress decoding for a setup payload.
#[derive(Debug, Serialize)]
struct RuntimeSetupLaunchPayloadSample {
    path: String,
    file_extension: String,
    inferred_profile_family: String,
    launch_flag_byte_0x22: u8,
    launch_flag_byte_0x22_hex: String,
    campaign_progress_in_known_range: bool,
    campaign_progress_scenario_name: Option<String>,
    campaign_progress_page_index: Option<usize>,
    launch_selector_byte_0x33: u8,
    launch_selector_byte_0x33_hex: String,
    launch_token_block_0x23_0x32_hex: String,
    campaign_selector_values: BTreeMap<String, u8>,
    nonzero_campaign_selector_values: BTreeMap<String, u8>,
}

/// Output of `runtime compare-setup-launch-payload`.
#[derive(Debug, Serialize)]
struct RuntimeSetupLaunchPayloadComparisonReport {
    file_count: usize,
    matches: bool,
    samples: Vec<RuntimeSetupLaunchPayloadSample>,
    difference_count: usize,
    differences: Vec<RuntimeClassicProfileDifference>,
}

/// Per-file sample for `runtime compare-post-special-conditions-scalars`:
/// the nonzero scalars found in the post-special-conditions window, keyed by
/// hex-rendered relative offset.
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsScalarSample {
    path: String,
    profile_family: String,
    source_kind: String,
    nonzero_relative_offset_hexes: Vec<String>,
    values_by_relative_offset_hex: BTreeMap<String, String>,
}

/// Output of `runtime compare-post-special-conditions-scalars`.
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsScalarComparisonReport {
    file_count: usize,
    matches: bool,
    common_profile_family: Option<String>,
    samples: Vec<RuntimePostSpecialConditionsScalarSample>,
    difference_count: usize,
    differences: Vec<RuntimeClassicProfileDifference>,
}
/// One cluster in the `runtime scan-candidate-table-headers` report: all
/// scanned files sharing the same (header word 0, header word 1) pair, with
/// summary statistics over their zero-trailer entries.
#[derive(Debug, Serialize)]
struct RuntimeCandidateTableHeaderCluster {
    header_word_0_hex: String,
    header_word_1_hex: String,
    file_count: usize,
    profile_families: Vec<String>,
    source_kinds: Vec<String>,
    zero_trailer_count_min: usize,
    zero_trailer_count_max: usize,
    zero_trailer_count_values: Vec<usize>,
    distinct_zero_name_set_count: usize,
    sample_paths: Vec<String>,
}

/// Top-level output of `runtime scan-candidate-table-headers`.
#[derive(Debug, Serialize)]
struct RuntimeCandidateTableHeaderScanReport {
    root_path: String,
    file_count: usize,
    cluster_count: usize,
    skipped_file_count: usize,
    clusters: Vec<RuntimeCandidateTableHeaderCluster>,
}

/// Internal (not serialized) per-file sample collected before clustering by
/// header words.
#[derive(Debug, Clone)]
struct RuntimeCandidateTableHeaderScanSample {
    path: String,
    profile_family: String,
    source_kind: String,
    header_word_0_hex: String,
    header_word_1_hex: String,
    zero_trailer_entry_count: usize,
    zero_trailer_entry_names: Vec<String>,
}

/// Per-file sample for `runtime scan-special-conditions`: which
/// special-condition slots are enabled and visible, with their labels
/// (from SPECIAL_CONDITION_LABELS).
#[derive(Debug, Clone, Serialize)]
struct RuntimeSpecialConditionsScanSample {
    path: String,
    profile_family: String,
    source_kind: String,
    enabled_visible_count: usize,
    enabled_visible_labels: Vec<String>,
}

/// Aggregation of one special-condition slot across all scanned files.
#[derive(Debug, Serialize)]
struct RuntimeSpecialConditionsSlotSummary {
    slot_index: u8,
    label: String,
    file_count_enabled: usize,
    sample_paths: Vec<String>,
}

/// Top-level output of `runtime scan-special-conditions`.
#[derive(Debug, Serialize)]
struct RuntimeSpecialConditionsScanReport {
    root_path: String,
    file_count: usize,
    files_with_probe_count: usize,
    files_with_any_enabled_count: usize,
    skipped_file_count: usize,
    enabled_slot_summaries: Vec<RuntimeSpecialConditionsSlotSummary>,
    sample_files_with_any_enabled: Vec<RuntimeSpecialConditionsScanSample>,
}
// The scalar-scan and tail-scan types below are structurally identical pairs
// (sample / offset summary / family summary / report) for the two probed
// regions: the post-special-conditions scalar window and the tail region
// that starts at SMP_ALIGNED_RUNTIME_RULE_END_OFFSET.

/// Internal (not serialized) per-file sample for
/// `runtime scan-post-special-conditions-scalars`.
#[derive(Debug, Clone)]
struct RuntimePostSpecialConditionsScalarScanSample {
    path: String,
    profile_family: String,
    source_kind: String,
    nonzero_relative_offsets: Vec<usize>,
    values_by_relative_offset_hex: BTreeMap<String, String>,
}

/// Aggregation of one relative offset across scanned files (scalar window).
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsScalarOffsetSummary {
    relative_offset_hex: String,
    file_count_present: usize,
    distinct_value_count: usize,
    sample_value_hexes: Vec<String>,
}

/// Per-profile-family aggregation for the scalar-window scan. "stable"
/// offsets are nonzero in every file; "union" offsets are nonzero in any.
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsScalarFamilySummary {
    profile_family: String,
    source_kinds: Vec<String>,
    file_count: usize,
    files_with_any_nonzero_count: usize,
    distinct_nonzero_offset_set_count: usize,
    stable_nonzero_relative_offset_hexes: Vec<String>,
    union_nonzero_relative_offset_hexes: Vec<String>,
    offset_summaries: Vec<RuntimePostSpecialConditionsScalarOffsetSummary>,
    sample_paths: Vec<String>,
}

/// Top-level output of `runtime scan-post-special-conditions-scalars`.
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsScalarScanReport {
    root_path: String,
    file_count: usize,
    files_with_probe_count: usize,
    files_with_any_nonzero_count: usize,
    skipped_file_count: usize,
    family_summaries: Vec<RuntimePostSpecialConditionsScalarFamilySummary>,
}

/// Internal (not serialized) per-file sample for
/// `runtime scan-post-special-conditions-tail`.
#[derive(Debug, Clone)]
struct RuntimePostSpecialConditionsTailScanSample {
    path: String,
    profile_family: String,
    source_kind: String,
    nonzero_relative_offsets: Vec<usize>,
    values_by_relative_offset_hex: BTreeMap<String, String>,
}

/// Aggregation of one relative offset across scanned files (tail region).
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsTailOffsetSummary {
    relative_offset_hex: String,
    file_count_present: usize,
    distinct_value_count: usize,
    sample_value_hexes: Vec<String>,
}

/// Per-profile-family aggregation for the tail-region scan.
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsTailFamilySummary {
    profile_family: String,
    source_kinds: Vec<String>,
    file_count: usize,
    files_with_any_nonzero_count: usize,
    distinct_nonzero_offset_set_count: usize,
    stable_nonzero_relative_offset_hexes: Vec<String>,
    union_nonzero_relative_offset_hexes: Vec<String>,
    offset_summaries: Vec<RuntimePostSpecialConditionsTailOffsetSummary>,
    sample_paths: Vec<String>,
}

/// Top-level output of `runtime scan-post-special-conditions-tail`.
#[derive(Debug, Serialize)]
struct RuntimePostSpecialConditionsTailScanReport {
    root_path: String,
    file_count: usize,
    files_with_probe_count: usize,
    files_with_any_nonzero_count: usize,
    skipped_file_count: usize,
    family_summaries: Vec<RuntimePostSpecialConditionsTailFamilySummary>,
}
/// Internal (not serialized) per-file sample for
/// `runtime scan-aligned-runtime-rule-band`: which of the 50 dword lanes
/// (see SMP_ALIGNED_RUNTIME_RULE_DWORD_COUNT) are nonzero in this file.
#[derive(Debug, Clone)]
struct RuntimeAlignedRuntimeRuleBandScanSample {
    path: String,
    profile_family: String,
    source_kind: String,
    nonzero_band_indices: Vec<usize>,
    values_by_band_index: BTreeMap<usize, String>,
}

/// Aggregation of one band lane across scanned files, including its lane
/// classification and (if it maps to a known editor rule) the rule label.
#[derive(Debug, Serialize)]
struct RuntimeAlignedRuntimeRuleBandOffsetSummary {
    band_index: usize,
    relative_offset_hex: String,
    lane_kind: String,
    known_label: Option<String>,
    file_count_present: usize,
    distinct_value_count: usize,
    sample_value_hexes: Vec<String>,
}

/// Per-profile-family aggregation for the rule-band scan.
#[derive(Debug, Serialize)]
struct RuntimeAlignedRuntimeRuleBandFamilySummary {
    profile_family: String,
    source_kinds: Vec<String>,
    file_count: usize,
    files_with_any_nonzero_count: usize,
    distinct_nonzero_index_set_count: usize,
    stable_nonzero_band_indices: Vec<usize>,
    union_nonzero_band_indices: Vec<usize>,
    offset_summaries: Vec<RuntimeAlignedRuntimeRuleBandOffsetSummary>,
    sample_paths: Vec<String>,
}

/// Top-level output of `runtime scan-aligned-runtime-rule-band`.
#[derive(Debug, Serialize)]
struct RuntimeAlignedRuntimeRuleBandScanReport {
    root_path: String,
    file_count: usize,
    files_with_probe_count: usize,
    files_with_any_nonzero_count: usize,
    skipped_file_count: usize,
    family_summaries: Vec<RuntimeAlignedRuntimeRuleBandFamilySummary>,
}

/// JSON document written by `runtime export-profile-block`: the decoded
/// profile payload plus provenance.
#[derive(Debug, Serialize)]
struct RuntimeProfileBlockExportDocument {
    source_path: String,
    profile_kind: String,
    profile_family: String,
    payload: Value,
}

/// Stdout report printed after a successful profile-block export.
#[derive(Debug, Serialize)]
struct RuntimeProfileBlockExportReport {
    output_path: String,
    profile_kind: String,
    profile_family: String,
}
/// Process entry point: run `real_main` and convert any error into a
/// one-line `error: ...` message on stderr plus exit code 1.
fn main() {
    match real_main() {
        Ok(()) => {}
        Err(err) => {
            eprintln!("error: {err}");
            std::process::exit(1);
        }
    }
}
/// Parses argv into a `Command` and dispatches to the matching `run_*`
/// handler. Exhaustive over `Command`, so adding a variant forces a new arm
/// here. Errors bubble up to `main`, which prints them and exits nonzero.
fn real_main() -> Result<(), Box<dyn std::error::Error>> {
    match parse_command()? {
        // Baseline validation runs four independent checks in sequence; the
        // first failure aborts the rest.
        Command::Validate { repo_root } => {
            validate_required_files(&repo_root)?;
            validate_binary_summary(&repo_root)?;
            validate_function_map(&repo_root)?;
            validate_control_loop_atlas(&repo_root)?;
            println!("baseline validation passed");
        }
        Command::FinanceEval { snapshot_path } => {
            run_finance_eval(&snapshot_path)?;
        }
        Command::FinanceDiff {
            left_path,
            right_path,
        } => {
            run_finance_diff(&left_path, &right_path)?;
        }
        Command::RuntimeValidateFixture { fixture_path } => {
            run_runtime_validate_fixture(&fixture_path)?;
        }
        Command::RuntimeSummarizeFixture { fixture_path } => {
            run_runtime_summarize_fixture(&fixture_path)?;
        }
        Command::RuntimeExportFixtureState {
            fixture_path,
            output_path,
        } => {
            run_runtime_export_fixture_state(&fixture_path, &output_path)?;
        }
        Command::RuntimeDiffState {
            left_path,
            right_path,
        } => {
            run_runtime_diff_state(&left_path, &right_path)?;
        }
        Command::RuntimeSummarizeState { snapshot_path } => {
            run_runtime_summarize_state(&snapshot_path)?;
        }
        Command::RuntimeImportState {
            input_path,
            output_path,
        } => {
            run_runtime_import_state(&input_path, &output_path)?;
        }
        Command::RuntimeInspectSmp { smp_path } => {
            run_runtime_inspect_smp(&smp_path)?;
        }
        Command::RuntimeSummarizeSaveLoad { smp_path } => {
            run_runtime_summarize_save_load(&smp_path)?;
        }
        Command::RuntimeLoadSaveSlice { smp_path } => {
            run_runtime_load_save_slice(&smp_path)?;
        }
        Command::RuntimeImportSaveState {
            smp_path,
            output_path,
        } => {
            run_runtime_import_save_state(&smp_path, &output_path)?;
        }
        Command::RuntimeExportSaveSlice {
            smp_path,
            output_path,
        } => {
            run_runtime_export_save_slice(&smp_path, &output_path)?;
        }
        Command::RuntimeExportOverlayImport {
            snapshot_path,
            save_slice_path,
            output_path,
        } => {
            run_runtime_export_overlay_import(&snapshot_path, &save_slice_path, &output_path)?;
        }
        Command::RuntimeInspectPk4 { pk4_path } => {
            run_runtime_inspect_pk4(&pk4_path)?;
        }
        Command::RuntimeInspectWin { win_path } => {
            run_runtime_inspect_win(&win_path)?;
        }
        Command::RuntimeExtractPk4Entry {
            pk4_path,
            entry_name,
            output_path,
        } => {
            run_runtime_extract_pk4_entry(&pk4_path, &entry_name, &output_path)?;
        }
        Command::RuntimeInspectCampaignExe { exe_path } => {
            run_runtime_inspect_campaign_exe(&exe_path)?;
        }
        // N-way save comparisons (parse_command guarantees >= 2 paths).
        Command::RuntimeCompareClassicProfile { smp_paths } => {
            run_runtime_compare_classic_profile(&smp_paths)?;
        }
        Command::RuntimeCompareRt3105Profile { smp_paths } => {
            run_runtime_compare_rt3_105_profile(&smp_paths)?;
        }
        Command::RuntimeCompareCandidateTable { smp_paths } => {
            run_runtime_compare_candidate_table(&smp_paths)?;
        }
        Command::RuntimeCompareRecipeBookLines { smp_paths } => {
            run_runtime_compare_recipe_book_lines(&smp_paths)?;
        }
        Command::RuntimeCompareSetupPayloadCore { smp_paths } => {
            run_runtime_compare_setup_payload_core(&smp_paths)?;
        }
        Command::RuntimeCompareSetupLaunchPayload { smp_paths } => {
            run_runtime_compare_setup_launch_payload(&smp_paths)?;
        }
        Command::RuntimeComparePostSpecialConditionsScalars { smp_paths } => {
            run_runtime_compare_post_special_conditions_scalars(&smp_paths)?;
        }
        // Recursive directory scans.
        Command::RuntimeScanCandidateTableHeaders { root_path } => {
            run_runtime_scan_candidate_table_headers(&root_path)?;
        }
        Command::RuntimeScanSpecialConditions { root_path } => {
            run_runtime_scan_special_conditions(&root_path)?;
        }
        Command::RuntimeScanAlignedRuntimeRuleBand { root_path } => {
            run_runtime_scan_aligned_runtime_rule_band(&root_path)?;
        }
        Command::RuntimeScanPostSpecialConditionsScalars { root_path } => {
            run_runtime_scan_post_special_conditions_scalars(&root_path)?;
        }
        Command::RuntimeScanPostSpecialConditionsTail { root_path } => {
            run_runtime_scan_post_special_conditions_tail(&root_path)?;
        }
        Command::RuntimeScanRecipeBookLines { root_path } => {
            run_runtime_scan_recipe_book_lines(&root_path)?;
        }
        Command::RuntimeExportProfileBlock {
            smp_path,
            output_path,
        } => {
            run_runtime_export_profile_block(&smp_path, &output_path)?;
        }
    }
    Ok(())
}
/// Parses `env::args()` (program name skipped) into a `Command` by matching
/// on the argument slice. With no arguments, defaults to `Validate` rooted
/// at the current directory. Any shape that matches no arm — including
/// `compare-*` subcommands given fewer than two paths — produces the full
/// usage string as the error.
fn parse_command() -> Result<Command, Box<dyn std::error::Error>> {
    let args: Vec<String> = env::args().skip(1).collect();
    match args.as_slice() {
        // No args: validate the current working directory.
        [] => Ok(Command::Validate {
            repo_root: env::current_dir()?,
        }),
        [command] if command == "validate" => Ok(Command::Validate {
            repo_root: env::current_dir()?,
        }),
        [command, path] if command == "validate" => Ok(Command::Validate {
            repo_root: PathBuf::from(path),
        }),
        [command, subcommand, path] if command == "finance" && subcommand == "eval" => {
            Ok(Command::FinanceEval {
                snapshot_path: PathBuf::from(path),
            })
        }
        [command, subcommand, left, right] if command == "finance" && subcommand == "diff" => {
            Ok(Command::FinanceDiff {
                left_path: PathBuf::from(left),
                right_path: PathBuf::from(right),
            })
        }
        [command, subcommand, path]
            if command == "runtime" && subcommand == "validate-fixture" =>
        {
            Ok(Command::RuntimeValidateFixture {
                fixture_path: PathBuf::from(path),
            })
        }
        [command, subcommand, path]
            if command == "runtime" && subcommand == "summarize-fixture" =>
        {
            Ok(Command::RuntimeSummarizeFixture {
                fixture_path: PathBuf::from(path),
            })
        }
        [command, subcommand, fixture_path, output_path]
            if command == "runtime" && subcommand == "export-fixture-state" =>
        {
            Ok(Command::RuntimeExportFixtureState {
                fixture_path: PathBuf::from(fixture_path),
                output_path: PathBuf::from(output_path),
            })
        }
        [command, subcommand, left_path, right_path]
            if command == "runtime" && subcommand == "diff-state" =>
        {
            Ok(Command::RuntimeDiffState {
                left_path: PathBuf::from(left_path),
                right_path: PathBuf::from(right_path),
            })
        }
        [command, subcommand, path] if command == "runtime" && subcommand == "summarize-state" => {
            Ok(Command::RuntimeSummarizeState {
                snapshot_path: PathBuf::from(path),
            })
        }
        [command, subcommand, input_path, output_path]
            if command == "runtime" && subcommand == "import-state" =>
        {
            Ok(Command::RuntimeImportState {
                input_path: PathBuf::from(input_path),
                output_path: PathBuf::from(output_path),
            })
        }
        [command, subcommand, path] if command == "runtime" && subcommand == "inspect-smp" => {
            Ok(Command::RuntimeInspectSmp {
                smp_path: PathBuf::from(path),
            })
        }
        [command, subcommand, path]
            if command == "runtime" && subcommand == "summarize-save-load" =>
        {
            Ok(Command::RuntimeSummarizeSaveLoad {
                smp_path: PathBuf::from(path),
            })
        }
        [command, subcommand, path]
            if command == "runtime" && subcommand == "load-save-slice" =>
        {
            Ok(Command::RuntimeLoadSaveSlice {
                smp_path: PathBuf::from(path),
            })
        }
        [command, subcommand, smp_path, output_path]
            if command == "runtime" && subcommand == "import-save-state" =>
        {
            Ok(Command::RuntimeImportSaveState {
                smp_path: PathBuf::from(smp_path),
                output_path: PathBuf::from(output_path),
            })
        }
        [command, subcommand, smp_path, output_path]
            if command == "runtime" && subcommand == "export-save-slice" =>
        {
            Ok(Command::RuntimeExportSaveSlice {
                smp_path: PathBuf::from(smp_path),
                output_path: PathBuf::from(output_path),
            })
        }
        [command, subcommand, snapshot_path, save_slice_path, output_path]
            if command == "runtime" && subcommand == "export-overlay-import" =>
        {
            Ok(Command::RuntimeExportOverlayImport {
                snapshot_path: PathBuf::from(snapshot_path),
                save_slice_path: PathBuf::from(save_slice_path),
                output_path: PathBuf::from(output_path),
            })
        }
        [command, subcommand, path] if command == "runtime" && subcommand == "inspect-pk4" => {
            Ok(Command::RuntimeInspectPk4 {
                pk4_path: PathBuf::from(path),
            })
        }
        [command, subcommand, path] if command == "runtime" && subcommand == "inspect-win" => {
            Ok(Command::RuntimeInspectWin {
                win_path: PathBuf::from(path),
            })
        }
        [command, subcommand, pk4_path, entry_name, output_path]
            if command == "runtime" && subcommand == "extract-pk4-entry" =>
        {
            Ok(Command::RuntimeExtractPk4Entry {
                pk4_path: PathBuf::from(pk4_path),
                entry_name: entry_name.clone(),
                output_path: PathBuf::from(output_path),
            })
        }
        [command, subcommand, path]
            if command == "runtime" && subcommand == "inspect-campaign-exe" =>
        {
            Ok(Command::RuntimeInspectCampaignExe {
                exe_path: PathBuf::from(path),
            })
        }
        // compare-* subcommands take a variadic list of paths; the guard
        // requires at least two so a single-file invocation falls through to
        // the usage error below.
        [command, subcommand, smp_paths @ ..]
            if command == "runtime"
                && subcommand == "compare-classic-profile"
                && smp_paths.len() >= 2 =>
        {
            Ok(Command::RuntimeCompareClassicProfile {
                smp_paths: smp_paths.iter().map(PathBuf::from).collect(),
            })
        }
        [command, subcommand, smp_paths @ ..]
            if command == "runtime"
                && subcommand == "compare-105-profile"
                && smp_paths.len() >= 2 =>
        {
            Ok(Command::RuntimeCompareRt3105Profile {
                smp_paths: smp_paths.iter().map(PathBuf::from).collect(),
            })
        }
        [command, subcommand, smp_paths @ ..]
            if command == "runtime"
                && subcommand == "compare-candidate-table"
                && smp_paths.len() >= 2 =>
        {
            Ok(Command::RuntimeCompareCandidateTable {
                smp_paths: smp_paths.iter().map(PathBuf::from).collect(),
            })
        }
        [command, subcommand, smp_paths @ ..]
            if command == "runtime"
                && subcommand == "compare-recipe-book-lines"
                && smp_paths.len() >= 2 =>
        {
            Ok(Command::RuntimeCompareRecipeBookLines {
                smp_paths: smp_paths.iter().map(PathBuf::from).collect(),
            })
        }
        [command, subcommand, smp_paths @ ..]
            if command == "runtime"
                && subcommand == "compare-setup-payload-core"
                && smp_paths.len() >= 2 =>
        {
            Ok(Command::RuntimeCompareSetupPayloadCore {
                smp_paths: smp_paths.iter().map(PathBuf::from).collect(),
            })
        }
        [command, subcommand, smp_paths @ ..]
            if command == "runtime"
                && subcommand == "compare-setup-launch-payload"
                && smp_paths.len() >= 2 =>
        {
            Ok(Command::RuntimeCompareSetupLaunchPayload {
                smp_paths: smp_paths.iter().map(PathBuf::from).collect(),
            })
        }
        [command, subcommand, smp_paths @ ..]
            if command == "runtime"
                && subcommand == "compare-post-special-conditions-scalars"
                && smp_paths.len() >= 2 =>
        {
            Ok(Command::RuntimeComparePostSpecialConditionsScalars {
                smp_paths: smp_paths.iter().map(PathBuf::from).collect(),
            })
        }
        // scan-* subcommands each take a single root directory.
        [command, subcommand, root_path]
            if command == "runtime" && subcommand == "scan-candidate-table-headers" =>
        {
            Ok(Command::RuntimeScanCandidateTableHeaders {
                root_path: PathBuf::from(root_path),
            })
        }
        [command, subcommand, root_path]
            if command == "runtime" && subcommand == "scan-special-conditions" =>
        {
            Ok(Command::RuntimeScanSpecialConditions {
                root_path: PathBuf::from(root_path),
            })
        }
        [command, subcommand, root_path]
            if command == "runtime" && subcommand == "scan-aligned-runtime-rule-band" =>
        {
            Ok(Command::RuntimeScanAlignedRuntimeRuleBand {
                root_path: PathBuf::from(root_path),
            })
        }
        [command, subcommand, root_path]
            if command == "runtime" && subcommand == "scan-post-special-conditions-scalars" =>
        {
            Ok(Command::RuntimeScanPostSpecialConditionsScalars {
                root_path: PathBuf::from(root_path),
            })
        }
        [command, subcommand, root_path]
            if command == "runtime" && subcommand == "scan-post-special-conditions-tail" =>
        {
            Ok(Command::RuntimeScanPostSpecialConditionsTail {
                root_path: PathBuf::from(root_path),
            })
        }
        [command, subcommand, root_path]
            if command == "runtime" && subcommand == "scan-recipe-book-lines" =>
        {
            Ok(Command::RuntimeScanRecipeBookLines {
                root_path: PathBuf::from(root_path),
            })
        }
        [command, subcommand, smp_path, output_path]
            if command == "runtime" && subcommand == "export-profile-block" =>
        {
            Ok(Command::RuntimeExportProfileBlock {
                smp_path: PathBuf::from(smp_path),
                output_path: PathBuf::from(output_path),
            })
        }
        // Anything else: emit the complete usage synopsis as the error text.
        _ => Err(
            "usage: rrt-cli [validate [repo-root] | finance eval <snapshot.json> | finance diff <left.json> <right.json> | runtime validate-fixture <fixture.json> | runtime summarize-fixture <fixture.json> | runtime export-fixture-state <fixture.json> <snapshot.json> | runtime diff-state <left.json> <right.json> | runtime summarize-state <snapshot.json> | runtime import-state <input.json> <snapshot.json> | runtime inspect-smp <file.smp> | runtime summarize-save-load <file.smp> | runtime load-save-slice <file.smp> | runtime import-save-state <file.smp> <snapshot.json> | runtime export-save-slice <file.smp> <save-slice.json> | runtime export-overlay-import <snapshot.json> <save-slice.json> <overlay-import.json> | runtime inspect-pk4 <file.pk4> | runtime inspect-win <file.win> | runtime extract-pk4-entry <file.pk4> <entry-name> <output-path> | runtime inspect-campaign-exe <RT3.exe> | runtime compare-classic-profile <save1.gms> <save2.gms> [saveN.gms...] | runtime compare-105-profile <save1.gms> <save2.gms> [saveN.gms...] | runtime compare-candidate-table <file1> <file2> [fileN...] | runtime compare-recipe-book-lines <file1> <file2> [fileN...] | runtime compare-setup-payload-core <file1> <file2> [fileN...] | runtime compare-setup-launch-payload <file1> <file2> [fileN...] | runtime compare-post-special-conditions-scalars <file1> <file2> [fileN...] | runtime scan-candidate-table-headers <root-dir> | runtime scan-special-conditions <root-dir> | runtime scan-aligned-runtime-rule-band <root-dir> | runtime scan-post-special-conditions-scalars <root-dir> | runtime scan-post-special-conditions-tail <root-dir> | runtime scan-recipe-book-lines <root-dir> | runtime export-profile-block <save.gms> <profile.json>]"
                .into(),
        ),
    }
}
/// Evaluates the finance snapshot at `snapshot_path` and prints the
/// resulting outcome as pretty-printed JSON on stdout.
fn run_finance_eval(snapshot_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let evaluated = load_finance_outcome(snapshot_path)?;
    let rendered = serde_json::to_string_pretty(&evaluated)?;
    println!("{rendered}");
    Ok(())
}
/// Loads two finance outcomes and prints their diff report as JSON.
fn run_finance_diff(left_path: &Path, right_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let left_outcome = load_finance_outcome(left_path)?;
    let right_outcome = load_finance_outcome(right_path)?;
    let diff_report = diff_finance_outcomes(&left_outcome, &right_outcome)?;
    println!("{}", serde_json::to_string_pretty(&diff_report)?);
    Ok(())
}
/// Validates a fixture document, printing the validation report and
/// returning an error when the fixture is invalid.
fn run_runtime_validate_fixture(fixture_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let document = load_fixture_document(fixture_path)?;
    let validation = validate_fixture_document(&document);
    // The report is printed in both the valid and the invalid case.
    print_runtime_validation_report(&validation)?;
    if validation.valid {
        Ok(())
    } else {
        Err(format!("fixture validation failed for {}", fixture_path.display()).into())
    }
}
/// Replays a fixture's commands against its starting state, prints a summary
/// report, and fails when the resulting state does not match the fixture's
/// expectations.
///
/// # Errors
///
/// Returns an error when the fixture fails validation, when a step command
/// fails to execute, or when the final summary/state fragment mismatches the
/// fixture's expected values (the mismatch messages are joined into the
/// error text).
fn run_runtime_summarize_fixture(fixture_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let fixture = load_fixture_document(fixture_path)?;
    let validation_report = validate_fixture_document(&fixture);
    if !validation_report.valid {
        print_runtime_validation_report(&validation_report)?;
        return Err(format!("fixture validation failed for {}", fixture_path.display()).into());
    }
    // Replay every command over a copy of the fixture's initial state.
    let mut state = fixture.state.clone();
    for command in &fixture.commands {
        execute_step_command(&mut state, command)?;
    }
    let final_summary = RuntimeSummary::from_state(&state);
    let expected_summary_mismatches = fixture.expected_summary.compare(&final_summary);
    // The state-fragment check is optional; absence means "no mismatches".
    let expected_state_fragment_mismatches = match &fixture.expected_state_fragment {
        Some(expected_fragment) => {
            let normalized_state = normalize_runtime_state(&state)?;
            compare_expected_state_fragment(expected_fragment, &normalized_state)
        }
        None => Vec::new(),
    };
    // Move the mismatch vectors into the report (instead of cloning them) and
    // read them back out of the report for the error message below.
    let report = RuntimeFixtureSummaryReport {
        fixture_id: fixture.fixture_id,
        command_count: fixture.commands.len(),
        expected_summary_matches: expected_summary_mismatches.is_empty(),
        expected_summary_mismatches,
        expected_state_fragment_matches: expected_state_fragment_mismatches.is_empty(),
        expected_state_fragment_mismatches,
        final_summary,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    if !report.expected_summary_matches || !report.expected_state_fragment_matches {
        let mismatch_messages = report
            .expected_summary_mismatches
            .iter()
            .chain(report.expected_state_fragment_mismatches.iter())
            .map(String::as_str)
            .collect::<Vec<_>>()
            .join("; ");
        return Err(
            format!("fixture summary mismatched expected output: {mismatch_messages}").into(),
        );
    }
    Ok(())
}
/// Replays a fixture's commands and writes the final runtime state to
/// `output_path` as a snapshot document, printing a summary report.
fn run_runtime_export_fixture_state(
    fixture_path: &Path,
    output_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let fixture = load_fixture_document(fixture_path)?;
    let validation = validate_fixture_document(&fixture);
    if !validation.valid {
        print_runtime_validation_report(&validation)?;
        return Err(format!("fixture validation failed for {}", fixture_path.display()).into());
    }
    // Replay every command over a copy of the fixture's starting state.
    let mut replayed_state = fixture.state.clone();
    for step in &fixture.commands {
        execute_step_command(&mut replayed_state, step)?;
    }
    let fixture_id = fixture.fixture_id;
    let snapshot = RuntimeSnapshotDocument {
        format_version: SNAPSHOT_FORMAT_VERSION,
        snapshot_id: format!("{fixture_id}-final-state"),
        source: RuntimeSnapshotSource {
            source_fixture_id: Some(fixture_id.clone()),
            description: Some(format!(
                "Exported final runtime state for fixture {fixture_id}"
            )),
        },
        state: replayed_state,
    };
    save_runtime_snapshot_document(output_path, &snapshot)?;
    let report = RuntimeStateSummaryReport {
        snapshot_id: snapshot.snapshot_id.clone(),
        summary: snapshot.summary(),
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Prints a summary report for a runtime state file.
///
/// Prefers the full snapshot-document format; when the file does not parse
/// as a snapshot it falls back to the raw state-import format.
fn run_runtime_summarize_state(snapshot_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let report = match load_runtime_snapshot_document(snapshot_path) {
        Ok(snapshot) => {
            validate_runtime_snapshot_document(&snapshot)
                .map_err(|err| format!("invalid runtime snapshot: {err}"))?;
            RuntimeStateSummaryReport {
                snapshot_id: snapshot.snapshot_id.clone(),
                summary: snapshot.summary(),
            }
        }
        Err(_) => {
            // Not a snapshot document — treat the file as a state import.
            let import = load_runtime_state_import(snapshot_path)?;
            RuntimeStateSummaryReport {
                snapshot_id: import.import_id,
                summary: RuntimeSummary::from_state(&import.state),
            }
        }
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Diffs two normalized runtime states and prints a JSON report of any
/// differences found.
fn run_runtime_diff_state(
    left_path: &Path,
    right_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let left_state = load_normalized_runtime_state(left_path)?;
    let right_state = load_normalized_runtime_state(right_path)?;
    let differences = diff_json_values(&left_state, &right_state);
    println!(
        "{}",
        serde_json::to_string_pretty(&RuntimeStateDiffReport {
            matches: differences.is_empty(),
            difference_count: differences.len(),
            differences,
        })?
    );
    Ok(())
}
/// Loads a runtime state from `path` and returns its normalized JSON value.
///
/// Accepts either a snapshot document (validated first) or, as a fallback,
/// a raw state-import file.
fn load_normalized_runtime_state(path: &Path) -> Result<Value, Box<dyn std::error::Error>> {
    match load_runtime_snapshot_document(path) {
        Ok(snapshot) => {
            validate_runtime_snapshot_document(&snapshot)
                .map_err(|err| format!("invalid runtime snapshot: {err}"))?;
            normalize_runtime_state(&snapshot.state)
        }
        Err(_) => {
            let import = load_runtime_state_import(path)?;
            normalize_runtime_state(&import.state)
        }
    }
}
/// Converts a runtime state import into a snapshot document, saves it to
/// `output_path`, and prints a summary report.
fn run_runtime_import_state(
    input_path: &Path,
    output_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let import = load_runtime_state_import(input_path)?;
    // Carry the optional import description through into the snapshot source.
    let description = if let Some(detail) = import.description {
        format!(
            "Imported runtime state from {} ({detail})",
            input_path.display()
        )
    } else {
        format!("Imported runtime state from {}", input_path.display())
    };
    let snapshot = RuntimeSnapshotDocument {
        format_version: SNAPSHOT_FORMAT_VERSION,
        snapshot_id: format!("{}-snapshot", import.import_id),
        source: RuntimeSnapshotSource {
            source_fixture_id: None,
            description: Some(description),
        },
        state: import.state,
    };
    save_runtime_snapshot_document(output_path, &snapshot)?;
    let report = RuntimeStateSummaryReport {
        snapshot_id: snapshot.snapshot_id.clone(),
        summary: snapshot.summary(),
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Inspects a `.smp` file and prints the inspection result as JSON.
fn run_runtime_inspect_smp(smp_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let inspection = inspect_smp_file(smp_path)?;
    let output = RuntimeSmpInspectionOutput {
        path: smp_path.display().to_string(),
        inspection,
    };
    println!("{}", serde_json::to_string_pretty(&output)?);
    Ok(())
}
/// Prints the save-load summary extracted from a `.smp` file, failing when
/// the inspection did not expose one.
fn run_runtime_summarize_save_load(smp_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let inspection = inspect_smp_file(smp_path)?;
    let Some(summary) = inspection.save_load_summary else {
        return Err(format!(
            "{} did not expose a recognizable save-load summary",
            smp_path.display()
        )
        .into());
    };
    let output = RuntimeSaveLoadSummaryOutput {
        path: smp_path.display().to_string(),
        summary,
    };
    println!("{}", serde_json::to_string_pretty(&output)?);
    Ok(())
}
/// Loads the save slice from a `.smp` file and prints it as JSON.
fn run_runtime_load_save_slice(smp_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let save_slice = load_save_slice_file(smp_path)?;
    let output = RuntimeLoadedSaveSliceOutput {
        path: smp_path.display().to_string(),
        save_slice,
    };
    println!("{}", serde_json::to_string_pretty(&output)?);
    Ok(())
}
/// Projects the save slice from `smp_path` into a runtime snapshot document,
/// writes it to `output_path`, and prints a summary report.
fn run_runtime_import_save_state(
    smp_path: &Path,
    output_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let save_slice = load_save_slice_file(smp_path)?;
    // Derive the import id from the save file's stem, with a fallback for
    // non-UTF-8 or extension-less paths.
    let import_id_stem = smp_path
        .file_stem()
        .and_then(|stem| stem.to_str())
        .unwrap_or("save-state");
    let description = format!(
        "Projected partial runtime state from save {}",
        smp_path.display()
    );
    let import =
        project_save_slice_to_runtime_state_import(&save_slice, import_id_stem, Some(description))
            .map_err(|err| format!("failed to project save slice: {err}"))?;
    let snapshot = RuntimeSnapshotDocument {
        format_version: SNAPSHOT_FORMAT_VERSION,
        snapshot_id: format!("{}-snapshot", import.import_id),
        source: RuntimeSnapshotSource {
            source_fixture_id: None,
            description: import.description,
        },
        state: import.state,
    };
    save_runtime_snapshot_document(output_path, &snapshot)?;
    let report = RuntimeStateSummaryReport {
        snapshot_id: snapshot.snapshot_id.clone(),
        summary: snapshot.summary(),
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Exports the save slice from a `.smp` file to a save-slice document and
/// prints the export report.
fn run_runtime_export_save_slice(
    smp_path: &Path,
    output_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let loaded_slice = load_save_slice_file(smp_path)?;
    let export_report = export_runtime_save_slice_document(smp_path, output_path, loaded_slice)?;
    println!("{}", serde_json::to_string_pretty(&export_report)?);
    Ok(())
}
/// Writes an overlay-import document that references a base snapshot and a
/// save slice, and prints the export report.
fn run_runtime_export_overlay_import(
    snapshot_path: &Path,
    save_slice_path: &Path,
    output_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let export_report =
        export_runtime_overlay_import_document(snapshot_path, save_slice_path, output_path)?;
    println!("{}", serde_json::to_string_pretty(&export_report)?);
    Ok(())
}
/// Builds a save-slice document from `save_slice`, saves it to
/// `output_path`, and returns a small report describing the export.
fn export_runtime_save_slice_document(
    smp_path: &Path,
    output_path: &Path,
    save_slice: SmpLoadedSaveSlice,
) -> Result<RuntimeSaveSliceExportOutput, Box<dyn std::error::Error>> {
    // The document id comes from the save file's stem, with a fallback for
    // non-UTF-8 or extension-less paths.
    let save_slice_id = smp_path
        .file_stem()
        .and_then(|stem| stem.to_str())
        .unwrap_or("save-slice")
        .to_string();
    let original_save_filename = smp_path
        .file_name()
        .and_then(|name| name.to_str())
        .map(ToString::to_string);
    let document = RuntimeSaveSliceDocument {
        format_version: SAVE_SLICE_DOCUMENT_FORMAT_VERSION,
        save_slice_id,
        source: RuntimeSaveSliceDocumentSource {
            description: Some(format!(
                "Exported loaded save slice from {}",
                smp_path.display()
            )),
            original_save_filename,
            original_save_sha256: None,
            notes: vec![],
        },
        save_slice,
    };
    save_runtime_save_slice_document(output_path, &document)?;
    Ok(RuntimeSaveSliceExportOutput {
        path: smp_path.display().to_string(),
        output_path: output_path.display().to_string(),
        save_slice_id: document.save_slice_id,
    })
}
/// Builds an overlay-import document referencing `snapshot_path` and
/// `save_slice_path`, saves it to `output_path`, and returns the export
/// report.
fn export_runtime_overlay_import_document(
    snapshot_path: &Path,
    save_slice_path: &Path,
    output_path: &Path,
) -> Result<RuntimeOverlayImportExportOutput, Box<dyn std::error::Error>> {
    // The import id comes from the output filename stem, with a fallback for
    // non-UTF-8 or extension-less paths.
    let import_id = output_path
        .file_stem()
        .and_then(|stem| stem.to_str())
        .unwrap_or("overlay-import")
        .to_string();
    let document = RuntimeOverlayImportDocument {
        format_version: OVERLAY_IMPORT_DOCUMENT_FORMAT_VERSION,
        import_id: import_id.clone(),
        source: RuntimeOverlayImportDocumentSource {
            description: Some(format!(
                "Overlay import referencing {} and {}",
                snapshot_path.display(),
                save_slice_path.display()
            )),
            notes: vec![],
        },
        base_snapshot_path: snapshot_path.display().to_string(),
        save_slice_path: save_slice_path.display().to_string(),
    };
    save_runtime_overlay_import_document(output_path, &document)?;
    Ok(RuntimeOverlayImportExportOutput {
        output_path: output_path.display().to_string(),
        import_id,
        base_snapshot_path: document.base_snapshot_path,
        save_slice_path: document.save_slice_path,
    })
}
/// Inspects a `.pk4` archive and prints the inspection result as JSON.
fn run_runtime_inspect_pk4(pk4_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let inspection = inspect_pk4_file(pk4_path)?;
    let output = RuntimePk4InspectionOutput {
        path: pk4_path.display().to_string(),
        inspection,
    };
    println!("{}", serde_json::to_string_pretty(&output)?);
    Ok(())
}
/// Inspects a `.win` file and prints the inspection result as JSON.
fn run_runtime_inspect_win(win_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let inspection = inspect_win_file(win_path)?;
    let output = RuntimeWinInspectionOutput {
        path: win_path.display().to_string(),
        inspection,
    };
    println!("{}", serde_json::to_string_pretty(&output)?);
    Ok(())
}
/// Extracts the named entry from a `.pk4` archive to `output_path` and
/// prints an extraction report as JSON.
fn run_runtime_extract_pk4_entry(
    pk4_path: &Path,
    entry_name: &str,
    output_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let extraction = extract_pk4_entry_file(pk4_path, entry_name, output_path)?;
    let output = RuntimePk4ExtractionOutput {
        path: pk4_path.display().to_string(),
        output_path: output_path.display().to_string(),
        extraction,
    };
    println!("{}", serde_json::to_string_pretty(&output)?);
    Ok(())
}
/// Inspects a campaign executable and prints the inspection result as JSON.
fn run_runtime_inspect_campaign_exe(exe_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let inspection = inspect_campaign_exe_file(exe_path)?;
    let output = RuntimeCampaignExeInspectionOutput {
        path: exe_path.display().to_string(),
        inspection,
    };
    println!("{}", serde_json::to_string_pretty(&output)?);
    Ok(())
}
/// Compares classic packed profile blocks across the given save files and
/// prints a JSON comparison report.
fn run_runtime_compare_classic_profile(
    smp_paths: &[PathBuf],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut samples = Vec::with_capacity(smp_paths.len());
    for path in smp_paths {
        samples.push(load_classic_profile_sample(path)?);
    }
    // A common family is reported only when every sample agrees on it.
    let common_profile_family = samples.first().and_then(|first| {
        samples
            .iter()
            .all(|sample| sample.profile_family == first.profile_family)
            .then(|| first.profile_family.clone())
    });
    let differences = diff_classic_profile_samples(&samples)?;
    let report = RuntimeClassicProfileComparisonReport {
        file_count: samples.len(),
        matches: differences.is_empty(),
        common_profile_family,
        difference_count: differences.len(),
        differences,
        samples,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Compares RT3 1.05 packed profile blocks across the given save files and
/// prints a JSON comparison report.
fn run_runtime_compare_rt3_105_profile(
    smp_paths: &[PathBuf],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut samples = Vec::with_capacity(smp_paths.len());
    for path in smp_paths {
        samples.push(load_rt3_105_profile_sample(path)?);
    }
    // A common family is reported only when every sample agrees on it.
    let common_profile_family = samples.first().and_then(|first| {
        samples
            .iter()
            .all(|sample| sample.profile_family == first.profile_family)
            .then(|| first.profile_family.clone())
    });
    let differences = diff_rt3_105_profile_samples(&samples)?;
    let report = RuntimeRt3105ProfileComparisonReport {
        file_count: samples.len(),
        matches: differences.is_empty(),
        common_profile_family,
        difference_count: differences.len(),
        differences,
        samples,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Compares candidate tables across the given save files and prints a JSON
/// comparison report, including the common profile and semantic families
/// when all samples agree.
fn run_runtime_compare_candidate_table(
    smp_paths: &[PathBuf],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut samples = Vec::with_capacity(smp_paths.len());
    for path in smp_paths {
        samples.push(load_candidate_table_sample(path)?);
    }
    // Each common family is reported only when every sample agrees on it.
    let common_profile_family = samples.first().and_then(|first| {
        samples
            .iter()
            .all(|sample| sample.profile_family == first.profile_family)
            .then(|| first.profile_family.clone())
    });
    let common_semantic_family = samples.first().and_then(|first| {
        samples
            .iter()
            .all(|sample| sample.semantic_family == first.semantic_family)
            .then(|| first.semantic_family.clone())
    });
    let differences = diff_candidate_table_samples(&samples)?;
    let report = RuntimeCandidateTableComparisonReport {
        file_count: samples.len(),
        matches: differences.is_empty(),
        common_profile_family,
        common_semantic_family,
        difference_count: differences.len(),
        differences,
        samples,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Compares recipe-book lines across the given save files and prints a JSON
/// report covering both structural and content-level differences.
fn run_runtime_compare_recipe_book_lines(
    smp_paths: &[PathBuf],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut samples = Vec::with_capacity(smp_paths.len());
    for path in smp_paths {
        samples.push(load_recipe_book_line_sample(path)?);
    }
    // A common family is reported only when every sample agrees on it.
    let common_profile_family = samples.first().and_then(|first| {
        samples
            .iter()
            .all(|sample| sample.profile_family == first.profile_family)
            .then(|| first.profile_family.clone())
    });
    // Structural and content differences are computed independently.
    let differences = diff_recipe_book_line_samples(&samples)?;
    let content_differences = diff_recipe_book_line_content_samples(&samples)?;
    let report = RuntimeRecipeBookLineComparisonReport {
        file_count: samples.len(),
        matches: differences.is_empty(),
        content_matches: content_differences.is_empty(),
        common_profile_family,
        difference_count: differences.len(),
        differences,
        content_difference_count: content_differences.len(),
        content_differences,
        samples,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Compares setup payload cores across the given files and prints a JSON
/// comparison report.
fn run_runtime_compare_setup_payload_core(
    smp_paths: &[PathBuf],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut samples = Vec::with_capacity(smp_paths.len());
    for path in smp_paths {
        samples.push(load_setup_payload_core_sample(path)?);
    }
    let differences = diff_setup_payload_core_samples(&samples)?;
    println!(
        "{}",
        serde_json::to_string_pretty(&RuntimeSetupPayloadCoreComparisonReport {
            file_count: samples.len(),
            matches: differences.is_empty(),
            difference_count: differences.len(),
            differences,
            samples,
        })?
    );
    Ok(())
}
/// Compares setup launch payloads across the given files and prints a JSON
/// comparison report.
fn run_runtime_compare_setup_launch_payload(
    smp_paths: &[PathBuf],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut samples = Vec::with_capacity(smp_paths.len());
    for path in smp_paths {
        samples.push(load_setup_launch_payload_sample(path)?);
    }
    let differences = diff_setup_launch_payload_samples(&samples)?;
    println!(
        "{}",
        serde_json::to_string_pretty(&RuntimeSetupLaunchPayloadComparisonReport {
            file_count: samples.len(),
            matches: differences.is_empty(),
            difference_count: differences.len(),
            differences,
            samples,
        })?
    );
    Ok(())
}
/// Compares post-special-conditions scalar regions across the given files
/// and prints a JSON comparison report.
fn run_runtime_compare_post_special_conditions_scalars(
    smp_paths: &[PathBuf],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut samples = Vec::with_capacity(smp_paths.len());
    for path in smp_paths {
        samples.push(load_post_special_conditions_scalar_sample(path)?);
    }
    // A common family is reported only when every sample agrees on it.
    let common_profile_family = samples.first().and_then(|first| {
        samples
            .iter()
            .all(|sample| sample.profile_family == first.profile_family)
            .then(|| first.profile_family.clone())
    });
    let differences = diff_post_special_conditions_scalar_samples(&samples)?;
    let report = RuntimePostSpecialConditionsScalarComparisonReport {
        file_count: samples.len(),
        matches: differences.is_empty(),
        common_profile_family,
        difference_count: differences.len(),
        differences,
        samples,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Scans `root_path` for candidate-table inputs, clusters the files by their
/// two header words, and prints a JSON report describing each cluster.
///
/// Files that fail to load a header-scan sample are counted as skipped
/// rather than aborting the whole scan.
fn run_runtime_scan_candidate_table_headers(
    root_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut candidate_paths = Vec::new();
    collect_candidate_table_input_paths(root_path, &mut candidate_paths)?;
    let mut samples = Vec::new();
    let mut skipped_file_count = 0usize;
    for path in candidate_paths {
        match load_candidate_table_header_scan_sample(&path) {
            // Best-effort: unreadable/unrecognized files are skipped, not fatal.
            Ok(sample) => samples.push(sample),
            Err(_) => skipped_file_count += 1,
        }
    }
    // Cluster samples by their (header word 0, header word 1) pair.
    let mut grouped =
        BTreeMap::<(String, String), Vec<RuntimeCandidateTableHeaderScanSample>>::new();
    for sample in samples {
        grouped
            .entry((
                sample.header_word_0_hex.clone(),
                sample.header_word_1_hex.clone(),
            ))
            .or_default()
            .push(sample);
    }
    let file_count = grouped.values().map(Vec::len).sum();
    let clusters = grouped
        .into_iter()
        .map(|((header_word_0_hex, header_word_1_hex), samples)| {
            // Each Vec below is collected from a BTreeSet, whose iteration
            // order is already ascending, so no additional sort is needed.
            let profile_families = samples
                .iter()
                .map(|sample| sample.profile_family.clone())
                .collect::<BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>();
            let source_kinds = samples
                .iter()
                .map(|sample| sample.source_kind.clone())
                .collect::<BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>();
            let zero_trailer_count_values = samples
                .iter()
                .map(|sample| sample.zero_trailer_entry_count)
                .collect::<BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>();
            let distinct_zero_name_set_count = samples
                .iter()
                .map(|sample| sample.zero_trailer_entry_names.clone())
                .collect::<BTreeSet<_>>()
                .len();
            let zero_trailer_count_min = samples
                .iter()
                .map(|sample| sample.zero_trailer_entry_count)
                .min()
                .unwrap_or(0);
            let zero_trailer_count_max = samples
                .iter()
                .map(|sample| sample.zero_trailer_entry_count)
                .max()
                .unwrap_or(0);
            // Cap the listed paths to keep the report readable.
            let sample_paths = samples
                .iter()
                .take(12)
                .map(|sample| sample.path.clone())
                .collect::<Vec<_>>();
            RuntimeCandidateTableHeaderCluster {
                header_word_0_hex,
                header_word_1_hex,
                file_count: samples.len(),
                profile_families,
                source_kinds,
                zero_trailer_count_min,
                zero_trailer_count_max,
                zero_trailer_count_values,
                distinct_zero_name_set_count,
                sample_paths,
            }
        })
        .collect::<Vec<_>>();
    let report = RuntimeCandidateTableHeaderScanReport {
        root_path: root_path.display().to_string(),
        file_count,
        cluster_count: clusters.len(),
        skipped_file_count,
        clusters,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Scans `root_path` for save files and reports which special-condition
/// slots are enabled across them, as pretty-printed JSON.
fn run_runtime_scan_special_conditions(root_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let mut candidate_paths = Vec::new();
    collect_special_conditions_input_paths(root_path, &mut candidate_paths)?;
    let file_count = candidate_paths.len();
    // Best-effort loading: files without a usable probe are skipped, not fatal.
    let mut skipped_file_count = 0usize;
    let samples: Vec<_> = candidate_paths
        .into_iter()
        .filter_map(|path| match load_special_conditions_scan_sample(&path) {
            Ok(sample) => Some(sample),
            Err(_) => {
                skipped_file_count += 1;
                None
            }
        })
        .collect();
    let files_with_probe_count = samples.len();
    let sample_files_with_any_enabled: Vec<_> = samples
        .iter()
        .filter(|sample| sample.enabled_visible_count != 0)
        .cloned()
        .collect();
    let files_with_any_enabled_count = sample_files_with_any_enabled.len();
    // Map each enabled (slot index, label) pair to the paths exhibiting it.
    let mut slot_hits = BTreeMap::<(u8, String), Vec<String>>::new();
    for sample in &samples {
        for label in &sample.enabled_visible_labels {
            let Some(slot_index) = parse_special_condition_slot_index(label) else {
                continue;
            };
            slot_hits
                .entry((slot_index, label.clone()))
                .or_default()
                .push(sample.path.clone());
        }
    }
    let enabled_slot_summaries: Vec<_> = slot_hits
        .into_iter()
        .map(|((slot_index, label), paths)| RuntimeSpecialConditionsSlotSummary {
            slot_index,
            label,
            file_count_enabled: paths.len(),
            sample_paths: paths.into_iter().take(12).collect(),
        })
        .collect();
    let report = RuntimeSpecialConditionsScanReport {
        root_path: root_path.display().to_string(),
        file_count,
        files_with_probe_count,
        files_with_any_enabled_count,
        skipped_file_count,
        enabled_slot_summaries,
        sample_files_with_any_enabled,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Scans `root_path` for save files and summarizes, per profile family,
/// which dwords of the aligned runtime-rule band are non-zero and what
/// values they take, printing the report as pretty-printed JSON.
///
/// Files that fail to load a band-scan sample are counted as skipped rather
/// than aborting the scan.
fn run_runtime_scan_aligned_runtime_rule_band(
    root_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut candidate_paths = Vec::new();
    collect_special_conditions_input_paths(root_path, &mut candidate_paths)?;
    let file_count = candidate_paths.len();
    let mut samples = Vec::new();
    let mut skipped_file_count = 0usize;
    for path in candidate_paths {
        match load_aligned_runtime_rule_band_scan_sample(&path) {
            // Best-effort: unreadable/unrecognized files are skipped, not fatal.
            Ok(sample) => samples.push(sample),
            Err(_) => skipped_file_count += 1,
        }
    }
    let files_with_probe_count = samples.len();
    let files_with_any_nonzero_count = samples
        .iter()
        .filter(|sample| !sample.nonzero_band_indices.is_empty())
        .count();
    // Group the samples by profile family; each family is summarized separately.
    let mut grouped = BTreeMap::<String, Vec<RuntimeAlignedRuntimeRuleBandScanSample>>::new();
    for sample in samples {
        grouped
            .entry(sample.profile_family.clone())
            .or_default()
            .push(sample);
    }
    let family_summaries = grouped
        .into_iter()
        .map(|(profile_family, samples)| {
            // NOTE: these two locals shadow the scan-wide totals above with
            // per-family values.
            let file_count = samples.len();
            let files_with_any_nonzero_count = samples
                .iter()
                .filter(|sample| !sample.nonzero_band_indices.is_empty())
                .count();
            let source_kinds = samples
                .iter()
                .map(|sample| sample.source_kind.clone())
                .collect::<BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>();
            let distinct_nonzero_index_set_count = samples
                .iter()
                .map(|sample| sample.nonzero_band_indices.clone())
                .collect::<BTreeSet<_>>()
                .len();
            // Band indices that are non-zero in EVERY sample of this family
            // (intersection across samples).
            let stable_band_indices = if samples.is_empty() {
                BTreeSet::new()
            } else {
                let mut stable = samples[0]
                    .nonzero_band_indices
                    .iter()
                    .copied()
                    .collect::<BTreeSet<_>>();
                for sample in samples.iter().skip(1) {
                    let current = sample
                        .nonzero_band_indices
                        .iter()
                        .copied()
                        .collect::<BTreeSet<_>>();
                    stable = stable.intersection(&current).copied().collect();
                }
                stable
            };
            // Per-band occurrence counts and the set of distinct hex values seen.
            let mut band_values = BTreeMap::<usize, BTreeSet<String>>::new();
            let mut band_counts = BTreeMap::<usize, usize>::new();
            for sample in &samples {
                for band_index in &sample.nonzero_band_indices {
                    *band_counts.entry(*band_index).or_default() += 1;
                }
                for (band_index, value_hex) in &sample.values_by_band_index {
                    band_values
                        .entry(*band_index)
                        .or_default()
                        .insert(value_hex.clone());
                }
            }
            let offset_summaries = band_counts
                .into_iter()
                .map(
                    |(band_index, count)| RuntimeAlignedRuntimeRuleBandOffsetSummary {
                        band_index,
                        // Each band entry is one dword wide, hence index * 4.
                        relative_offset_hex: format!("0x{:x}", band_index * 4),
                        lane_kind: aligned_runtime_rule_lane_kind(band_index).to_string(),
                        known_label: aligned_runtime_rule_known_label(band_index)
                            .map(str::to_string),
                        file_count_present: count,
                        distinct_value_count: band_values
                            .get(&band_index)
                            .map(BTreeSet::len)
                            .unwrap_or(0),
                        // Cap the listed values to keep the report readable.
                        sample_value_hexes: band_values
                            .get(&band_index)
                            .map(|values| values.iter().take(8).cloned().collect())
                            .unwrap_or_default(),
                    },
                )
                .collect::<Vec<_>>();
            RuntimeAlignedRuntimeRuleBandFamilySummary {
                profile_family,
                source_kinds,
                file_count,
                files_with_any_nonzero_count,
                distinct_nonzero_index_set_count,
                stable_nonzero_band_indices: stable_band_indices.into_iter().collect(),
                union_nonzero_band_indices: band_values.keys().copied().collect(),
                offset_summaries,
                // Cap the listed paths to keep the report readable.
                sample_paths: samples
                    .iter()
                    .take(12)
                    .map(|sample| sample.path.clone())
                    .collect(),
            }
        })
        .collect::<Vec<_>>();
    let report = RuntimeAlignedRuntimeRuleBandScanReport {
        root_path: root_path.display().to_string(),
        file_count,
        files_with_probe_count,
        files_with_any_nonzero_count,
        skipped_file_count,
        family_summaries,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Scans `root_path` for save files and summarizes, per profile family,
/// which post-special-conditions scalar offsets are non-zero and what
/// values they take, printing the report as pretty-printed JSON.
///
/// Files that fail to load a scalar-scan sample are counted as skipped
/// rather than aborting the scan.
fn run_runtime_scan_post_special_conditions_scalars(
    root_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut candidate_paths = Vec::new();
    collect_special_conditions_input_paths(root_path, &mut candidate_paths)?;
    let file_count = candidate_paths.len();
    let mut samples = Vec::new();
    let mut skipped_file_count = 0usize;
    for path in candidate_paths {
        match load_post_special_conditions_scalar_scan_sample(&path) {
            // Best-effort: unreadable/unrecognized files are skipped, not fatal.
            Ok(sample) => samples.push(sample),
            Err(_) => skipped_file_count += 1,
        }
    }
    let files_with_probe_count = samples.len();
    let files_with_any_nonzero_count = samples
        .iter()
        .filter(|sample| !sample.nonzero_relative_offsets.is_empty())
        .count();
    // Group the samples by profile family; each family is summarized separately.
    let mut grouped = BTreeMap::<String, Vec<RuntimePostSpecialConditionsScalarScanSample>>::new();
    for sample in samples {
        grouped
            .entry(sample.profile_family.clone())
            .or_default()
            .push(sample);
    }
    let family_summaries = grouped
        .into_iter()
        .map(|(profile_family, samples)| {
            // NOTE: these two locals shadow the scan-wide totals above with
            // per-family values.
            let file_count = samples.len();
            let files_with_any_nonzero_count = samples
                .iter()
                .filter(|sample| !sample.nonzero_relative_offsets.is_empty())
                .count();
            let source_kinds = samples
                .iter()
                .map(|sample| sample.source_kind.clone())
                .collect::<BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>();
            let distinct_nonzero_offset_set_count = samples
                .iter()
                .map(|sample| sample.nonzero_relative_offsets.clone())
                .collect::<BTreeSet<_>>()
                .len();
            // Offsets that are non-zero in EVERY sample of this family
            // (intersection across samples).
            let stable_offsets = if samples.is_empty() {
                BTreeSet::new()
            } else {
                let mut stable = samples[0]
                    .nonzero_relative_offsets
                    .iter()
                    .copied()
                    .collect::<BTreeSet<_>>();
                for sample in samples.iter().skip(1) {
                    let current = sample
                        .nonzero_relative_offsets
                        .iter()
                        .copied()
                        .collect::<BTreeSet<_>>();
                    stable = stable.intersection(&current).copied().collect();
                }
                stable
            };
            // Per-offset occurrence counts and the set of distinct hex values seen.
            let mut offset_values = BTreeMap::<usize, BTreeSet<String>>::new();
            let mut offset_counts = BTreeMap::<usize, usize>::new();
            for sample in &samples {
                for offset in &sample.nonzero_relative_offsets {
                    *offset_counts.entry(*offset).or_default() += 1;
                }
                for (offset_hex, value_hex) in &sample.values_by_relative_offset_hex {
                    // Keys are stored as hex strings; unparsable keys are dropped.
                    if let Some(offset) = parse_hex_offset(offset_hex) {
                        offset_values
                            .entry(offset)
                            .or_default()
                            .insert(value_hex.clone());
                    }
                }
            }
            let offset_summaries = offset_counts
                .into_iter()
                .map(
                    |(offset, count)| RuntimePostSpecialConditionsScalarOffsetSummary {
                        relative_offset_hex: format!("0x{offset:x}"),
                        file_count_present: count,
                        distinct_value_count: offset_values
                            .get(&offset)
                            .map(BTreeSet::len)
                            .unwrap_or(0),
                        // Cap the listed values to keep the report readable.
                        sample_value_hexes: offset_values
                            .get(&offset)
                            .map(|values| values.iter().take(8).cloned().collect())
                            .unwrap_or_default(),
                    },
                )
                .collect::<Vec<_>>();
            RuntimePostSpecialConditionsScalarFamilySummary {
                profile_family,
                source_kinds,
                file_count,
                files_with_any_nonzero_count,
                distinct_nonzero_offset_set_count,
                stable_nonzero_relative_offset_hexes: stable_offsets
                    .into_iter()
                    .map(|offset| format!("0x{offset:x}"))
                    .collect(),
                union_nonzero_relative_offset_hexes: offset_values
                    .keys()
                    .copied()
                    .map(|offset| format!("0x{offset:x}"))
                    .collect(),
                offset_summaries,
                // Cap the listed paths to keep the report readable.
                sample_paths: samples
                    .iter()
                    .take(12)
                    .map(|sample| sample.path.clone())
                    .collect(),
            }
        })
        .collect::<Vec<_>>();
    let report = RuntimePostSpecialConditionsScalarScanReport {
        root_path: root_path.display().to_string(),
        file_count,
        files_with_probe_count,
        files_with_any_nonzero_count,
        skipped_file_count,
        family_summaries,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Scans `root_path` for save files and summarizes, per profile family,
/// which post-special-conditions tail offsets are non-zero and what values
/// they take, printing the report as pretty-printed JSON.
///
/// Mirrors `run_runtime_scan_post_special_conditions_scalars` but probes the
/// tail region. Files that fail to load a tail-scan sample are counted as
/// skipped rather than aborting the scan.
fn run_runtime_scan_post_special_conditions_tail(
    root_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut candidate_paths = Vec::new();
    collect_special_conditions_input_paths(root_path, &mut candidate_paths)?;
    let file_count = candidate_paths.len();
    let mut samples = Vec::new();
    let mut skipped_file_count = 0usize;
    for path in candidate_paths {
        match load_post_special_conditions_tail_scan_sample(&path) {
            // Best-effort: unreadable/unrecognized files are skipped, not fatal.
            Ok(sample) => samples.push(sample),
            Err(_) => skipped_file_count += 1,
        }
    }
    let files_with_probe_count = samples.len();
    let files_with_any_nonzero_count = samples
        .iter()
        .filter(|sample| !sample.nonzero_relative_offsets.is_empty())
        .count();
    // Group the samples by profile family; each family is summarized separately.
    let mut grouped = BTreeMap::<String, Vec<RuntimePostSpecialConditionsTailScanSample>>::new();
    for sample in samples {
        grouped
            .entry(sample.profile_family.clone())
            .or_default()
            .push(sample);
    }
    let family_summaries = grouped
        .into_iter()
        .map(|(profile_family, samples)| {
            // NOTE: these two locals shadow the scan-wide totals above with
            // per-family values.
            let file_count = samples.len();
            let files_with_any_nonzero_count = samples
                .iter()
                .filter(|sample| !sample.nonzero_relative_offsets.is_empty())
                .count();
            let source_kinds = samples
                .iter()
                .map(|sample| sample.source_kind.clone())
                .collect::<BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>();
            let distinct_nonzero_offset_set_count = samples
                .iter()
                .map(|sample| sample.nonzero_relative_offsets.clone())
                .collect::<BTreeSet<_>>()
                .len();
            // Offsets that are non-zero in EVERY sample of this family
            // (intersection across samples).
            let stable_offsets = if samples.is_empty() {
                BTreeSet::new()
            } else {
                let mut stable = samples[0]
                    .nonzero_relative_offsets
                    .iter()
                    .copied()
                    .collect::<BTreeSet<_>>();
                for sample in samples.iter().skip(1) {
                    let current = sample
                        .nonzero_relative_offsets
                        .iter()
                        .copied()
                        .collect::<BTreeSet<_>>();
                    stable = stable.intersection(&current).copied().collect();
                }
                stable
            };
            // Per-offset occurrence counts and the set of distinct hex values seen.
            let mut offset_values = BTreeMap::<usize, BTreeSet<String>>::new();
            let mut offset_counts = BTreeMap::<usize, usize>::new();
            for sample in &samples {
                for offset in &sample.nonzero_relative_offsets {
                    *offset_counts.entry(*offset).or_default() += 1;
                }
                for (offset_hex, value_hex) in &sample.values_by_relative_offset_hex {
                    // Keys are stored as hex strings; unparsable keys are dropped.
                    if let Some(offset) = parse_hex_offset(offset_hex) {
                        offset_values
                            .entry(offset)
                            .or_default()
                            .insert(value_hex.clone());
                    }
                }
            }
            let offset_summaries = offset_counts
                .into_iter()
                .map(
                    |(offset, count)| RuntimePostSpecialConditionsTailOffsetSummary {
                        relative_offset_hex: format!("0x{offset:x}"),
                        file_count_present: count,
                        distinct_value_count: offset_values
                            .get(&offset)
                            .map(BTreeSet::len)
                            .unwrap_or(0),
                        // Cap the listed values to keep the report readable.
                        sample_value_hexes: offset_values
                            .get(&offset)
                            .map(|values| values.iter().take(8).cloned().collect())
                            .unwrap_or_default(),
                    },
                )
                .collect::<Vec<_>>();
            RuntimePostSpecialConditionsTailFamilySummary {
                profile_family,
                source_kinds,
                file_count,
                files_with_any_nonzero_count,
                distinct_nonzero_offset_set_count,
                stable_nonzero_relative_offset_hexes: stable_offsets
                    .into_iter()
                    .map(|offset| format!("0x{offset:x}"))
                    .collect(),
                union_nonzero_relative_offset_hexes: offset_values
                    .keys()
                    .copied()
                    .map(|offset| format!("0x{offset:x}"))
                    .collect(),
                offset_summaries,
                // Cap the listed paths to keep the report readable.
                sample_paths: samples
                    .iter()
                    .take(12)
                    .map(|sample| sample.path.clone())
                    .collect(),
            }
        })
        .collect::<Vec<_>>();
    let report = RuntimePostSpecialConditionsTailScanReport {
        root_path: root_path.display().to_string(),
        file_count,
        files_with_probe_count,
        files_with_any_nonzero_count,
        skipped_file_count,
        family_summaries,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Scans every eligible file under `root_path` for grounded recipe-book line
/// data, groups the per-file samples by profile family, and prints the
/// aggregated report as pretty-printed JSON on stdout.
///
/// Files that fail to yield a recipe-book sample are counted in
/// `skipped_file_count` instead of aborting the whole scan.
fn run_runtime_scan_recipe_book_lines(root_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let mut candidate_paths = Vec::new();
    collect_special_conditions_input_paths(root_path, &mut candidate_paths)?;
    let file_count = candidate_paths.len();
    let mut samples = Vec::new();
    let mut skipped_file_count = 0usize;
    // Best-effort load: any per-file error just bumps the skip counter.
    for path in candidate_paths {
        match load_recipe_book_line_scan_sample(&path) {
            Ok(sample) => samples.push(sample),
            Err(_) => skipped_file_count += 1,
        }
    }
    let files_with_probe_count = samples.len();
    // Global nonzero counts are taken before the samples are regrouped below.
    let files_with_any_nonzero_modes_count = samples
        .iter()
        .filter(|sample| !sample.nonzero_mode_paths.is_empty())
        .count();
    let files_with_any_nonzero_supplied_tokens_count = samples
        .iter()
        .filter(|sample| !sample.nonzero_supplied_token_paths.is_empty())
        .count();
    let files_with_any_nonzero_demanded_tokens_count = samples
        .iter()
        .filter(|sample| !sample.nonzero_demanded_token_paths.is_empty())
        .count();
    // Group by profile family; BTreeMap keeps the family order deterministic
    // in the emitted JSON.
    let mut grouped = BTreeMap::<String, Vec<RuntimeRecipeBookLineScanSample>>::new();
    for sample in samples {
        grouped
            .entry(sample.profile_family.clone())
            .or_default()
            .push(sample);
    }
    let family_summaries = grouped
        .into_iter()
        .map(
            |(profile_family, samples)| RuntimeRecipeBookLineFamilySummary {
                profile_family,
                // BTreeSet round-trip deduplicates and sorts the source kinds.
                source_kinds: samples
                    .iter()
                    .map(|sample| sample.source_kind.clone())
                    .collect::<BTreeSet<_>>()
                    .into_iter()
                    .collect(),
                file_count: samples.len(),
                files_with_any_nonzero_modes_count: samples
                    .iter()
                    .filter(|sample| !sample.nonzero_mode_paths.is_empty())
                    .count(),
                files_with_any_nonzero_supplied_tokens_count: samples
                    .iter()
                    .filter(|sample| !sample.nonzero_supplied_token_paths.is_empty())
                    .count(),
                files_with_any_nonzero_demanded_tokens_count: samples
                    .iter()
                    .filter(|sample| !sample.nonzero_demanded_token_paths.is_empty())
                    .count(),
                // "Stable" paths are those nonzero in every file of the family.
                stable_nonzero_mode_paths: intersect_nonzero_recipe_line_paths(
                    samples.iter().map(|sample| &sample.nonzero_mode_paths),
                ),
                stable_nonzero_supplied_token_paths: intersect_nonzero_recipe_line_paths(
                    samples
                        .iter()
                        .map(|sample| &sample.nonzero_supplied_token_paths),
                ),
                stable_nonzero_demanded_token_paths: intersect_nonzero_recipe_line_paths(
                    samples
                        .iter()
                        .map(|sample| &sample.nonzero_demanded_token_paths),
                ),
                mode_summaries: build_recipe_line_field_summaries(
                    samples.iter().map(|sample| &sample.nonzero_mode_paths),
                ),
                supplied_token_summaries: build_recipe_line_field_summaries(
                    samples
                        .iter()
                        .map(|sample| &sample.nonzero_supplied_token_paths),
                ),
                demanded_token_summaries: build_recipe_line_field_summaries(
                    samples
                        .iter()
                        .map(|sample| &sample.nonzero_demanded_token_paths),
                ),
                // Cap the example list so the report stays readable.
                sample_paths: samples
                    .iter()
                    .take(12)
                    .map(|sample| sample.path.clone())
                    .collect(),
            },
        )
        .collect::<Vec<_>>();
    let report = RuntimeRecipeBookLineScanReport {
        root_path: root_path.display().to_string(),
        file_count,
        files_with_probe_count,
        files_with_any_nonzero_modes_count,
        files_with_any_nonzero_supplied_tokens_count,
        files_with_any_nonzero_demanded_tokens_count,
        skipped_file_count,
        family_summaries,
    };
    println!("{}", serde_json::to_string_pretty(&report)?);
    Ok(())
}
/// Exports the packed-profile block of `smp_path` as a pretty-printed JSON
/// document written to `output_path`, then prints a short JSON report of what
/// was exported to stdout.
fn run_runtime_export_profile_block(
    smp_path: &Path,
    output_path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let inspection = inspect_smp_file(smp_path)?;
    let document = build_profile_block_export_document(smp_path, &inspection)?;
    let serialized = serde_json::to_vec_pretty(&document)?;
    fs::write(output_path, serialized)?;
    let report = RuntimeProfileBlockExportReport {
        output_path: output_path.display().to_string(),
        profile_kind: document.profile_kind,
        profile_family: document.profile_family,
    };
    let rendered = serde_json::to_string_pretty(&report)?;
    println!("{rendered}");
    Ok(())
}
/// Loads the classic rehydrate packed-profile probe from an SMP file and
/// flattens it into a `RuntimeClassicProfileSample`.
fn load_classic_profile_sample(
    smp_path: &Path,
) -> Result<RuntimeClassicProfileSample, Box<dyn std::error::Error>> {
    let inspection = inspect_smp_file(smp_path)?;
    let probe = match inspection.classic_rehydrate_profile_probe {
        Some(probe) => probe,
        None => {
            return Err(format!(
                "{} did not expose a classic rehydrate packed-profile block",
                smp_path.display()
            )
            .into());
        }
    };
    Ok(RuntimeClassicProfileSample {
        path: smp_path.display().to_string(),
        profile_family: probe.profile_family,
        progress_32dc_offset: probe.progress_32dc_offset,
        progress_3714_offset: probe.progress_3714_offset,
        progress_3715_offset: probe.progress_3715_offset,
        packed_profile_offset: probe.packed_profile_offset,
        packed_profile_len: probe.packed_profile_len,
        packed_profile_block: probe.packed_profile_block,
    })
}
/// Loads the RT3 1.05 packed-profile probe from an SMP file and flattens it
/// into a `RuntimeRt3105ProfileSample`.
fn load_rt3_105_profile_sample(
    smp_path: &Path,
) -> Result<RuntimeRt3105ProfileSample, Box<dyn std::error::Error>> {
    let inspection = inspect_smp_file(smp_path)?;
    let Some(probe) = inspection.rt3_105_packed_profile_probe else {
        return Err(format!(
            "{} did not expose an RT3 1.05 packed-profile block",
            smp_path.display()
        )
        .into());
    };
    Ok(RuntimeRt3105ProfileSample {
        path: smp_path.display().to_string(),
        profile_family: probe.profile_family,
        packed_profile_offset: probe.packed_profile_offset,
        packed_profile_len: probe.packed_profile_len,
        packed_profile_block: probe.packed_profile_block,
    })
}
/// Loads the RT3 1.05 candidate-availability table probe from an SMP file and
/// flattens it into a `RuntimeCandidateTableSample`, mapping each entry name
/// to its availability dword.
fn load_candidate_table_sample(
    smp_path: &Path,
) -> Result<RuntimeCandidateTableSample, Box<dyn std::error::Error>> {
    let inspection = inspect_smp_file(smp_path)?;
    let Some(probe) = inspection.rt3_105_save_name_table_probe else {
        return Err(format!(
            "{} did not expose an RT3 1.05 candidate-availability table",
            smp_path.display()
        )
        .into());
    };
    // Consume the entry list into (name, availability) pairs up front.
    let availability_by_name = probe
        .entries
        .into_iter()
        .map(|entry| (entry.text, entry.availability_dword))
        .collect();
    Ok(RuntimeCandidateTableSample {
        path: smp_path.display().to_string(),
        profile_family: probe.profile_family,
        source_kind: probe.source_kind,
        semantic_family: probe.semantic_family,
        header_word_0_hex: probe.header_word_0_hex,
        header_word_1_hex: probe.header_word_1_hex,
        header_word_2_hex: probe.header_word_2_hex,
        observed_entry_count: probe.observed_entry_count,
        zero_trailer_entry_count: probe.zero_trailer_entry_count,
        nonzero_trailer_entry_count: probe.nonzero_trailer_entry_count,
        zero_trailer_entry_names: probe.zero_trailer_entry_names,
        footer_progress_word_0_hex: probe.footer_progress_word_0_hex,
        footer_progress_word_1_hex: probe.footer_progress_word_1_hex,
        availability_by_name,
    })
}
/// Loads the grounded recipe-book summary probe from an SMP file and flattens
/// its nested book/line structure into flat, path-keyed maps (keys shaped like
/// `book03` and `book03.line01`) suitable for JSON diffing.
fn load_recipe_book_line_sample(
    smp_path: &Path,
) -> Result<RuntimeRecipeBookLineSample, Box<dyn std::error::Error>> {
    let inspection = inspect_smp_file(smp_path)?;
    let probe = inspection.recipe_book_summary_probe.ok_or_else(|| {
        format!(
            "{} did not expose a grounded recipe-book summary block",
            smp_path.display()
        )
    })?;
    // Zero-padded keys keep BTreeMap's lexicographic order equal to numeric
    // order for indices below 100.
    let mut book_head_kind_by_index = BTreeMap::new();
    let mut book_line_area_kind_by_index = BTreeMap::new();
    let mut max_annual_production_word_hex_by_book = BTreeMap::new();
    let mut line_kind_by_path = BTreeMap::new();
    let mut mode_word_hex_by_path = BTreeMap::new();
    let mut annual_amount_word_hex_by_path = BTreeMap::new();
    let mut supplied_cargo_token_word_hex_by_path = BTreeMap::new();
    let mut demanded_cargo_token_word_hex_by_path = BTreeMap::new();
    for book in &probe.books {
        let book_key = format!("book{:02}", book.book_index);
        book_head_kind_by_index.insert(book_key.clone(), book.head_kind.clone());
        book_line_area_kind_by_index.insert(book_key.clone(), book.line_area_kind.clone());
        max_annual_production_word_hex_by_book.insert(
            book_key.clone(),
            book.max_annual_production_word_hex.clone(),
        );
        // Each line is addressed by the composite "bookNN.lineMM" path.
        for line in &book.lines {
            let line_key = format!("{book_key}.line{:02}", line.line_index);
            line_kind_by_path.insert(line_key.clone(), line.line_kind.clone());
            mode_word_hex_by_path.insert(line_key.clone(), line.mode_word_hex.clone());
            annual_amount_word_hex_by_path
                .insert(line_key.clone(), line.annual_amount_word_hex.clone());
            supplied_cargo_token_word_hex_by_path
                .insert(line_key.clone(), line.supplied_cargo_token_word_hex.clone());
            demanded_cargo_token_word_hex_by_path
                .insert(line_key.clone(), line.demanded_cargo_token_word_hex.clone());
        }
    }
    Ok(RuntimeRecipeBookLineSample {
        path: smp_path.display().to_string(),
        profile_family: probe.profile_family,
        source_kind: probe.source_kind,
        book_count: probe.book_count,
        book_stride_hex: probe.book_stride_hex,
        line_count: probe.line_count,
        line_stride_hex: probe.line_stride_hex,
        book_head_kind_by_index,
        book_line_area_kind_by_index,
        max_annual_production_word_hex_by_book,
        line_kind_by_path,
        mode_word_hex_by_path,
        annual_amount_word_hex_by_path,
        supplied_cargo_token_word_hex_by_path,
        demanded_cargo_token_word_hex_by_path,
    })
}
fn load_recipe_book_line_scan_sample(
smp_path: &Path,
) -> Result<RuntimeRecipeBookLineScanSample, Box<dyn std::error::Error>> {
let sample = load_recipe_book_line_sample(smp_path)?;
Ok(RuntimeRecipeBookLineScanSample {
path: sample.path,
profile_family: sample.profile_family,
source_kind: sample.source_kind,
nonzero_mode_paths: sample
.mode_word_hex_by_path
.into_iter()
.filter(|(_, value)| value != "0x00000000")
.collect(),
nonzero_supplied_token_paths: sample
.supplied_cargo_token_word_hex_by_path
.into_iter()
.filter(|(_, value)| value != "0x00000000")
.collect(),
nonzero_demanded_token_paths: sample
.demanded_cargo_token_word_hex_by_path
.into_iter()
.filter(|(_, value)| value != "0x00000000")
.collect(),
})
}
fn load_setup_payload_core_sample(
smp_path: &Path,
) -> Result<RuntimeSetupPayloadCoreSample, Box<dyn std::error::Error>> {
let bytes = fs::read(smp_path)?;
let extension = smp_path
.extension()
.and_then(|ext| ext.to_str())
.map(|ext| ext.to_ascii_lowercase())
.unwrap_or_default();
let inferred_profile_family =
classify_candidate_table_header_profile(Some(extension.clone()), &bytes);
let candidate_header_word_0 = read_u32_le(&bytes, 0x6a70);
let candidate_header_word_1 = read_u32_le(&bytes, 0x6a74);
Ok(RuntimeSetupPayloadCoreSample {
path: smp_path.display().to_string(),
file_extension: extension,
inferred_profile_family,
payload_word_0x14: read_u16_le(&bytes, 0x14)
.ok_or_else(|| format!("{} missing setup payload word +0x14", smp_path.display()))?,
payload_word_0x14_hex: format!(
"0x{:04x}",
read_u16_le(&bytes, 0x14).ok_or_else(|| format!(
"{} missing setup payload word +0x14",
smp_path.display()
))?
),
payload_byte_0x20: bytes
.get(0x20)
.copied()
.ok_or_else(|| format!("{} missing setup payload byte +0x20", smp_path.display()))?,
payload_byte_0x20_hex: format!(
"0x{:02x}",
bytes.get(0x20).copied().ok_or_else(|| format!(
"{} missing setup payload byte +0x20",
smp_path.display()
))?
),
marker_bytes_0x2c9_0x2d0_hex: bytes
.get(0x2c9..0x2d1)
.map(hex_encode)
.ok_or_else(|| format!("{} missing setup payload marker bytes", smp_path.display()))?,
row_category_byte_0x31a: bytes
.get(0x31a)
.copied()
.ok_or_else(|| format!("{} missing setup payload byte +0x31a", smp_path.display()))?,
row_category_byte_0x31a_hex: format!(
"0x{:02x}",
bytes.get(0x31a).copied().ok_or_else(|| format!(
"{} missing setup payload byte +0x31a",
smp_path.display()
))?
),
row_visibility_byte_0x31b: bytes
.get(0x31b)
.copied()
.ok_or_else(|| format!("{} missing setup payload byte +0x31b", smp_path.display()))?,
row_visibility_byte_0x31b_hex: format!(
"0x{:02x}",
bytes.get(0x31b).copied().ok_or_else(|| format!(
"{} missing setup payload byte +0x31b",
smp_path.display()
))?
),
row_visibility_byte_0x31c: bytes
.get(0x31c)
.copied()
.ok_or_else(|| format!("{} missing setup payload byte +0x31c", smp_path.display()))?,
row_visibility_byte_0x31c_hex: format!(
"0x{:02x}",
bytes.get(0x31c).copied().ok_or_else(|| format!(
"{} missing setup payload byte +0x31c",
smp_path.display()
))?
),
row_count_word_0x3ae: read_u16_le(&bytes, 0x3ae)
.ok_or_else(|| format!("{} missing setup payload word +0x3ae", smp_path.display()))?,
row_count_word_0x3ae_hex: format!(
"0x{:04x}",
read_u16_le(&bytes, 0x3ae).ok_or_else(|| format!(
"{} missing setup payload word +0x3ae",
smp_path.display()
))?
),
payload_word_0x3b2: read_u16_le(&bytes, 0x3b2)
.ok_or_else(|| format!("{} missing setup payload word +0x3b2", smp_path.display()))?,
payload_word_0x3b2_hex: format!(
"0x{:04x}",
read_u16_le(&bytes, 0x3b2).ok_or_else(|| format!(
"{} missing setup payload word +0x3b2",
smp_path.display()
))?
),
payload_word_0x3ba: read_u16_le(&bytes, 0x3ba)
.ok_or_else(|| format!("{} missing setup payload word +0x3ba", smp_path.display()))?,
payload_word_0x3ba_hex: format!(
"0x{:04x}",
read_u16_le(&bytes, 0x3ba).ok_or_else(|| format!(
"{} missing setup payload word +0x3ba",
smp_path.display()
))?
),
candidate_header_word_0_hex: candidate_header_word_0.map(|value| format!("0x{value:08x}")),
candidate_header_word_1_hex: candidate_header_word_1.map(|value| format!("0x{value:08x}")),
})
}
/// Extracts the setup/launch payload fields from the head of an SMP-family
/// file: the campaign-progress byte at +0x22, the 16-byte campaign-selector
/// token block at +0x23..+0x33, and the selector byte at +0x33.
fn load_setup_launch_payload_sample(
    smp_path: &Path,
) -> Result<RuntimeSetupLaunchPayloadSample, Box<dyn std::error::Error>> {
    let bytes = fs::read(smp_path)?;
    let extension = smp_path
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.to_ascii_lowercase())
        .unwrap_or_default();
    let inferred_profile_family =
        classify_candidate_table_header_profile(Some(extension.clone()), &bytes);
    let launch_flag_byte_0x22 = bytes
        .get(0x22)
        .copied()
        .ok_or_else(|| format!("{} missing setup launch byte +0x22", smp_path.display()))?;
    let launch_selector_byte_0x33 = bytes
        .get(0x33)
        .copied()
        .ok_or_else(|| format!("{} missing setup launch byte +0x33", smp_path.display()))?;
    // 16-byte selector block between the two flag bytes.
    let token_block = bytes
        .get(0x23..0x33)
        .ok_or_else(|| format!("{} missing setup launch token block", smp_path.display()))?;
    let campaign_progress_in_known_range =
        (launch_flag_byte_0x22 as usize) < CAMPAIGN_SCENARIO_COUNT;
    let campaign_progress_scenario_name = campaign_progress_in_known_range
        .then(|| OBSERVED_CAMPAIGN_SCENARIO_NAMES[launch_flag_byte_0x22 as usize].to_string());
    // NOTE(review): the page buckets below assume a 5/5/3/3 grouping of the
    // first 16 scenarios — confirm against the campaign selection UI.
    let campaign_progress_page_index = match launch_flag_byte_0x22 {
        0..=4 => Some(1),
        5..=9 => Some(2),
        10..=12 => Some(3),
        13..=15 => Some(4),
        _ => None,
    };
    // NOTE(review): `token_block` is exactly 16 bytes; indexing it by scenario
    // position assumes OBSERVED_CAMPAIGN_SCENARIO_NAMES has at most 16
    // entries, otherwise this panics — confirm the constant's length.
    let campaign_selector_values = OBSERVED_CAMPAIGN_SCENARIO_NAMES
        .iter()
        .enumerate()
        .map(|(index, name)| (name.to_string(), token_block[index]))
        .collect::<BTreeMap<_, _>>();
    let nonzero_campaign_selector_values = campaign_selector_values
        .iter()
        .filter_map(|(name, value)| (*value != 0).then_some((name.clone(), *value)))
        .collect::<BTreeMap<_, _>>();
    Ok(RuntimeSetupLaunchPayloadSample {
        path: smp_path.display().to_string(),
        file_extension: extension,
        inferred_profile_family,
        launch_flag_byte_0x22,
        launch_flag_byte_0x22_hex: format!("0x{launch_flag_byte_0x22:02x}"),
        campaign_progress_in_known_range,
        campaign_progress_scenario_name,
        campaign_progress_page_index,
        launch_selector_byte_0x33,
        launch_selector_byte_0x33_hex: format!("0x{launch_selector_byte_0x33:02x}"),
        launch_token_block_0x23_0x32_hex: hex_encode(token_block),
        campaign_selector_values,
        nonzero_campaign_selector_values,
    })
}
/// Converts a post-special-conditions scalar scan sample into its reporting
/// form, rendering each nonzero relative offset as a `0x…` hex string.
fn load_post_special_conditions_scalar_sample(
    smp_path: &Path,
) -> Result<RuntimePostSpecialConditionsScalarSample, Box<dyn std::error::Error>> {
    let scan = load_post_special_conditions_scalar_scan_sample(smp_path)?;
    let nonzero_relative_offset_hexes = scan
        .nonzero_relative_offsets
        .into_iter()
        .map(|offset| format!("0x{offset:x}"))
        .collect();
    Ok(RuntimePostSpecialConditionsScalarSample {
        path: scan.path,
        profile_family: scan.profile_family,
        source_kind: scan.source_kind,
        nonzero_relative_offset_hexes,
        values_by_relative_offset_hex: scan.values_by_relative_offset_hex,
    })
}
/// Scans `smp_path` for the fixed-range RT3 1.05 candidate-availability table
/// and reports which entries carry a zero availability trailer.
///
/// The layout is fully fixed: header at 0x6a70, entries at 0x6ad1 with a
/// 0x22-byte stride, block end at 0x73c0. Files without the exact header
/// signature are rejected.
///
/// Fix over the previous version: the NUL terminator for an entry name is now
/// searched only inside the name area (the first `entry_stride - 4` bytes of
/// the entry). Previously the whole 0x22-byte entry was scanned, so a name
/// that filled the name area could spill bytes of the trailing availability
/// dword into `text`.
fn load_candidate_table_header_scan_sample(
    smp_path: &Path,
) -> Result<RuntimeCandidateTableHeaderScanSample, Box<dyn std::error::Error>> {
    let bytes = fs::read(smp_path)?;
    // Fixed-layout constants for the RT3 1.05 candidate table range.
    let header_offset = 0x6a70usize;
    let entries_offset = 0x6ad1usize;
    let block_end_offset = 0x73c0usize;
    let entry_stride = 0x22usize;
    if bytes.len() < block_end_offset {
        return Err(format!(
            "{} is too small for the fixed candidate table range",
            smp_path.display()
        )
        .into());
    }
    if !matches_candidate_table_header_bytes(&bytes, header_offset) {
        return Err(format!(
            "{} does not contain the fixed candidate table header",
            smp_path.display()
        )
        .into());
    }
    let observed_entry_capacity = read_u32_le(&bytes, header_offset + 0x1c)
        .ok_or_else(|| format!("{} is missing candidate table capacity", smp_path.display()))?
        as usize;
    let observed_entry_count = read_u32_le(&bytes, header_offset + 0x20)
        .ok_or_else(|| format!("{} is missing candidate table count", smp_path.display()))?
        as usize;
    if observed_entry_capacity < observed_entry_count {
        return Err(format!(
            "{} has invalid candidate table capacity/count {observed_entry_capacity}/{observed_entry_count}",
            smp_path.display()
        )
        .into());
    }
    // Overflow-checked end offset keeps a hostile count from wrapping.
    let entries_end_offset = entries_offset
        .checked_add(
            observed_entry_count
                .checked_mul(entry_stride)
                .ok_or("candidate table length overflow")?,
        )
        .ok_or("candidate table end overflow")?;
    if entries_end_offset > block_end_offset {
        return Err(format!(
            "{} candidate table overruns fixed block end",
            smp_path.display()
        )
        .into());
    }
    let mut zero_trailer_entry_names = Vec::new();
    for index in 0..observed_entry_count {
        let offset = entries_offset + index * entry_stride;
        let chunk = &bytes[offset..offset + entry_stride];
        // Each entry is a NUL-padded name followed by a 4-byte availability
        // dword; only the name area may be scanned for the terminator.
        let name_area = &chunk[..entry_stride - 4];
        let nul_index = name_area
            .iter()
            .position(|byte| *byte == 0)
            .unwrap_or(name_area.len());
        let text = std::str::from_utf8(&name_area[..nul_index]).map_err(|_| {
            format!(
                "{} contains invalid UTF-8 in candidate table",
                smp_path.display()
            )
        })?;
        let availability = read_u32_le(&bytes, offset + entry_stride - 4).ok_or_else(|| {
            format!(
                "{} is missing candidate availability dword",
                smp_path.display()
            )
        })?;
        if availability == 0 {
            zero_trailer_entry_names.push(text.to_string());
        }
    }
    let profile_family = classify_candidate_table_header_profile(
        smp_path
            .extension()
            .and_then(|ext| ext.to_str())
            .map(|ext| ext.to_ascii_lowercase()),
        &bytes,
    );
    let source_kind = match smp_path
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.to_ascii_lowercase())
        .as_deref()
    {
        Some("gmp") => "map-fixed-catalog-range",
        Some("gms") => "save-fixed-catalog-range",
        _ => "fixed-catalog-range",
    }
    .to_string();
    Ok(RuntimeCandidateTableHeaderScanSample {
        path: smp_path.display().to_string(),
        profile_family,
        source_kind,
        header_word_0_hex: format!(
            "0x{:08x}",
            read_u32_le(&bytes, header_offset).ok_or("missing candidate header word 0")?
        ),
        header_word_1_hex: format!(
            "0x{:08x}",
            read_u32_le(&bytes, header_offset + 4).ok_or("missing candidate header word 1")?
        ),
        zero_trailer_entry_count: zero_trailer_entry_names.len(),
        zero_trailer_entry_names,
    })
}
/// Reads the fixed special-conditions table from an SMP-family file and
/// reports which visible condition slots are enabled (nonzero).
///
/// Fails when the file is too small for the table or when the hidden sentinel
/// slot does not hold the expected value `1`.
fn load_special_conditions_scan_sample(
    smp_path: &Path,
) -> Result<RuntimeSpecialConditionsScanSample, Box<dyn std::error::Error>> {
    let bytes = fs::read(smp_path)?;
    let table_end = SPECIAL_CONDITIONS_OFFSET
        .checked_add(SPECIAL_CONDITION_COUNT * 4)
        .ok_or("special-conditions table overflow")?;
    if bytes.len() < table_end {
        return Err(format!(
            "{} is too small for the fixed special-conditions table",
            smp_path.display()
        )
        .into());
    }
    let sentinel_offset = SPECIAL_CONDITIONS_OFFSET + SPECIAL_CONDITION_HIDDEN_SENTINEL_SLOT * 4;
    let hidden_sentinel = read_u32_le(&bytes, sentinel_offset).ok_or_else(|| {
        format!(
            "{} is missing the hidden special-condition sentinel",
            smp_path.display()
        )
    })?;
    if hidden_sentinel != 1 {
        return Err(format!(
            "{} does not match the fixed special-conditions table sentinel",
            smp_path.display()
        )
        .into());
    }
    // Collect "slot N: label" strings for every enabled visible slot.
    let mut enabled_visible_labels = Vec::new();
    for slot_index in 0..SPECIAL_CONDITION_HIDDEN_SENTINEL_SLOT {
        match read_u32_le(&bytes, SPECIAL_CONDITIONS_OFFSET + slot_index * 4) {
            Some(value) if value != 0 => enabled_visible_labels.push(format!(
                "slot {}: {}",
                slot_index, SPECIAL_CONDITION_LABELS[slot_index]
            )),
            _ => {}
        }
    }
    let extension = smp_path
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.to_ascii_lowercase())
        .unwrap_or_default();
    let profile_family = classify_candidate_table_header_profile(Some(extension.clone()), &bytes);
    let source_kind = match extension.as_str() {
        "gmp" => "map-fixed-special-conditions-range",
        "gms" => "save-fixed-special-conditions-range",
        "gmx" => "sandbox-fixed-special-conditions-range",
        _ => "fixed-special-conditions-range",
    }
    .to_string();
    Ok(RuntimeSpecialConditionsScanSample {
        path: smp_path.display().to_string(),
        profile_family,
        source_kind,
        enabled_visible_count: enabled_visible_labels.len(),
        enabled_visible_labels,
    })
}
/// Reads the fixed post-special-conditions scalar window of an SMP-family
/// file and records every nonzero little-endian dword in it, keyed by the
/// offset relative to the start of the window.
///
/// The file must be large enough for both the window and the
/// special-conditions table, and the hidden sentinel slot must hold `1`
/// (presumably a fixed-layout fingerprint — confirm).
fn load_post_special_conditions_scalar_scan_sample(
    smp_path: &Path,
) -> Result<RuntimePostSpecialConditionsScalarScanSample, Box<dyn std::error::Error>> {
    let bytes = fs::read(smp_path)?;
    let table_len = SPECIAL_CONDITION_COUNT * 4;
    let table_end = SPECIAL_CONDITIONS_OFFSET
        .checked_add(table_len)
        .ok_or("special-conditions table overflow")?;
    if bytes.len() < POST_SPECIAL_CONDITIONS_SCALAR_END_OFFSET || bytes.len() < table_end {
        return Err(format!(
            "{} is too small for the fixed post-special-conditions scalar window",
            smp_path.display()
        )
        .into());
    }
    let hidden_sentinel = read_u32_le(
        &bytes,
        SPECIAL_CONDITIONS_OFFSET + SPECIAL_CONDITION_HIDDEN_SENTINEL_SLOT * 4,
    )
    .ok_or_else(|| {
        format!(
            "{} is missing the hidden special-condition sentinel",
            smp_path.display()
        )
    })?;
    if hidden_sentinel != 1 {
        return Err(format!(
            "{} does not match the fixed special-conditions table sentinel",
            smp_path.display()
        )
        .into());
    }
    let extension = smp_path
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.to_ascii_lowercase())
        .unwrap_or_default();
    let profile_family = classify_candidate_table_header_profile(Some(extension.clone()), &bytes);
    // The source kind encodes which container family the bytes came from.
    let source_kind = match extension.as_str() {
        "gmp" => "map-post-special-conditions-window",
        "gms" => "save-post-special-conditions-window",
        "gmx" => "sandbox-post-special-conditions-window",
        _ => "post-special-conditions-window",
    }
    .to_string();
    let mut nonzero_relative_offsets = Vec::new();
    let mut values_by_relative_offset_hex = BTreeMap::new();
    // Walk the window one aligned dword at a time; zero values are skipped so
    // the report only carries the populated scalars.
    for offset in (POST_SPECIAL_CONDITIONS_SCALAR_OFFSET..POST_SPECIAL_CONDITIONS_SCALAR_END_OFFSET)
        .step_by(4)
    {
        let value = read_u32_le(&bytes, offset).ok_or_else(|| {
            format!(
                "{} is truncated inside the fixed post-special-conditions scalar window",
                smp_path.display()
            )
        })?;
        if value == 0 {
            continue;
        }
        let relative_offset = offset - POST_SPECIAL_CONDITIONS_SCALAR_OFFSET;
        nonzero_relative_offsets.push(relative_offset);
        values_by_relative_offset_hex
            .insert(format!("0x{relative_offset:x}"), format!("0x{value:08x}"));
    }
    Ok(RuntimePostSpecialConditionsScalarScanSample {
        path: smp_path.display().to_string(),
        profile_family,
        source_kind,
        nonzero_relative_offsets,
        values_by_relative_offset_hex,
    })
}
/// Reads the fixed post-special-conditions tail window of an SMP-family file
/// and records every nonzero little-endian dword, keyed by its offset relative
/// to the start of the tail window.
///
/// Requires the file to cover both the window and the special-conditions
/// table, and the hidden sentinel slot to hold `1`.
fn load_post_special_conditions_tail_scan_sample(
    smp_path: &Path,
) -> Result<RuntimePostSpecialConditionsTailScanSample, Box<dyn std::error::Error>> {
    let bytes = fs::read(smp_path)?;
    let table_end = SPECIAL_CONDITIONS_OFFSET
        .checked_add(SPECIAL_CONDITION_COUNT * 4)
        .ok_or("special-conditions table overflow")?;
    if bytes.len() < POST_SPECIAL_CONDITIONS_SCALAR_END_OFFSET || bytes.len() < table_end {
        return Err(format!(
            "{} is too small for the fixed post-special-conditions tail window",
            smp_path.display()
        )
        .into());
    }
    let sentinel_offset = SPECIAL_CONDITIONS_OFFSET + SPECIAL_CONDITION_HIDDEN_SENTINEL_SLOT * 4;
    let hidden_sentinel = read_u32_le(&bytes, sentinel_offset).ok_or_else(|| {
        format!(
            "{} is missing the hidden special-condition sentinel",
            smp_path.display()
        )
    })?;
    if hidden_sentinel != 1 {
        return Err(format!(
            "{} does not match the fixed special-conditions table sentinel",
            smp_path.display()
        )
        .into());
    }
    let extension = smp_path
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.to_ascii_lowercase())
        .unwrap_or_default();
    let profile_family = classify_candidate_table_header_profile(Some(extension.clone()), &bytes);
    let source_kind = match extension.as_str() {
        "gmp" => "map-post-special-conditions-tail",
        "gms" => "save-post-special-conditions-tail",
        "gmx" => "sandbox-post-special-conditions-tail",
        _ => "post-special-conditions-tail",
    }
    .to_string();
    let mut nonzero_relative_offsets = Vec::new();
    let mut values_by_relative_offset_hex = BTreeMap::new();
    // Walk the tail window one aligned dword at a time.
    let mut offset = POST_SPECIAL_CONDITIONS_SCALAR_TAIL_OFFSET;
    while offset < POST_SPECIAL_CONDITIONS_SCALAR_END_OFFSET {
        let value = read_u32_le(&bytes, offset).ok_or_else(|| {
            format!(
                "{} is truncated inside the fixed post-special-conditions tail window",
                smp_path.display()
            )
        })?;
        if value != 0 {
            let relative_offset = offset - POST_SPECIAL_CONDITIONS_SCALAR_TAIL_OFFSET;
            nonzero_relative_offsets.push(relative_offset);
            values_by_relative_offset_hex
                .insert(format!("0x{relative_offset:x}"), format!("0x{value:08x}"));
        }
        offset += 4;
    }
    Ok(RuntimePostSpecialConditionsTailScanSample {
        path: smp_path.display().to_string(),
        profile_family,
        source_kind,
        nonzero_relative_offsets,
        values_by_relative_offset_hex,
    })
}
/// Reads the fixed aligned runtime-rule band (a run of dwords starting at the
/// special-conditions table) and records every nonzero band index with its
/// value rendered as `0x…` hex.
///
/// Requires the file to cover the whole band and the hidden sentinel slot to
/// hold `1`.
fn load_aligned_runtime_rule_band_scan_sample(
    smp_path: &Path,
) -> Result<RuntimeAlignedRuntimeRuleBandScanSample, Box<dyn std::error::Error>> {
    let bytes = fs::read(smp_path)?;
    if bytes.len() < SMP_ALIGNED_RUNTIME_RULE_END_OFFSET {
        return Err(format!(
            "{} is too small for the fixed aligned runtime-rule band",
            smp_path.display()
        )
        .into());
    }
    let sentinel_offset = SPECIAL_CONDITIONS_OFFSET + SPECIAL_CONDITION_HIDDEN_SENTINEL_SLOT * 4;
    let hidden_sentinel = read_u32_le(&bytes, sentinel_offset).ok_or_else(|| {
        format!(
            "{} is missing the hidden special-condition sentinel",
            smp_path.display()
        )
    })?;
    if hidden_sentinel != 1 {
        return Err(format!(
            "{} does not match the fixed special-conditions table sentinel",
            smp_path.display()
        )
        .into());
    }
    let extension = smp_path
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.to_ascii_lowercase())
        .unwrap_or_default();
    let profile_family = classify_candidate_table_header_profile(Some(extension.clone()), &bytes);
    let source_kind = match extension.as_str() {
        "gmp" => "map-smp-aligned-runtime-rule-band",
        "gms" => "save-smp-aligned-runtime-rule-band",
        "gmx" => "sandbox-smp-aligned-runtime-rule-band",
        _ => "smp-aligned-runtime-rule-band",
    }
    .to_string();
    let mut nonzero_band_indices = Vec::new();
    let mut values_by_band_index = BTreeMap::new();
    for band_index in 0..SMP_ALIGNED_RUNTIME_RULE_DWORD_COUNT {
        let band_offset = SPECIAL_CONDITIONS_OFFSET + band_index * 4;
        let value = read_u32_le(&bytes, band_offset).ok_or_else(|| {
            format!(
                "{} is truncated inside the fixed aligned runtime-rule band",
                smp_path.display()
            )
        })?;
        if value != 0 {
            nonzero_band_indices.push(band_index);
            values_by_band_index.insert(band_index, format!("0x{value:08x}"));
        }
    }
    Ok(RuntimeAlignedRuntimeRuleBandScanSample {
        path: smp_path.display().to_string(),
        profile_family,
        source_kind,
        nonzero_band_indices,
        values_by_band_index,
    })
}
/// Recursively gathers `.gmp`/`.gms` files (case-insensitive extension) under
/// `root_path` into `out`.
///
/// Symlinks are never followed, permission-denied entries are silently
/// skipped, and all other I/O errors are propagated.
fn collect_candidate_table_input_paths(
    root_path: &Path,
    out: &mut Vec<PathBuf>,
) -> Result<(), Box<dyn std::error::Error>> {
    let has_candidate_extension = |path: &Path| {
        path.extension()
            .and_then(|ext| ext.to_str())
            .map(|ext| ext.to_ascii_lowercase())
            .is_some_and(|ext| ext == "gmp" || ext == "gms")
    };
    let metadata = match fs::symlink_metadata(root_path) {
        Ok(metadata) => metadata,
        Err(err) if err.kind() == std::io::ErrorKind::PermissionDenied => return Ok(()),
        Err(err) => return Err(err.into()),
    };
    if metadata.file_type().is_symlink() {
        return Ok(());
    }
    if root_path.is_file() {
        if has_candidate_extension(root_path) {
            out.push(root_path.to_path_buf());
        }
        return Ok(());
    }
    let entries = match fs::read_dir(root_path) {
        Ok(entries) => entries,
        Err(err) if err.kind() == std::io::ErrorKind::PermissionDenied => return Ok(()),
        Err(err) => return Err(err.into()),
    };
    for entry in entries {
        let path = entry?.path();
        if path.is_dir() {
            collect_candidate_table_input_paths(&path, out)?;
        } else if has_candidate_extension(&path) {
            out.push(path);
        }
    }
    Ok(())
}
/// Recursively gathers `.gmp`/`.gms`/`.gmx` files (case-insensitive
/// extension) under `root_path` into `out`.
///
/// Symlinks are never followed, permission-denied entries are silently
/// skipped, and all other I/O errors are propagated.
fn collect_special_conditions_input_paths(
    root_path: &Path,
    out: &mut Vec<PathBuf>,
) -> Result<(), Box<dyn std::error::Error>> {
    let is_scan_input = |path: &Path| {
        matches!(
            path.extension()
                .and_then(|ext| ext.to_str())
                .map(|ext| ext.to_ascii_lowercase())
                .as_deref(),
            Some("gmp") | Some("gms") | Some("gmx")
        )
    };
    let metadata = match fs::symlink_metadata(root_path) {
        Ok(metadata) => metadata,
        Err(err) if err.kind() == std::io::ErrorKind::PermissionDenied => return Ok(()),
        Err(err) => return Err(err.into()),
    };
    if metadata.file_type().is_symlink() {
        return Ok(());
    }
    if root_path.is_file() {
        if is_scan_input(root_path) {
            out.push(root_path.to_path_buf());
        }
        return Ok(());
    }
    let entries = match fs::read_dir(root_path) {
        Ok(entries) => entries,
        Err(err) if err.kind() == std::io::ErrorKind::PermissionDenied => return Ok(()),
        Err(err) => return Err(err.into()),
    };
    for entry in entries {
        let path = entry?.path();
        if path.is_dir() {
            collect_special_conditions_input_paths(&path, out)?;
        } else if is_scan_input(&path) {
            out.push(path);
        }
    }
    Ok(())
}
/// Extracts the numeric slot index from a label shaped like `"slot N: …"`,
/// returning `None` when the prefix, colon, or `u8` parse is missing.
fn parse_special_condition_slot_index(label: &str) -> Option<u8> {
    label
        .strip_prefix("slot ")?
        .split_once(':')
        .and_then(|(index_text, _)| index_text.parse().ok())
}
/// Parses a `0x`-prefixed hexadecimal string into a `usize`; `None` when the
/// prefix is absent or the digits are not valid hex.
fn parse_hex_offset(text: &str) -> Option<usize> {
    let digits = text.strip_prefix("0x")?;
    usize::from_str_radix(digits, 16).ok()
}
/// Classifies an aligned runtime-rule band index into its lane kind: known
/// special-condition dwords first, then unlabeled editor-rule dwords, then
/// trailing runtime scalars.
fn aligned_runtime_rule_lane_kind(band_index: usize) -> &'static str {
    match band_index {
        index if index < SPECIAL_CONDITION_COUNT => "known-special-condition-dword",
        index if index < SMP_ALIGNED_RUNTIME_RULE_KNOWN_EDITOR_RULE_COUNT => {
            "unlabeled-editor-rule-dword"
        }
        _ => "trailing-runtime-scalar",
    }
}
fn aligned_runtime_rule_known_label(band_index: usize) -> Option<&'static str> {
if band_index < SPECIAL_CONDITION_LABELS.len() {
Some(SPECIAL_CONDITION_LABELS[band_index])
} else {
None
}
}
fn matches_candidate_table_header_bytes(bytes: &[u8], header_offset: usize) -> bool {
matches!(
(
read_u32_le(bytes, header_offset + 0x08),
read_u32_le(bytes, header_offset + 0x0c),
read_u32_le(bytes, header_offset + 0x10),
read_u32_le(bytes, header_offset + 0x14),
read_u32_le(bytes, header_offset + 0x18),
read_u32_le(bytes, header_offset + 0x1c),
read_u32_le(bytes, header_offset + 0x20),
read_u32_le(bytes, header_offset + 0x24),
read_u32_le(bytes, header_offset + 0x28),
),
(
Some(0x0000332e),
Some(0x00000001),
Some(0x00000022),
Some(0x00000002),
Some(0x00000002),
Some(68),
Some(67),
Some(0x00000000),
Some(0x00000001),
)
)
}
/// Infers the container profile-family label from the file extension and the
/// three discriminating container header words at offsets +8, +12, and +20.
/// Unrecognized combinations fall back to extension-specific "unknown" labels.
fn classify_candidate_table_header_profile(extension: Option<String>, bytes: &[u8]) -> String {
    let ext = extension.as_deref().unwrap_or("");
    let words = (
        read_u32_le(bytes, 8),
        read_u32_le(bytes, 12),
        read_u32_le(bytes, 20),
    );
    let label = match (ext, words) {
        ("gmp", (Some(0x00040001), Some(0x00028000), Some(0x00000771))) => {
            "rt3-105-map-container-v1"
        }
        ("gmp", (Some(0x00040001), Some(0x00018000), Some(0x00000746))) => {
            "rt3-105-scenario-map-container-v1"
        }
        ("gmp", (Some(0x0001c001), Some(0x00018000), Some(0x00000754))) => {
            "rt3-105-alt-map-container-v1"
        }
        ("gms", (Some(0x00040001), Some(0x00028000), Some(0x00000771))) => {
            "rt3-105-save-container-v1"
        }
        ("gms", (Some(0x00040001), Some(0x00018000), Some(0x00000746))) => {
            "rt3-105-scenario-save-container-v1"
        }
        ("gms", (Some(0x0001c001), Some(0x00018000), Some(0x00000754))) => {
            "rt3-105-alt-save-container-v1"
        }
        ("gmp", _) => "map-fixed-catalog-container-unknown",
        ("gms", _) => "save-fixed-catalog-container-unknown",
        _ => "fixed-catalog-container-unknown",
    };
    label.to_string()
}
/// Reads a little-endian `u32` at `offset`, or `None` when fewer than four
/// bytes are available there.
fn read_u32_le(bytes: &[u8], offset: usize) -> Option<u32> {
    bytes
        .get(offset..offset + 4)
        .and_then(|chunk| <[u8; 4]>::try_from(chunk).ok())
        .map(u32::from_le_bytes)
}
/// Reads a little-endian `u16` at `offset`, or `None` when fewer than two
/// bytes are available there.
fn read_u16_le(bytes: &[u8], offset: usize) -> Option<u16> {
    bytes
        .get(offset..offset + 2)
        .and_then(|chunk| <[u8; 2]>::try_from(chunk).ok())
        .map(u16::from_le_bytes)
}
/// Renders a byte slice as a lowercase hexadecimal string, two digits per
/// byte with no separators.
fn hex_encode(bytes: &[u8]) -> String {
    bytes.iter().map(|byte| format!("{byte:02x}")).collect()
}
/// Diffs a set of classic-profile samples against each other: each sample is
/// projected to a JSON value labeled by its path, and every JSON path whose
/// values disagree across samples is reported as a difference.
fn diff_classic_profile_samples(
    samples: &[RuntimeClassicProfileSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let value = serde_json::json!({
            "profile_family": sample.profile_family,
            "progress_32dc_offset": sample.progress_32dc_offset,
            "progress_3714_offset": sample.progress_3714_offset,
            "progress_3715_offset": sample.progress_3715_offset,
            "packed_profile_offset": sample.packed_profile_offset,
            "packed_profile_len": sample.packed_profile_len,
            "packed_profile_block": sample.packed_profile_block,
        });
        labeled_values.push((sample.path.clone(), value));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Diffs a set of RT3 1.05 profile samples against each other, reporting every
/// JSON path whose values disagree across the labeled samples.
fn diff_rt3_105_profile_samples(
    samples: &[RuntimeRt3105ProfileSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let value = serde_json::json!({
            "profile_family": sample.profile_family,
            "packed_profile_offset": sample.packed_profile_offset,
            "packed_profile_len": sample.packed_profile_len,
            "packed_profile_block": sample.packed_profile_block,
        });
        labeled_values.push((sample.path.clone(), value));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Compares all candidate-table samples and returns every field position
/// where at least two samples disagree.
fn diff_candidate_table_samples(
    samples: &[RuntimeCandidateTableSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    // Project each sample into a labelled JSON value for the multi-way differ.
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let projection = serde_json::json!({
            "profile_family": sample.profile_family,
            "source_kind": sample.source_kind,
            "semantic_family": sample.semantic_family,
            "header_word_0_hex": sample.header_word_0_hex,
            "header_word_1_hex": sample.header_word_1_hex,
            "header_word_2_hex": sample.header_word_2_hex,
            "observed_entry_count": sample.observed_entry_count,
            "zero_trailer_entry_count": sample.zero_trailer_entry_count,
            "nonzero_trailer_entry_count": sample.nonzero_trailer_entry_count,
            "zero_trailer_entry_names": sample.zero_trailer_entry_names,
            "footer_progress_word_0_hex": sample.footer_progress_word_0_hex,
            "footer_progress_word_1_hex": sample.footer_progress_word_1_hex,
            "availability_by_name": sample.availability_by_name,
        });
        labeled_values.push((sample.path.clone(), projection));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Compares all recipe-book line samples (including provenance fields) and
/// returns every field position where at least two samples disagree.
fn diff_recipe_book_line_samples(
    samples: &[RuntimeRecipeBookLineSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    // Project each sample into a labelled JSON value for the multi-way differ.
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let projection = serde_json::json!({
            "profile_family": sample.profile_family,
            "source_kind": sample.source_kind,
            "book_count": sample.book_count,
            "book_stride_hex": sample.book_stride_hex,
            "line_count": sample.line_count,
            "line_stride_hex": sample.line_stride_hex,
            "book_head_kind_by_index": sample.book_head_kind_by_index,
            "book_line_area_kind_by_index": sample.book_line_area_kind_by_index,
            "max_annual_production_word_hex_by_book": sample.max_annual_production_word_hex_by_book,
            "line_kind_by_path": sample.line_kind_by_path,
            "mode_word_hex_by_path": sample.mode_word_hex_by_path,
            "annual_amount_word_hex_by_path": sample.annual_amount_word_hex_by_path,
            "supplied_cargo_token_word_hex_by_path": sample.supplied_cargo_token_word_hex_by_path,
            "demanded_cargo_token_word_hex_by_path": sample.demanded_cargo_token_word_hex_by_path,
        });
        labeled_values.push((sample.path.clone(), projection));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Compares recipe-book line samples on content fields only (no
/// profile-family / source-kind provenance), returning every position where
/// at least two samples disagree.
fn diff_recipe_book_line_content_samples(
    samples: &[RuntimeRecipeBookLineSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    // Project each sample into a labelled JSON value for the multi-way differ.
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let projection = serde_json::json!({
            "book_count": sample.book_count,
            "book_stride_hex": sample.book_stride_hex,
            "line_count": sample.line_count,
            "line_stride_hex": sample.line_stride_hex,
            "book_head_kind_by_index": sample.book_head_kind_by_index,
            "book_line_area_kind_by_index": sample.book_line_area_kind_by_index,
            "max_annual_production_word_hex_by_book": sample.max_annual_production_word_hex_by_book,
            "line_kind_by_path": sample.line_kind_by_path,
            "mode_word_hex_by_path": sample.mode_word_hex_by_path,
            "annual_amount_word_hex_by_path": sample.annual_amount_word_hex_by_path,
            "supplied_cargo_token_word_hex_by_path": sample.supplied_cargo_token_word_hex_by_path,
            "demanded_cargo_token_word_hex_by_path": sample.demanded_cargo_token_word_hex_by_path,
        });
        labeled_values.push((sample.path.clone(), projection));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Returns the sorted set of line paths present in *every* supplied map.
///
/// An empty iterator yields an empty result. The previous version carried a
/// redundant `peekable` emptiness guard and rebuilt a full key `BTreeSet` per
/// map; this version seeds from the first map and prunes in place.
fn intersect_nonzero_recipe_line_paths<'a>(
    maps: impl Iterator<Item = &'a BTreeMap<String, String>>,
) -> Vec<String> {
    let mut maps = maps;
    let Some(first) = maps.next() else {
        // No maps at all: the intersection is empty.
        return Vec::new();
    };
    // Seed with the first map's keys, then keep only keys every later map
    // also contains; `BTreeSet` keeps the output sorted.
    let mut stable: BTreeSet<String> = first.keys().cloned().collect();
    for map in maps {
        stable.retain(|key| map.contains_key(key));
    }
    stable.into_iter().collect()
}
/// Aggregates per-line-path statistics across a set of recipe-line value
/// maps: how many maps carry each path, how many distinct values were seen,
/// and up to eight sample values.
fn build_recipe_line_field_summaries<'a>(
    maps: impl Iterator<Item = &'a BTreeMap<String, String>>,
) -> Vec<RuntimeRecipeBookLineFieldSummary> {
    // For each line path: (number of maps containing it, distinct values).
    let mut stats = BTreeMap::<String, (usize, BTreeSet<String>)>::new();
    for map in maps {
        for (line_path, value_hex) in map {
            let entry = stats.entry(line_path.clone()).or_default();
            entry.0 += 1;
            entry.1.insert(value_hex.clone());
        }
    }
    stats
        .into_iter()
        .map(|(line_path, (file_count_present, values))| {
            RuntimeRecipeBookLineFieldSummary {
                line_path,
                file_count_present,
                distinct_value_count: values.len(),
                // Keep at most eight representative values for the report.
                sample_value_hexes: values.iter().take(8).cloned().collect(),
            }
        })
        .collect()
}
/// Compares all setup-payload core samples and returns every field position
/// where at least two samples disagree.
fn diff_setup_payload_core_samples(
    samples: &[RuntimeSetupPayloadCoreSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    // Project each sample into a labelled JSON value for the multi-way differ.
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let projection = serde_json::json!({
            "file_extension": sample.file_extension,
            "inferred_profile_family": sample.inferred_profile_family,
            "payload_word_0x14": sample.payload_word_0x14,
            "payload_word_0x14_hex": sample.payload_word_0x14_hex,
            "payload_byte_0x20": sample.payload_byte_0x20,
            "payload_byte_0x20_hex": sample.payload_byte_0x20_hex,
            "marker_bytes_0x2c9_0x2d0_hex": sample.marker_bytes_0x2c9_0x2d0_hex,
            "row_category_byte_0x31a": sample.row_category_byte_0x31a,
            "row_category_byte_0x31a_hex": sample.row_category_byte_0x31a_hex,
            "row_visibility_byte_0x31b": sample.row_visibility_byte_0x31b,
            "row_visibility_byte_0x31b_hex": sample.row_visibility_byte_0x31b_hex,
            "row_visibility_byte_0x31c": sample.row_visibility_byte_0x31c,
            "row_visibility_byte_0x31c_hex": sample.row_visibility_byte_0x31c_hex,
            "row_count_word_0x3ae": sample.row_count_word_0x3ae,
            "row_count_word_0x3ae_hex": sample.row_count_word_0x3ae_hex,
            "payload_word_0x3b2": sample.payload_word_0x3b2,
            "payload_word_0x3b2_hex": sample.payload_word_0x3b2_hex,
            "payload_word_0x3ba": sample.payload_word_0x3ba,
            "payload_word_0x3ba_hex": sample.payload_word_0x3ba_hex,
            "candidate_header_word_0_hex": sample.candidate_header_word_0_hex,
            "candidate_header_word_1_hex": sample.candidate_header_word_1_hex,
        });
        labeled_values.push((sample.path.clone(), projection));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Compares all setup launch-payload samples and returns every field position
/// where at least two samples disagree.
fn diff_setup_launch_payload_samples(
    samples: &[RuntimeSetupLaunchPayloadSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    // Project each sample into a labelled JSON value for the multi-way differ.
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let projection = serde_json::json!({
            "file_extension": sample.file_extension,
            "inferred_profile_family": sample.inferred_profile_family,
            "launch_flag_byte_0x22": sample.launch_flag_byte_0x22,
            "launch_flag_byte_0x22_hex": sample.launch_flag_byte_0x22_hex,
            "campaign_progress_in_known_range": sample.campaign_progress_in_known_range,
            "campaign_progress_scenario_name": sample.campaign_progress_scenario_name,
            "campaign_progress_page_index": sample.campaign_progress_page_index,
            "launch_selector_byte_0x33": sample.launch_selector_byte_0x33,
            "launch_selector_byte_0x33_hex": sample.launch_selector_byte_0x33_hex,
            "launch_token_block_0x23_0x32_hex": sample.launch_token_block_0x23_0x32_hex,
            "campaign_selector_values": sample.campaign_selector_values,
            "nonzero_campaign_selector_values": sample.nonzero_campaign_selector_values,
        });
        labeled_values.push((sample.path.clone(), projection));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Compares all post-special-conditions scalar samples and returns every
/// field position where at least two samples disagree.
fn diff_post_special_conditions_scalar_samples(
    samples: &[RuntimePostSpecialConditionsScalarSample],
) -> Result<Vec<RuntimeClassicProfileDifference>, Box<dyn std::error::Error>> {
    // Project each sample into a labelled JSON value for the multi-way differ.
    let mut labeled_values = Vec::with_capacity(samples.len());
    for sample in samples {
        let projection = serde_json::json!({
            "profile_family": sample.profile_family,
            "source_kind": sample.source_kind,
            "nonzero_relative_offset_hexes": sample.nonzero_relative_offset_hexes,
            "values_by_relative_offset_hex": sample.values_by_relative_offset_hex,
        });
        labeled_values.push((sample.path.clone(), projection));
    }
    let mut differences = Vec::new();
    collect_json_multi_differences("$", &labeled_values, &mut differences);
    Ok(differences)
}
/// Builds the exportable packed-profile document for `smp_path`, preferring
/// the classic rehydrate probe and falling back to the RT3 1.05 packed
/// probe; errors when the inspection exposed neither.
fn build_profile_block_export_document(
    smp_path: &Path,
    inspection: &SmpInspectionReport,
) -> Result<RuntimeProfileBlockExportDocument, Box<dyn std::error::Error>> {
    // (kind label, family, serialized payload) for whichever probe exists,
    // checked in priority order.
    let export = if let Some(probe) = &inspection.classic_rehydrate_profile_probe {
        Some((
            "classic-rehydrate-profile",
            probe.profile_family.clone(),
            serde_json::to_value(probe)?,
        ))
    } else if let Some(probe) = &inspection.rt3_105_packed_profile_probe {
        Some((
            "rt3-105-packed-profile",
            probe.profile_family.clone(),
            serde_json::to_value(probe)?,
        ))
    } else {
        None
    };
    match export {
        Some((profile_kind, profile_family, payload)) => Ok(RuntimeProfileBlockExportDocument {
            source_path: smp_path.display().to_string(),
            profile_kind: profile_kind.to_string(),
            profile_family,
            payload,
        }),
        None => Err(format!(
            "{} did not expose an exportable packed-profile block",
            smp_path.display()
        )
        .into()),
    }
}
/// Recursively walks a set of labelled JSON values in lockstep and records
/// every path at which the values are not all equal.
///
/// * Objects: recurse over the union of keys; a key missing in one input
///   reads as `Null` there.
/// * Arrays: recurse index-by-index up to the longest array; a missing index
///   reads as `Null`.
/// * Anything else (scalars, or mixed types at the same path): if at least
///   two labelled values differ, push one difference entry carrying every
///   input's value for that path.
fn collect_json_multi_differences(
    path: &str,
    labeled_values: &[(String, Value)],
    differences: &mut Vec<RuntimeClassicProfileDifference>,
) {
    if labeled_values.is_empty() {
        return;
    }
    // Case 1: every value is an object — descend key by key.
    if labeled_values
        .iter()
        .all(|(_, value)| matches!(value, Value::Object(_)))
    {
        // Union of keys across all inputs so one-sided keys are still diffed.
        let mut keys = BTreeSet::new();
        for (_, value) in labeled_values {
            if let Value::Object(map) = value {
                keys.extend(map.keys().cloned());
            }
        }
        for key in keys {
            let next_path = format!("{path}.{key}");
            // Build the per-key slice of (label, child value) pairs,
            // substituting Null where an input lacks the key.
            let nested = labeled_values
                .iter()
                .map(|(label, value)| {
                    let nested_value = match value {
                        Value::Object(map) => map.get(&key).cloned().unwrap_or(Value::Null),
                        _ => Value::Null,
                    };
                    (label.clone(), nested_value)
                })
                .collect::<Vec<_>>();
            collect_json_multi_differences(&next_path, &nested, differences);
        }
        return;
    }
    // Case 2: every value is an array — descend index by index.
    if labeled_values
        .iter()
        .all(|(_, value)| matches!(value, Value::Array(_)))
    {
        // Iterate up to the longest array so extra trailing elements on any
        // input are reported (shorter inputs read as Null there).
        let max_len = labeled_values
            .iter()
            .filter_map(|(_, value)| match value {
                Value::Array(items) => Some(items.len()),
                _ => None,
            })
            .max()
            .unwrap_or(0);
        for index in 0..max_len {
            let next_path = format!("{path}[{index}]");
            let nested = labeled_values
                .iter()
                .map(|(label, value)| {
                    let nested_value = match value {
                        Value::Array(items) => items.get(index).cloned().unwrap_or(Value::Null),
                        _ => Value::Null,
                    };
                    (label.clone(), nested_value)
                })
                .collect::<Vec<_>>();
            collect_json_multi_differences(&next_path, &nested, differences);
        }
        return;
    }
    // Case 3: scalar or mixed-type position — nothing to report if every
    // value equals the first one.
    let first = &labeled_values[0].1;
    if labeled_values
        .iter()
        .skip(1)
        .all(|(_, value)| value == first)
    {
        return;
    }
    // At least two inputs differ: record the full value per label so the
    // report shows each input's take on this path.
    differences.push(RuntimeClassicProfileDifference {
        field_path: path.to_string(),
        values: labeled_values
            .iter()
            .map(|(label, value)| RuntimeClassicProfileDifferenceValue {
                path: label.clone(),
                value: value.clone(),
            })
            .collect(),
    });
}
/// Pretty-prints a fixture validation report as JSON to stdout.
fn print_runtime_validation_report(
    report: &FixtureValidationReport,
) -> Result<(), Box<dyn std::error::Error>> {
    let rendered = serde_json::to_string_pretty(report)?;
    println!("{rendered}");
    Ok(())
}
/// Loads a finance outcome from `path`, accepting either a `FinanceSnapshot`
/// (which is evaluated) or an already-evaluated `FinanceOutcome`; errors when
/// the file parses as neither.
fn load_finance_outcome(path: &Path) -> Result<FinanceOutcome, Box<dyn std::error::Error>> {
    let text = fs::read_to_string(path)?;
    // Try the snapshot form first, then fall back to a raw outcome.
    match serde_json::from_str::<FinanceSnapshot>(&text) {
        Ok(snapshot) => Ok(snapshot.evaluate()),
        Err(_) => match serde_json::from_str::<FinanceOutcome>(&text) {
            Ok(outcome) => Ok(outcome),
            Err(_) => Err(format!(
                "unable to parse {} as FinanceSnapshot or FinanceOutcome",
                path.display()
            )
            .into()),
        },
    }
}
/// Serializes both outcomes to JSON and collects every path at which they
/// differ, wrapping the result in a `FinanceDiffReport`.
fn diff_finance_outcomes(
    left: &FinanceOutcome,
    right: &FinanceOutcome,
) -> Result<FinanceDiffReport, Box<dyn std::error::Error>> {
    let mut differences = Vec::new();
    collect_json_differences(
        "$",
        &serde_json::to_value(left)?,
        &serde_json::to_value(right)?,
        &mut differences,
    );
    Ok(FinanceDiffReport {
        matches: differences.is_empty(),
        difference_count: differences.len(),
        differences,
    })
}
/// Recursively records every JSON path at which `left` and `right` disagree.
///
/// Objects are compared over the union of their keys and arrays
/// index-by-index up to the longer length; a value missing on one side is
/// reported as `Null`. Mismatched types or unequal scalars produce a single
/// entry for the current path.
fn collect_json_differences(
    path: &str,
    left: &Value,
    right: &Value,
    differences: &mut Vec<FinanceDiffEntry>,
) {
    if let (Value::Object(left_map), Value::Object(right_map)) = (left, right) {
        // Union of keys so additions and removals are both reported.
        let keys: BTreeSet<String> = left_map.keys().chain(right_map.keys()).cloned().collect();
        for key in keys {
            let child_path = format!("{path}.{key}");
            match (left_map.get(&key), right_map.get(&key)) {
                (Some(left_child), Some(right_child)) => {
                    collect_json_differences(&child_path, left_child, right_child, differences);
                }
                (left_child, right_child) => differences.push(FinanceDiffEntry {
                    path: child_path,
                    left: left_child.cloned().unwrap_or(Value::Null),
                    right: right_child.cloned().unwrap_or(Value::Null),
                }),
            }
        }
        return;
    }
    if let (Value::Array(left_items), Value::Array(right_items)) = (left, right) {
        for index in 0..left_items.len().max(right_items.len()) {
            let child_path = format!("{path}[{index}]");
            match (left_items.get(index), right_items.get(index)) {
                (Some(left_child), Some(right_child)) => {
                    collect_json_differences(&child_path, left_child, right_child, differences);
                }
                (left_child, right_child) => differences.push(FinanceDiffEntry {
                    path: child_path,
                    left: left_child.cloned().unwrap_or(Value::Null),
                    right: right_child.cloned().unwrap_or(Value::Null),
                }),
            }
        }
        return;
    }
    // Scalars, or a type mismatch between the two sides.
    if left != right {
        differences.push(FinanceDiffEntry {
            path: path.to_string(),
            left: left.clone(),
            right: right.clone(),
        });
    }
}
/// Ensures every path listed in `REQUIRED_EXPORTS` exists under `repo_root`,
/// reporting all missing paths in a single error.
fn validate_required_files(repo_root: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let missing = REQUIRED_EXPORTS
        .iter()
        .map(|relative| repo_root.join(relative))
        .filter(|path| !path.exists())
        .map(|path| path.display().to_string())
        .collect::<Vec<_>>();
    if missing.is_empty() {
        Ok(())
    } else {
        Err(format!("missing required exports: {}", missing.join(", ")).into())
    }
}
/// Checks that the canonical exe exists, that its SHA-256 matches the
/// recorded binary summary, and that docs/README.md quotes the same hash.
fn validate_binary_summary(repo_root: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let summary = load_binary_summary(&repo_root.join(BINARY_SUMMARY_PATH))?;
    let exe_path = repo_root.join(CANONICAL_EXE_PATH);
    if !exe_path.exists() {
        return Err(format!("canonical exe missing: {}", exe_path.display()).into());
    }
    let computed_hash = sha256_file(&exe_path)?;
    if computed_hash != summary.sha256 {
        return Err(format!(
            "hash mismatch for {}: summary has {}, actual file is {}",
            exe_path.display(),
            summary.sha256,
            computed_hash
        )
        .into());
    }
    // The docs must quote the canonical hash verbatim.
    let docs_readme = fs::read_to_string(repo_root.join("docs/README.md"))?;
    if docs_readme.contains(&summary.sha256) {
        Ok(())
    } else {
        Err("docs/README.md does not include the canonical SHA-256".into())
    }
}
/// Validates the exported function map: every record's confidence is in
/// 1..=5, addresses are unique, and names are non-blank. Fails on the first
/// violation encountered.
fn validate_function_map(repo_root: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let records = load_function_map(&repo_root.join(FUNCTION_MAP_PATH))?;
    let mut seen_addresses = BTreeSet::new();
    for record in records {
        if !(1..=5).contains(&record.confidence) {
            return Err(format!(
                "invalid confidence {} for {} {}",
                record.confidence, record.address, record.name
            )
            .into());
        }
        // `insert` returning false means the address was already recorded.
        if !seen_addresses.insert(record.address) {
            return Err(format!("duplicate function address {}", record.address).into());
        }
        if record.name.trim().is_empty() {
            return Err(format!("blank function name at {}", record.address).into());
        }
    }
    Ok(())
}
/// Verifies the control-loop atlas document contains every required heading
/// and every per-loop field marker, failing on the first missing one.
fn validate_control_loop_atlas(repo_root: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let atlas = fs::read_to_string(repo_root.join(CONTROL_LOOP_ATLAS_PATH))?;
    if let Some(heading) = REQUIRED_ATLAS_HEADINGS
        .iter()
        .copied()
        .find(|heading| !atlas.contains(*heading))
    {
        return Err(format!("missing atlas heading `{heading}`").into());
    }
    let field_markers = [
        "- Roots:",
        "- Trigger/Cadence:",
        "- Key Dispatchers:",
        "- State Anchors:",
        "- Subsystem Handoffs:",
        "- Evidence:",
        "- Open Questions:",
    ];
    if let Some(marker) = field_markers
        .iter()
        .copied()
        .find(|marker| !atlas.contains(*marker))
    {
        return Err(format!("atlas is missing field marker `{marker}`").into());
    }
    Ok(())
}
/// Streams `path` through SHA-256 and returns the lowercase hex digest.
fn sha256_file(path: &Path) -> Result<String, Box<dyn std::error::Error>> {
    let mut file = fs::File::open(path)?;
    let mut digest = Sha256::new();
    // Fixed 8 KiB chunks keep memory usage flat regardless of file size.
    let mut chunk = [0_u8; 8192];
    loop {
        match file.read(&mut chunk)? {
            0 => break,
            filled => digest.update(&chunk[..filled]),
        }
    }
    Ok(format!("{:x}", digest.finalize()))
}
#[cfg(test)]
mod tests {
use super::*;
use rrt_model::finance::{
AnnualFinanceDecision, AnnualFinanceEvaluation, CompanyFinanceState, DebtRestructureSummary,
};
use rrt_runtime::{SmpPackedProfileWordLane, SmpRt3105PackedProfileBlock};
#[test]
fn loads_snapshot_as_outcome() {
let snapshot = FinanceSnapshot {
policy: rrt_model::finance::AnnualFinancePolicy {
dividends_allowed: false,
..rrt_model::finance::AnnualFinancePolicy::default()
},
company: CompanyFinanceState::default(),
};
let path = write_temp_json("snapshot", &snapshot);
let outcome = load_finance_outcome(&path).expect("snapshot should load");
assert_eq!(outcome.evaluation.decision, AnnualFinanceDecision::NoAction);
let _ = fs::remove_file(path);
}
#[test]
fn diffs_outcomes_recursively() {
let left = FinanceOutcome {
evaluation: AnnualFinanceEvaluation::no_action(),
post_company: CompanyFinanceState::default(),
};
let mut right = left.clone();
right.post_company.current_cash = 123;
right.evaluation.debt_restructure = DebtRestructureSummary {
retired_principal: 10,
issued_principal: 20,
};
let report = diff_finance_outcomes(&left, &right).expect("diff should succeed");
assert!(!report.matches);
assert!(
report
.differences
.iter()
.any(|entry| entry.path == "$.post_company.current_cash")
);
assert!(
report
.differences
.iter()
.any(|entry| entry.path == "$.evaluation.debt_restructure.retired_principal")
);
}
#[test]
fn summarizes_runtime_fixture() {
let fixture = serde_json::json!({
"format_version": 1,
"fixture_id": "runtime-fixture-test",
"source": { "kind": "synthetic" },
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 0
},
"world_flags": {
"sandbox": false
},
"companies": [],
"event_runtime_records": []
},
"commands": [
{
"kind": "advance_to",
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 3
}
}
],
"expected_summary": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 3
},
"world_flag_count": 1,
"company_count": 0,
"event_runtime_record_count": 0,
"total_company_cash": 0
},
"expected_state_fragment": {
"calendar": {
"tick_slot": 3
},
"world_flags": {
"sandbox": false
}
}
});
let path = write_temp_json("runtime-fixture", &fixture);
run_runtime_summarize_fixture(&path).expect("fixture summary should succeed");
let _ = fs::remove_file(path);
}
#[test]
fn exports_and_summarizes_runtime_snapshot() {
let fixture = serde_json::json!({
"format_version": 1,
"fixture_id": "runtime-export-test",
"source": { "kind": "synthetic" },
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 0
},
"world_flags": {},
"companies": [],
"event_runtime_records": []
},
"commands": [
{
"kind": "step_count",
"steps": 2
}
],
"expected_summary": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 2
},
"world_flag_count": 0,
"company_count": 0,
"event_runtime_record_count": 0,
"total_company_cash": 0
}
});
let fixture_path = write_temp_json("runtime-export-fixture", &fixture);
let snapshot_path = std::env::temp_dir().join(format!(
"rrt-cli-runtime-export-{}.json",
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.expect("system time should be after epoch")
.as_nanos()
));
run_runtime_export_fixture_state(&fixture_path, &snapshot_path)
.expect("fixture export should succeed");
run_runtime_summarize_state(&snapshot_path).expect("snapshot summary should succeed");
let _ = fs::remove_file(fixture_path);
let _ = fs::remove_file(snapshot_path);
}
#[test]
fn imports_runtime_state_dump_into_snapshot() {
let dump = serde_json::json!({
"format_version": 1,
"dump_id": "runtime-dump-test",
"source": {
"description": "test raw runtime dump"
},
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 9
},
"world_flags": {},
"companies": [],
"event_runtime_records": [],
"service_state": {
"periodic_boundary_calls": 0,
"trigger_dispatch_counts": {},
"total_event_record_services": 0,
"dirty_rerun_count": 0
}
}
});
let input_path = write_temp_json("runtime-dump", &dump);
let output_path = std::env::temp_dir().join(format!(
"rrt-cli-runtime-import-{}.json",
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.expect("system time should be after epoch")
.as_nanos()
));
run_runtime_import_state(&input_path, &output_path).expect("runtime import should succeed");
run_runtime_summarize_state(&output_path).expect("imported snapshot should summarize");
let _ = fs::remove_file(input_path);
let _ = fs::remove_file(output_path);
}
#[test]
fn diffs_runtime_states_recursively() {
let left = serde_json::json!({
"format_version": 1,
"snapshot_id": "left",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 1
},
"world_flags": {
"sandbox": false
},
"companies": []
}
});
let right = serde_json::json!({
"format_version": 1,
"snapshot_id": "right",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 2
},
"world_flags": {
"sandbox": true
},
"companies": []
}
});
let left_path = write_temp_json("runtime-diff-left", &left);
let right_path = write_temp_json("runtime-diff-right", &right);
run_runtime_diff_state(&left_path, &right_path).expect("runtime diff should succeed");
let _ = fs::remove_file(left_path);
let _ = fs::remove_file(right_path);
}
#[test]
fn diffs_runtime_states_with_event_record_additions_and_removals() {
let left = serde_json::json!({
"format_version": 1,
"snapshot_id": "left-events",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 1
},
"world_flags": {
"sandbox": false
},
"companies": [],
"event_runtime_records": [
{
"record_id": 1,
"trigger_kind": 7,
"active": true
},
{
"record_id": 2,
"trigger_kind": 7,
"active": false
}
]
}
});
let right = serde_json::json!({
"format_version": 1,
"snapshot_id": "right-events",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 1
},
"world_flags": {
"sandbox": false
},
"companies": [],
"event_runtime_records": [
{
"record_id": 1,
"trigger_kind": 7,
"active": true
}
]
}
});
let left_path = write_temp_json("runtime-diff-events-left", &left);
let right_path = write_temp_json("runtime-diff-events-right", &right);
let left_state =
load_normalized_runtime_state(&left_path).expect("left runtime state should load");
let right_state =
load_normalized_runtime_state(&right_path).expect("right runtime state should load");
let differences = diff_json_values(&left_state, &right_state);
assert!(
differences
.iter()
.any(|entry| entry.path == "$.event_runtime_records[1]")
);
let _ = fs::remove_file(left_path);
let _ = fs::remove_file(right_path);
}
#[test]
fn diffs_runtime_states_with_packed_event_collection_changes() {
let left = serde_json::json!({
"format_version": 1,
"snapshot_id": "left-packed-events",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 1
},
"world_flags": {},
"companies": [],
"packed_event_collection": {
"source_kind": "packed-event-runtime-collection",
"mechanism_family": "classic-save-rehydrate-v1",
"mechanism_confidence": "grounded",
"container_profile_family": "rt3-classic-save-container-v1",
"packed_state_version": 1001,
"packed_state_version_hex": "0x000003e9",
"live_id_bound": 5,
"live_record_count": 3,
"live_entry_ids": [1, 3, 5],
"decoded_record_count": 0,
"imported_runtime_record_count": 0,
"records": [
{
"record_index": 0,
"live_entry_id": 1,
"decode_status": "unsupported_framing",
"payload_family": "unsupported_framing",
"grouped_effect_row_counts": [0, 0, 0, 0],
"decoded_actions": [],
"executable_import_ready": false,
"notes": ["left fixture"]
},
{
"record_index": 1,
"live_entry_id": 3,
"decode_status": "unsupported_framing",
"payload_family": "unsupported_framing",
"grouped_effect_row_counts": [0, 0, 0, 0],
"decoded_actions": [],
"executable_import_ready": false,
"notes": ["left fixture"]
},
{
"record_index": 2,
"live_entry_id": 5,
"decode_status": "unsupported_framing",
"payload_family": "unsupported_framing",
"grouped_effect_row_counts": [0, 0, 0, 0],
"decoded_actions": [],
"executable_import_ready": false,
"notes": ["left fixture"]
}
]
},
"event_runtime_records": []
}
});
let right = serde_json::json!({
"format_version": 1,
"snapshot_id": "right-packed-events",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 1
},
"world_flags": {},
"companies": [],
"packed_event_collection": {
"source_kind": "packed-event-runtime-collection",
"mechanism_family": "classic-save-rehydrate-v1",
"mechanism_confidence": "grounded",
"container_profile_family": "rt3-classic-save-container-v1",
"packed_state_version": 1001,
"packed_state_version_hex": "0x000003e9",
"live_id_bound": 5,
"live_record_count": 2,
"live_entry_ids": [1, 5],
"decoded_record_count": 0,
"imported_runtime_record_count": 0,
"records": [
{
"record_index": 0,
"live_entry_id": 1,
"decode_status": "unsupported_framing",
"payload_family": "unsupported_framing",
"grouped_effect_row_counts": [0, 0, 0, 0],
"decoded_actions": [],
"executable_import_ready": false,
"notes": ["right fixture"]
},
{
"record_index": 1,
"live_entry_id": 5,
"decode_status": "unsupported_framing",
"payload_family": "unsupported_framing",
"grouped_effect_row_counts": [0, 0, 0, 0],
"decoded_actions": [],
"executable_import_ready": false,
"notes": ["right fixture"]
}
]
},
"event_runtime_records": []
}
});
let left_path = write_temp_json("runtime-diff-packed-events-left", &left);
let right_path = write_temp_json("runtime-diff-packed-events-right", &right);
let left_state =
load_normalized_runtime_state(&left_path).expect("left runtime state should load");
let right_state =
load_normalized_runtime_state(&right_path).expect("right runtime state should load");
let differences = diff_json_values(&left_state, &right_state);
assert!(differences.iter().any(|entry| {
entry.path == "$.packed_event_collection.live_record_count"
|| entry.path == "$.packed_event_collection.live_entry_ids[1]"
}));
let _ = fs::remove_file(left_path);
let _ = fs::remove_file(right_path);
}
#[test]
fn summarizes_snapshot_backed_fixture_with_packed_event_collection() {
let fixture_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-collection-from-snapshot.json");
run_runtime_summarize_fixture(&fixture_path)
.expect("snapshot-backed packed-event fixture should summarize");
}
#[test]
fn summarizes_snapshot_backed_fixture_with_imported_packed_event_record() {
let fixture_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-record-import-from-snapshot.json");
run_runtime_summarize_fixture(&fixture_path)
.expect("snapshot-backed imported packed-event fixture should summarize");
}
#[test]
fn summarizes_save_slice_backed_fixtures() {
let parity_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-parity-save-slice-fixture.json");
let selective_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-selective-import-save-slice-fixture.json");
let overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-selective-import-overlay-fixture.json");
let symbolic_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-symbolic-company-scope-overlay-fixture.json",
);
let negative_company_scope_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join(
"../../fixtures/runtime/packed-event-negative-company-scope-overlay-fixture.json",
);
let deactivate_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-deactivate-company-overlay-fixture.json");
let track_capacity_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-track-capacity-overlay-fixture.json");
let mixed_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-mixed-company-descriptor-overlay-fixture.json",
);
let named_locomotive_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-named-locomotive-availability-save-slice-fixture.json",
);
let missing_catalog_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-locomotive-availability-missing-catalog-save-slice-fixture.json",
);
let save_locomotive_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-locomotive-availability-save-slice-fixture.json",
);
let overlay_locomotive_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-locomotive-availability-overlay-fixture.json",
);
let save_locomotive_cost_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-locomotive-cost-save-slice-fixture.json");
let overlay_locomotive_cost_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-locomotive-cost-overlay-fixture.json");
let scalar_band_parity_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-world-scalar-band-parity-save-slice-fixture.json",
);
let world_scalar_executable_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-world-scalar-executable-save-slice-fixture.json",
);
let world_scalar_condition_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-world-scalar-condition-save-slice-fixture.json",
);
let world_scalar_condition_parity_fixture =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-world-scalar-condition-parity-save-slice-fixture.json",
);
let cargo_catalog_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-cargo-catalog-save-slice-fixture.json");
let chairman_cash_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-chairman-cash-overlay-fixture.json");
let chairman_cash_save_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-chairman-cash-save-slice-fixture.json");
let deactivate_chairman_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-deactivate-chairman-overlay-fixture.json");
let deactivate_chairman_save_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-deactivate-chairman-save-slice-fixture.json",
);
let deactivate_company_save_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-deactivate-company-save-slice-fixture.json");
let track_capacity_save_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-track-capacity-save-slice-fixture.json");
let negative_company_scope_save_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-negative-company-scope-save-slice-fixture.json",
);
let missing_chairman_context_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-chairman-missing-context-save-slice-fixture.json",
);
let chairman_scope_parity_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../fixtures/runtime/packed-event-chairman-scope-parity-save-slice-fixture.json",
);
let chairman_condition_overlay_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-chairman-condition-overlay-fixture.json");
let chairman_condition_save_fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-chairman-condition-save-slice-fixture.json");
let company_governance_condition_overlay_fixture = PathBuf::from(env!(
"CARGO_MANIFEST_DIR"
))
.join(
"../../fixtures/runtime/packed-event-company-governance-condition-overlay-fixture.json",
);
let company_governance_condition_save_fixture = PathBuf::from(env!(
"CARGO_MANIFEST_DIR"
))
.join(
"../../fixtures/runtime/packed-event-company-governance-condition-save-slice-fixture.json",
);
let investor_confidence_condition_save_fixture = PathBuf::from(env!(
"CARGO_MANIFEST_DIR"
))
.join(
"../../fixtures/runtime/packed-event-investor-confidence-condition-save-slice-fixture.json",
);
let management_attitude_condition_save_fixture = PathBuf::from(env!(
"CARGO_MANIFEST_DIR"
))
.join(
"../../fixtures/runtime/packed-event-management-attitude-condition-save-slice-fixture.json",
);
run_runtime_summarize_fixture(&parity_fixture)
.expect("save-slice-backed parity fixture should summarize");
run_runtime_summarize_fixture(&selective_fixture)
.expect("save-slice-backed selective-import fixture should summarize");
run_runtime_summarize_fixture(&overlay_fixture)
.expect("overlay-backed selective-import fixture should summarize");
run_runtime_summarize_fixture(&symbolic_overlay_fixture)
.expect("overlay-backed symbolic-target fixture should summarize");
run_runtime_summarize_fixture(&negative_company_scope_overlay_fixture)
.expect("overlay-backed negative-sentinel company-scope fixture should summarize");
run_runtime_summarize_fixture(&deactivate_overlay_fixture)
.expect("overlay-backed deactivate-company fixture should summarize");
run_runtime_summarize_fixture(&track_capacity_overlay_fixture)
.expect("overlay-backed track-capacity fixture should summarize");
run_runtime_summarize_fixture(&mixed_overlay_fixture)
.expect("overlay-backed mixed real-row fixture should summarize");
run_runtime_summarize_fixture(&named_locomotive_fixture)
.expect("save-slice-backed named locomotive availability fixture should summarize");
run_runtime_summarize_fixture(&missing_catalog_fixture).expect(
"save-slice-backed locomotive availability missing-catalog fixture should summarize",
);
run_runtime_summarize_fixture(&save_locomotive_fixture).expect(
"save-slice-backed locomotive availability descriptor fixture should summarize",
);
run_runtime_summarize_fixture(&overlay_locomotive_fixture)
.expect("overlay-backed locomotive availability fixture should summarize");
run_runtime_summarize_fixture(&save_locomotive_cost_fixture)
.expect("save-slice-backed locomotive cost fixture should summarize");
run_runtime_summarize_fixture(&overlay_locomotive_cost_fixture)
.expect("overlay-backed locomotive cost fixture should summarize");
run_runtime_summarize_fixture(&scalar_band_parity_fixture)
.expect("save-slice-backed recovered scalar-band parity fixture should summarize");
run_runtime_summarize_fixture(&world_scalar_executable_fixture)
.expect("save-slice-backed executable world-scalar fixture should summarize");
run_runtime_summarize_fixture(&world_scalar_condition_fixture)
.expect("save-slice-backed executable world-scalar condition fixture should summarize");
run_runtime_summarize_fixture(&world_scalar_condition_parity_fixture)
.expect("save-slice-backed parity world-scalar condition fixture should summarize");
run_runtime_summarize_fixture(&cargo_catalog_fixture)
.expect("save-slice-backed cargo catalog fixture should summarize");
run_runtime_summarize_fixture(&chairman_cash_overlay_fixture)
.expect("overlay-backed chairman-cash fixture should summarize");
run_runtime_summarize_fixture(&chairman_cash_save_fixture)
.expect("save-slice-backed chairman-cash fixture should summarize");
run_runtime_summarize_fixture(&deactivate_chairman_overlay_fixture)
.expect("overlay-backed deactivate-chairman fixture should summarize");
run_runtime_summarize_fixture(&deactivate_chairman_save_fixture)
.expect("save-slice-backed deactivate-chairman fixture should summarize");
run_runtime_summarize_fixture(&deactivate_company_save_fixture)
.expect("save-slice-backed deactivate-company fixture should summarize");
run_runtime_summarize_fixture(&track_capacity_save_fixture)
.expect("save-slice-backed track-capacity fixture should summarize");
run_runtime_summarize_fixture(&negative_company_scope_save_fixture)
.expect("save-slice-backed negative-sentinel company-scope fixture should summarize");
run_runtime_summarize_fixture(&missing_chairman_context_fixture)
.expect("save-slice-backed chairman missing-context fixture should summarize");
run_runtime_summarize_fixture(&chairman_scope_parity_fixture)
.expect("save-slice-backed chairman scope parity fixture should summarize");
run_runtime_summarize_fixture(&chairman_condition_overlay_fixture)
.expect("overlay-backed chairman condition fixture should summarize");
run_runtime_summarize_fixture(&chairman_condition_save_fixture)
.expect("save-slice-backed chairman condition fixture should summarize");
run_runtime_summarize_fixture(&company_governance_condition_overlay_fixture)
.expect("overlay-backed company governance condition fixture should summarize");
run_runtime_summarize_fixture(&company_governance_condition_save_fixture)
.expect("save-slice-backed company governance condition fixture should summarize");
run_runtime_summarize_fixture(&investor_confidence_condition_save_fixture)
.expect("save-slice-backed investor-confidence condition fixture should summarize");
run_runtime_summarize_fixture(&management_attitude_condition_save_fixture)
.expect("save-slice-backed management-attitude condition fixture should summarize");
}
#[test]
fn exports_runtime_save_slice_document_from_loaded_slice() {
let nonce = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.expect("system time should be after epoch")
.as_nanos();
let output_path =
std::env::temp_dir().join(format!("rrt-export-save-slice-test-{nonce}.json"));
let smp_path = PathBuf::from("captured-test.gms");
let report = export_runtime_save_slice_document(
&smp_path,
&output_path,
SmpLoadedSaveSlice {
file_extension_hint: Some("gms".to_string()),
container_profile_family: Some("rt3-classic-save-container-v1".to_string()),
mechanism_family: "classic-save-rehydrate-v1".to_string(),
mechanism_confidence: "grounded".to_string(),
trailer_family: None,
bridge_family: None,
profile: None,
candidate_availability_table: None,
named_locomotive_availability_table: None,
locomotive_catalog: None,
cargo_catalog: None,
company_roster: None,
chairman_profile_table: None,
special_conditions_table: None,
event_runtime_collection: None,
notes: vec!["exported for test".to_string()],
},
)
.expect("save slice export should succeed");
assert_eq!(report.save_slice_id, "captured-test");
let document = rrt_runtime::load_runtime_save_slice_document(&output_path)
.expect("exported save slice document should load");
assert_eq!(document.save_slice_id, "captured-test");
assert_eq!(
document.source.original_save_filename.as_deref(),
Some("captured-test.gms")
);
let _ = fs::remove_file(output_path);
}
#[test]
fn exports_runtime_overlay_import_document() {
let nonce = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.expect("system time should be after epoch")
.as_nanos();
let output_path =
std::env::temp_dir().join(format!("rrt-export-overlay-import-test-{nonce}.json"));
let snapshot_path = PathBuf::from("base-snapshot.json");
let save_slice_path = PathBuf::from("captured-save-slice.json");
let report =
export_runtime_overlay_import_document(&snapshot_path, &save_slice_path, &output_path)
.expect("overlay import export should succeed");
let expected_import_id = output_path
.file_stem()
.and_then(|stem| stem.to_str())
.expect("output path should have a stem")
.to_string();
assert_eq!(report.import_id, expected_import_id);
let document = rrt_runtime::load_runtime_overlay_import_document(&output_path)
.expect("exported overlay import document should load");
assert_eq!(document.import_id, expected_import_id);
assert_eq!(document.base_snapshot_path, "base-snapshot.json");
assert_eq!(document.save_slice_path, "captured-save-slice.json");
let _ = fs::remove_file(output_path);
}
#[test]
fn diffs_runtime_states_with_packed_record_and_runtime_record_import_changes() {
let left = serde_json::json!({
"format_version": 1,
"snapshot_id": "left-packed-import",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 0
},
"world_flags": {},
"companies": [],
"packed_event_collection": {
"source_kind": "packed-event-runtime-collection",
"mechanism_family": "classic-save-rehydrate-v1",
"mechanism_confidence": "grounded",
"container_profile_family": "rt3-classic-save-container-v1",
"packed_state_version": 1001,
"packed_state_version_hex": "0x000003e9",
"live_id_bound": 7,
"live_record_count": 1,
"live_entry_ids": [7],
"decoded_record_count": 0,
"imported_runtime_record_count": 0,
"records": [
{
"record_index": 0,
"live_entry_id": 7,
"decode_status": "unsupported_framing",
"payload_family": "unsupported_framing",
"grouped_effect_row_counts": [0, 0, 0, 0],
"decoded_actions": [],
"executable_import_ready": false,
"notes": ["left placeholder"]
}
]
},
"event_runtime_records": []
}
});
let right = serde_json::json!({
"format_version": 1,
"snapshot_id": "right-packed-import",
"state": {
"calendar": {
"year": 1830,
"month_slot": 0,
"phase_slot": 0,
"tick_slot": 0
},
"world_flags": {},
"companies": [],
"packed_event_collection": {
"source_kind": "packed-event-runtime-collection",
"mechanism_family": "classic-save-rehydrate-v1",
"mechanism_confidence": "grounded",
"container_profile_family": "rt3-classic-save-container-v1",
"packed_state_version": 1001,
"packed_state_version_hex": "0x000003e9",
"live_id_bound": 7,
"live_record_count": 1,
"live_entry_ids": [7],
"decoded_record_count": 1,
"imported_runtime_record_count": 1,
"records": [
{
"record_index": 0,
"live_entry_id": 7,
"payload_offset": 29186,
"payload_len": 64,
"decode_status": "executable",
"payload_family": "synthetic_harness",
"trigger_kind": 7,
"active": true,
"marks_collection_dirty": false,
"one_shot": false,
"text_bands": [
{
"label": "primary_text_band",
"packed_len": 5,
"present": true,
"preview": "Alpha"
}
],
"standalone_condition_row_count": 1,
"standalone_condition_rows": [],
"grouped_effect_row_counts": [0, 1, 0, 0],
"grouped_effect_rows": [],
"decoded_actions": [
{
"kind": "set_world_flag",
"key": "from_packed_root",
"value": true
}
],
"executable_import_ready": true,
"notes": ["decoded test record"]
}
]
},
"event_runtime_records": [
{
"record_id": 7,
"trigger_kind": 7,
"active": true,
"marks_collection_dirty": false,
"one_shot": false,
"has_fired": false,
"effects": [
{
"kind": "set_world_flag",
"key": "from_packed_root",
"value": true
}
]
}
]
}
});
let left_path = write_temp_json("runtime-diff-packed-import-left", &left);
let right_path = write_temp_json("runtime-diff-packed-import-right", &right);
let left_state =
load_normalized_runtime_state(&left_path).expect("left runtime state should load");
let right_state =
load_normalized_runtime_state(&right_path).expect("right runtime state should load");
let differences = diff_json_values(&left_state, &right_state);
assert!(differences.iter().any(|entry| {
entry.path == "$.packed_event_collection.records[0].decode_status"
|| entry.path == "$.packed_event_collection.records[0].decoded_actions[0]"
}));
assert!(
differences
.iter()
.any(|entry| entry.path == "$.event_runtime_records[0]")
);
let _ = fs::remove_file(left_path);
let _ = fs::remove_file(right_path);
}
#[test]
fn diffs_runtime_states_between_save_slice_and_overlay_import() {
let base = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-selective-import-save-slice.json");
let overlay = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-selective-import-overlay.json");
let left_state =
load_normalized_runtime_state(&base).expect("save-slice-backed state should load");
let right_state =
load_normalized_runtime_state(&overlay).expect("overlay-backed state should load");
let differences = diff_json_values(&left_state, &right_state);
assert!(differences.iter().any(|entry| {
entry.path == "$.companies[0].company_id"
|| entry.path == "$.packed_event_collection.imported_runtime_record_count"
|| entry.path == "$.packed_event_collection.records[1].import_outcome"
|| entry.path == "$.event_runtime_records[1].record_id"
}));
}
#[test]
fn diffs_save_slice_backed_states_across_packed_event_boundaries() {
let left_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-parity-save-slice.json");
let right_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("../../fixtures/runtime/packed-event-selective-import-save-slice.json");
let left_state = load_normalized_runtime_state(&left_path)
.expect("left save-slice-backed state should load");
let right_state = load_normalized_runtime_state(&right_path)
.expect("right save-slice-backed state should load");
let differences = diff_json_values(&left_state, &right_state);
assert!(differences.iter().any(|entry| {
entry.path == "$.packed_event_collection.imported_runtime_record_count"
|| entry.path == "$.packed_event_collection.records[0].decode_status"
}));
}
#[test]
fn diffs_classic_profile_samples_across_multiple_files() {
let sample_a = RuntimeClassicProfileSample {
path: "a.gms".to_string(),
profile_family: "rt3-classic-save-container-v1".to_string(),
progress_32dc_offset: 0x76e8,
progress_3714_offset: 0x76ec,
progress_3715_offset: 0x77f8,
packed_profile_offset: 0x76f0,
packed_profile_len: 0x108,
packed_profile_block: SmpClassicPackedProfileBlock {
relative_len: 0x108,
relative_len_hex: "0x108".to_string(),
leading_word_0: 0x03000000,
leading_word_0_hex: "0x03000000".to_string(),
trailing_zero_word_count_after_leading_word: 3,
map_path_offset: 0x13,
map_path: Some("British Isles.gmp".to_string()),
display_name_offset: 0x46,
display_name: Some("British Isles".to_string()),
profile_byte_0x77: 0,
profile_byte_0x77_hex: "0x00".to_string(),
profile_byte_0x82: 0,
profile_byte_0x82_hex: "0x00".to_string(),
profile_byte_0x97: 0,
profile_byte_0x97_hex: "0x00".to_string(),
profile_byte_0xc5: 0,
profile_byte_0xc5_hex: "0x00".to_string(),
stable_nonzero_words: vec![SmpPackedProfileWordLane {
relative_offset: 0,
relative_offset_hex: "0x00".to_string(),
value: 0x03000000,
value_hex: "0x03000000".to_string(),
}],
},
};
let mut sample_b = sample_a.clone();
sample_b.path = "b.gms".to_string();
sample_b.packed_profile_block.leading_word_0 = 0x05000000;
sample_b.packed_profile_block.leading_word_0_hex = "0x05000000".to_string();
sample_b.packed_profile_block.stable_nonzero_words[0].value = 0x05000000;
sample_b.packed_profile_block.stable_nonzero_words[0].value_hex = "0x05000000".to_string();
let differences =
diff_classic_profile_samples(&[sample_a, sample_b]).expect("diff should succeed");
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.packed_profile_block.leading_word_0")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.packed_profile_block.leading_word_0_hex")
);
assert!(differences.iter().any(
|entry| entry.field_path == "$.packed_profile_block.stable_nonzero_words[0].value"
));
}
#[test]
fn diffs_rt3_105_profile_samples_across_multiple_files() {
let sample_a = RuntimeRt3105ProfileSample {
path: "a.gms".to_string(),
profile_family: "rt3-105-save-container-v1".to_string(),
packed_profile_offset: 0x73c0,
packed_profile_len: 0x108,
packed_profile_block: SmpRt3105PackedProfileBlock {
relative_len: 0x108,
relative_len_hex: "0x108".to_string(),
leading_word_0: 3,
leading_word_0_hex: "0x00000003".to_string(),
trailing_zero_word_count_after_leading_word: 2,
header_flag_word_3: 0x01000000,
header_flag_word_3_hex: "0x01000000".to_string(),
map_path_offset: 0x10,
map_path: Some("Alternate USA.gmp".to_string()),
display_name_offset: 0x43,
display_name: Some("Alternate USA".to_string()),
profile_byte_0x77: 0x07,
profile_byte_0x77_hex: "0x07".to_string(),
profile_byte_0x82: 0x4d,
profile_byte_0x82_hex: "0x4d".to_string(),
profile_byte_0x97: 0x00,
profile_byte_0x97_hex: "0x00".to_string(),
profile_byte_0xc5: 0x00,
profile_byte_0xc5_hex: "0x00".to_string(),
stable_nonzero_words: vec![SmpPackedProfileWordLane {
relative_offset: 0x80,
relative_offset_hex: "0x80".to_string(),
value: 0x364d0000,
value_hex: "0x364d0000".to_string(),
}],
},
};
let mut sample_b = sample_a.clone();
sample_b.path = "b.gms".to_string();
sample_b.profile_family = "rt3-105-alt-save-container-v1".to_string();
sample_b.packed_profile_block.map_path = Some("Southern Pacific.gmp".to_string());
sample_b.packed_profile_block.display_name = Some("Southern Pacific".to_string());
sample_b.packed_profile_block.leading_word_0 = 5;
sample_b.packed_profile_block.leading_word_0_hex = "0x00000005".to_string();
sample_b.packed_profile_block.profile_byte_0x82 = 0x90;
sample_b.packed_profile_block.profile_byte_0x82_hex = "0x90".to_string();
sample_b.packed_profile_block.stable_nonzero_words[0].value = 0x1b900000;
sample_b.packed_profile_block.stable_nonzero_words[0].value_hex = "0x1b900000".to_string();
let differences =
diff_rt3_105_profile_samples(&[sample_a, sample_b]).expect("diff should succeed");
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.profile_family")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.packed_profile_block.map_path")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.packed_profile_block.profile_byte_0x82")
);
}
#[test]
fn diffs_candidate_table_samples_across_multiple_files() {
let mut availability_a = BTreeMap::new();
availability_a.insert("AutoPlant".to_string(), 1u32);
availability_a.insert("Nuclear Power Plant".to_string(), 0u32);
let sample_a = RuntimeCandidateTableSample {
path: "a.gmp".to_string(),
profile_family: "rt3-105-map-container-v1".to_string(),
source_kind: "map-fixed-catalog-range".to_string(),
semantic_family: "scenario-named-candidate-availability-table".to_string(),
header_word_0_hex: "0x10000000".to_string(),
header_word_1_hex: "0x00009000".to_string(),
header_word_2_hex: "0x0000332e".to_string(),
observed_entry_count: 67,
zero_trailer_entry_count: 1,
nonzero_trailer_entry_count: 66,
zero_trailer_entry_names: vec!["Nuclear Power Plant".to_string()],
footer_progress_word_0_hex: "0x000032dc".to_string(),
footer_progress_word_1_hex: "0x00003714".to_string(),
availability_by_name: availability_a,
};
let mut availability_b = BTreeMap::new();
availability_b.insert("AutoPlant".to_string(), 0u32);
availability_b.insert("Nuclear Power Plant".to_string(), 0u32);
let sample_b = RuntimeCandidateTableSample {
path: "b.gmp".to_string(),
profile_family: "rt3-105-scenario-map-container-v1".to_string(),
source_kind: "map-fixed-catalog-range".to_string(),
semantic_family: "scenario-named-candidate-availability-table".to_string(),
header_word_0_hex: "0x00000000".to_string(),
header_word_1_hex: "0x00000000".to_string(),
header_word_2_hex: "0x0000332e".to_string(),
observed_entry_count: 67,
zero_trailer_entry_count: 2,
nonzero_trailer_entry_count: 65,
zero_trailer_entry_names: vec![
"AutoPlant".to_string(),
"Nuclear Power Plant".to_string(),
],
footer_progress_word_0_hex: "0x000032dc".to_string(),
footer_progress_word_1_hex: "0x00003714".to_string(),
availability_by_name: availability_b,
};
let differences =
diff_candidate_table_samples(&[sample_a, sample_b]).expect("diff should succeed");
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.profile_family")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.header_word_0_hex")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.availability_by_name.AutoPlant")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.zero_trailer_entry_names[0]")
);
}
#[test]
fn diffs_recipe_book_line_samples_across_multiple_files() {
let sample_a = RuntimeRecipeBookLineSample {
path: "a.gmp".to_string(),
profile_family: "rt3-105-map-container-v1".to_string(),
source_kind: "recipe-book-summary".to_string(),
book_count: 12,
book_stride_hex: "0x4e1".to_string(),
line_count: 5,
line_stride_hex: "0x30".to_string(),
book_head_kind_by_index: BTreeMap::from([("book00".to_string(), "mixed".to_string())]),
book_line_area_kind_by_index: BTreeMap::from([(
"book00".to_string(),
"mixed".to_string(),
)]),
max_annual_production_word_hex_by_book: BTreeMap::from([(
"book00".to_string(),
"0x41200000".to_string(),
)]),
line_kind_by_path: BTreeMap::from([("book00.line00".to_string(), "mixed".to_string())]),
mode_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x00000003".to_string(),
)]),
annual_amount_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x41a00000".to_string(),
)]),
supplied_cargo_token_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x00000017".to_string(),
)]),
demanded_cargo_token_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x0000002a".to_string(),
)]),
};
let sample_b = RuntimeRecipeBookLineSample {
path: "b.gms".to_string(),
profile_family: "rt3-105-alt-save-container-v1".to_string(),
source_kind: "recipe-book-summary".to_string(),
book_count: 12,
book_stride_hex: "0x4e1".to_string(),
line_count: 5,
line_stride_hex: "0x30".to_string(),
book_head_kind_by_index: BTreeMap::from([("book00".to_string(), "mixed".to_string())]),
book_line_area_kind_by_index: BTreeMap::from([(
"book00".to_string(),
"mixed".to_string(),
)]),
max_annual_production_word_hex_by_book: BTreeMap::from([(
"book00".to_string(),
"0x41200000".to_string(),
)]),
line_kind_by_path: BTreeMap::from([("book00.line00".to_string(), "zero".to_string())]),
mode_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x00000000".to_string(),
)]),
annual_amount_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x00000000".to_string(),
)]),
supplied_cargo_token_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x00000000".to_string(),
)]),
demanded_cargo_token_word_hex_by_path: BTreeMap::from([(
"book00.line00".to_string(),
"0x00000000".to_string(),
)]),
};
let differences = diff_recipe_book_line_samples(&[sample_a, sample_b])
.expect("recipe-book diff should succeed");
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.profile_family")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.line_kind_by_path.book00.line00")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.mode_word_hex_by_path.book00.line00")
);
assert!(differences.iter().any(
|entry| entry.field_path == "$.supplied_cargo_token_word_hex_by_path.book00.line00"
));
}
#[test]
fn recipe_book_content_diff_ignores_wrapper_metadata() {
let sample_a = RuntimeRecipeBookLineSample {
path: "a.gmp".to_string(),
profile_family: "rt3-105-map-container-v1".to_string(),
source_kind: "recipe-book-summary".to_string(),
book_count: 12,
book_stride_hex: "0x4e1".to_string(),
line_count: 5,
line_stride_hex: "0x30".to_string(),
book_head_kind_by_index: BTreeMap::from([("book00".to_string(), "mixed".to_string())]),
book_line_area_kind_by_index: BTreeMap::from([(
"book00".to_string(),
"mixed".to_string(),
)]),
max_annual_production_word_hex_by_book: BTreeMap::from([(
"book00".to_string(),
"0x00000000".to_string(),
)]),
line_kind_by_path: BTreeMap::from([("book00.line02".to_string(), "mixed".to_string())]),
mode_word_hex_by_path: BTreeMap::from([(
"book00.line02".to_string(),
"0x00110000".to_string(),
)]),
annual_amount_word_hex_by_path: BTreeMap::from([(
"book00.line02".to_string(),
"0x00000000".to_string(),
)]),
supplied_cargo_token_word_hex_by_path: BTreeMap::from([(
"book00.line02".to_string(),
"0x000040a0".to_string(),
)]),
demanded_cargo_token_word_hex_by_path: BTreeMap::from([(
"book00.line01".to_string(),
"0x72470000".to_string(),
)]),
};
let mut sample_b = sample_a.clone();
sample_b.path = "b.gms".to_string();
sample_b.profile_family = "rt3-105-save-container-v1".to_string();
sample_b.source_kind = "recipe-book-summary".to_string();
let differences = diff_recipe_book_line_samples(&[sample_a.clone(), sample_b.clone()])
.expect("wrapper-aware diff should succeed");
let content_differences = diff_recipe_book_line_content_samples(&[sample_a, sample_b])
.expect("content diff should succeed");
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.profile_family")
);
assert!(content_differences.is_empty());
}
#[test]
fn diffs_setup_payload_core_samples_across_multiple_files() {
let sample_a = RuntimeSetupPayloadCoreSample {
path: "a.gmp".to_string(),
file_extension: "gmp".to_string(),
inferred_profile_family: "rt3-105-map-container-v1".to_string(),
payload_word_0x14: 0x0001,
payload_word_0x14_hex: "0x0001".to_string(),
payload_byte_0x20: 0x05,
payload_byte_0x20_hex: "0x05".to_string(),
marker_bytes_0x2c9_0x2d0_hex: "0000000000000000".to_string(),
row_category_byte_0x31a: 0x00,
row_category_byte_0x31a_hex: "0x00".to_string(),
row_visibility_byte_0x31b: 0x00,
row_visibility_byte_0x31b_hex: "0x00".to_string(),
row_visibility_byte_0x31c: 0x00,
row_visibility_byte_0x31c_hex: "0x00".to_string(),
row_count_word_0x3ae: 0x0186,
row_count_word_0x3ae_hex: "0x0186".to_string(),
payload_word_0x3b2: 0x0001,
payload_word_0x3b2_hex: "0x0001".to_string(),
payload_word_0x3ba: 0x0001,
payload_word_0x3ba_hex: "0x0001".to_string(),
candidate_header_word_0_hex: Some("0x10000000".to_string()),
candidate_header_word_1_hex: Some("0x00009000".to_string()),
};
let sample_b = RuntimeSetupPayloadCoreSample {
path: "b.gms".to_string(),
file_extension: "gms".to_string(),
inferred_profile_family: "rt3-105-scenario-save-container-v1".to_string(),
payload_word_0x14: 0x0001,
payload_word_0x14_hex: "0x0001".to_string(),
payload_byte_0x20: 0x05,
payload_byte_0x20_hex: "0x05".to_string(),
marker_bytes_0x2c9_0x2d0_hex: "0000000000000000".to_string(),
row_category_byte_0x31a: 0x00,
row_category_byte_0x31a_hex: "0x00".to_string(),
row_visibility_byte_0x31b: 0x00,
row_visibility_byte_0x31b_hex: "0x00".to_string(),
row_visibility_byte_0x31c: 0x00,
row_visibility_byte_0x31c_hex: "0x00".to_string(),
row_count_word_0x3ae: 0x0186,
row_count_word_0x3ae_hex: "0x0186".to_string(),
payload_word_0x3b2: 0x0006,
payload_word_0x3b2_hex: "0x0006".to_string(),
payload_word_0x3ba: 0x0001,
payload_word_0x3ba_hex: "0x0001".to_string(),
candidate_header_word_0_hex: Some("0x00000000".to_string()),
candidate_header_word_1_hex: Some("0x00000000".to_string()),
};
let differences =
diff_setup_payload_core_samples(&[sample_a, sample_b]).expect("diff should succeed");
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.file_extension")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.inferred_profile_family")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.payload_word_0x3b2")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.candidate_header_word_0_hex")
);
}
#[test]
fn diffs_setup_launch_payload_samples_across_multiple_files() {
let sample_a = RuntimeSetupLaunchPayloadSample {
path: "a.gmp".to_string(),
file_extension: "gmp".to_string(),
inferred_profile_family: "rt3-105-map-container-v1".to_string(),
launch_flag_byte_0x22: 0x53,
launch_flag_byte_0x22_hex: "0x53".to_string(),
campaign_progress_in_known_range: false,
campaign_progress_scenario_name: None,
campaign_progress_page_index: None,
launch_selector_byte_0x33: 0x00,
launch_selector_byte_0x33_hex: "0x00".to_string(),
launch_token_block_0x23_0x32_hex: "01311154010000000000000000000000".to_string(),
campaign_selector_values: BTreeMap::from([
("Go West!".to_string(), 0x01),
("Germantown".to_string(), 0x31),
]),
nonzero_campaign_selector_values: BTreeMap::from([
("Go West!".to_string(), 0x01),
("Germantown".to_string(), 0x31),
]),
};
let sample_b = RuntimeSetupLaunchPayloadSample {
path: "b.gms".to_string(),
file_extension: "gms".to_string(),
inferred_profile_family: "rt3-105-save-container-v1".to_string(),
launch_flag_byte_0x22: 0xae,
launch_flag_byte_0x22_hex: "0xae".to_string(),
campaign_progress_in_known_range: false,
campaign_progress_scenario_name: None,
campaign_progress_page_index: None,
launch_selector_byte_0x33: 0x00,
launch_selector_byte_0x33_hex: "0x00".to_string(),
launch_token_block_0x23_0x32_hex: "01439aae010000000000000000000000".to_string(),
campaign_selector_values: BTreeMap::from([
("Go West!".to_string(), 0x01),
("Germantown".to_string(), 0x43),
]),
nonzero_campaign_selector_values: BTreeMap::from([
("Go West!".to_string(), 0x01),
("Germantown".to_string(), 0x43),
]),
};
let differences =
diff_setup_launch_payload_samples(&[sample_a, sample_b]).expect("diff should succeed");
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.file_extension")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.inferred_profile_family")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.launch_flag_byte_0x22")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.launch_token_block_0x23_0x32_hex")
);
assert!(
differences
.iter()
.any(|entry| entry.field_path == "$.campaign_selector_values.Germantown")
);
}
fn write_temp_json<T: Serialize>(stem: &str, value: &T) -> PathBuf {
let nonce = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.expect("system time should be after epoch")
.as_nanos();
let path = std::env::temp_dir().join(format!("rrt-cli-{stem}-{nonce}.json"));
let bytes = serde_json::to_vec_pretty(value).expect("json serialization should succeed");
fs::write(&path, bytes).expect("temp json should be written");
path
}
}