From 0fa3c940f253c995ef0cd0292779e01e54a60d23 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Sun, 8 Feb 2026 00:15:26 +0800 Subject: [PATCH 1/3] Restructure tests: extract inline tests to src/tests_unit/, consolidate integration tests - Extract #[cfg(test)] mod tests blocks from 73 src/ files into src/tests_unit/ using #[path] - Consolidate 6 integration test binaries into 1 (tests/main.rs) - Move internal tests (property, reduction graph, trait consistency, unitdiskmapping algorithms) from tests/ to src/tests_unit/ - Keep only user-facing tests in tests/: integration.rs and reductions.rs - All 1582 tests preserved, 0 failures, clippy clean Co-Authored-By: Claude Opus 4.6 --- .claude/CLAUDE.md | 2 + .claude/rules/testing.md | 16 + src/config.rs | 108 +-- src/graph_types.rs | 74 +- src/io.rs | 85 +- src/lib.rs | 16 + src/models/graph/clique.rs | 275 +------ src/models/graph/dominating_set.rs | 249 +----- src/models/graph/independent_set.rs | 258 +----- src/models/graph/kcoloring.rs | 196 +---- src/models/graph/matching.rs | 236 +----- src/models/graph/max_cut.rs | 229 +----- src/models/graph/maximal_is.rs | 253 +----- src/models/graph/vertex_covering.rs | 242 +----- src/models/optimization/ilp.rs | 588 +------------- src/models/optimization/qubo.rs | 140 +--- src/models/optimization/spin_glass.rs | 197 +---- src/models/satisfiability/ksat.rs | 172 +--- src/models/satisfiability/sat.rs | 314 +------- src/models/set/set_covering.rs | 200 +---- src/models/set/set_packing.rs | 224 +----- src/models/specialized/biclique_cover.rs | 155 +--- src/models/specialized/bmf.rs | 188 +---- src/models/specialized/circuit.rs | 274 +------ src/models/specialized/factoring.rs | 156 +--- src/models/specialized/paintshop.rs | 151 +--- src/polynomial.rs | 100 +-- src/registry/category.rs | 114 +-- src/registry/info.rs | 47 +- src/rules/circuit_spinglass.rs | 526 +------------ src/rules/clique_ilp.rs | 302 +------ src/rules/coloring_ilp.rs | 285 +------ src/rules/cost.rs | 97 +-- src/rules/dominatingset_ilp.rs | 239 +----- src/rules/factoring_circuit.rs | 301 +------ src/rules/factoring_ilp.rs | 306 +------- src/rules/graph.rs | 734 +----------------- src/rules/independentset_ilp.rs | 238 +----- src/rules/independentset_setpacking.rs | 138 +--- src/rules/matching_ilp.rs | 256 +----- src/rules/matching_setpacking.rs | 197 +---- src/rules/registry.rs | 128 +-- src/rules/sat_coloring.rs | 308 +------- src/rules/sat_dominatingset.rs | 324 +------- src/rules/sat_independentset.rs | 316 +------- src/rules/sat_ksat.rs | 334 +------- src/rules/setcovering_ilp.rs | 238 +----- src/rules/setpacking_ilp.rs | 226 +----- src/rules/spinglass_maxcut.rs | 101 +-- src/rules/spinglass_qubo.rs | 139 +--- src/rules/traits.rs | 8 +- src/rules/unitdiskmapping/alpha_tensor.rs | 167 +--- src/rules/unitdiskmapping/copyline.rs | 347 +-------- src/rules/unitdiskmapping/grid.rs | 215 +---- .../unitdiskmapping/ksg/gadgets_weighted.rs | 49 +- src/rules/unitdiskmapping/ksg/mapping.rs | 105 +-- .../unitdiskmapping/pathdecomposition.rs | 181 +---- .../unitdiskmapping/triangular/mapping.rs | 78 +- src/rules/unitdiskmapping/triangular/mod.rs | 125 +-- src/rules/unitdiskmapping/weighted.rs | 135 +--- src/rules/vertexcovering_ilp.rs | 284 +------ src/rules/vertexcovering_independentset.rs | 95 +-- src/rules/vertexcovering_setcovering.rs | 188 +---- src/solvers/brute_force.rs | 371 +-------- src/solvers/ilp/solver.rs | 250 +----- src/testing/macros.rs | 37 +- src/testing/mod.rs | 41 +- src/tests_unit/config.rs | 104 +++ .../tests_unit/graph_models.rs 
| 6 +- src/tests_unit/graph_types.rs | 70 ++ src/tests_unit/io.rs | 81 ++ src/tests_unit/models/graph/clique.rs | 271 +++++++ src/tests_unit/models/graph/dominating_set.rs | 245 ++++++ .../models/graph/independent_set.rs | 61 +- .../tests_unit/models/graph/kcoloring.rs | 24 +- src/tests_unit/models/graph/matching.rs | 232 ++++++ src/tests_unit/models/graph/max_cut.rs | 225 ++++++ src/tests_unit/models/graph/maximal_is.rs | 249 ++++++ .../models/graph/vertex_covering.rs | 62 +- src/tests_unit/models/optimization/ilp.rs | 584 ++++++++++++++ src/tests_unit/models/optimization/qubo.rs | 136 ++++ .../models/optimization/spin_glass.rs | 193 +++++ src/tests_unit/models/satisfiability/ksat.rs | 168 ++++ src/tests_unit/models/satisfiability/sat.rs | 310 ++++++++ src/tests_unit/models/set/set_covering.rs | 196 +++++ src/tests_unit/models/set/set_packing.rs | 220 ++++++ .../models/specialized/biclique_cover.rs | 151 ++++ src/tests_unit/models/specialized/bmf.rs | 184 +++++ src/tests_unit/models/specialized/circuit.rs | 270 +++++++ .../models/specialized/factoring.rs | 152 ++++ .../models/specialized/paintshop.rs | 147 ++++ src/tests_unit/polynomial.rs | 96 +++ .../tests_unit/property.rs | 6 +- .../tests_unit/reduction_graph.rs | 133 +++- src/tests_unit/registry/category.rs | 110 +++ src/tests_unit/registry/info.rs | 43 + src/tests_unit/rules/circuit_spinglass.rs | 522 +++++++++++++ src/tests_unit/rules/clique_ilp.rs | 298 +++++++ src/tests_unit/rules/coloring_ilp.rs | 281 +++++++ src/tests_unit/rules/cost.rs | 93 +++ src/tests_unit/rules/dominatingset_ilp.rs | 235 ++++++ src/tests_unit/rules/factoring_circuit.rs | 297 +++++++ src/tests_unit/rules/factoring_ilp.rs | 302 +++++++ src/tests_unit/rules/graph.rs | 730 +++++++++++++++++ src/tests_unit/rules/independentset_ilp.rs | 234 ++++++ .../rules/independentset_setpacking.rs | 134 ++++ src/tests_unit/rules/matching_ilp.rs | 252 ++++++ src/tests_unit/rules/matching_setpacking.rs | 193 +++++ src/tests_unit/rules/registry.rs | 124 +++ src/tests_unit/rules/sat_coloring.rs | 304 ++++++++ src/tests_unit/rules/sat_dominatingset.rs | 320 ++++++++ src/tests_unit/rules/sat_independentset.rs | 312 ++++++++ src/tests_unit/rules/sat_ksat.rs | 330 ++++++++ src/tests_unit/rules/setcovering_ilp.rs | 234 ++++++ src/tests_unit/rules/setpacking_ilp.rs | 222 ++++++ src/tests_unit/rules/spinglass_maxcut.rs | 97 +++ src/tests_unit/rules/spinglass_qubo.rs | 135 ++++ src/tests_unit/rules/traits.rs | 4 + .../rules/unitdiskmapping/alpha_tensor.rs | 163 ++++ .../rules/unitdiskmapping/copyline.rs | 343 ++++++++ src/tests_unit/rules/unitdiskmapping/grid.rs | 211 +++++ .../unitdiskmapping/ksg/gadgets_weighted.rs | 45 ++ .../rules/unitdiskmapping/ksg/mapping.rs | 101 +++ .../unitdiskmapping/pathdecomposition.rs | 177 +++++ .../unitdiskmapping/triangular/mapping.rs | 74 ++ .../rules/unitdiskmapping/triangular/mod.rs | 121 +++ .../rules/unitdiskmapping/weighted.rs | 131 ++++ src/tests_unit/rules/vertexcovering_ilp.rs | 280 +++++++ .../rules/vertexcovering_independentset.rs | 91 +++ .../rules/vertexcovering_setcovering.rs | 184 +++++ src/tests_unit/solvers/brute_force.rs | 367 +++++++++ src/tests_unit/solvers/ilp/solver.rs | 246 ++++++ src/tests_unit/testing/macros.rs | 33 + src/tests_unit/testing/mod.rs | 37 + src/tests_unit/topology/graph.rs | 135 ++++ src/tests_unit/topology/grid_graph.rs | 224 ++++++ src/tests_unit/topology/hypergraph.rs | 109 +++ src/tests_unit/topology/small_graphs.rs | 181 +++++ src/tests_unit/topology/unit_disk_graph.rs | 136 ++++ 
src/tests_unit/trait_consistency.rs | 137 ++++ src/tests_unit/traits.rs | 429 ++++++++++ src/tests_unit/truth_table.rs | 195 +++++ src/tests_unit/types.rs | 132 ++++ .../unitdiskmapping_algorithms}/common.rs | 12 +- .../unitdiskmapping_algorithms}/copyline.rs | 2 +- .../unitdiskmapping_algorithms}/gadgets.rs | 16 +- .../gadgets_ground_truth.rs | 2 +- .../julia_comparison.rs | 10 +- .../unitdiskmapping_algorithms}/map_graph.rs | 6 +- .../mapping_result.rs | 18 +- .../unitdiskmapping_algorithms}/mod.rs | 0 .../unitdiskmapping_algorithms}/triangular.rs | 8 +- .../unitdiskmapping_algorithms}/weighted.rs | 70 +- src/tests_unit/variant.rs | 143 ++++ src/topology/graph.rs | 139 +--- src/topology/grid_graph.rs | 228 +----- src/topology/hypergraph.rs | 113 +-- src/topology/small_graphs.rs | 185 +---- src/topology/unit_disk_graph.rs | 140 +--- src/traits.rs | 433 +---------- src/truth_table.rs | 199 +---- src/types.rs | 136 +--- src/variant.rs | 147 +--- tests/main.rs | 4 + tests/property/graph_properties.rs | 151 ---- tests/rules/mod.rs | 3 - tests/rules_unitdiskmapping.rs | 5 - .../integration.rs} | 135 ---- .../reductions.rs} | 66 -- 169 files changed, 14997 insertions(+), 15706 deletions(-) create mode 100644 src/tests_unit/config.rs rename tests/unit_graph_tests.rs => src/tests_unit/graph_models.rs (99%) create mode 100644 src/tests_unit/graph_types.rs create mode 100644 src/tests_unit/io.rs create mode 100644 src/tests_unit/models/graph/clique.rs create mode 100644 src/tests_unit/models/graph/dominating_set.rs rename tests/unit/graph/independent_set_tests.rs => src/tests_unit/models/graph/independent_set.rs (77%) rename tests/unit/graph/coloring_tests.rs => src/tests_unit/models/graph/kcoloring.rs (90%) create mode 100644 src/tests_unit/models/graph/matching.rs create mode 100644 src/tests_unit/models/graph/max_cut.rs create mode 100644 src/tests_unit/models/graph/maximal_is.rs rename tests/unit/graph/vertex_covering_tests.rs => src/tests_unit/models/graph/vertex_covering.rs (75%) create mode 100644 src/tests_unit/models/optimization/ilp.rs create mode 100644 src/tests_unit/models/optimization/qubo.rs create mode 100644 src/tests_unit/models/optimization/spin_glass.rs create mode 100644 src/tests_unit/models/satisfiability/ksat.rs create mode 100644 src/tests_unit/models/satisfiability/sat.rs create mode 100644 src/tests_unit/models/set/set_covering.rs create mode 100644 src/tests_unit/models/set/set_packing.rs create mode 100644 src/tests_unit/models/specialized/biclique_cover.rs create mode 100644 src/tests_unit/models/specialized/bmf.rs create mode 100644 src/tests_unit/models/specialized/circuit.rs create mode 100644 src/tests_unit/models/specialized/factoring.rs create mode 100644 src/tests_unit/models/specialized/paintshop.rs create mode 100644 src/tests_unit/polynomial.rs rename tests/property_tests.rs => src/tests_unit/property.rs (97%) rename tests/set_theoretic_tests.rs => src/tests_unit/reduction_graph.rs (63%) create mode 100644 src/tests_unit/registry/category.rs create mode 100644 src/tests_unit/registry/info.rs create mode 100644 src/tests_unit/rules/circuit_spinglass.rs create mode 100644 src/tests_unit/rules/clique_ilp.rs create mode 100644 src/tests_unit/rules/coloring_ilp.rs create mode 100644 src/tests_unit/rules/cost.rs create mode 100644 src/tests_unit/rules/dominatingset_ilp.rs create mode 100644 src/tests_unit/rules/factoring_circuit.rs create mode 100644 src/tests_unit/rules/factoring_ilp.rs create mode 100644 src/tests_unit/rules/graph.rs create mode 100644 
src/tests_unit/rules/independentset_ilp.rs create mode 100644 src/tests_unit/rules/independentset_setpacking.rs create mode 100644 src/tests_unit/rules/matching_ilp.rs create mode 100644 src/tests_unit/rules/matching_setpacking.rs create mode 100644 src/tests_unit/rules/registry.rs create mode 100644 src/tests_unit/rules/sat_coloring.rs create mode 100644 src/tests_unit/rules/sat_dominatingset.rs create mode 100644 src/tests_unit/rules/sat_independentset.rs create mode 100644 src/tests_unit/rules/sat_ksat.rs create mode 100644 src/tests_unit/rules/setcovering_ilp.rs create mode 100644 src/tests_unit/rules/setpacking_ilp.rs create mode 100644 src/tests_unit/rules/spinglass_maxcut.rs create mode 100644 src/tests_unit/rules/spinglass_qubo.rs create mode 100644 src/tests_unit/rules/traits.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/alpha_tensor.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/copyline.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/grid.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/ksg/mapping.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/pathdecomposition.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/triangular/mapping.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/triangular/mod.rs create mode 100644 src/tests_unit/rules/unitdiskmapping/weighted.rs create mode 100644 src/tests_unit/rules/vertexcovering_ilp.rs create mode 100644 src/tests_unit/rules/vertexcovering_independentset.rs create mode 100644 src/tests_unit/rules/vertexcovering_setcovering.rs create mode 100644 src/tests_unit/solvers/brute_force.rs create mode 100644 src/tests_unit/solvers/ilp/solver.rs create mode 100644 src/tests_unit/testing/macros.rs create mode 100644 src/tests_unit/testing/mod.rs create mode 100644 src/tests_unit/topology/graph.rs create mode 100644 src/tests_unit/topology/grid_graph.rs create mode 100644 src/tests_unit/topology/hypergraph.rs create mode 100644 src/tests_unit/topology/small_graphs.rs create mode 100644 src/tests_unit/topology/unit_disk_graph.rs create mode 100644 src/tests_unit/trait_consistency.rs create mode 100644 src/tests_unit/traits.rs create mode 100644 src/tests_unit/truth_table.rs create mode 100644 src/tests_unit/types.rs rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/common.rs (93%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/copyline.rs (99%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/gadgets.rs (98%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/gadgets_ground_truth.rs (99%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/julia_comparison.rs (98%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/map_graph.rs (98%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/mapping_result.rs (97%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/mod.rs (100%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/triangular.rs (98%) rename {tests/rules/unitdiskmapping => src/tests_unit/unitdiskmapping_algorithms}/weighted.rs (91%) create mode 100644 src/tests_unit/variant.rs create mode 100644 tests/main.rs delete mode 100644 tests/property/graph_properties.rs delete mode 100644 tests/rules/mod.rs delete mode 
100644 tests/rules_unitdiskmapping.rs
 rename tests/{integration_tests.rs => suites/integration.rs} (73%)
 rename tests/{reduction_tests.rs => suites/reductions.rs} (88%)

diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md
index 8bbaf87..71c27ea 100644
--- a/.claude/CLAUDE.md
+++ b/.claude/CLAUDE.md
@@ -26,6 +26,8 @@ make test clippy export-graph # Must pass before PR
 - `src/traits.rs` - `Problem`, `ConstraintSatisfactionProblem` traits
 - `src/rules/traits.rs` - `ReduceTo`, `ReductionResult` traits
 - `src/registry/` - Compile-time reduction metadata collection
+- `src/tests_unit/` - Unit test files (extracted from inline `mod tests` blocks via `#[path]`)
+- `tests/main.rs` - User-facing integration tests only (modules in `tests/suites/`)

 ### Trait Hierarchy
diff --git a/.claude/rules/testing.md b/.claude/rules/testing.md
index fa622d6..ba3d52d 100644
--- a/.claude/rules/testing.md
+++ b/.claude/rules/testing.md
@@ -48,7 +48,23 @@ make clippy # No warnings
 make coverage # >95% for new code
 ```

+## Test File Organization
+
+Unit tests live in `src/tests_unit/`, mirroring `src/` structure. Source files reference them via `#[path]`:
+
+```rust
+// In src/rules/foo_bar.rs:
+#[cfg(test)]
+#[path = "../tests_unit/rules/foo_bar.rs"]
+mod tests;
+```
+
+The `#[path]` is relative to the source file's directory. `use super::*` in the test file resolves to the parent module (same as inline tests).
+
+Integration tests are consolidated into a single binary at `tests/main.rs`, with test modules in `tests/suites/`.
+
 ## Anti-patterns
 - Don't skip closed-loop tests for reductions
 - Don't test only happy paths - include edge cases
 - Don't ignore clippy warnings
+- Don't add inline `mod tests` blocks in `src/` — use `src/tests_unit/` with `#[path]`
diff --git a/src/config.rs b/src/config.rs
index 480334c..6f97df0 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -108,109 +108,5 @@ pub fn bits_to_config(bits: &[bool]) -> Vec<usize> {
 }

 #[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_config_iterator_binary() {
-        let iter = ConfigIterator::new(3, 2);
-        assert_eq!(iter.total(), 8);
-
-        let configs: Vec<_> = iter.collect();
-        assert_eq!(configs.len(), 8);
-        assert_eq!(configs[0], vec![0, 0, 0]);
-        assert_eq!(configs[1], vec![0, 0, 1]);
-        assert_eq!(configs[2], vec![0, 1, 0]);
-        assert_eq!(configs[3], vec![0, 1, 1]);
-        assert_eq!(configs[4], vec![1, 0, 0]);
-        assert_eq!(configs[5], vec![1, 0, 1]);
-        assert_eq!(configs[6], vec![1, 1, 0]);
-        assert_eq!(configs[7], vec![1, 1, 1]);
-    }
-
-    #[test]
-    fn test_config_iterator_ternary() {
-        let iter = ConfigIterator::new(2, 3);
-        assert_eq!(iter.total(), 9);
-
-        let configs: Vec<_> = iter.collect();
-        assert_eq!(configs.len(), 9);
-        assert_eq!(configs[0], vec![0, 0]);
-        assert_eq!(configs[1], vec![0, 1]);
-        assert_eq!(configs[2], vec![0, 2]);
-        assert_eq!(configs[3], vec![1, 0]);
-        assert_eq!(configs[8], vec![2, 2]);
-    }
-
-    #[test]
-    fn test_config_iterator_empty() {
-        let iter = ConfigIterator::new(0, 2);
-        assert_eq!(iter.total(), 1);
-        let configs: Vec<_> = iter.collect();
-        assert_eq!(configs.len(), 0); // Empty because num_variables is 0
-    }
-
-    #[test]
-    fn test_config_iterator_single_variable() {
-        let iter = ConfigIterator::new(1, 4);
-        assert_eq!(iter.total(), 4);
-
-        let configs: Vec<_> = iter.collect();
-        assert_eq!(configs, vec![vec![0], vec![1], vec![2], vec![3]]);
-    }
-
-    #[test]
-    fn test_index_to_config() {
-        assert_eq!(index_to_config(0, 3, 2), vec![0, 0, 0]);
-        assert_eq!(index_to_config(1, 3, 2), vec![0, 0, 1]);
-        assert_eq!(index_to_config(7,
3, 2), vec![1, 1, 1]); - assert_eq!(index_to_config(5, 3, 2), vec![1, 0, 1]); - } - - #[test] - fn test_config_to_index() { - assert_eq!(config_to_index(&[0, 0, 0], 2), 0); - assert_eq!(config_to_index(&[0, 0, 1], 2), 1); - assert_eq!(config_to_index(&[1, 1, 1], 2), 7); - assert_eq!(config_to_index(&[1, 0, 1], 2), 5); - } - - #[test] - fn test_index_config_roundtrip() { - for i in 0..27 { - let config = index_to_config(i, 3, 3); - let back = config_to_index(&config, 3); - assert_eq!(i, back); - } - } - - #[test] - fn test_config_to_bits() { - assert_eq!( - config_to_bits(&[0, 1, 0, 1]), - vec![false, true, false, true] - ); - assert_eq!(config_to_bits(&[0, 0, 0]), vec![false, false, false]); - assert_eq!(config_to_bits(&[1, 1, 1]), vec![true, true, true]); - } - - #[test] - fn test_bits_to_config() { - assert_eq!( - bits_to_config(&[false, true, false, true]), - vec![0, 1, 0, 1] - ); - assert_eq!(bits_to_config(&[true, true, true]), vec![1, 1, 1]); - } - - #[test] - fn test_exact_size_iterator() { - let mut iter = ConfigIterator::new(3, 2); - assert_eq!(iter.len(), 8); - iter.next(); - assert_eq!(iter.len(), 7); - iter.next(); - iter.next(); - assert_eq!(iter.len(), 5); - } -} +#[path = "tests_unit/config.rs"] +mod tests; diff --git a/src/graph_types.rs b/src/graph_types.rs index 2ec5ea0..1198232 100644 --- a/src/graph_types.rs +++ b/src/graph_types.rs @@ -67,75 +67,5 @@ declare_graph_subtype!(PlanarGraph => SimpleGraph); declare_graph_subtype!(BipartiteGraph => SimpleGraph); #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_reflexive_subtype() { - fn assert_subtype, B: GraphMarker>() {} - - // Every type is a subtype of itself - assert_subtype::(); - assert_subtype::(); - assert_subtype::(); - } - - #[test] - fn test_subtype_entries_registered() { - let entries: Vec<_> = inventory::iter::().collect(); - - // Should have at least 4 entries - assert!(entries.len() >= 4); - - // Check specific relationships - assert!(entries - .iter() - .any(|e| e.subtype == "UnitDiskGraph" && e.supertype == "SimpleGraph")); - assert!(entries - .iter() - .any(|e| e.subtype == "PlanarGraph" && e.supertype == "SimpleGraph")); - } - - #[test] - fn test_declared_subtypes() { - fn assert_subtype, B: GraphMarker>() {} - - // Declared relationships - assert_subtype::(); - assert_subtype::(); - assert_subtype::(); - assert_subtype::(); - } - - #[test] - fn test_graph_type_traits() { - // Test Default - let _: SimpleGraph = Default::default(); - let _: PlanarGraph = Default::default(); - let _: UnitDiskGraph = Default::default(); - let _: BipartiteGraph = Default::default(); - - // Test Copy (SimpleGraph implements Copy, so no need to clone) - let g = SimpleGraph; - let _g2 = g; // Copy - let g = SimpleGraph; - let _g2 = g; - let _g3 = g; // still usable - } - - #[test] - fn test_bipartite_entry_registered() { - let entries: Vec<_> = inventory::iter::().collect(); - assert!(entries - .iter() - .any(|e| e.subtype == "BipartiteGraph" && e.supertype == "SimpleGraph")); - } - - #[test] - fn test_unit_disk_to_planar_registered() { - let entries: Vec<_> = inventory::iter::().collect(); - assert!(entries - .iter() - .any(|e| e.subtype == "UnitDiskGraph" && e.supertype == "PlanarGraph")); - } -} +#[path = "tests_unit/graph_types.rs"] +mod tests; diff --git a/src/io.rs b/src/io.rs index 04951ac..4f69391 100644 --- a/src/io.rs +++ b/src/io.rs @@ -129,86 +129,5 @@ pub fn write_file>(path: P, contents: &str) -> Result<()> { } #[cfg(test)] -mod tests { - use super::*; - use crate::models::graph::IndependentSet; - 
use crate::topology::SimpleGraph; - use std::fs; - - #[test] - fn test_to_json() { - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); - let json = to_json(&problem); - assert!(json.is_ok()); - let json = json.unwrap(); - assert!(json.contains("graph")); - } - - #[test] - fn test_from_json() { - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); - let json = to_json(&problem).unwrap(); - let restored: IndependentSet = from_json(&json).unwrap(); - assert_eq!(restored.num_vertices(), 3); - assert_eq!(restored.num_edges(), 2); - } - - #[test] - fn test_json_compact() { - let problem = IndependentSet::::new(3, vec![(0, 1)]); - let compact = to_json_compact(&problem).unwrap(); - let pretty = to_json(&problem).unwrap(); - // Compact should be shorter - assert!(compact.len() < pretty.len()); - } - - #[test] - fn test_file_roundtrip() { - let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let path = "/tmp/test_problem.json"; - - // Write - write_problem(&problem, path, FileFormat::Json).unwrap(); - - // Read back - let restored: IndependentSet = read_problem(path, FileFormat::Json).unwrap(); - assert_eq!(restored.num_vertices(), 4); - assert_eq!(restored.num_edges(), 3); - - // Cleanup - fs::remove_file(path).ok(); - } - - #[test] - fn test_file_format_from_extension() { - assert_eq!( - FileFormat::from_extension(Path::new("test.json")), - Some(FileFormat::Json) - ); - assert_eq!( - FileFormat::from_extension(Path::new("test.JSON")), - Some(FileFormat::Json) - ); - assert_eq!(FileFormat::from_extension(Path::new("test.txt")), None); - assert_eq!(FileFormat::from_extension(Path::new("noext")), None); - } - - #[test] - fn test_read_write_file() { - let path = "/tmp/test_io.txt"; - let contents = "Hello, World!"; - - write_file(path, contents).unwrap(); - let read_back = read_file(path).unwrap(); - - assert_eq!(read_back, contents); - - fs::remove_file(path).ok(); - } - - #[test] - fn test_invalid_json() { - let result: Result> = from_json("not valid json"); - assert!(result.is_err()); - } -} +#[path = "tests_unit/io.rs"] +mod tests; diff --git a/src/lib.rs b/src/lib.rs index 265b9a1..e30e7a7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -113,3 +113,19 @@ pub use types::{EnergyMode, LocalConstraint, LocalSolutionSize, ProblemSize, Sol // Re-export proc macro for reduction registration pub use problemreductions_macros::reduction; + +#[cfg(test)] +#[path = "tests_unit/graph_models.rs"] +mod test_graph_models; +#[cfg(test)] +#[path = "tests_unit/property.rs"] +mod test_property; +#[cfg(test)] +#[path = "tests_unit/reduction_graph.rs"] +mod test_reduction_graph; +#[cfg(test)] +#[path = "tests_unit/trait_consistency.rs"] +mod test_trait_consistency; +#[cfg(test)] +#[path = "tests_unit/unitdiskmapping_algorithms/mod.rs"] +mod test_unitdiskmapping_algorithms; diff --git a/src/models/graph/clique.rs b/src/models/graph/clique.rs index 71f6297..edf1251 100644 --- a/src/models/graph/clique.rs +++ b/src/models/graph/clique.rs @@ -297,276 +297,5 @@ pub fn is_clique(num_vertices: usize, edges: &[(usize, usize)], selected: &[bool } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_clique_creation() { - let problem = Clique::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - assert_eq!(problem.num_variables(), 4); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_clique_with_weights() { - let problem = - Clique::::with_weights(3, 
vec![(0, 1)], vec![1, 2, 3]); - assert_eq!(problem.weights(), vec![1, 2, 3]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_clique_unweighted() { - let problem = Clique::::new(3, vec![(0, 1)]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_has_edge() { - let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); - assert!(problem.has_edge(0, 1)); - assert!(problem.has_edge(1, 0)); // Undirected - assert!(problem.has_edge(1, 2)); - assert!(!problem.has_edge(0, 2)); - } - - #[test] - fn test_solution_size_valid() { - // Complete graph K3 (triangle) - let problem = Clique::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - - // Valid: all three form a clique - let sol = problem.solution_size(&[1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 3); - - // Valid: any pair - let sol = problem.solution_size(&[1, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 2); - } - - #[test] - fn test_solution_size_invalid() { - // Path graph: 0-1-2 (no edge between 0 and 2) - let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); - - // Invalid: 0 and 2 are not adjacent - let sol = problem.solution_size(&[1, 0, 1]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 2); - - // Invalid: all three selected but not a clique - let sol = problem.solution_size(&[1, 1, 1]); - assert!(!sol.is_valid); - } - - #[test] - fn test_solution_size_empty() { - let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); - let sol = problem.solution_size(&[0, 0, 0]); - assert!(sol.is_valid); // Empty set is a valid clique - assert_eq!(sol.size, 0); - } - - #[test] - fn test_weighted_solution() { - let problem = - Clique::::with_weights(3, vec![(0, 1), (1, 2), (0, 2)], vec![10, 20, 30]); - - // Select vertex 2 (weight 30) - let sol = problem.solution_size(&[0, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 30); - - // Select all three (weights 10 + 20 + 30 = 60) - let sol = problem.solution_size(&[1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 60); - } - - #[test] - fn test_constraints() { - // Path graph: 0-1-2 (non-edge between 0 and 2) - let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); - let constraints = problem.constraints(); - assert_eq!(constraints.len(), 1); // One constraint for non-edge (0, 2) - } - - #[test] - fn test_objectives() { - let problem = - Clique::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 3); // One per vertex - } - - #[test] - fn test_brute_force_triangle() { - // Triangle graph (K3): max clique is all 3 vertices - let problem = - Clique::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 1, 1]); - } - - #[test] - fn test_brute_force_path() { - // Path graph 0-1-2: max clique is any adjacent pair - let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Maximum size is 2 - for sol in &solutions { - let size: usize = sol.iter().sum(); - assert_eq!(size, 2); - // Verify it's valid - let sol_result = problem.solution_size(sol); - assert!(sol_result.is_valid); - } - } - - #[test] - fn test_brute_force_weighted() { - // Path with weights: vertex 1 has high weight - let problem = - Clique::::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Should select {0, 1} (weight 101) 
or {1, 2} (weight 101) - assert!(solutions.len() == 2); - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - assert_eq!(problem.solution_size(sol).size, 101); - } - } - - #[test] - fn test_is_clique_function() { - // Triangle - assert!(is_clique(3, &[(0, 1), (1, 2), (0, 2)], &[true, true, true])); - assert!(is_clique(3, &[(0, 1), (1, 2), (0, 2)], &[true, true, false])); - - // Path - not all pairs adjacent - assert!(!is_clique(3, &[(0, 1), (1, 2)], &[true, false, true])); - assert!(is_clique(3, &[(0, 1), (1, 2)], &[true, true, false])); // Adjacent pair - } - - #[test] - fn test_problem_size() { - let problem = Clique::::new(5, vec![(0, 1), (1, 2), (2, 3)]); - let size = problem.problem_size(); - assert_eq!(size.get("num_vertices"), Some(5)); - assert_eq!(size.get("num_edges"), Some(3)); - } - - #[test] - fn test_energy_mode() { - let problem = Clique::::new(3, vec![(0, 1)]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_edges() { - let problem = Clique::::new(4, vec![(0, 1), (2, 3)]); - let edges = problem.edges(); - assert_eq!(edges.len(), 2); - } - - #[test] - fn test_set_weights() { - let mut problem = Clique::::new(3, vec![(0, 1)]); - problem.set_weights(vec![5, 10, 15]); - assert_eq!(problem.weights(), vec![5, 10, 15]); - } - - #[test] - fn test_empty_graph() { - // No edges means any single vertex is a max clique - let problem = Clique::::new(3, vec![]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 3); - // Each solution should have exactly one vertex selected - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 1); - } - } - - #[test] - fn test_is_satisfied() { - let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); - - assert!(problem.is_satisfied(&[1, 1, 0])); // Valid clique - assert!(problem.is_satisfied(&[0, 1, 1])); // Valid clique - assert!(!problem.is_satisfied(&[1, 0, 1])); // Invalid: 0-2 not adjacent - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = Clique::::from_graph(graph.clone(), vec![1, 2, 3]); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.weights(), vec![1, 2, 3]); - } - - #[test] - fn test_from_graph_unit_weights() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = Clique::::from_graph_unit_weights(graph); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.weights(), vec![1, 1, 1]); - } - - #[test] - fn test_graph_accessor() { - let problem = Clique::::new(3, vec![(0, 1)]); - let graph = problem.graph(); - assert_eq!(graph.num_vertices(), 3); - assert_eq!(graph.num_edges(), 1); - } - - #[test] - fn test_variant() { - let variant = Clique::::variant(); - assert_eq!(variant.len(), 2); - assert_eq!(variant[0], ("graph", "SimpleGraph")); - assert_eq!(variant[1], ("weight", "i32")); - } - - #[test] - fn test_weights_ref() { - let problem = - Clique::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - assert_eq!(problem.weights_ref(), &vec![5, 10, 15]); - } - - #[test] - fn test_is_clique_wrong_len() { - // Wrong length should return false - assert!(!is_clique(3, &[(0, 1)], &[true, false])); - } - - #[test] - fn test_complete_graph() { - // K4 - complete graph with 4 vertices - let problem = Clique::::new( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 1, 1, 1]); 
// All vertices form a clique - } -} +#[path = "../../tests_unit/models/graph/clique.rs"] +mod tests; diff --git a/src/models/graph/dominating_set.rs b/src/models/graph/dominating_set.rs index c472b55..5c5e5de 100644 --- a/src/models/graph/dominating_set.rs +++ b/src/models/graph/dominating_set.rs @@ -275,250 +275,5 @@ pub fn is_dominating_set(num_vertices: usize, edges: &[(usize, usize)], selected } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_dominating_set_creation() { - let problem = DominatingSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - } - - #[test] - fn test_dominating_set_with_weights() { - let problem = - DominatingSet::::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); - assert_eq!(problem.weights(), vec![1, 2, 3]); - } - - #[test] - fn test_neighbors() { - let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (1, 2)]); - let nbrs = problem.neighbors(0); - assert!(nbrs.contains(&1)); - assert!(nbrs.contains(&2)); - assert!(!nbrs.contains(&3)); - } - - #[test] - fn test_closed_neighborhood() { - let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2)]); - let cn = problem.closed_neighborhood(0); - assert!(cn.contains(&0)); - assert!(cn.contains(&1)); - assert!(cn.contains(&2)); - assert!(!cn.contains(&3)); - } - - #[test] - fn test_solution_size_valid() { - // Star graph: center dominates all - let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); - - // Select center - let sol = problem.solution_size(&[1, 0, 0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - // Select all leaves - let sol = problem.solution_size(&[0, 1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 3); - } - - #[test] - fn test_solution_size_invalid() { - let problem = DominatingSet::::new(4, vec![(0, 1), (2, 3)]); - - // Select none - let sol = problem.solution_size(&[0, 0, 0, 0]); - assert!(!sol.is_valid); - - // Select only vertex 0 (doesn't dominate 2, 3) - let sol = problem.solution_size(&[1, 0, 0, 0]); - assert!(!sol.is_valid); - } - - #[test] - fn test_brute_force_star() { - // Star graph: minimum dominating set is the center - let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert!(solutions.contains(&vec![1, 0, 0, 0])); - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 1); - } - } - - #[test] - fn test_brute_force_path() { - // Path 0-1-2-3-4: need to dominate all 5 vertices - let problem = - DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Minimum is 2 (e.g., vertices 1 and 3) - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 2); - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_brute_force_weighted() { - // Star with heavy center - let problem = DominatingSet::::with_weights( - 4, - vec![(0, 1), (0, 2), (0, 3)], - vec![100, 1, 1, 1], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Prefer selecting all leaves (3) over center (100) - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1, 1, 1]); - } - - #[test] - fn test_is_dominating_set_function() { - let edges = vec![(0, 1), (0, 2), (0, 3)]; - - // Center dominates all - assert!(is_dominating_set(4, &edges, &[true, false, false, false])); 
- // All leaves dominate (leaf dominates center which dominates others) - assert!(is_dominating_set(4, &edges, &[false, true, true, true])); - // Single leaf doesn't dominate other leaves - assert!(!is_dominating_set(4, &edges, &[false, true, false, false])); - // Empty doesn't dominate - assert!(!is_dominating_set(4, &edges, &[false, false, false, false])); - } - - #[test] - fn test_constraints() { - let problem = DominatingSet::::new(3, vec![(0, 1), (1, 2)]); - let constraints = problem.constraints(); - assert_eq!(constraints.len(), 3); // One per vertex - } - - #[test] - fn test_energy_mode() { - let problem = DominatingSet::::new(2, vec![(0, 1)]); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_isolated_vertex() { - // Isolated vertex must be in dominating set - let problem = DominatingSet::::new(3, vec![(0, 1)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Vertex 2 is isolated, must be selected - for sol in &solutions { - assert_eq!(sol[2], 1); - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_is_satisfied() { - let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); - - assert!(problem.is_satisfied(&[1, 0, 0, 0])); // Center dominates all - assert!(problem.is_satisfied(&[0, 1, 1, 1])); // Leaves dominate - assert!(!problem.is_satisfied(&[0, 1, 0, 0])); // Missing 2 and 3 - } - - #[test] - fn test_objectives() { - let problem = - DominatingSet::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 3); - } - - #[test] - fn test_set_weights() { - let mut problem = DominatingSet::::new(3, vec![(0, 1)]); - assert!(!problem.is_weighted()); // Initially uniform - problem.set_weights(vec![1, 2, 3]); - assert!(problem.is_weighted()); - assert_eq!(problem.weights(), vec![1, 2, 3]); - } - - #[test] - fn test_is_weighted_empty() { - let problem = DominatingSet::::with_weights(0, vec![], vec![]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_is_dominating_set_wrong_len() { - assert!(!is_dominating_set(3, &[(0, 1)], &[true, false])); - } - - #[test] - fn test_problem_size() { - let problem = DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3)]); - let size = problem.problem_size(); - assert_eq!(size.get("num_vertices"), Some(5)); - assert_eq!(size.get("num_edges"), Some(3)); - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = DominatingSet::::from_graph(graph.clone(), vec![1, 2, 3]); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.weights(), vec![1, 2, 3]); - - let problem2 = DominatingSet::::from_graph_unit_weights(graph); - assert_eq!(problem2.num_vertices(), 3); - assert_eq!(problem2.weights(), vec![1, 1, 1]); - } - - #[test] - fn test_graph_accessor() { - let problem = DominatingSet::::new(3, vec![(0, 1)]); - let graph = problem.graph(); - assert_eq!(graph.num_vertices(), 3); - assert_eq!(graph.num_edges(), 1); - } - - #[test] - fn test_weights_ref() { - let problem = - DominatingSet::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - assert_eq!(problem.weights_ref(), &vec![5, 10, 15]); - } - - #[test] - fn test_variant() { - let variant = DominatingSet::::variant(); - assert_eq!(variant.len(), 2); - assert_eq!(variant[0], ("graph", "SimpleGraph")); - assert_eq!(variant[1], ("weight", "i32")); - } - - #[test] - fn test_edges() { - let problem = DominatingSet::::new(3, vec![(0, 1), (1, 2)]); - let edges = problem.edges(); - 
assert_eq!(edges.len(), 2); - } - - #[test] - fn test_has_edge() { - let problem = DominatingSet::::new(3, vec![(0, 1), (1, 2)]); - assert!(problem.has_edge(0, 1)); - assert!(problem.has_edge(1, 0)); // Undirected - assert!(problem.has_edge(1, 2)); - assert!(!problem.has_edge(0, 2)); - } -} +#[path = "../../tests_unit/models/graph/dominating_set.rs"] +mod tests; diff --git a/src/models/graph/independent_set.rs b/src/models/graph/independent_set.rs index a150a5d..7bf6e97 100644 --- a/src/models/graph/independent_set.rs +++ b/src/models/graph/independent_set.rs @@ -265,259 +265,5 @@ pub fn is_independent_set( } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_independent_set_creation() { - let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - assert_eq!(problem.num_variables(), 4); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_independent_set_with_weights() { - let problem = - IndependentSet::::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); - assert_eq!(problem.weights(), vec![1, 2, 3]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_independent_set_unweighted() { - let problem = IndependentSet::::new(3, vec![(0, 1)]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_has_edge() { - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); - assert!(problem.has_edge(0, 1)); - assert!(problem.has_edge(1, 0)); // Undirected - assert!(problem.has_edge(1, 2)); - assert!(!problem.has_edge(0, 2)); - } - - #[test] - fn test_solution_size_valid() { - let problem = IndependentSet::::new(4, vec![(0, 1), (2, 3)]); - - // Valid: select 0 and 2 (not adjacent) - let sol = problem.solution_size(&[1, 0, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 2); - - // Valid: select 1 and 3 (not adjacent) - let sol = problem.solution_size(&[0, 1, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 2); - } - - #[test] - fn test_solution_size_invalid() { - let problem = IndependentSet::::new(4, vec![(0, 1), (2, 3)]); - - // Invalid: 0 and 1 are adjacent - let sol = problem.solution_size(&[1, 1, 0, 0]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 2); - - // Invalid: 2 and 3 are adjacent - let sol = problem.solution_size(&[0, 0, 1, 1]); - assert!(!sol.is_valid); - } - - #[test] - fn test_solution_size_empty() { - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); - let sol = problem.solution_size(&[0, 0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_weighted_solution() { - let problem = - IndependentSet::::with_weights(3, vec![(0, 1)], vec![10, 20, 30]); - - // Select vertex 2 (weight 30) - let sol = problem.solution_size(&[0, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 30); - - // Select vertices 0 and 2 (weights 10 + 30 = 40) - let sol = problem.solution_size(&[1, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 40); - } - - #[test] - fn test_constraints() { - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); - let constraints = problem.constraints(); - assert_eq!(constraints.len(), 2); // One per edge - } - - #[test] - fn test_objectives() { - let problem = - IndependentSet::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 3); // One per vertex - } - - #[test] - fn test_brute_force_triangle() { - // Triangle graph: maximum IS has size 1 - let problem 
= - IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // All solutions should have exactly 1 vertex selected - assert_eq!(solutions.len(), 3); // Three equivalent solutions - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 1); - } - } - - #[test] - fn test_brute_force_path() { - // Path graph 0-1-2-3: maximum IS = {0,2} or {1,3} or {0,3} - let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Maximum size is 2 - for sol in &solutions { - let size: usize = sol.iter().sum(); - assert_eq!(size, 2); - // Verify it's valid - let sol_result = problem.solution_size(sol); - assert!(sol_result.is_valid); - } - } - - #[test] - fn test_brute_force_weighted() { - // Graph with weights: vertex 1 has high weight but is connected to both 0 and 2 - let problem = - IndependentSet::::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - // Should select vertex 1 (weight 100) over vertices 0+2 (weight 2) - assert_eq!(solutions[0], vec![0, 1, 0]); - } - - #[test] - fn test_is_independent_set_function() { - assert!(is_independent_set(3, &[(0, 1)], &[true, false, true])); - assert!(is_independent_set(3, &[(0, 1)], &[false, true, true])); - assert!(!is_independent_set(3, &[(0, 1)], &[true, true, false])); - assert!(is_independent_set( - 3, - &[(0, 1), (1, 2)], - &[true, false, true] - )); - assert!(!is_independent_set( - 3, - &[(0, 1), (1, 2)], - &[false, true, true] - )); - } - - #[test] - fn test_problem_size() { - let problem = IndependentSet::::new(5, vec![(0, 1), (1, 2), (2, 3)]); - let size = problem.problem_size(); - assert_eq!(size.get("num_vertices"), Some(5)); - assert_eq!(size.get("num_edges"), Some(3)); - } - - #[test] - fn test_energy_mode() { - let problem = IndependentSet::::new(3, vec![(0, 1)]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_edges() { - let problem = IndependentSet::::new(4, vec![(0, 1), (2, 3)]); - let edges = problem.edges(); - assert_eq!(edges.len(), 2); - assert!(edges.contains(&(0, 1)) || edges.contains(&(1, 0))); - assert!(edges.contains(&(2, 3)) || edges.contains(&(3, 2))); - } - - #[test] - fn test_set_weights() { - let mut problem = IndependentSet::::new(3, vec![(0, 1)]); - problem.set_weights(vec![5, 10, 15]); - assert_eq!(problem.weights(), vec![5, 10, 15]); - } - - #[test] - fn test_empty_graph() { - let problem = IndependentSet::::new(3, vec![]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - // All vertices can be selected - assert_eq!(solutions[0], vec![1, 1, 1]); - } - - #[test] - fn test_is_satisfied() { - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); - - assert!(problem.is_satisfied(&[1, 0, 1])); // Valid IS - assert!(problem.is_satisfied(&[0, 1, 0])); // Valid IS - assert!(!problem.is_satisfied(&[1, 1, 0])); // Invalid: 0-1 adjacent - assert!(!problem.is_satisfied(&[0, 1, 1])); // Invalid: 1-2 adjacent - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = IndependentSet::::from_graph(graph.clone(), vec![1, 2, 3]); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.weights(), vec![1, 2, 3]); - } - - #[test] - fn test_from_graph_unit_weights() { - let graph = 
SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = IndependentSet::::from_graph_unit_weights(graph); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.weights(), vec![1, 1, 1]); - } - - #[test] - fn test_graph_accessor() { - let problem = IndependentSet::::new(3, vec![(0, 1)]); - let graph = problem.graph(); - assert_eq!(graph.num_vertices(), 3); - assert_eq!(graph.num_edges(), 1); - } - - #[test] - fn test_variant() { - let variant = IndependentSet::::variant(); - assert_eq!(variant.len(), 2); - assert_eq!(variant[0], ("graph", "SimpleGraph")); - assert_eq!(variant[1], ("weight", "i32")); - } - - #[test] - fn test_weights_ref() { - let problem = - IndependentSet::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - assert_eq!(problem.weights_ref(), &vec![5, 10, 15]); - } -} +#[path = "../../tests_unit/models/graph/independent_set.rs"] +mod tests; diff --git a/src/models/graph/kcoloring.rs b/src/models/graph/kcoloring.rs index 63f40bd..2350ee1 100644 --- a/src/models/graph/kcoloring.rs +++ b/src/models/graph/kcoloring.rs @@ -228,197 +228,5 @@ pub fn is_valid_coloring( } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_kcoloring_creation() { - let problem = KColoring::<3, SimpleGraph, i32>::new(4, vec![(0, 1), (1, 2), (2, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - assert_eq!(problem.num_colors(), 3); - assert_eq!(problem.num_variables(), 4); - assert_eq!(problem.num_flavors(), 3); - } - - #[test] - fn test_solution_size_valid() { - let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); - - // Valid: different colors on adjacent vertices - let sol = problem.solution_size(&[0, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - - let sol = problem.solution_size(&[0, 1, 2]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_solution_size_invalid() { - let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); - - // Invalid: adjacent vertices have same color - let sol = problem.solution_size(&[0, 0, 1]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 1); // 1 conflict - - let sol = problem.solution_size(&[0, 0, 0]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 2); // 2 conflicts - } - - #[test] - fn test_brute_force_path() { - // Path graph can be 2-colored - let problem = KColoring::<2, SimpleGraph, i32>::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // All solutions should be valid (0 conflicts) - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_brute_force_triangle() { - // Triangle needs 3 colors - let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - // All three vertices have different colors - assert_ne!(sol[0], sol[1]); - assert_ne!(sol[1], sol[2]); - assert_ne!(sol[0], sol[2]); - } - } - - #[test] - fn test_triangle_2_colors() { - // Triangle cannot be 2-colored - let problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Best we can do is 1 conflict - for sol in &solutions { - assert!(!problem.solution_size(sol).is_valid); - 
assert_eq!(problem.solution_size(sol).size, 1); - } - } - - #[test] - fn test_constraints() { - let problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); - let constraints = problem.constraints(); - assert_eq!(constraints.len(), 2); // One per edge - } - - #[test] - fn test_energy_mode() { - let problem = KColoring::<2, SimpleGraph, i32>::new(2, vec![(0, 1)]); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_is_valid_coloring_function() { - let edges = vec![(0, 1), (1, 2)]; - - assert!(is_valid_coloring(3, &edges, &[0, 1, 0], 2)); - assert!(is_valid_coloring(3, &edges, &[0, 1, 2], 3)); - assert!(!is_valid_coloring(3, &edges, &[0, 0, 1], 2)); // 0-1 conflict - assert!(!is_valid_coloring(3, &edges, &[0, 1, 1], 2)); // 1-2 conflict - assert!(!is_valid_coloring(3, &edges, &[0, 1], 2)); // Wrong length - assert!(!is_valid_coloring(3, &edges, &[0, 2, 0], 2)); // Color out of range - } - - #[test] - fn test_empty_graph() { - let problem = KColoring::<1, SimpleGraph, i32>::new(3, vec![]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Any coloring is valid when there are no edges - assert!(problem.solution_size(&solutions[0]).is_valid); - } - - #[test] - fn test_complete_graph_k4() { - // K4 needs 4 colors - let problem = KColoring::<4, SimpleGraph, i32>::new( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_is_satisfied() { - let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); - - assert!(problem.is_satisfied(&[0, 1, 0])); - assert!(problem.is_satisfied(&[0, 1, 2])); - assert!(!problem.is_satisfied(&[0, 0, 1])); - } - - #[test] - fn test_problem_size() { - let problem = KColoring::<3, SimpleGraph, i32>::new(5, vec![(0, 1), (1, 2)]); - let size = problem.problem_size(); - assert_eq!(size.get("num_vertices"), Some(5)); - assert_eq!(size.get("num_edges"), Some(2)); - assert_eq!(size.get("num_colors"), Some(3)); - } - - #[test] - fn test_csp_methods() { - let problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1)]); - - // KColoring has no objectives (pure CSP) - let objectives = problem.objectives(); - assert!(objectives.is_empty()); - - // KColoring has no weights - let weights: Vec = problem.weights(); - assert!(weights.is_empty()); - - // is_weighted should return false - assert!(!problem.is_weighted()); - } - - #[test] - fn test_set_weights() { - let mut problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1)]); - // set_weights does nothing for KColoring - problem.set_weights(vec![1, 2, 3]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = KColoring::<3, SimpleGraph, i32>::from_graph(graph); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.num_edges(), 2); - } - - #[test] - fn test_variant() { - let v = KColoring::<3, SimpleGraph, i32>::variant(); - assert_eq!(v.len(), 3); - assert_eq!(v[0], ("k", "3")); - assert_eq!(v[1], ("graph", "SimpleGraph")); - assert_eq!(v[2], ("weight", "i32")); - } -} +#[path = "../../tests_unit/models/graph/kcoloring.rs"] +mod tests; diff --git a/src/models/graph/matching.rs b/src/models/graph/matching.rs index 8c79096..4d65463 100644 --- a/src/models/graph/matching.rs +++ b/src/models/graph/matching.rs @@ -309,237 +309,5 @@ pub 
fn is_matching(num_vertices: usize, edges: &[(usize, usize)], selected: &[bo } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_matching_creation() { - let problem = Matching::::new(4, vec![(0, 1, 1), (1, 2, 2), (2, 3, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - assert_eq!(problem.num_variables(), 3); - } - - #[test] - fn test_matching_unweighted() { - let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); - assert_eq!(problem.num_edges(), 2); - } - - #[test] - fn test_edge_endpoints() { - let problem = Matching::::new(3, vec![(0, 1, 1), (1, 2, 2)]); - assert_eq!(problem.edge_endpoints(0), Some((0, 1))); - assert_eq!(problem.edge_endpoints(1), Some((1, 2))); - assert_eq!(problem.edge_endpoints(2), None); - } - - #[test] - fn test_is_valid_matching() { - let problem = Matching::::new(4, vec![(0, 1, 1), (1, 2, 1), (2, 3, 1)]); - - // Valid: select edge 0 only - assert!(problem.is_valid_matching(&[1, 0, 0])); - - // Valid: select edges 0 and 2 (disjoint) - assert!(problem.is_valid_matching(&[1, 0, 1])); - - // Invalid: edges 0 and 1 share vertex 1 - assert!(!problem.is_valid_matching(&[1, 1, 0])); - } - - #[test] - fn test_solution_size() { - let problem = Matching::::new(4, vec![(0, 1, 5), (1, 2, 10), (2, 3, 3)]); - - let sol = problem.solution_size(&[1, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 8); // 5 + 3 - - let sol = problem.solution_size(&[0, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 10); - } - - #[test] - fn test_brute_force_path() { - // Path 0-1-2-3 with unit weights - let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Maximum matching has 2 edges: {0-1, 2-3} - assert!(solutions.contains(&vec![1, 0, 1])); - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 2); - } - } - - #[test] - fn test_brute_force_triangle() { - let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Maximum matching has 1 edge (any of the 3) - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 1); - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_brute_force_weighted() { - // Prefer heavy edge even if it excludes more edges - let problem = Matching::::new(4, vec![(0, 1, 100), (0, 2, 1), (1, 3, 1)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Edge 0-1 (weight 100) alone beats edges 0-2 + 1-3 (weight 2) - assert!(solutions.contains(&vec![1, 0, 0])); - } - - #[test] - fn test_is_matching_function() { - let edges = vec![(0, 1), (1, 2), (2, 3)]; - - assert!(is_matching(4, &edges, &[true, false, true])); // Disjoint - assert!(is_matching(4, &edges, &[false, true, false])); // Single edge - assert!(!is_matching(4, &edges, &[true, true, false])); // Share vertex 1 - assert!(is_matching(4, &edges, &[false, false, false])); // Empty is valid - } - - #[test] - fn test_energy_mode() { - let problem = Matching::::unweighted(2, vec![(0, 1)]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_empty_graph() { - let problem = Matching::::unweighted(3, vec![]); - let sol = problem.solution_size(&[]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_constraints() { - let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); - let 
constraints = problem.constraints(); - // Vertex 1 has degree 2, so 1 constraint - assert_eq!(constraints.len(), 1); - } - - #[test] - fn test_edges() { - let problem = Matching::::new(3, vec![(0, 1, 5), (1, 2, 10)]); - let edges = problem.edges(); - assert_eq!(edges.len(), 2); - } - - #[test] - fn test_perfect_matching() { - // K4: can have perfect matching (2 edges covering all 4 vertices) - let problem = Matching::::unweighted( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Perfect matching has 2 edges - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 2); - // Check it's a valid matching using 4 vertices - let mut used = [false; 4]; - for (idx, &sel) in sol.iter().enumerate() { - if sel == 1 { - if let Some((u, v)) = problem.edge_endpoints(idx) { - used[u] = true; - used[v] = true; - } - } - } - assert!(used.iter().all(|&u| u)); // All vertices matched - } - } - - #[test] - fn test_is_satisfied() { - let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); - - assert!(problem.is_satisfied(&[1, 0, 1])); // Valid matching - assert!(problem.is_satisfied(&[0, 1, 0])); // Valid matching - assert!(!problem.is_satisfied(&[1, 1, 0])); // Share vertex 1 - } - - #[test] - fn test_objectives() { - let problem = Matching::::new(3, vec![(0, 1, 5), (1, 2, 10)]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 2); - } - - #[test] - fn test_set_weights() { - let mut problem = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); - assert!(!problem.is_weighted()); // Initially uniform - problem.set_weights(vec![1, 2]); - assert!(problem.is_weighted()); - assert_eq!(problem.weights(), vec![1, 2]); - } - - #[test] - fn test_is_weighted_empty() { - let problem = Matching::::unweighted(2, vec![]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_is_matching_wrong_len() { - let edges = vec![(0, 1), (1, 2)]; - assert!(!is_matching(3, &edges, &[true])); // Wrong length - } - - #[test] - fn test_is_matching_out_of_bounds() { - let edges = vec![(0, 5)]; // Vertex 5 doesn't exist - assert!(!is_matching(3, &edges, &[true])); - } - - #[test] - fn test_problem_size() { - let problem = Matching::::unweighted(5, vec![(0, 1), (1, 2), (2, 3)]); - let size = problem.problem_size(); - assert_eq!(size.get("num_vertices"), Some(5)); - assert_eq!(size.get("num_edges"), Some(3)); - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = Matching::::from_graph(graph, vec![5, 10]); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.num_edges(), 2); - assert_eq!(problem.weights(), vec![5, 10]); - } - - #[test] - fn test_from_graph_unit_weights() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = Matching::::from_graph_unit_weights(graph); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.num_edges(), 2); - assert_eq!(problem.weights(), vec![1, 1]); - } - - #[test] - fn test_graph_accessor() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = Matching::::from_graph_unit_weights(graph); - assert_eq!(problem.graph().num_vertices(), 3); - assert_eq!(problem.graph().num_edges(), 2); - } -} +#[path = "../../tests_unit/models/graph/matching.rs"] +mod tests; diff --git a/src/models/graph/max_cut.rs b/src/models/graph/max_cut.rs index e19a018..32a25fc 100644 --- a/src/models/graph/max_cut.rs +++ b/src/models/graph/max_cut.rs @@ -258,230 +258,5 
@@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_maxcut_creation() { - let problem = MaxCut::::new(4, vec![(0, 1, 1), (1, 2, 2), (2, 3, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - assert_eq!(problem.num_variables(), 4); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_maxcut_unweighted() { - let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); - assert_eq!(problem.num_edges(), 2); - } - - #[test] - fn test_solution_size() { - let problem = MaxCut::::new(3, vec![(0, 1, 1), (1, 2, 2), (0, 2, 3)]); - - // All same partition: no cut - let sol = problem.solution_size(&[0, 0, 0]); - assert_eq!(sol.size, 0); - assert!(sol.is_valid); - - // 0 vs {1,2}: cuts edges 0-1 (1) and 0-2 (3) = 4 - let sol = problem.solution_size(&[0, 1, 1]); - assert_eq!(sol.size, 4); - - // {0,2} vs {1}: cuts edges 0-1 (1) and 1-2 (2) = 3 - let sol = problem.solution_size(&[0, 1, 0]); - assert_eq!(sol.size, 3); - } - - #[test] - fn test_brute_force_triangle() { - // Triangle with unit weights: max cut is 2 - let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - let size = problem.solution_size(sol); - assert_eq!(size.size, 2); - } - } - - #[test] - fn test_brute_force_path() { - // Path 0-1-2: max cut is 2 (partition {0,2} vs {1}) - let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - let size = problem.solution_size(sol); - assert_eq!(size.size, 2); - } - } - - #[test] - fn test_brute_force_weighted() { - // Edge with weight 10 should always be cut - let problem = MaxCut::::new(3, vec![(0, 1, 10), (1, 2, 1)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Max is 11 (cut both edges) with partition like [0,1,0] or [1,0,1] - for sol in &solutions { - let size = problem.solution_size(sol); - assert_eq!(size.size, 11); - } - } - - #[test] - fn test_cut_size_function() { - let edges = vec![(0, 1, 1), (1, 2, 2), (0, 2, 3)]; - - // Partition {0} vs {1, 2} - assert_eq!(cut_size(&edges, &[false, true, true]), 4); // 1 + 3 - - // Partition {0, 1} vs {2} - assert_eq!(cut_size(&edges, &[false, false, true]), 5); // 2 + 3 - - // All same partition - assert_eq!(cut_size(&edges, &[false, false, false]), 0); - } - - #[test] - fn test_edge_weight() { - let problem = MaxCut::::new(3, vec![(0, 1, 5), (1, 2, 10)]); - assert_eq!(problem.edge_weight(0, 1), Some(&5)); - assert_eq!(problem.edge_weight(1, 2), Some(&10)); - assert_eq!(problem.edge_weight(0, 2), None); - } - - #[test] - fn test_edges() { - let problem = MaxCut::::new(3, vec![(0, 1, 1), (1, 2, 2)]); - let edges = problem.edges(); - assert_eq!(edges.len(), 2); - } - - #[test] - fn test_energy_mode() { - let problem = MaxCut::::unweighted(2, vec![(0, 1)]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_empty_graph() { - let problem = MaxCut::::unweighted(3, vec![]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Any partition gives cut size 0 - assert!(!solutions.is_empty()); - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 0); - } - } - - #[test] - fn test_single_edge() { - let problem = MaxCut::::new(2, vec![(0, 1, 5)]); - let solver = BruteForce::new(); - - let solutions = 
solver.find_best(&problem); - // Putting vertices in different sets maximizes cut - assert_eq!(solutions.len(), 2); // [0,1] and [1,0] - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 5); - } - } - - #[test] - fn test_complete_graph_k4() { - // K4: every partition cuts exactly 4 edges (balanced) or less - let problem = MaxCut::::unweighted( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Max cut in K4 is 4 (2-2 partition) - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 4); - } - } - - #[test] - fn test_bipartite_graph() { - // Complete bipartite K_{2,2}: max cut is all 4 edges - let problem = - MaxCut::::unweighted(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Bipartite graph can achieve max cut = all edges - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 4); - } - } - - #[test] - fn test_symmetry() { - // Complementary partitions should give same cut - let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); - - let sol1 = problem.solution_size(&[0, 1, 1]); - let sol2 = problem.solution_size(&[1, 0, 0]); // complement - assert_eq!(sol1.size, sol2.size); - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = MaxCut::::from_graph(graph, vec![5, 10]); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.num_edges(), 2); - assert_eq!(problem.edge_weights(), vec![5, 10]); - } - - #[test] - fn test_from_graph_unweighted() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = MaxCut::::from_graph_unweighted(graph); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.num_edges(), 2); - assert_eq!(problem.edge_weights(), vec![1, 1]); - } - - #[test] - fn test_graph_accessor() { - let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); - let graph = problem.graph(); - assert_eq!(graph.num_vertices(), 3); - assert_eq!(graph.num_edges(), 2); - } - - #[test] - fn test_with_weights() { - let problem = - MaxCut::::with_weights(3, vec![(0, 1), (1, 2)], vec![7, 3]); - assert_eq!(problem.edge_weights(), vec![7, 3]); - } - - #[test] - fn test_edge_weight_by_index() { - let problem = MaxCut::::new(3, vec![(0, 1, 5), (1, 2, 10)]); - assert_eq!(problem.edge_weight_by_index(0), Some(&5)); - assert_eq!(problem.edge_weight_by_index(1), Some(&10)); - assert_eq!(problem.edge_weight_by_index(2), None); - } - - #[test] - fn test_variant() { - let variant = MaxCut::::variant(); - assert_eq!(variant.len(), 2); - assert_eq!(variant[0], ("graph", "SimpleGraph")); - assert_eq!(variant[1], ("weight", "i32")); - } -} +#[path = "../../tests_unit/models/graph/max_cut.rs"] +mod tests; diff --git a/src/models/graph/maximal_is.rs b/src/models/graph/maximal_is.rs index 51df8c6..c94015f 100644 --- a/src/models/graph/maximal_is.rs +++ b/src/models/graph/maximal_is.rs @@ -316,254 +316,5 @@ pub fn is_maximal_independent_set( } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_maximal_is_creation() { - let problem = MaximalIS::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - } - - #[test] - fn test_maximal_is_with_weights() { - let problem = MaximalIS::::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); - assert_eq!(problem.weights(), vec![1, 
2, 3]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_maximal_is_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = MaximalIS::::from_graph(graph, vec![1, 2, 3]); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.weights(), vec![1, 2, 3]); - } - - #[test] - fn test_maximal_is_from_graph_unit_weights() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = MaximalIS::::from_graph_unit_weights(graph); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.weights(), vec![1, 1, 1]); - } - - #[test] - fn test_is_independent() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - - assert!(problem.is_independent(&[1, 0, 1])); - assert!(problem.is_independent(&[0, 1, 0])); - assert!(!problem.is_independent(&[1, 1, 0])); - } - - #[test] - fn test_is_maximal() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - - // {0, 2} is maximal (cannot add 1) - assert!(problem.is_maximal(&[1, 0, 1])); - - // {1} is maximal (cannot add 0 or 2) - assert!(problem.is_maximal(&[0, 1, 0])); - - // {0} is not maximal (can add 2) - assert!(!problem.is_maximal(&[1, 0, 0])); - - // {} is not maximal (can add any vertex) - assert!(!problem.is_maximal(&[0, 0, 0])); - } - - #[test] - fn test_solution_size() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - - // Maximal: {0, 2} - let sol = problem.solution_size(&[1, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 2); - - // Maximal: {1} - let sol = problem.solution_size(&[0, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - // Not maximal: {0} - let sol = problem.solution_size(&[1, 0, 0]); - assert!(!sol.is_valid); - } - - #[test] - fn test_brute_force_path() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Largest maximal IS is {0, 2} with size 2 - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 0, 1]); - } - - #[test] - fn test_brute_force_triangle() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // All maximal IS have size 1 (any single vertex) - assert_eq!(solutions.len(), 3); - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 1); - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_is_maximal_independent_set_function() { - let edges = vec![(0, 1), (1, 2)]; - - assert!(is_maximal_independent_set(3, &edges, &[true, false, true])); - assert!(is_maximal_independent_set(3, &edges, &[false, true, false])); - assert!(!is_maximal_independent_set( - 3, - &edges, - &[true, false, false] - )); // Can add 2 - assert!(!is_maximal_independent_set(3, &edges, &[true, true, false])); // Not independent - } - - #[test] - fn test_energy_mode() { - let problem = MaximalIS::::new(2, vec![(0, 1)]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_empty_graph() { - let problem = MaximalIS::::new(3, vec![]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Only maximal IS is all vertices - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 1, 1]); - } - - #[test] - fn test_constraints() { - let problem = MaximalIS::::new(3, vec![(0, 1)]); - let constraints = problem.constraints(); - // 1 edge constraint + 3 maximality constraints - assert_eq!(constraints.len(), 4); - } - - #[test] - fn test_is_satisfied() { - let 
problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - - assert!(problem.is_satisfied(&[1, 0, 1])); // Maximal - assert!(problem.is_satisfied(&[0, 1, 0])); // Maximal - // Note: is_satisfied checks constraints, which may be more complex - } - - #[test] - fn test_objectives() { - let problem = MaximalIS::::new(3, vec![(0, 1)]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 3); // One per vertex - } - - #[test] - fn test_weights() { - let problem = MaximalIS::::new(3, vec![(0, 1)]); - let weights = problem.weights(); - assert_eq!(weights, vec![1, 1, 1]); // Unit weights - } - - #[test] - fn test_set_weights() { - let mut problem = MaximalIS::::new(3, vec![(0, 1)]); - problem.set_weights(vec![1, 2, 3]); - assert_eq!(problem.weights(), vec![1, 2, 3]); - } - - #[test] - fn test_is_weighted() { - let problem = MaximalIS::::new(3, vec![(0, 1)]); - assert!(!problem.is_weighted()); // Initially uniform - } - - #[test] - fn test_is_weighted_empty() { - let problem = MaximalIS::::with_weights(0, vec![], vec![]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_is_maximal_independent_set_wrong_len() { - assert!(!is_maximal_independent_set(3, &[(0, 1)], &[true, false])); - } - - #[test] - fn test_problem_size() { - let problem = MaximalIS::::new(5, vec![(0, 1), (1, 2), (2, 3)]); - let size = problem.problem_size(); - assert_eq!(size.get("num_vertices"), Some(5)); - assert_eq!(size.get("num_edges"), Some(3)); - } - - #[test] - fn test_variant() { - let variant = MaximalIS::::variant(); - assert_eq!(variant.len(), 2); - assert_eq!(variant[0], ("graph", "SimpleGraph")); - assert_eq!(variant[1], ("weight", "i32")); - } - - #[test] - fn test_graph_ref() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - let graph = problem.graph(); - assert_eq!(graph.num_vertices(), 3); - assert_eq!(graph.num_edges(), 2); - } - - #[test] - fn test_edges() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - let edges = problem.edges(); - assert_eq!(edges.len(), 2); - } - - #[test] - fn test_has_edge() { - let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); - assert!(problem.has_edge(0, 1)); - assert!(problem.has_edge(1, 0)); // Undirected - assert!(problem.has_edge(1, 2)); - assert!(!problem.has_edge(0, 2)); - } - - #[test] - fn test_weights_ref() { - let problem = MaximalIS::::new(3, vec![(0, 1)]); - assert_eq!(problem.weights_ref(), &vec![1, 1, 1]); - } - - #[test] - fn test_weighted_solution() { - let problem = - MaximalIS::::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 100, 10]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Should prefer {1} with weight 100 over {0, 2} with weight 20 - // But {0, 2} is also maximal... 
maximization prefers larger size - // Actually {0, 2} has size 20 and {1} has size 100 - // With LargerSizeIsBetter, {1} with 100 > {0, 2} with 20 - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1, 0]); - } -} +#[path = "../../tests_unit/models/graph/maximal_is.rs"] +mod tests; diff --git a/src/models/graph/vertex_covering.rs b/src/models/graph/vertex_covering.rs index 6e3d492..171ca50 100644 --- a/src/models/graph/vertex_covering.rs +++ b/src/models/graph/vertex_covering.rs @@ -242,243 +242,5 @@ pub fn is_vertex_cover(num_vertices: usize, edges: &[(usize, usize)], selected: } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_vertex_cover_creation() { - let problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - assert_eq!(problem.num_variables(), 4); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_vertex_cover_with_weights() { - let problem = - VertexCovering::::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); - assert_eq!(problem.weights(), vec![1, 2, 3]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_solution_size_valid() { - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - - // Valid: select vertex 1 (covers both edges) - let sol = problem.solution_size(&[0, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - // Valid: select all vertices - let sol = problem.solution_size(&[1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 3); - } - - #[test] - fn test_solution_size_invalid() { - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - - // Invalid: no vertex selected - let sol = problem.solution_size(&[0, 0, 0]); - assert!(!sol.is_valid); - - // Invalid: only vertex 0 selected (edge 1-2 not covered) - let sol = problem.solution_size(&[1, 0, 0]); - assert!(!sol.is_valid); - } - - #[test] - fn test_brute_force_path() { - // Path graph 0-1-2: minimum vertex cover is {1} - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1, 0]); - } - - #[test] - fn test_brute_force_triangle() { - // Triangle: minimum vertex cover has size 2 - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // There are 3 minimum covers of size 2 - assert_eq!(solutions.len(), 3); - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 2); - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_brute_force_weighted() { - // Weighted: prefer selecting low-weight vertices - let problem = VertexCovering::::with_weights( - 3, - vec![(0, 1), (1, 2)], - vec![100, 1, 100], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - // Should select vertex 1 (weight 1) instead of 0 and 2 (total 200) - assert_eq!(solutions[0], vec![0, 1, 0]); - } - - #[test] - fn test_is_vertex_cover_function() { - assert!(is_vertex_cover(3, &[(0, 1), (1, 2)], &[false, true, false])); - assert!(is_vertex_cover(3, &[(0, 1), (1, 2)], &[true, false, true])); - assert!(!is_vertex_cover( - 3, - &[(0, 1), (1, 2)], - &[true, false, false] - )); - assert!(!is_vertex_cover( - 3, - &[(0, 1), (1, 2)], - &[false, false, false] - )); - } - - #[test] - fn test_constraints() 
{ - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - let constraints = problem.constraints(); - assert_eq!(constraints.len(), 2); - } - - #[test] - fn test_energy_mode() { - let problem = VertexCovering::::new(3, vec![(0, 1)]); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_empty_graph() { - let problem = VertexCovering::::new(3, vec![]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // No edges means empty cover is valid and optimal - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 0, 0]); - } - - #[test] - fn test_single_edge() { - let problem = VertexCovering::::new(2, vec![(0, 1)]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Either vertex covers the single edge - assert_eq!(solutions.len(), 2); - } - - #[test] - fn test_is_satisfied() { - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - - assert!(problem.is_satisfied(&[0, 1, 0])); // Valid cover - assert!(problem.is_satisfied(&[1, 0, 1])); // Valid cover - assert!(!problem.is_satisfied(&[1, 0, 0])); // Edge 1-2 uncovered - assert!(!problem.is_satisfied(&[0, 0, 1])); // Edge 0-1 uncovered - } - - #[test] - fn test_complement_relationship() { - // For a graph, if S is an independent set, then V\S is a vertex cover - use crate::models::graph::IndependentSet; - - let edges = vec![(0, 1), (1, 2), (2, 3)]; - let is_problem = IndependentSet::::new(4, edges.clone()); - let vc_problem = VertexCovering::::new(4, edges); - - let solver = BruteForce::new(); - - let is_solutions = solver.find_best(&is_problem); - for is_sol in &is_solutions { - // Complement should be a valid vertex cover - let vc_config: Vec = is_sol.iter().map(|&x| 1 - x).collect(); - assert!(vc_problem.solution_size(&vc_config).is_valid); - } - } - - #[test] - fn test_objectives() { - let problem = - VertexCovering::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 3); - } - - #[test] - fn test_set_weights() { - let mut problem = VertexCovering::::new(3, vec![(0, 1)]); - assert!(!problem.is_weighted()); // Initially uniform - problem.set_weights(vec![1, 2, 3]); - assert!(problem.is_weighted()); - assert_eq!(problem.weights(), vec![1, 2, 3]); - } - - #[test] - fn test_is_weighted_empty() { - let problem = VertexCovering::::new(0, vec![]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_is_vertex_cover_wrong_len() { - // Wrong length should return false - assert!(!is_vertex_cover(3, &[(0, 1)], &[true, false])); - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = VertexCovering::::from_graph_unit_weights(graph); - assert_eq!(problem.num_vertices(), 3); - assert_eq!(problem.num_edges(), 2); - } - - #[test] - fn test_from_graph_with_weights() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = VertexCovering::::from_graph(graph, vec![1, 2, 3]); - assert_eq!(problem.weights(), vec![1, 2, 3]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_graph_accessor() { - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - let graph = problem.graph(); - assert_eq!(graph.num_vertices(), 3); - assert_eq!(graph.num_edges(), 2); - } - - #[test] - fn test_has_edge() { - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - assert!(problem.has_edge(0, 1)); - assert!(problem.has_edge(1, 0)); // Undirected - assert!(problem.has_edge(1, 2)); - 
assert!(!problem.has_edge(0, 2)); - } - - #[test] - fn test_variant() { - let variant = VertexCovering::::variant(); - assert_eq!(variant.len(), 2); - assert_eq!(variant[0], ("graph", "SimpleGraph")); - assert_eq!(variant[1], ("weight", "i32")); - } -} +#[path = "../../tests_unit/models/graph/vertex_covering.rs"] +mod tests; diff --git a/src/models/optimization/ilp.rs b/src/models/optimization/ilp.rs index 205f322..7ef3303 100644 --- a/src/models/optimization/ilp.rs +++ b/src/models/optimization/ilp.rs @@ -381,589 +381,5 @@ impl Problem for ILP { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - // ============================================================ - // VarBounds tests - // ============================================================ - - #[test] - fn test_varbounds_binary() { - let bounds = VarBounds::binary(); - assert_eq!(bounds.lower, Some(0)); - assert_eq!(bounds.upper, Some(1)); - assert!(bounds.contains(0)); - assert!(bounds.contains(1)); - assert!(!bounds.contains(-1)); - assert!(!bounds.contains(2)); - assert_eq!(bounds.num_values(), Some(2)); - } - - #[test] - fn test_varbounds_non_negative() { - let bounds = VarBounds::non_negative(); - assert_eq!(bounds.lower, Some(0)); - assert_eq!(bounds.upper, None); - assert!(bounds.contains(0)); - assert!(bounds.contains(100)); - assert!(!bounds.contains(-1)); - assert_eq!(bounds.num_values(), None); - } - - #[test] - fn test_varbounds_unbounded() { - let bounds = VarBounds::unbounded(); - assert_eq!(bounds.lower, None); - assert_eq!(bounds.upper, None); - assert!(bounds.contains(-1000)); - assert!(bounds.contains(0)); - assert!(bounds.contains(1000)); - assert_eq!(bounds.num_values(), None); - } - - #[test] - fn test_varbounds_bounded() { - let bounds = VarBounds::bounded(-5, 10); - assert_eq!(bounds.lower, Some(-5)); - assert_eq!(bounds.upper, Some(10)); - assert!(bounds.contains(-5)); - assert!(bounds.contains(0)); - assert!(bounds.contains(10)); - assert!(!bounds.contains(-6)); - assert!(!bounds.contains(11)); - assert_eq!(bounds.num_values(), Some(16)); // -5 to 10 inclusive - } - - #[test] - fn test_varbounds_default() { - let bounds = VarBounds::default(); - assert_eq!(bounds.lower, None); - assert_eq!(bounds.upper, None); - } - - #[test] - fn test_varbounds_empty_range() { - let bounds = VarBounds::bounded(5, 3); // Invalid: lo > hi - assert_eq!(bounds.num_values(), Some(0)); - } - - // ============================================================ - // Comparison tests - // ============================================================ - - #[test] - fn test_comparison_le() { - let cmp = Comparison::Le; - assert!(cmp.holds(5.0, 10.0)); - assert!(cmp.holds(10.0, 10.0)); - assert!(!cmp.holds(11.0, 10.0)); - } - - #[test] - fn test_comparison_ge() { - let cmp = Comparison::Ge; - assert!(cmp.holds(10.0, 5.0)); - assert!(cmp.holds(10.0, 10.0)); - assert!(!cmp.holds(4.0, 5.0)); - } - - #[test] - fn test_comparison_eq() { - let cmp = Comparison::Eq; - assert!(cmp.holds(10.0, 10.0)); - assert!(!cmp.holds(10.0, 10.1)); - assert!(!cmp.holds(9.9, 10.0)); - // Test tolerance - assert!(cmp.holds(10.0, 10.0 + 1e-10)); - } - - // ============================================================ - // LinearConstraint tests - // ============================================================ - - #[test] - fn test_linear_constraint_le() { - // x0 + 2*x1 <= 5 - let constraint = LinearConstraint::le(vec![(0, 1.0), (1, 2.0)], 5.0); - assert_eq!(constraint.cmp, Comparison::Le); - assert_eq!(constraint.rhs, 5.0); - 
- // x0=1, x1=2 => 1 + 4 = 5 <= 5 (satisfied) - assert!(constraint.is_satisfied(&[1, 2])); - // x0=2, x1=2 => 2 + 4 = 6 > 5 (not satisfied) - assert!(!constraint.is_satisfied(&[2, 2])); - } - - #[test] - fn test_linear_constraint_ge() { - // x0 + x1 >= 3 - let constraint = LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 3.0); - assert_eq!(constraint.cmp, Comparison::Ge); - - assert!(constraint.is_satisfied(&[2, 2])); // 4 >= 3 - assert!(constraint.is_satisfied(&[1, 2])); // 3 >= 3 - assert!(!constraint.is_satisfied(&[1, 1])); // 2 < 3 - } - - #[test] - fn test_linear_constraint_eq() { - // x0 + x1 == 2 - let constraint = LinearConstraint::eq(vec![(0, 1.0), (1, 1.0)], 2.0); - assert_eq!(constraint.cmp, Comparison::Eq); - - assert!(constraint.is_satisfied(&[1, 1])); // 2 == 2 - assert!(!constraint.is_satisfied(&[1, 2])); // 3 != 2 - assert!(!constraint.is_satisfied(&[0, 1])); // 1 != 2 - } - - #[test] - fn test_linear_constraint_evaluate_lhs() { - let constraint = LinearConstraint::le(vec![(0, 3.0), (2, -1.0)], 10.0); - // 3*x0 - 1*x2 with x=[2, 5, 7] => 3*2 - 1*7 = -1 - assert!((constraint.evaluate_lhs(&[2, 5, 7]) - (-1.0)).abs() < 1e-9); - } - - #[test] - fn test_linear_constraint_variables() { - let constraint = LinearConstraint::le(vec![(0, 1.0), (3, 2.0), (5, -1.0)], 10.0); - assert_eq!(constraint.variables(), vec![0, 3, 5]); - } - - #[test] - fn test_linear_constraint_out_of_bounds() { - // Constraint references variable 5, but values only has 3 elements - let constraint = LinearConstraint::le(vec![(5, 1.0)], 10.0); - // Missing variable defaults to 0, so 0 <= 10 is satisfied - assert!(constraint.is_satisfied(&[1, 2, 3])); - } - - // ============================================================ - // ObjectiveSense tests - // ============================================================ - - #[test] - fn test_objective_sense_from_energy_mode() { - assert_eq!( - ObjectiveSense::from(EnergyMode::LargerSizeIsBetter), - ObjectiveSense::Maximize - ); - assert_eq!( - ObjectiveSense::from(EnergyMode::SmallerSizeIsBetter), - ObjectiveSense::Minimize - ); - } - - #[test] - fn test_energy_mode_from_objective_sense() { - assert_eq!( - EnergyMode::from(ObjectiveSense::Maximize), - EnergyMode::LargerSizeIsBetter - ); - assert_eq!( - EnergyMode::from(ObjectiveSense::Minimize), - EnergyMode::SmallerSizeIsBetter - ); - } - - // ============================================================ - // ILP tests - // ============================================================ - - #[test] - fn test_ilp_new() { - let ilp = ILP::new( - 2, - vec![VarBounds::binary(), VarBounds::binary()], - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 2.0)], - ObjectiveSense::Maximize, - ); - assert_eq!(ilp.num_vars, 2); - assert_eq!(ilp.bounds.len(), 2); - assert_eq!(ilp.constraints.len(), 1); - assert_eq!(ilp.objective.len(), 2); - assert_eq!(ilp.sense, ObjectiveSense::Maximize); - } - - #[test] - #[should_panic(expected = "bounds length must match num_vars")] - fn test_ilp_new_mismatched_bounds() { - ILP::new( - 3, - vec![VarBounds::binary(), VarBounds::binary()], // Only 2 bounds for 3 vars - vec![], - vec![], - ObjectiveSense::Minimize, - ); - } - - #[test] - fn test_ilp_binary() { - let ilp = ILP::binary( - 3, - vec![], - vec![(0, 1.0), (1, 1.0), (2, 1.0)], - ObjectiveSense::Minimize, - ); - assert_eq!(ilp.num_vars, 3); - assert!(ilp.bounds.iter().all(|b| *b == VarBounds::binary())); - } - - #[test] - fn test_ilp_empty() { - let ilp = ILP::empty(); - assert_eq!(ilp.num_vars, 0); - 
assert!(ilp.bounds.is_empty()); - assert!(ilp.constraints.is_empty()); - assert!(ilp.objective.is_empty()); - } - - #[test] - fn test_ilp_evaluate_objective() { - let ilp = ILP::binary( - 3, - vec![], - vec![(0, 2.0), (1, 3.0), (2, -1.0)], - ObjectiveSense::Maximize, - ); - // 2*1 + 3*1 + (-1)*0 = 5 - assert!((ilp.evaluate_objective(&[1, 1, 0]) - 5.0).abs() < 1e-9); - // 2*0 + 3*0 + (-1)*1 = -1 - assert!((ilp.evaluate_objective(&[0, 0, 1]) - (-1.0)).abs() < 1e-9); - } - - #[test] - fn test_ilp_bounds_satisfied() { - let ilp = ILP::new( - 2, - vec![VarBounds::bounded(0, 5), VarBounds::bounded(-2, 2)], - vec![], - vec![], - ObjectiveSense::Minimize, - ); - assert!(ilp.bounds_satisfied(&[0, 0])); - assert!(ilp.bounds_satisfied(&[5, 2])); - assert!(ilp.bounds_satisfied(&[3, -2])); - assert!(!ilp.bounds_satisfied(&[6, 0])); // x0 > 5 - assert!(!ilp.bounds_satisfied(&[0, 3])); // x1 > 2 - assert!(!ilp.bounds_satisfied(&[0])); // Wrong length - } - - #[test] - fn test_ilp_constraints_satisfied() { - let ilp = ILP::binary( - 3, - vec![ - LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0), // x0 + x1 <= 1 - LinearConstraint::ge(vec![(2, 1.0)], 0.0), // x2 >= 0 - ], - vec![], - ObjectiveSense::Minimize, - ); - assert!(ilp.constraints_satisfied(&[0, 0, 1])); - assert!(ilp.constraints_satisfied(&[1, 0, 0])); - assert!(ilp.constraints_satisfied(&[0, 1, 1])); - assert!(!ilp.constraints_satisfied(&[1, 1, 0])); // x0 + x1 = 2 > 1 - } - - #[test] - fn test_ilp_is_feasible() { - let ilp = ILP::binary( - 2, - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - assert!(ilp.is_feasible(&[0, 0])); - assert!(ilp.is_feasible(&[1, 0])); - assert!(ilp.is_feasible(&[0, 1])); - assert!(!ilp.is_feasible(&[1, 1])); // Constraint violated - assert!(!ilp.is_feasible(&[2, 0])); // Bounds violated - } - - // ============================================================ - // Problem trait tests - // ============================================================ - - #[test] - fn test_ilp_num_variables() { - let ilp = ILP::binary(5, vec![], vec![], ObjectiveSense::Minimize); - assert_eq!(ilp.num_variables(), 5); - } - - #[test] - fn test_ilp_num_flavors_binary() { - let ilp = ILP::binary(3, vec![], vec![], ObjectiveSense::Minimize); - assert_eq!(ilp.num_flavors(), 2); - } - - #[test] - fn test_ilp_num_flavors_mixed() { - let ilp = ILP::new( - 3, - vec![ - VarBounds::binary(), - VarBounds::bounded(0, 5), - VarBounds::bounded(-1, 1), - ], - vec![], - vec![], - ObjectiveSense::Minimize, - ); - assert_eq!(ilp.num_flavors(), 6); // Max is 6 (from 0-5) - } - - #[test] - fn test_ilp_num_flavors_unbounded() { - let ilp = ILP::new( - 2, - vec![VarBounds::binary(), VarBounds::unbounded()], - vec![], - vec![], - ObjectiveSense::Minimize, - ); - assert_eq!(ilp.num_flavors(), usize::MAX); - } - - #[test] - fn test_ilp_num_flavors_empty() { - let ilp = ILP::empty(); - assert_eq!(ilp.num_flavors(), 2); // Default when empty - } - - #[test] - fn test_ilp_problem_size() { - let ilp = ILP::binary( - 4, - vec![ - LinearConstraint::le(vec![(0, 1.0)], 1.0), - LinearConstraint::le(vec![(1, 1.0)], 1.0), - ], - vec![], - ObjectiveSense::Minimize, - ); - let size = ilp.problem_size(); - assert_eq!(size.get("num_vars"), Some(4)); - assert_eq!(size.get("num_constraints"), Some(2)); - } - - #[test] - fn test_ilp_energy_mode() { - let max_ilp = ILP::binary(2, vec![], vec![], ObjectiveSense::Maximize); - let min_ilp = ILP::binary(2, vec![], vec![], ObjectiveSense::Minimize); - - 
assert!(max_ilp.energy_mode().is_maximization()); - assert!(min_ilp.energy_mode().is_minimization()); - } - - #[test] - fn test_ilp_solution_size_valid() { - // Maximize x0 + 2*x1 subject to x0 + x1 <= 1 - let ilp = ILP::binary( - 2, - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 2.0)], - ObjectiveSense::Maximize, - ); - - // Config [0, 1] means x0=0, x1=1 => obj = 2, valid - let sol = ilp.solution_size(&[0, 1]); - assert!(sol.is_valid); - assert!((sol.size - 2.0).abs() < 1e-9); - - // Config [1, 0] means x0=1, x1=0 => obj = 1, valid - let sol = ilp.solution_size(&[1, 0]); - assert!(sol.is_valid); - assert!((sol.size - 1.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_size_invalid() { - // x0 + x1 <= 1 - let ilp = ILP::binary( - 2, - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 2.0)], - ObjectiveSense::Maximize, - ); - - // Config [1, 1] means x0=1, x1=1 => obj = 3, but invalid (1+1 > 1) - let sol = ilp.solution_size(&[1, 1]); - assert!(!sol.is_valid); - assert!((sol.size - 3.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_size_with_offset_bounds() { - // Variables with non-zero lower bounds - let ilp = ILP::new( - 2, - vec![VarBounds::bounded(1, 3), VarBounds::bounded(-1, 1)], - vec![], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - - // Config [0, 0] maps to x0=1, x1=-1 => obj = 0 - let sol = ilp.solution_size(&[0, 0]); - assert!(sol.is_valid); - assert!((sol.size - 0.0).abs() < 1e-9); - - // Config [2, 2] maps to x0=3, x1=1 => obj = 4 - let sol = ilp.solution_size(&[2, 2]); - assert!(sol.is_valid); - assert!((sol.size - 4.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_brute_force_maximization() { - // Maximize x0 + 2*x1 subject to x0 + x1 <= 1, x0, x1 binary - let ilp = ILP::binary( - 2, - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 2.0)], - ObjectiveSense::Maximize, - ); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&ilp); - - // Optimal: x1=1, x0=0 => objective = 2 - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1]); - } - - #[test] - fn test_ilp_brute_force_minimization() { - // Minimize x0 + x1 subject to x0 + x1 >= 1, x0, x1 binary - let ilp = ILP::binary( - 2, - vec![LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Minimize, - ); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&ilp); - - // Optimal: x0=1,x1=0 or x0=0,x1=1 => objective = 1 - assert_eq!(solutions.len(), 2); - for sol in &solutions { - let size = ilp.solution_size(sol); - assert!(size.is_valid); - assert!((size.size - 1.0).abs() < 1e-9); - } - } - - #[test] - fn test_ilp_brute_force_no_feasible() { - // x0 >= 1 AND x0 <= 0 (infeasible) - let ilp = ILP::binary( - 1, - vec![ - LinearConstraint::ge(vec![(0, 1.0)], 1.0), - LinearConstraint::le(vec![(0, 1.0)], 0.0), - ], - vec![(0, 1.0)], - ObjectiveSense::Minimize, - ); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&ilp); - - // No feasible solutions - assert!(solutions.is_empty()); - } - - #[test] - fn test_ilp_unconstrained() { - // Maximize x0 + x1, no constraints, binary vars - let ilp = ILP::binary( - 2, - vec![], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&ilp); - - // Optimal: both = 1 - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 1]); - } - - #[test] - fn 
test_ilp_equality_constraint() { - // Minimize x0 subject to x0 + x1 == 1, binary vars - let ilp = ILP::binary( - 2, - vec![LinearConstraint::eq(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0)], - ObjectiveSense::Minimize, - ); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&ilp); - - // Optimal: x0=0, x1=1 => objective = 0 - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1]); - } - - #[test] - fn test_ilp_multiple_constraints() { - // Maximize x0 + x1 + x2 subject to: - // x0 + x1 <= 1 - // x1 + x2 <= 1 - // Binary vars - let ilp = ILP::binary( - 3, - vec![ - LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0), - LinearConstraint::le(vec![(1, 1.0), (2, 1.0)], 1.0), - ], - vec![(0, 1.0), (1, 1.0), (2, 1.0)], - ObjectiveSense::Maximize, - ); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&ilp); - - // Optimal: x0=1, x1=0, x2=1 => objective = 2 - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 0, 1]); - } - - #[test] - fn test_ilp_config_to_values() { - let ilp = ILP::new( - 3, - vec![ - VarBounds::bounded(0, 2), // 0,1,2 - VarBounds::bounded(-1, 1), // -1,0,1 - VarBounds::bounded(5, 7), // 5,6,7 - ], - vec![], - vec![], - ObjectiveSense::Minimize, - ); - - // Config [0,0,0] => [0, -1, 5] - assert_eq!(ilp.config_to_values(&[0, 0, 0]), vec![0, -1, 5]); - // Config [2,2,2] => [2, 1, 7] - assert_eq!(ilp.config_to_values(&[2, 2, 2]), vec![2, 1, 7]); - // Config [1,1,1] => [1, 0, 6] - assert_eq!(ilp.config_to_values(&[1, 1, 1]), vec![1, 0, 6]); - } - - #[test] - fn test_ilp_variant() { - let v = ILP::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0], ("graph", "SimpleGraph")); - assert_eq!(v[1], ("weight", "f64")); - } -} +#[path = "../../tests_unit/models/optimization/ilp.rs"] +mod tests; diff --git a/src/models/optimization/qubo.rs b/src/models/optimization/qubo.rs index d43e594..03e474b 100644 --- a/src/models/optimization/qubo.rs +++ b/src/models/optimization/qubo.rs @@ -173,141 +173,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_qubo_from_matrix() { - let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); - assert_eq!(problem.num_vars(), 2); - assert_eq!(problem.get(0, 0), Some(&1.0)); - assert_eq!(problem.get(0, 1), Some(&2.0)); - assert_eq!(problem.get(1, 1), Some(&3.0)); - } - - #[test] - fn test_qubo_new() { - let problem = QUBO::new(vec![1.0, 2.0], vec![((0, 1), 3.0)]); - assert_eq!(problem.get(0, 0), Some(&1.0)); - assert_eq!(problem.get(1, 1), Some(&2.0)); - assert_eq!(problem.get(0, 1), Some(&3.0)); - } - - #[test] - fn test_evaluate() { - // Q = [[1, 2], [0, 3]] - // f(x) = x0 + 3*x1 + 2*x0*x1 - let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); - - assert_eq!(problem.evaluate(&[0, 0]), 0.0); - assert_eq!(problem.evaluate(&[1, 0]), 1.0); - assert_eq!(problem.evaluate(&[0, 1]), 3.0); - assert_eq!(problem.evaluate(&[1, 1]), 6.0); // 1 + 3 + 2 = 6 - } - - #[test] - fn test_solution_size() { - let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); - - let sol = problem.solution_size(&[0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0.0); - - let sol = problem.solution_size(&[1, 1]); - assert_eq!(sol.size, 6.0); - } - - #[test] - fn test_brute_force_minimize() { - // Q = [[1, 0], [0, -2]] - // f(x) = x0 - 2*x1 - // Minimum at x = [0, 1] with value -2 - let problem = QUBO::from_matrix(vec![vec![1.0, 0.0], vec![0.0, -2.0]]); - let solver = BruteForce::new(); - - let 
solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1]); - assert_eq!(problem.solution_size(&solutions[0]).size, -2.0); - } - - #[test] - fn test_brute_force_with_interaction() { - // Q = [[-1, 2], [0, -1]] - // f(x) = -x0 - x1 + 2*x0*x1 - // x=[0,0] -> 0, x=[1,0] -> -1, x=[0,1] -> -1, x=[1,1] -> 0 - let problem = QUBO::from_matrix(vec![vec![-1.0, 2.0], vec![0.0, -1.0]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Minimum is -1 at [1,0] or [0,1] - assert_eq!(solutions.len(), 2); - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, -1.0); - } - } - - #[test] - fn test_energy_mode() { - let problem = QUBO::::from_matrix(vec![vec![1.0]]); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_num_variables_flavors() { - let problem = QUBO::::from_matrix(vec![vec![0.0; 5]; 5]); - assert_eq!(problem.num_variables(), 5); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_problem_size() { - let problem = QUBO::::from_matrix(vec![vec![0.0; 3]; 3]); - let size = problem.problem_size(); - assert_eq!(size.get("num_vars"), Some(3)); - } - - #[test] - fn test_matrix_access() { - let problem = QUBO::from_matrix(vec![ - vec![1.0, 2.0, 3.0], - vec![0.0, 4.0, 5.0], - vec![0.0, 0.0, 6.0], - ]); - let matrix = problem.matrix(); - assert_eq!(matrix.len(), 3); - assert_eq!(matrix[0], vec![1.0, 2.0, 3.0]); - } - - #[test] - fn test_empty_qubo() { - let problem = QUBO::::from_matrix(vec![]); - assert_eq!(problem.num_vars(), 0); - assert_eq!(problem.evaluate(&[]), 0.0); - } - - #[test] - fn test_single_variable() { - let problem = QUBO::from_matrix(vec![vec![-5.0]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1]); // x=1 gives -5, x=0 gives 0 - } - - #[test] - fn test_qubo_new_reverse_indices() { - // Test the case where (j, i) is provided with i < j - let problem = QUBO::new(vec![1.0, 2.0], vec![((1, 0), 3.0)]); // j > i - assert_eq!(problem.get(0, 1), Some(&3.0)); // Should be stored at (0, 1) - } - - #[test] - fn test_get_out_of_bounds() { - let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); - assert_eq!(problem.get(5, 5), None); - assert_eq!(problem.get(0, 5), None); - } -} +#[path = "../../tests_unit/models/optimization/qubo.rs"] +mod tests; diff --git a/src/models/optimization/spin_glass.rs b/src/models/optimization/spin_glass.rs index a8027aa..e98feb3 100644 --- a/src/models/optimization/spin_glass.rs +++ b/src/models/optimization/spin_glass.rs @@ -230,198 +230,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_spin_glass_creation() { - let problem = SpinGlass::::new( - 3, - vec![((0, 1), 1.0), ((1, 2), -1.0)], - vec![0.0, 0.0, 0.0], - ); - assert_eq!(problem.num_spins(), 3); - assert_eq!(problem.interactions().len(), 2); - assert_eq!(problem.fields().len(), 3); - } - - #[test] - fn test_spin_glass_without_fields() { - let problem = SpinGlass::::without_fields(3, vec![((0, 1), 1.0)]); - assert_eq!(problem.fields(), &[0.0, 0.0, 0.0]); - } - - #[test] - fn test_config_to_spins() { - assert_eq!( - SpinGlass::::config_to_spins(&[0, 0]), - vec![-1, -1] - ); - assert_eq!( - SpinGlass::::config_to_spins(&[1, 1]), - vec![1, 1] - ); - assert_eq!( - SpinGlass::::config_to_spins(&[0, 1]), - vec![-1, 1] - ); - assert_eq!( - SpinGlass::::config_to_spins(&[1, 0]), - vec![1, 
-1] - ); - } - - #[test] - fn test_compute_energy() { - // Two spins with J = 1 (ferromagnetic prefers aligned) - let problem = - SpinGlass::::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]); - - // Aligned spins: energy = J * s1 * s2 = 1 * 1 * 1 = 1 or 1 * (-1) * (-1) = 1 - assert_eq!(problem.compute_energy(&[1, 1]), 1.0); - assert_eq!(problem.compute_energy(&[-1, -1]), 1.0); - - // Anti-aligned spins: energy = J * s1 * s2 = 1 * 1 * (-1) = -1 - assert_eq!(problem.compute_energy(&[1, -1]), -1.0); - assert_eq!(problem.compute_energy(&[-1, 1]), -1.0); - } - - #[test] - fn test_compute_energy_with_fields() { - let problem = SpinGlass::::new(2, vec![], vec![1.0, -1.0]); - - // Energy = h1*s1 + h2*s2 = 1*s1 + (-1)*s2 - assert_eq!(problem.compute_energy(&[1, 1]), 0.0); // 1 - 1 = 0 - assert_eq!(problem.compute_energy(&[-1, -1]), 0.0); // -1 + 1 = 0 - assert_eq!(problem.compute_energy(&[1, -1]), 2.0); // 1 + 1 = 2 - assert_eq!(problem.compute_energy(&[-1, 1]), -2.0); // -1 - 1 = -2 - } - - #[test] - fn test_solution_size() { - let problem = - SpinGlass::::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]); - - // config [0,0] -> spins [-1,-1] -> energy = 1 - let sol = problem.solution_size(&[0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1.0); - - // config [0,1] -> spins [-1,1] -> energy = -1 - let sol = problem.solution_size(&[0, 1]); - assert_eq!(sol.size, -1.0); - } - - #[test] - fn test_brute_force_ferromagnetic() { - // Ferromagnetic: J > 0 prefers aligned spins to minimize energy - // But wait, energy = J*s1*s2, so J>0 with aligned gives positive energy - // For minimization, we want anti-aligned for J>0 - let problem = - SpinGlass::::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Minimum energy is -1 (anti-aligned) - for sol in &solutions { - assert_ne!(sol[0], sol[1]); - assert_eq!(problem.solution_size(sol).size, -1.0); - } - } - - #[test] - fn test_brute_force_antiferromagnetic() { - // Antiferromagnetic: J < 0, energy = J*s1*s2 - // J<0 with aligned spins gives negative energy (good for minimization) - let problem = - SpinGlass::::new(2, vec![((0, 1), -1.0)], vec![0.0, 0.0]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Minimum energy is -1 (aligned) - for sol in &solutions { - assert_eq!(sol[0], sol[1]); - assert_eq!(problem.solution_size(sol).size, -1.0); - } - } - - #[test] - fn test_energy_mode() { - let problem = SpinGlass::::without_fields(2, vec![]); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_num_variables_flavors() { - let problem = SpinGlass::::without_fields(5, vec![]); - assert_eq!(problem.num_variables(), 5); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_problem_size() { - let problem = SpinGlass::::new( - 3, - vec![((0, 1), 1.0), ((1, 2), 1.0)], - vec![0.0, 0.0, 0.0], - ); - let size = problem.problem_size(); - assert_eq!(size.get("num_spins"), Some(3)); - assert_eq!(size.get("num_interactions"), Some(2)); - } - - #[test] - fn test_triangle_frustration() { - // Triangle with all antiferromagnetic couplings - frustrated system - let problem = SpinGlass::::new( - 3, - vec![((0, 1), 1.0), ((1, 2), 1.0), ((0, 2), 1.0)], - vec![0.0, 0.0, 0.0], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Best we can do is satisfy 2 out of 3 interactions - // Energy = -1 -1 + 1 = -1 (one frustrated) - for sol in &solutions { - assert_eq!(problem.solution_size(sol).size, 
-1.0); - } - } - - #[test] - fn test_from_graph() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let problem = - SpinGlass::::from_graph(graph, vec![1.0, 2.0], vec![0.0, 0.0, 0.0]); - assert_eq!(problem.num_spins(), 3); - assert_eq!(problem.couplings(), &[1.0, 2.0]); - assert_eq!(problem.fields(), &[0.0, 0.0, 0.0]); - } - - #[test] - fn test_from_graph_without_fields() { - let graph = SimpleGraph::new(2, vec![(0, 1)]); - let problem = SpinGlass::::from_graph_without_fields(graph, vec![1.5]); - assert_eq!(problem.num_spins(), 2); - assert_eq!(problem.couplings(), &[1.5]); - assert_eq!(problem.fields(), &[0.0, 0.0]); - } - - #[test] - fn test_graph_accessor() { - let problem = - SpinGlass::::new(3, vec![((0, 1), 1.0)], vec![0.0, 0.0, 0.0]); - let graph = problem.graph(); - assert_eq!(graph.num_vertices(), 3); - assert_eq!(graph.num_edges(), 1); - } - - #[test] - fn test_variant() { - let variant = SpinGlass::::variant(); - assert_eq!(variant.len(), 2); - assert_eq!(variant[0], ("graph", "SimpleGraph")); - assert_eq!(variant[1], ("weight", "f64")); - } -} +#[path = "../../tests_unit/models/optimization/spin_glass.rs"] +mod tests; diff --git a/src/models/satisfiability/ksat.rs b/src/models/satisfiability/ksat.rs index e89dec1..50dfcd4 100644 --- a/src/models/satisfiability/ksat.rs +++ b/src/models/satisfiability/ksat.rs @@ -308,173 +308,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_3sat_creation() { - let problem = KSatisfiability::<3, i32>::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, 3]), - ], - ); - assert_eq!(problem.num_vars(), 3); - assert_eq!(problem.num_clauses(), 2); - } - - #[test] - #[should_panic(expected = "Clause 0 has 2 literals, expected 3")] - fn test_3sat_wrong_clause_size() { - let _ = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2])]); - } - - #[test] - fn test_2sat_creation() { - let problem = KSatisfiability::<2, i32>::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], - ); - assert_eq!(problem.num_vars(), 2); - assert_eq!(problem.num_clauses(), 2); - } - - #[test] - fn test_3sat_is_satisfying() { - // (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR NOT x3) - let problem = KSatisfiability::<3, i32>::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, -3]), - ], - ); - - // x1=T, x2=F, x3=F satisfies both - assert!(problem.is_satisfying(&[true, false, false])); - // x1=T, x2=T, x3=T fails second clause - assert!(!problem.is_satisfying(&[true, true, true])); - } - - #[test] - fn test_3sat_brute_force() { - let problem = KSatisfiability::<3, i32>::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, 3]), - ], - ); - let solver = BruteForce::new(); - let solutions = solver.find_best(&problem); - - assert!(!solutions.is_empty()); - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_ksat_problem_size() { - let problem = KSatisfiability::<3, i32>::new(4, vec![CNFClause::new(vec![1, 2, 3])]); - let size = problem.problem_size(); - assert_eq!(size.get("k"), Some(3)); - assert_eq!(size.get("num_vars"), Some(4)); - assert_eq!(size.get("num_clauses"), Some(1)); - } - - #[test] - fn test_ksat_with_weights() { - let problem = KSatisfiability::<3>::with_weights( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, -3]), - ], - vec![5, 10], - ); - assert_eq!(problem.weights(), vec![5, 10]); - 
assert!(problem.is_weighted()); - } - - #[test] - fn test_ksat_allow_less() { - // This should work - clause has 2 literals which is <= 3 - let problem = - KSatisfiability::<3, i32>::new_allow_less(2, vec![CNFClause::new(vec![1, 2])]); - assert_eq!(problem.num_clauses(), 1); - } - - #[test] - #[should_panic(expected = "Clause 0 has 4 literals, expected at most 3")] - fn test_ksat_allow_less_too_many() { - let _ = - KSatisfiability::<3, i32>::new_allow_less(4, vec![CNFClause::new(vec![1, 2, 3, 4])]); - } - - #[test] - fn test_ksat_constraints() { - let problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - let constraints = problem.constraints(); - assert_eq!(constraints.len(), 1); - } - - #[test] - fn test_ksat_objectives() { - let problem = - KSatisfiability::<3>::with_weights(3, vec![CNFClause::new(vec![1, 2, 3])], vec![5]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 1); - } - - #[test] - fn test_ksat_energy_mode() { - let problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_ksat_get_clause() { - let problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - assert_eq!(problem.get_clause(0), Some(&CNFClause::new(vec![1, 2, 3]))); - assert_eq!(problem.get_clause(1), None); - } - - #[test] - fn test_ksat_count_satisfied() { - let problem = KSatisfiability::<3, i32>::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, -3]), - ], - ); - // x1=T, x2=T, x3=T: first satisfied, second not - assert_eq!(problem.count_satisfied(&[true, true, true]), 1); - // x1=T, x2=F, x3=F: both satisfied - assert_eq!(problem.count_satisfied(&[true, false, false]), 2); - } - - #[test] - fn test_ksat_set_weights() { - let mut problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - assert!(!problem.is_weighted()); - problem.set_weights(vec![10]); - assert_eq!(problem.weights(), vec![10]); - } - - #[test] - fn test_ksat_is_satisfied_csp() { - let problem = KSatisfiability::<3, i32>::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, -3]), - ], - ); - assert!(problem.is_satisfied(&[1, 0, 0])); // x1=T, x2=F, x3=F - assert!(!problem.is_satisfied(&[1, 1, 1])); // x1=T, x2=T, x3=T - } -} +#[path = "../../tests_unit/models/satisfiability/ksat.rs"] +mod tests; diff --git a/src/models/satisfiability/sat.rs b/src/models/satisfiability/sat.rs index 61e6175..a983002 100644 --- a/src/models/satisfiability/sat.rs +++ b/src/models/satisfiability/sat.rs @@ -345,315 +345,5 @@ pub fn is_satisfying_assignment( } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_cnf_clause_creation() { - let clause = CNFClause::new(vec![1, -2, 3]); - assert_eq!(clause.len(), 3); - assert!(!clause.is_empty()); - assert_eq!(clause.variables(), vec![0, 1, 2]); - } - - #[test] - fn test_cnf_clause_satisfaction() { - let clause = CNFClause::new(vec![1, 2]); // x1 OR x2 - - assert!(clause.is_satisfied(&[true, false])); // x1 = T - assert!(clause.is_satisfied(&[false, true])); // x2 = T - assert!(clause.is_satisfied(&[true, true])); // Both T - assert!(!clause.is_satisfied(&[false, false])); // Both F - } - - #[test] - fn test_cnf_clause_negation() { - let clause = CNFClause::new(vec![-1, 2]); // NOT x1 OR x2 - - assert!(clause.is_satisfied(&[false, false])); // NOT x1 = T - assert!(clause.is_satisfied(&[false, true])); // Both 
true - assert!(clause.is_satisfied(&[true, true])); // x2 = T - assert!(!clause.is_satisfied(&[true, false])); // Both false - } - - #[test] - fn test_sat_creation() { - let problem = Satisfiability::::new( - 3, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], - ); - assert_eq!(problem.num_vars(), 3); - assert_eq!(problem.num_clauses(), 2); - assert_eq!(problem.num_variables(), 3); - } - - #[test] - fn test_sat_with_weights() { - let problem = Satisfiability::with_weights( - 2, - vec![CNFClause::new(vec![1]), CNFClause::new(vec![2])], - vec![5, 10], - ); - assert_eq!(problem.weights(), vec![5, 10]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_is_satisfying() { - // (x1 OR x2) AND (NOT x1 OR NOT x2) - let problem = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], - ); - - assert!(problem.is_satisfying(&[true, false])); // Satisfies both - assert!(problem.is_satisfying(&[false, true])); // Satisfies both - assert!(!problem.is_satisfying(&[true, true])); // Fails second clause - assert!(!problem.is_satisfying(&[false, false])); // Fails first clause - } - - #[test] - fn test_count_satisfied() { - let problem = Satisfiability::::new( - 2, - vec![ - CNFClause::new(vec![1]), - CNFClause::new(vec![2]), - CNFClause::new(vec![-1, -2]), - ], - ); - - assert_eq!(problem.count_satisfied(&[true, true]), 2); // x1, x2 satisfied - assert_eq!(problem.count_satisfied(&[false, false]), 1); // Only last - assert_eq!(problem.count_satisfied(&[true, false]), 2); // x1 and last - } - - #[test] - fn test_solution_size() { - let problem = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], - ); - - let sol = problem.solution_size(&[1, 0]); // true, false - assert!(sol.is_valid); - assert_eq!(sol.size, 2); // Both clauses satisfied - - let sol = problem.solution_size(&[1, 1]); // true, true - assert!(!sol.is_valid); - assert_eq!(sol.size, 1); // Only first clause satisfied - } - - #[test] - fn test_brute_force_satisfiable() { - // (x1) AND (x2) AND (NOT x1 OR NOT x2) - UNSAT - let problem = Satisfiability::::new( - 2, - vec![ - CNFClause::new(vec![1]), - CNFClause::new(vec![2]), - CNFClause::new(vec![-1, -2]), - ], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // This is unsatisfiable, so no valid solutions - // BruteForce will return configs with max satisfied clauses - for sol in &solutions { - // Best we can do is satisfy 2 out of 3 clauses - assert!(!problem.solution_size(sol).is_valid); - assert_eq!(problem.solution_size(sol).size, 2); - } - } - - #[test] - fn test_brute_force_simple_sat() { - // (x1 OR x2) - many solutions - let problem = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // 3 satisfying assignments - assert_eq!(solutions.len(), 3); - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_max_sat() { - // Weighted: clause 1 has weight 10, clause 2 has weight 1 - // They conflict, so we prefer satisfying clause 1 - let problem = Satisfiability::with_weights( - 1, - vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])], - vec![10, 1], - ); - let solver = BruteForce::new().valid_only(false); // Allow invalid (partial) solutions - - let solutions = solver.find_best(&problem); - // Should select x1 = true (weight 10) - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1]); - } - - 
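
For readers unfamiliar with the `#[path]` attribute, the sketch below shows the pattern that every hunk in this patch applies, using the matching.rs hunk above as the example. A module declared with `#[path]` is still a child of the module that declares it, so `super` keeps resolving to the model file and the moved tests compile with their original `use super::*;` and `use crate::solvers::{BruteForce, Solver};` imports even though they now live under src/tests_unit/. The extracted file excerpt and the generic type arguments shown here are illustrative reconstructions, not verbatim patch content.

    // src/models/graph/matching.rs (after this patch): the inline test body is
    // gone, but the module is still declared here, gated on cfg(test), and its
    // source is loaded from the extracted file via #[path].
    #[cfg(test)]
    #[path = "../../tests_unit/models/graph/matching.rs"]
    mod tests;

    // src/tests_unit/models/graph/matching.rs (illustrative excerpt): this file
    // *is* the `tests` module, so `super` refers to the declaring matching module.
    use super::*;
    use crate::solvers::{BruteForce, Solver};

    #[test]
    fn test_edges() {
        // Type arguments are assumed here (the hunks above elide them); the
        // crate's actual signature may differ.
        let problem = Matching::<SimpleGraph, i32>::new(3, vec![(0, 1, 5), (1, 2, 10)]);
        assert_eq!(problem.edges().len(), 2);
    }

Because `#[cfg(test)]` stays on the declaration in the model file, the files under src/tests_unit/ are only compiled for test builds, matching the behavior of the former inline modules.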
#[test] - fn test_is_satisfying_assignment() { - let clauses = vec![vec![1, 2], vec![-1, 3]]; - - assert!(is_satisfying_assignment(3, &clauses, &[true, false, true])); - assert!(is_satisfying_assignment(3, &clauses, &[false, true, false])); - assert!(!is_satisfying_assignment( - 3, - &clauses, - &[true, false, false] - )); - } - - #[test] - fn test_constraints() { - let problem = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], - ); - let constraints = problem.constraints(); - assert_eq!(constraints.len(), 2); - } - - #[test] - fn test_energy_mode() { - let problem = Satisfiability::::new(2, vec![CNFClause::new(vec![1])]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_empty_formula() { - let problem = Satisfiability::::new(2, vec![]); - let sol = problem.solution_size(&[0, 0]); - assert!(sol.is_valid); // Empty formula is trivially satisfied - } - - #[test] - fn test_single_literal_clauses() { - // Unit propagation scenario: x1 AND NOT x2 - let problem = - Satisfiability::::new(2, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-2])]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 0]); // x1=T, x2=F - } - - #[test] - fn test_get_clause() { - let problem = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], - ); - assert_eq!(problem.get_clause(0), Some(&CNFClause::new(vec![1, 2]))); - assert_eq!(problem.get_clause(2), None); - } - - #[test] - fn test_three_sat_example() { - // (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR x3) AND (x1 OR NOT x2 OR NOT x3) - let problem = Satisfiability::::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, 3]), - CNFClause::new(vec![1, -2, -3]), - ], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_is_satisfied_csp() { - let problem = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], - ); - - assert!(problem.is_satisfied(&[1, 0])); - assert!(problem.is_satisfied(&[0, 1])); - assert!(!problem.is_satisfied(&[1, 1])); - assert!(!problem.is_satisfied(&[0, 0])); - } - - #[test] - fn test_objectives() { - let problem = Satisfiability::with_weights(2, vec![CNFClause::new(vec![1, 2])], vec![5]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 1); - } - - #[test] - fn test_set_weights() { - let mut problem = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], - ); - assert!(!problem.is_weighted()); // Initially uniform - problem.set_weights(vec![1, 2]); - assert!(problem.is_weighted()); - assert_eq!(problem.weights(), vec![1, 2]); - } - - #[test] - fn test_is_weighted_empty() { - let problem = Satisfiability::::new(2, vec![]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_is_satisfying_assignment_defaults() { - // When assignment is shorter than needed, missing vars default to false - let clauses = vec![vec![1, 2]]; - // assignment is [true], var 0 = true satisfies literal 1 - assert!(is_satisfying_assignment(3, &clauses, &[true])); - // assignment is [false], var 0 = false, var 1 defaults to false - // Neither literal 1 (var0=false) nor literal 2 (var1=false) satisfied - assert!(!is_satisfying_assignment(3, &clauses, &[false])); - } - - #[test] - fn test_problem_size() 
{ - let problem = Satisfiability::::new( - 3, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], - ); - let size = problem.problem_size(); - assert_eq!(size.get("num_vars"), Some(3)); - assert_eq!(size.get("num_clauses"), Some(2)); - } - - #[test] - fn test_num_variables_flavors() { - let problem = Satisfiability::::new(5, vec![CNFClause::new(vec![1])]); - assert_eq!(problem.num_variables(), 5); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_clause_variables() { - let clause = CNFClause::new(vec![1, -2, 3]); - let vars = clause.variables(); - assert_eq!(vars, vec![0, 1, 2]); // 0-indexed - } - - #[test] - fn test_clause_debug() { - let clause = CNFClause::new(vec![1, -2, 3]); - let debug = format!("{:?}", clause); - assert!(debug.contains("CNFClause")); - } -} +#[path = "../../tests_unit/models/satisfiability/sat.rs"] +mod tests; diff --git a/src/models/set/set_covering.rs b/src/models/set/set_covering.rs index d2b7533..26fab1c 100644 --- a/src/models/set/set_covering.rs +++ b/src/models/set/set_covering.rs @@ -241,201 +241,5 @@ pub fn is_set_cover(universe_size: usize, sets: &[Vec], selected: &[bool] } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_set_covering_creation() { - let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - assert_eq!(problem.universe_size(), 4); - assert_eq!(problem.num_sets(), 3); - assert_eq!(problem.num_variables(), 3); - } - - #[test] - fn test_set_covering_with_weights() { - let problem = SetCovering::with_weights(3, vec![vec![0, 1], vec![1, 2]], vec![5, 10]); - assert_eq!(problem.weights(), vec![5, 10]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_covered_elements() { - let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - - let covered = problem.covered_elements(&[1, 0, 0]); - assert!(covered.contains(&0)); - assert!(covered.contains(&1)); - assert!(!covered.contains(&2)); - - let covered = problem.covered_elements(&[1, 0, 1]); - assert!(covered.contains(&0)); - assert!(covered.contains(&1)); - assert!(covered.contains(&2)); - assert!(covered.contains(&3)); - } - - #[test] - fn test_solution_size_valid() { - let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - - // Select first and third sets: covers {0,1} ∪ {2,3} = {0,1,2,3} - let sol = problem.solution_size(&[1, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 2); - - // Select all sets - let sol = problem.solution_size(&[1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 3); - } - - #[test] - fn test_solution_size_invalid() { - let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - - // Select only first set: missing 2, 3 - let sol = problem.solution_size(&[1, 0, 0]); - assert!(!sol.is_valid); - - // Select none - let sol = problem.solution_size(&[0, 0, 0]); - assert!(!sol.is_valid); - } - - #[test] - fn test_brute_force_simple() { - // Universe {0,1,2}, sets: {0,1}, {1,2}, {0,2} - // Minimum cover: any 2 sets work - let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2], vec![0, 2]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 2); - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_brute_force_weighted() { - // Prefer lighter sets - let problem = - SetCovering::with_weights(3, vec![vec![0, 1, 2], vec![0, 1], vec![2]], vec![10, 3, 3]); - let 
solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Should select sets 1 and 2 (total 6) instead of set 0 (total 10) - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1, 1]); - } - - #[test] - fn test_is_set_cover_function() { - let sets = vec![vec![0, 1], vec![1, 2], vec![2, 3]]; - - assert!(is_set_cover(4, &sets, &[true, false, true])); - assert!(is_set_cover(4, &sets, &[true, true, true])); - assert!(!is_set_cover(4, &sets, &[true, false, false])); - assert!(!is_set_cover(4, &sets, &[false, false, false])); - } - - #[test] - fn test_get_set() { - let problem = SetCovering::::new(4, vec![vec![0, 1], vec![2, 3]]); - assert_eq!(problem.get_set(0), Some(&vec![0, 1])); - assert_eq!(problem.get_set(1), Some(&vec![2, 3])); - assert_eq!(problem.get_set(2), None); - } - - #[test] - fn test_energy_mode() { - let problem = SetCovering::::new(2, vec![vec![0, 1]]); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_constraints() { - let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); - let constraints = problem.constraints(); - // One constraint per element - assert_eq!(constraints.len(), 3); - } - - #[test] - fn test_single_set_covers_all() { - let problem = SetCovering::::new(3, vec![vec![0, 1, 2], vec![0], vec![1], vec![2]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // First set alone covers everything - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 0, 0, 0]); - } - - #[test] - fn test_overlapping_sets() { - // All sets overlap on element 1 - let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2], vec![1]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Minimum is selecting first two sets - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 2); - } - } - - #[test] - fn test_is_satisfied() { - let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); - - assert!(problem.is_satisfied(&[1, 1, 0])); // Note: 3 vars needed - assert!(!problem.is_satisfied(&[1, 0])); - } - - #[test] - fn test_empty_universe() { - let problem = SetCovering::::new(0, vec![]); - let sol = problem.solution_size(&[]); - assert!(sol.is_valid); // Empty universe is trivially covered - assert_eq!(sol.size, 0); - } - - #[test] - fn test_objectives() { - let problem = SetCovering::with_weights(3, vec![vec![0, 1], vec![1, 2]], vec![5, 10]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 2); - } - - #[test] - fn test_set_weights() { - let mut problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); - assert!(!problem.is_weighted()); // Initially uniform - problem.set_weights(vec![1, 2]); - assert!(problem.is_weighted()); - assert_eq!(problem.weights(), vec![1, 2]); - } - - #[test] - fn test_is_weighted_empty() { - let problem = SetCovering::::new(0, vec![]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_is_set_cover_wrong_len() { - let sets = vec![vec![0, 1], vec![1, 2]]; - assert!(!is_set_cover(3, &sets, &[true])); // Wrong length - } - - #[test] - fn test_problem_size() { - let problem = SetCovering::::new(5, vec![vec![0, 1], vec![1, 2], vec![3, 4]]); - let size = problem.problem_size(); - assert_eq!(size.get("universe_size"), Some(5)); - assert_eq!(size.get("num_sets"), Some(3)); - } -} +#[path = "../../tests_unit/models/set/set_covering.rs"] +mod tests; diff --git a/src/models/set/set_packing.rs b/src/models/set/set_packing.rs index 8db4952..999e4b2 100644 --- 
a/src/models/set/set_packing.rs +++ b/src/models/set/set_packing.rs @@ -234,225 +234,5 @@ pub fn is_set_packing(sets: &[Vec], selected: &[bool]) -> bool { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_set_packing_creation() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); - assert_eq!(problem.num_sets(), 3); - assert_eq!(problem.num_variables(), 3); - } - - #[test] - fn test_set_packing_with_weights() { - let problem = SetPacking::with_weights(vec![vec![0, 1], vec![2, 3]], vec![5, 10]); - assert_eq!(problem.weights(), vec![5, 10]); - assert!(problem.is_weighted()); - } - - #[test] - fn test_sets_overlap() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); - - assert!(problem.sets_overlap(0, 1)); // Share element 1 - assert!(!problem.sets_overlap(0, 2)); // No overlap - assert!(!problem.sets_overlap(1, 2)); // No overlap - } - - #[test] - fn test_overlapping_pairs() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - - let pairs = problem.overlapping_pairs(); - assert_eq!(pairs.len(), 2); - assert!(pairs.contains(&(0, 1))); - assert!(pairs.contains(&(1, 2))); - } - - #[test] - fn test_solution_size_valid() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![2, 3], vec![4, 5]]); - - // All disjoint, can select all - let sol = problem.solution_size(&[1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 3); - - // Select none - let sol = problem.solution_size(&[0, 0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_solution_size_invalid() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); - - // Sets 0 and 1 overlap - let sol = problem.solution_size(&[1, 1, 0]); - assert!(!sol.is_valid); - } - - #[test] - fn test_brute_force_chain() { - // Chain: {0,1}, {1,2}, {2,3} - can select at most 2 non-adjacent sets - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Max is 2: select {0,1} and {2,3} - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 2); - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_brute_force_weighted() { - // Weighted: single heavy set vs multiple light sets - let problem = SetPacking::with_weights( - vec![vec![0, 1, 2, 3], vec![0, 1], vec![2, 3]], - vec![5, 3, 3], - ); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Should select sets 1 and 2 (total 6) over set 0 (total 5) - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0, 1, 1]); - } - - #[test] - fn test_is_set_packing_function() { - let sets = vec![vec![0, 1], vec![1, 2], vec![3, 4]]; - - assert!(is_set_packing(&sets, &[true, false, true])); // Disjoint - assert!(is_set_packing(&sets, &[false, true, true])); // Disjoint - assert!(!is_set_packing(&sets, &[true, true, false])); // Overlap on 1 - assert!(is_set_packing(&sets, &[false, false, false])); // Empty is valid - } - - #[test] - fn test_constraints() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); - let constraints = problem.constraints(); - // Only one overlapping pair - assert_eq!(constraints.len(), 1); - } - - #[test] - fn test_energy_mode() { - let problem = SetPacking::::new(vec![vec![0, 1]]); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_disjoint_sets() { - let problem = 
SetPacking::::new(vec![vec![0], vec![1], vec![2], vec![3]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // All sets are disjoint, so select all - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![1, 1, 1, 1]); - } - - #[test] - fn test_all_overlapping() { - // All sets share element 0 - let problem = SetPacking::::new(vec![vec![0, 1], vec![0, 2], vec![0, 3]]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Can only select one set - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 1); - } - } - - #[test] - fn test_is_satisfied() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); - - assert!(problem.is_satisfied(&[1, 0, 1])); // Disjoint selection - assert!(problem.is_satisfied(&[0, 1, 1])); // Disjoint selection - assert!(!problem.is_satisfied(&[1, 1, 0])); // Overlapping selection - } - - #[test] - fn test_empty_sets() { - let problem = SetPacking::::new(vec![]); - let sol = problem.solution_size(&[]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_get_set() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![2, 3]]); - assert_eq!(problem.get_set(0), Some(&vec![0, 1])); - assert_eq!(problem.get_set(1), Some(&vec![2, 3])); - assert_eq!(problem.get_set(2), None); - } - - #[test] - fn test_relationship_to_independent_set() { - // SetPacking on sets is equivalent to IndependentSet on the intersection graph - use crate::models::graph::IndependentSet; - use crate::topology::SimpleGraph; - - let sets = vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![3, 4]]; - let sp_problem = SetPacking::::new(sets.clone()); - - // Build intersection graph - let edges = sp_problem.overlapping_pairs(); - let is_problem = IndependentSet::::new(sets.len(), edges); - - let solver = BruteForce::new(); - - let sp_solutions = solver.find_best(&sp_problem); - let is_solutions = solver.find_best(&is_problem); - - // Should have same optimal value - let sp_size: usize = sp_solutions[0].iter().sum(); - let is_size: usize = is_solutions[0].iter().sum(); - assert_eq!(sp_size, is_size); - } - - #[test] - fn test_objectives() { - let problem = SetPacking::with_weights(vec![vec![0, 1], vec![1, 2]], vec![5, 10]); - let objectives = problem.objectives(); - assert_eq!(objectives.len(), 2); - } - - #[test] - fn test_set_weights() { - let mut problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2]]); - assert!(!problem.is_weighted()); // Initially uniform - problem.set_weights(vec![1, 2]); - assert!(problem.is_weighted()); - assert_eq!(problem.weights(), vec![1, 2]); - } - - #[test] - fn test_is_weighted_empty() { - let problem = SetPacking::::new(vec![]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_is_set_packing_wrong_len() { - let sets = vec![vec![0, 1], vec![1, 2]]; - assert!(!is_set_packing(&sets, &[true])); // Wrong length - } - - #[test] - fn test_problem_size() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); - let size = problem.problem_size(); - assert_eq!(size.get("num_sets"), Some(3)); - } -} +#[path = "../../tests_unit/models/set/set_packing.rs"] +mod tests; diff --git a/src/models/specialized/biclique_cover.rs b/src/models/specialized/biclique_cover.rs index a365eb8..86512fa 100644 --- a/src/models/specialized/biclique_cover.rs +++ b/src/models/specialized/biclique_cover.rs @@ -233,156 +233,5 @@ pub fn is_biclique_cover( } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - 
#[test] - fn test_biclique_cover_creation() { - let problem = BicliqueCover::new(2, 2, vec![(0, 2), (0, 3), (1, 2)], 2); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - assert_eq!(problem.k(), 2); - assert_eq!(problem.num_variables(), 8); // 4 vertices * 2 bicliques - } - - #[test] - fn test_from_matrix() { - // Matrix: - // [[1, 1], - // [1, 0]] - // Edges: (0,2), (0,3), (1,2) - let matrix = vec![vec![1, 1], vec![1, 0]]; - let problem = BicliqueCover::from_matrix(&matrix, 2); - assert_eq!(problem.num_vertices(), 4); - assert_eq!(problem.num_edges(), 3); - } - - #[test] - fn test_get_biclique_memberships() { - let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); - // Config: vertex 0 in biclique 0, vertex 2 in biclique 0 - // Variables: [v0_b0, v1_b0, v2_b0, v3_b0] - let config = vec![1, 0, 1, 0]; - let (left, right) = problem.get_biclique_memberships(&config); - assert!(left[0].contains(&0)); - assert!(!left[0].contains(&1)); - assert!(right[0].contains(&2)); - assert!(!right[0].contains(&3)); - } - - #[test] - fn test_is_edge_covered() { - let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); - // Put vertex 0 and 2 in biclique 0 - let config = vec![1, 0, 1, 0]; - assert!(problem.is_edge_covered(0, 2, &config)); - - // Don't put vertex 2 in biclique - let config = vec![1, 0, 0, 0]; - assert!(!problem.is_edge_covered(0, 2, &config)); - } - - #[test] - fn test_is_valid_cover() { - let problem = BicliqueCover::new(2, 2, vec![(0, 2), (0, 3)], 1); - // Put 0, 2, 3 in biclique 0 -> covers both edges - let config = vec![1, 0, 1, 1]; - assert!(problem.is_valid_cover(&config)); - - // Only put 0, 2 -> doesn't cover (0,3) - let config = vec![1, 0, 1, 0]; - assert!(!problem.is_valid_cover(&config)); - } - - #[test] - fn test_solution_size() { - let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); - - // Valid cover with size 2 - let sol = problem.solution_size(&[1, 0, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 2); - - // Invalid cover - let sol = problem.solution_size(&[1, 0, 0, 0]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 1); - } - - #[test] - fn test_brute_force_simple() { - // Single edge (0, 2) with k=1 - let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - assert!(problem.is_valid_cover(sol)); - // Minimum size is 2 (one left, one right vertex) - assert_eq!(problem.total_biclique_size(sol), 2); - } - } - - #[test] - fn test_brute_force_two_bicliques() { - // Edges that need 2 bicliques to cover efficiently - // (0,2), (1,3) - these don't share vertices - let problem = BicliqueCover::new(2, 2, vec![(0, 2), (1, 3)], 2); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - assert!(problem.is_valid_cover(sol)); - } - } - - #[test] - fn test_count_covered_edges() { - let problem = BicliqueCover::new(2, 2, vec![(0, 2), (0, 3), (1, 2)], 1); - // Cover only (0,2): put 0 and 2 in biclique - let config = vec![1, 0, 1, 0]; - assert_eq!(problem.count_covered_edges(&config), 1); - - // Cover (0,2) and (0,3): put 0, 2, 3 in biclique - let config = vec![1, 0, 1, 1]; - assert_eq!(problem.count_covered_edges(&config), 2); - } - - #[test] - fn test_is_biclique_cover_function() { - let edges = vec![(0, 2), (1, 3)]; - let left = vec![vec![0].into_iter().collect(), vec![1].into_iter().collect()]; - let right = vec![vec![2].into_iter().collect(), vec![3].into_iter().collect()]; - 
assert!(is_biclique_cover(&edges, &left, &right)); - - // Missing coverage - let left = vec![vec![0].into_iter().collect()]; - let right = vec![vec![2].into_iter().collect()]; - assert!(!is_biclique_cover(&edges, &left, &right)); - } - - #[test] - fn test_energy_mode() { - let problem = BicliqueCover::new(1, 1, vec![(0, 1)], 1); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_problem_size() { - let problem = BicliqueCover::new(3, 4, vec![(0, 3), (1, 4)], 2); - let size = problem.problem_size(); - assert_eq!(size.get("left_size"), Some(3)); - assert_eq!(size.get("right_size"), Some(4)); - assert_eq!(size.get("num_edges"), Some(2)); - assert_eq!(size.get("k"), Some(2)); - } - - #[test] - fn test_empty_edges() { - let problem = BicliqueCover::new(2, 2, vec![], 1); - let sol = problem.solution_size(&[0, 0, 0, 0]); - assert!(sol.is_valid); // No edges to cover - assert_eq!(sol.size, 0); - } -} +#[path = "../../tests_unit/models/specialized/biclique_cover.rs"] +mod tests; diff --git a/src/models/specialized/bmf.rs b/src/models/specialized/bmf.rs index 996d22a..182fb2b 100644 --- a/src/models/specialized/bmf.rs +++ b/src/models/specialized/bmf.rs @@ -209,189 +209,5 @@ pub fn matrix_hamming_distance(a: &[Vec], b: &[Vec]) -> usize { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_bmf_creation() { - let matrix = vec![vec![true, false], vec![false, true]]; - let problem = BMF::new(matrix, 2); - assert_eq!(problem.rows(), 2); - assert_eq!(problem.cols(), 2); - assert_eq!(problem.rank(), 2); - assert_eq!(problem.num_variables(), 8); // 2*2 + 2*2 - } - - #[test] - fn test_extract_factors() { - let matrix = vec![vec![true]]; - let problem = BMF::new(matrix, 1); - // Config: [b00, c00] = [1, 1] - let (b, c) = problem.extract_factors(&[1, 1]); - assert_eq!(b, vec![vec![true]]); - assert_eq!(c, vec![vec![true]]); - } - - #[test] - fn test_extract_factors_larger() { - // 2x2 matrix with rank 1 - let matrix = vec![vec![true, true], vec![true, true]]; - let problem = BMF::new(matrix, 1); - // B: 2x1, C: 1x2 - // Config: [b00, b10, c00, c01] = [1, 1, 1, 1] - let (b, c) = problem.extract_factors(&[1, 1, 1, 1]); - assert_eq!(b, vec![vec![true], vec![true]]); - assert_eq!(c, vec![vec![true, true]]); - } - - #[test] - fn test_boolean_product() { - // B = [[1], [1]], C = [[1, 1]] - // B ⊙ C = [[1,1], [1,1]] - let b = vec![vec![true], vec![true]]; - let c = vec![vec![true, true]]; - let product = BMF::boolean_product(&b, &c); - assert_eq!(product, vec![vec![true, true], vec![true, true]]); - } - - #[test] - fn test_boolean_product_rank2() { - // B = [[1,0], [0,1]], C = [[1,0], [0,1]] - // B ⊙ C = [[1,0], [0,1]] (identity) - let b = vec![vec![true, false], vec![false, true]]; - let c = vec![vec![true, false], vec![false, true]]; - let product = BMF::boolean_product(&b, &c); - assert_eq!(product, vec![vec![true, false], vec![false, true]]); - } - - #[test] - fn test_hamming_distance() { - // Target: [[1,0], [0,1]] - let matrix = vec![vec![true, false], vec![false, true]]; - let problem = BMF::new(matrix, 2); - - // B = [[1,0], [0,1]], C = [[1,0], [0,1]] -> exact match - // Config: [1,0,0,1, 1,0,0,1] - let config = vec![1, 0, 0, 1, 1, 0, 0, 1]; - assert_eq!(problem.hamming_distance(&config), 0); - - // All zeros -> product is all zeros, distance = 2 - let config = vec![0, 0, 0, 0, 0, 0, 0, 0]; - assert_eq!(problem.hamming_distance(&config), 2); - } - - #[test] - fn test_solution_size() { - let matrix = vec![vec![true, false], 
vec![false, true]]; - let problem = BMF::new(matrix, 2); - - // Exact factorization - let config = vec![1, 0, 0, 1, 1, 0, 0, 1]; - let sol = problem.solution_size(&config); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - - // Non-exact - let config = vec![0, 0, 0, 0, 0, 0, 0, 0]; - let sol = problem.solution_size(&config); - assert!(!sol.is_valid); - assert_eq!(sol.size, 2); - } - - #[test] - fn test_brute_force_ones() { - // All ones matrix can be factored with rank 1 - let matrix = vec![vec![true, true], vec![true, true]]; - let problem = BMF::new(matrix, 1); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - let sol_size = problem.solution_size(sol); - assert_eq!(sol_size.size, 0); - assert!(sol_size.is_valid); - } - } - - #[test] - fn test_brute_force_identity() { - // Identity matrix needs rank 2 - let matrix = vec![vec![true, false], vec![false, true]]; - let problem = BMF::new(matrix, 2); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Should find exact factorization - for sol in &solutions { - assert!(problem.is_exact(sol)); - } - } - - #[test] - fn test_brute_force_insufficient_rank() { - // Identity matrix with rank 1 cannot be exact - let matrix = vec![vec![true, false], vec![false, true]]; - let problem = BMF::new(matrix, 1); - let solver = BruteForce::new().valid_only(false); - - let solutions = solver.find_best(&problem); - // Best approximation has distance > 0 - let best_distance = problem.hamming_distance(&solutions[0]); - // With rank 1, best we can do is distance 1 (all ones or all zeros except one) - assert!(best_distance >= 1); - } - - #[test] - fn test_boolean_matrix_product_function() { - let b = vec![vec![true], vec![true]]; - let c = vec![vec![true, true]]; - let product = boolean_matrix_product(&b, &c); - assert_eq!(product, vec![vec![true, true], vec![true, true]]); - } - - #[test] - fn test_matrix_hamming_distance_function() { - let a = vec![vec![true, false], vec![false, true]]; - let b = vec![vec![true, true], vec![true, true]]; - assert_eq!(matrix_hamming_distance(&a, &b), 2); - - let c = vec![vec![true, false], vec![false, true]]; - assert_eq!(matrix_hamming_distance(&a, &c), 0); - } - - #[test] - fn test_energy_mode() { - let matrix = vec![vec![true]]; - let problem = BMF::new(matrix, 1); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_problem_size() { - let matrix = vec![vec![true, false, true], vec![false, true, false]]; - let problem = BMF::new(matrix, 2); - let size = problem.problem_size(); - assert_eq!(size.get("rows"), Some(2)); - assert_eq!(size.get("cols"), Some(3)); - assert_eq!(size.get("rank"), Some(2)); - } - - #[test] - fn test_empty_matrix() { - let matrix: Vec> = vec![]; - let problem = BMF::new(matrix, 1); - assert_eq!(problem.num_variables(), 0); - let sol = problem.solution_size(&[]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_is_exact() { - let matrix = vec![vec![true]]; - let problem = BMF::new(matrix, 1); - assert!(problem.is_exact(&[1, 1])); - assert!(!problem.is_exact(&[0, 0])); - } -} +#[path = "../../tests_unit/models/specialized/bmf.rs"] +mod tests; diff --git a/src/models/specialized/circuit.rs b/src/models/specialized/circuit.rs index 443d88b..9309981 100644 --- a/src/models/specialized/circuit.rs +++ b/src/models/specialized/circuit.rs @@ -329,275 +329,5 @@ pub fn is_circuit_satisfying(circuit: &Circuit, assignments: &HashMap::new(circuit); - 
assert_eq!(problem.num_variables(), 3); // c, x, y - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_circuit_sat_solution_size() { - // c = x AND y - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - )]); - let problem = CircuitSAT::::new(circuit); - - // Variables sorted: c, x, y - // c=1, x=1, y=1 -> c = 1 AND 1 = 1, valid - let sol = problem.solution_size(&[1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - // c=0, x=0, y=0 -> c = 0 AND 0 = 0, valid - let sol = problem.solution_size(&[0, 0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - // c=1, x=0, y=0 -> c should be 0, but c=1, invalid - let sol = problem.solution_size(&[1, 0, 0]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_circuit_sat_brute_force() { - // c = x AND y - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - )]); - let problem = CircuitSAT::::new(circuit); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // All satisfying: c matches x AND y - // 4 valid configs: (0,0,0), (0,0,1), (0,1,0), (1,1,1) - assert_eq!(solutions.len(), 4); - for sol in &solutions { - assert!(problem.solution_size(sol).is_valid); - } - } - - #[test] - fn test_circuit_sat_complex() { - // c = x AND y - // d = c OR z - let circuit = Circuit::new(vec![ - Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - ), - Assignment::new( - vec!["d".to_string()], - BooleanExpr::or(vec![BooleanExpr::var("c"), BooleanExpr::var("z")]), - ), - ]); - let problem = CircuitSAT::::new(circuit); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // All valid solutions satisfy both assignments - for sol in &solutions { - let sol_size = problem.solution_size(sol); - assert!(sol_size.is_valid); - assert_eq!(sol_size.size, 2); - } - } - - #[test] - fn test_is_circuit_satisfying() { - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - )]); - - let mut assignments = HashMap::new(); - assignments.insert("x".to_string(), true); - assignments.insert("y".to_string(), true); - assignments.insert("c".to_string(), true); - assert!(is_circuit_satisfying(&circuit, &assignments)); - - assignments.insert("c".to_string(), false); - assert!(!is_circuit_satisfying(&circuit, &assignments)); - } - - #[test] - fn test_problem_size() { - let circuit = Circuit::new(vec![ - Assignment::new(vec!["c".to_string()], BooleanExpr::var("x")), - Assignment::new(vec!["d".to_string()], BooleanExpr::var("y")), - ]); - let problem = CircuitSAT::::new(circuit); - let size = problem.problem_size(); - assert_eq!(size.get("num_variables"), Some(4)); - assert_eq!(size.get("num_assignments"), Some(2)); - } - - #[test] - fn test_energy_mode() { - let circuit = Circuit::new(vec![]); - let problem = CircuitSAT::::new(circuit); - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_empty_circuit() { - let circuit = Circuit::new(vec![]); - let problem = CircuitSAT::::new(circuit); - let sol = problem.solution_size(&[]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_weighted_circuit_sat() { - let circuit = Circuit::new(vec![ - Assignment::new(vec!["c".to_string()], 
BooleanExpr::var("x")), - Assignment::new(vec!["d".to_string()], BooleanExpr::var("y")), - ]); - let problem = CircuitSAT::with_weights(circuit, vec![10, 1]); - - // Variables sorted: c, d, x, y - // Config [1, 0, 1, 0]: c=1, d=0, x=1, y=0 - // c=x (1=1) satisfied (weight 10), d=y (0=0) satisfied (weight 1) - let sol = problem.solution_size(&[1, 0, 1, 0]); - assert_eq!(sol.size, 11); // Both satisfied: 10 + 1 - assert!(sol.is_valid); - - // Config [1, 0, 0, 0]: c=1, d=0, x=0, y=0 - // c=x (1!=0) not satisfied, d=y (0=0) satisfied (weight 1) - let sol = problem.solution_size(&[1, 0, 0, 0]); - assert_eq!(sol.size, 1); // Only d=y satisfied - assert!(!sol.is_valid); - - // Config [0, 1, 0, 0]: c=0, d=1, x=0, y=0 - // c=x (0=0) satisfied (weight 10), d=y (1!=0) not satisfied - let sol = problem.solution_size(&[0, 1, 0, 0]); - assert_eq!(sol.size, 10); // Only c=x satisfied - assert!(!sol.is_valid); - } -} +#[path = "../../tests_unit/models/specialized/circuit.rs"] +mod tests; diff --git a/src/models/specialized/factoring.rs b/src/models/specialized/factoring.rs index 30a8262..8253bda 100644 --- a/src/models/specialized/factoring.rs +++ b/src/models/specialized/factoring.rs @@ -145,157 +145,5 @@ pub fn is_factoring(target: u64, a: u64, b: u64) -> bool { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_factoring_creation() { - let problem = Factoring::new(3, 3, 15); - assert_eq!(problem.m(), 3); - assert_eq!(problem.n(), 3); - assert_eq!(problem.target(), 15); - assert_eq!(problem.num_variables(), 6); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_bits_to_int() { - assert_eq!(bits_to_int(&[0, 0, 0]), 0); - assert_eq!(bits_to_int(&[1, 0, 0]), 1); - assert_eq!(bits_to_int(&[0, 1, 0]), 2); - assert_eq!(bits_to_int(&[1, 1, 0]), 3); - assert_eq!(bits_to_int(&[0, 0, 1]), 4); - assert_eq!(bits_to_int(&[1, 1, 1]), 7); - } - - #[test] - fn test_int_to_bits() { - assert_eq!(int_to_bits(0, 3), vec![0, 0, 0]); - assert_eq!(int_to_bits(1, 3), vec![1, 0, 0]); - assert_eq!(int_to_bits(2, 3), vec![0, 1, 0]); - assert_eq!(int_to_bits(3, 3), vec![1, 1, 0]); - assert_eq!(int_to_bits(7, 3), vec![1, 1, 1]); - } - - #[test] - fn test_read_factors() { - let problem = Factoring::new(2, 2, 6); - // bits: [a0, a1, b0, b1] - // a=2 (binary 10), b=3 (binary 11) -> config = [0,1,1,1] - let (a, b) = problem.read_factors(&[0, 1, 1, 1]); - assert_eq!(a, 2); - assert_eq!(b, 3); - } - - #[test] - fn test_solution_size_valid() { - let problem = Factoring::new(2, 2, 6); - // 2 * 3 = 6 - let sol = problem.solution_size(&[0, 1, 1, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); // Exact match - - // 3 * 2 = 6 - let sol = problem.solution_size(&[1, 1, 0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - } - - #[test] - fn test_solution_size_invalid() { - let problem = Factoring::new(2, 2, 6); - // 2 * 2 = 4 != 6 - let sol = problem.solution_size(&[0, 1, 0, 1]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 2); // Distance from 6 - - // 1 * 1 = 1 != 6 - let sol = problem.solution_size(&[1, 0, 1, 0]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 5); // Distance from 6 - } - - #[test] - fn test_brute_force_factor_6() { - let problem = Factoring::new(2, 2, 6); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Should find 2*3 and 3*2 - assert!(!solutions.is_empty()); - for sol in &solutions { - let (a, b) = problem.read_factors(sol); - assert_eq!(a * b, 6); - } - } - - #[test] - fn 
test_brute_force_factor_15() { - let problem = Factoring::new(3, 3, 15); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Should find 3*5, 5*3, 1*15, 15*1 - for sol in &solutions { - let (a, b) = problem.read_factors(sol); - assert_eq!(a * b, 15); - } - } - - #[test] - fn test_brute_force_prime() { - // 7 is prime, only 1*7 and 7*1 work - let problem = Factoring::new(3, 3, 7); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - let factor_pairs: Vec<_> = solutions.iter().map(|s| problem.read_factors(s)).collect(); - - // Should find (1,7) and (7,1) - assert!(factor_pairs.contains(&(1, 7)) || factor_pairs.contains(&(7, 1))); - } - - #[test] - fn test_is_factoring_function() { - assert!(is_factoring(6, 2, 3)); - assert!(is_factoring(6, 3, 2)); - assert!(is_factoring(15, 3, 5)); - assert!(!is_factoring(6, 2, 2)); - } - - #[test] - fn test_energy_mode() { - let problem = Factoring::new(2, 2, 6); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_problem_size() { - let problem = Factoring::new(3, 4, 12); - let size = problem.problem_size(); - assert_eq!(size.get("num_bits_first"), Some(3)); - assert_eq!(size.get("num_bits_second"), Some(4)); - assert_eq!(size.get("target"), Some(12)); - } - - #[test] - fn test_is_valid_factorization() { - let problem = Factoring::new(2, 2, 6); - assert!(problem.is_valid_factorization(&[0, 1, 1, 1])); // 2*3=6 - assert!(!problem.is_valid_factorization(&[0, 1, 0, 1])); // 2*2=4 - } - - #[test] - fn test_factor_one() { - // Factor 1: only 1*1 works - let problem = Factoring::new(2, 2, 1); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - for sol in &solutions { - let (a, b) = problem.read_factors(sol); - assert_eq!(a * b, 1); - } - } -} +#[path = "../../tests_unit/models/specialized/factoring.rs"] +mod tests; diff --git a/src/models/specialized/paintshop.rs b/src/models/specialized/paintshop.rs index db94465..6b8e88a 100644 --- a/src/models/specialized/paintshop.rs +++ b/src/models/specialized/paintshop.rs @@ -181,152 +181,5 @@ pub fn count_paint_switches(coloring: &[usize]) -> usize { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_paintshop_creation() { - let problem = PaintShop::new(vec!["a", "b", "a", "b"]); - assert_eq!(problem.num_cars(), 2); - assert_eq!(problem.sequence_len(), 4); - assert_eq!(problem.num_variables(), 2); - assert_eq!(problem.num_flavors(), 2); - } - - #[test] - fn test_is_first() { - let problem = PaintShop::new(vec!["a", "b", "a", "b"]); - // First occurrence: a at 0, b at 1 - // Second occurrence: a at 2, b at 3 - assert_eq!(problem.is_first, vec![true, true, false, false]); - } - - #[test] - fn test_get_coloring() { - let problem = PaintShop::new(vec!["a", "b", "a", "b"]); - // Config: a=0, b=1 - // Sequence: a(0), b(1), a(1-opposite), b(0-opposite) - let coloring = problem.get_coloring(&[0, 1]); - assert_eq!(coloring, vec![0, 1, 1, 0]); - - // Config: a=1, b=0 - let coloring = problem.get_coloring(&[1, 0]); - assert_eq!(coloring, vec![1, 0, 0, 1]); - } - - #[test] - fn test_count_switches() { - let problem = PaintShop::new(vec!["a", "b", "a", "b"]); - - // Config [0, 1] -> coloring [0, 1, 1, 0] -> 2 switches - assert_eq!(problem.count_switches(&[0, 1]), 2); - - // Config [0, 0] -> coloring [0, 0, 1, 1] -> 1 switch - assert_eq!(problem.count_switches(&[0, 0]), 1); - - // Config [1, 1] -> coloring [1, 1, 0, 0] -> 1 switch - 
assert_eq!(problem.count_switches(&[1, 1]), 1); - } - - #[test] - fn test_solution_size() { - let problem = PaintShop::new(vec!["a", "b", "a", "b"]); - - let sol = problem.solution_size(&[0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - let sol = problem.solution_size(&[0, 1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 2); - } - - #[test] - fn test_brute_force_simple() { - let problem = PaintShop::new(vec!["a", "b", "a", "b"]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Optimal has 1 switch: [0,0] or [1,1] - for sol in &solutions { - assert_eq!(problem.count_switches(sol), 1); - } - } - - #[test] - fn test_brute_force_longer() { - // Sequence: a, b, a, c, c, b - let problem = PaintShop::new(vec!["a", "b", "a", "c", "c", "b"]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Find the minimum number of switches - let min_switches = problem.count_switches(&solutions[0]); - for sol in &solutions { - assert_eq!(problem.count_switches(sol), min_switches); - } - } - - #[test] - fn test_count_paint_switches_function() { - assert_eq!(count_paint_switches(&[0, 0, 0]), 0); - assert_eq!(count_paint_switches(&[0, 1, 0]), 2); - assert_eq!(count_paint_switches(&[0, 0, 1, 1]), 1); - assert_eq!(count_paint_switches(&[0, 1, 0, 1]), 3); - } - - #[test] - fn test_energy_mode() { - let problem = PaintShop::new(vec!["a", "a"]); - assert!(problem.energy_mode().is_minimization()); - } - - #[test] - fn test_problem_size() { - let problem = PaintShop::new(vec!["a", "b", "c", "a", "b", "c"]); - let size = problem.problem_size(); - assert_eq!(size.get("num_cars"), Some(3)); - assert_eq!(size.get("sequence_length"), Some(6)); - } - - #[test] - fn test_single_car() { - let problem = PaintShop::new(vec!["a", "a"]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Both configs give 1 switch: a(0)->a(1) or a(1)->a(0) - assert_eq!(solutions.len(), 2); - for sol in &solutions { - assert_eq!(problem.count_switches(sol), 1); - } - } - - #[test] - fn test_adjacent_same_car() { - // Sequence: a, a, b, b - let problem = PaintShop::new(vec!["a", "a", "b", "b"]); - let solver = BruteForce::new(); - - let solutions = solver.find_best(&problem); - // Best case: [0,0] -> [0,1,0,1] = 3 switches, or [0,1] -> [0,1,1,0] = 2 switches - // Actually: [0,0] -> a=0,a=1,b=0,b=1 = [0,1,0,1] = 3 switches - // [0,1] -> a=0,a=1,b=1,b=0 = [0,1,1,0] = 2 switches - let min_switches = problem.count_switches(&solutions[0]); - assert!(min_switches <= 3); - } - - #[test] - #[should_panic] - fn test_invalid_sequence_single_occurrence() { - // This should panic because 'c' only appears once - let _ = PaintShop::new(vec!["a", "b", "a", "c"]); - } - - #[test] - fn test_car_labels() { - let problem = PaintShop::new(vec!["car1", "car2", "car1", "car2"]); - assert_eq!(problem.car_labels().len(), 2); - } -} +#[path = "../../tests_unit/models/specialized/paintshop.rs"] +mod tests; diff --git a/src/polynomial.rs b/src/polynomial.rs index 96aba1d..e16aedb 100644 --- a/src/polynomial.rs +++ b/src/polynomial.rs @@ -126,101 +126,5 @@ macro_rules! 
poly { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_monomial_constant() { - let m = Monomial::constant(5.0); - let size = ProblemSize::new(vec![("n", 10)]); - assert_eq!(m.evaluate(&size), 5.0); - } - - #[test] - fn test_monomial_variable() { - let m = Monomial::var("n"); - let size = ProblemSize::new(vec![("n", 10)]); - assert_eq!(m.evaluate(&size), 10.0); - } - - #[test] - fn test_monomial_var_pow() { - let m = Monomial::var_pow("n", 2); - let size = ProblemSize::new(vec![("n", 5)]); - assert_eq!(m.evaluate(&size), 25.0); - } - - #[test] - fn test_polynomial_add() { - // 3n + 2m - let p = Polynomial::var("n").scale(3.0) + Polynomial::var("m").scale(2.0); - - let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); - assert_eq!(p.evaluate(&size), 40.0); // 3*10 + 2*5 - } - - #[test] - fn test_polynomial_complex() { - // n^2 + 3m - let p = Polynomial::var_pow("n", 2) + Polynomial::var("m").scale(3.0); - - let size = ProblemSize::new(vec![("n", 4), ("m", 2)]); - assert_eq!(p.evaluate(&size), 22.0); // 16 + 6 - } - - #[test] - fn test_poly_macro() { - let size = ProblemSize::new(vec![("n", 5), ("m", 3)]); - - assert_eq!(poly!(n).evaluate(&size), 5.0); - assert_eq!(poly!(n ^ 2).evaluate(&size), 25.0); - assert_eq!(poly!(3 * n).evaluate(&size), 15.0); - assert_eq!(poly!(2 * m ^ 2).evaluate(&size), 18.0); - } - - #[test] - fn test_missing_variable() { - let p = Polynomial::var("missing"); - let size = ProblemSize::new(vec![("n", 10)]); - assert_eq!(p.evaluate(&size), 0.0); // missing var = 0 - } - - #[test] - fn test_polynomial_zero() { - let p = Polynomial::zero(); - let size = ProblemSize::new(vec![("n", 100)]); - assert_eq!(p.evaluate(&size), 0.0); - } - - #[test] - fn test_polynomial_constant() { - let p = Polynomial::constant(42.0); - let size = ProblemSize::new(vec![("n", 100)]); - assert_eq!(p.evaluate(&size), 42.0); - } - - #[test] - fn test_monomial_scale() { - let m = Monomial::var("n").scale(3.0); - let size = ProblemSize::new(vec![("n", 10)]); - assert_eq!(m.evaluate(&size), 30.0); - } - - #[test] - fn test_polynomial_scale() { - let p = Polynomial::var("n").scale(5.0); - let size = ProblemSize::new(vec![("n", 10)]); - assert_eq!(p.evaluate(&size), 50.0); - } - - #[test] - fn test_monomial_multi_variable() { - // n * m^2 - let m = Monomial { - coefficient: 1.0, - variables: vec![("n", 1), ("m", 2)], - }; - let size = ProblemSize::new(vec![("n", 2), ("m", 3)]); - assert_eq!(m.evaluate(&size), 18.0); // 2 * 9 - } -} +#[path = "tests_unit/polynomial.rs"] +mod tests; diff --git a/src/registry/category.rs b/src/registry/category.rs index bd183f2..4e6285f 100644 --- a/src/registry/category.rs +++ b/src/registry/category.rs @@ -322,115 +322,5 @@ impl SpecializedSubcategory { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_category_path() { - let cat = ProblemCategory::Graph(GraphSubcategory::Independent); - assert_eq!(cat.path(), "graph/independent"); - assert_eq!(cat.name(), "graph"); - assert_eq!(cat.subcategory_name(), "independent"); - } - - #[test] - fn test_category_display() { - let cat = ProblemCategory::Satisfiability(SatisfiabilitySubcategory::Sat); - assert_eq!(format!("{}", cat), "satisfiability/sat"); - } - - #[test] - fn test_all_subcategories() { - // Graph - assert_eq!(GraphSubcategory::Coloring.name(), "coloring"); - assert_eq!(GraphSubcategory::Covering.name(), "covering"); - assert_eq!(GraphSubcategory::Independent.name(), "independent"); - assert_eq!(GraphSubcategory::Paths.name(), "paths"); - 
assert_eq!(GraphSubcategory::Structure.name(), "structure"); - assert_eq!(GraphSubcategory::Trees.name(), "trees"); - assert_eq!(GraphSubcategory::Matching.name(), "matching"); - - // Satisfiability - assert_eq!(SatisfiabilitySubcategory::Sat.name(), "sat"); - assert_eq!(SatisfiabilitySubcategory::Circuit.name(), "circuit"); - assert_eq!(SatisfiabilitySubcategory::Qbf.name(), "qbf"); - - // Set - assert_eq!(SetSubcategory::Covering.name(), "covering"); - assert_eq!(SetSubcategory::Packing.name(), "packing"); - assert_eq!(SetSubcategory::Partition.name(), "partition"); - assert_eq!(SetSubcategory::Matching.name(), "matching"); - - // Optimization - assert_eq!(OptimizationSubcategory::Quadratic.name(), "quadratic"); - assert_eq!(OptimizationSubcategory::Linear.name(), "linear"); - assert_eq!(OptimizationSubcategory::Constraint.name(), "constraint"); - - // Scheduling - assert_eq!(SchedulingSubcategory::Machine.name(), "machine"); - assert_eq!(SchedulingSubcategory::Sequencing.name(), "sequencing"); - assert_eq!(SchedulingSubcategory::Resource.name(), "resource"); - - // Network - assert_eq!(NetworkSubcategory::Flow.name(), "flow"); - assert_eq!(NetworkSubcategory::Routing.name(), "routing"); - assert_eq!(NetworkSubcategory::Connectivity.name(), "connectivity"); - - // String - assert_eq!(StringSubcategory::Sequence.name(), "sequence"); - assert_eq!(StringSubcategory::Matching.name(), "matching"); - assert_eq!(StringSubcategory::Compression.name(), "compression"); - - // Specialized - assert_eq!(SpecializedSubcategory::Geometry.name(), "geometry"); - assert_eq!(SpecializedSubcategory::Number.name(), "number"); - assert_eq!(SpecializedSubcategory::Game.name(), "game"); - assert_eq!(SpecializedSubcategory::Other.name(), "other"); - } - - #[test] - fn test_all_category_paths() { - // Test ProblemCategory name() and subcategory_name() for all variants - let categories = [ - ProblemCategory::Graph(GraphSubcategory::Coloring), - ProblemCategory::Satisfiability(SatisfiabilitySubcategory::Sat), - ProblemCategory::Set(SetSubcategory::Covering), - ProblemCategory::Optimization(OptimizationSubcategory::Quadratic), - ProblemCategory::Scheduling(SchedulingSubcategory::Machine), - ProblemCategory::Network(NetworkSubcategory::Flow), - ProblemCategory::String(StringSubcategory::Sequence), - ProblemCategory::Specialized(SpecializedSubcategory::Geometry), - ]; - - let expected_names = [ - "graph", - "satisfiability", - "set", - "optimization", - "scheduling", - "network", - "string", - "specialized", - ]; - - let expected_subcategories = [ - "coloring", - "sat", - "covering", - "quadratic", - "machine", - "flow", - "sequence", - "geometry", - ]; - - for (i, cat) in categories.iter().enumerate() { - assert_eq!(cat.name(), expected_names[i]); - assert_eq!(cat.subcategory_name(), expected_subcategories[i]); - assert!(!cat.path().is_empty()); - // Test Display - let display = format!("{}", cat); - assert!(display.contains('/')); - } - } -} +#[path = "../tests_unit/registry/category.rs"] +mod tests; diff --git a/src/registry/info.rs b/src/registry/info.rs index e231744..b3332b8 100644 --- a/src/registry/info.rs +++ b/src/registry/info.rs @@ -267,48 +267,5 @@ pub trait ProblemMetadata { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_complexity_class() { - assert_eq!(ComplexityClass::NpComplete.name(), "NP-complete"); - assert!(ComplexityClass::NpComplete.is_hard()); - assert!(ComplexityClass::NpHard.is_hard()); - assert!(!ComplexityClass::P.is_hard()); - } - - #[test] - fn 
test_problem_info_builder() { - let info = ProblemInfo::new("Independent Set", "Find a maximum weight independent set") - .with_aliases(&["MIS", "MWIS"]) - .with_complexity(ComplexityClass::NpComplete) - .with_reduction_from("3-SAT") - .with_reference("https://en.wikipedia.org/wiki/Independent_set_(graph_theory)"); - - assert_eq!(info.name, "Independent Set"); - assert_eq!(info.aliases, &["MIS", "MWIS"]); - assert!(info.is_np_complete()); - assert_eq!(info.canonical_reduction_from, Some("3-SAT")); - assert_eq!(info.all_names(), vec!["Independent Set", "MIS", "MWIS"]); - } - - #[test] - fn test_problem_info_display() { - let info = ProblemInfo::new("Vertex Cover", "Find a minimum vertex cover"); - assert_eq!(format!("{}", info), "Vertex Cover (NP-complete)"); - } - - #[test] - fn test_problem_info_versions() { - let decision_only = - ProblemInfo::new("Decision Problem", "A yes/no problem").with_optimization(false); - assert!(decision_only.decision_version); - assert!(!decision_only.optimization_version); - - let opt_only = ProblemInfo::new("Optimization Problem", "An optimization problem") - .with_decision(false); - assert!(!opt_only.decision_version); - assert!(opt_only.optimization_version); - } -} +#[path = "../tests_unit/registry/info.rs"] +mod tests; diff --git a/src/rules/circuit_spinglass.rs b/src/rules/circuit_spinglass.rs index 937e60b..d3a979c 100644 --- a/src/rules/circuit_spinglass.rs +++ b/src/rules/circuit_spinglass.rs @@ -459,527 +459,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::models::specialized::Circuit; - use crate::solvers::{BruteForce, Solver}; - - /// Verify a gadget has the correct ground states. - fn verify_gadget_truth_table(gadget: &LogicGadget, expected: &[(Vec, Vec)]) - where - W: Clone - + Default - + PartialOrd - + Num - + Zero - + AddAssign - + From - + std::ops::Mul - + std::fmt::Debug - + 'static, - { - let solver = BruteForce::new(); - let solutions = solver.find_best(&gadget.problem); - - // For each expected input/output pair, verify there's a matching ground state - for (inputs, outputs) in expected { - let found = solutions.iter().any(|sol| { - let input_match = gadget - .inputs - .iter() - .zip(inputs) - .all(|(&idx, &expected)| sol[idx] == expected); - let output_match = gadget - .outputs - .iter() - .zip(outputs) - .all(|(&idx, &expected)| sol[idx] == expected); - input_match && output_match - }); - assert!( - found, - "Expected ground state with inputs {:?} and outputs {:?} not found in {:?}", - inputs, outputs, solutions - ); - } - } - - #[test] - fn test_and_gadget() { - let gadget: LogicGadget = and_gadget(); - assert_eq!(gadget.num_spins(), 3); - assert_eq!(gadget.inputs, vec![0, 1]); - assert_eq!(gadget.outputs, vec![2]); - - // AND truth table: (a, b) -> a AND b - let truth_table = vec![ - (vec![0, 0], vec![0]), // 0 AND 0 = 0 - (vec![0, 1], vec![0]), // 0 AND 1 = 0 - (vec![1, 0], vec![0]), // 1 AND 0 = 0 - (vec![1, 1], vec![1]), // 1 AND 1 = 1 - ]; - verify_gadget_truth_table(&gadget, &truth_table); - } - - #[test] - fn test_or_gadget() { - let gadget: LogicGadget = or_gadget(); - assert_eq!(gadget.num_spins(), 3); - assert_eq!(gadget.inputs, vec![0, 1]); - assert_eq!(gadget.outputs, vec![2]); - - // OR truth table: (a, b) -> a OR b - let truth_table = vec![ - (vec![0, 0], vec![0]), // 0 OR 0 = 0 - (vec![0, 1], vec![1]), // 0 OR 1 = 1 - (vec![1, 0], vec![1]), // 1 OR 0 = 1 - (vec![1, 1], vec![1]), // 1 OR 1 = 1 - ]; - verify_gadget_truth_table(&gadget, &truth_table); - } - - #[test] - fn test_not_gadget() { - let 
gadget: LogicGadget = not_gadget(); - assert_eq!(gadget.num_spins(), 2); - assert_eq!(gadget.inputs, vec![0]); - assert_eq!(gadget.outputs, vec![1]); - - // NOT truth table: a -> NOT a - let truth_table = vec![ - (vec![0], vec![1]), // NOT 0 = 1 - (vec![1], vec![0]), // NOT 1 = 0 - ]; - verify_gadget_truth_table(&gadget, &truth_table); - } - - #[test] - fn test_xor_gadget() { - let gadget: LogicGadget = xor_gadget(); - assert_eq!(gadget.num_spins(), 4); - assert_eq!(gadget.inputs, vec![0, 1]); - assert_eq!(gadget.outputs, vec![2]); - - // XOR truth table: (a, b) -> a XOR b - let truth_table = vec![ - (vec![0, 0], vec![0]), // 0 XOR 0 = 0 - (vec![0, 1], vec![1]), // 0 XOR 1 = 1 - (vec![1, 0], vec![1]), // 1 XOR 0 = 1 - (vec![1, 1], vec![0]), // 1 XOR 1 = 0 - ]; - verify_gadget_truth_table(&gadget, &truth_table); - } - - #[test] - fn test_set0_gadget() { - let gadget: LogicGadget = set0_gadget(); - assert_eq!(gadget.num_spins(), 1); - assert_eq!(gadget.inputs, Vec::::new()); - assert_eq!(gadget.outputs, vec![0]); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&gadget.problem); - // Ground state should be spin down (0) - assert!(solutions.contains(&vec![0])); - assert!(!solutions.contains(&vec![1])); - } - - #[test] - fn test_set1_gadget() { - let gadget: LogicGadget = set1_gadget(); - assert_eq!(gadget.num_spins(), 1); - assert_eq!(gadget.inputs, Vec::::new()); - assert_eq!(gadget.outputs, vec![0]); - - let solver = BruteForce::new(); - let solutions = solver.find_best(&gadget.problem); - // Ground state should be spin up (1) - assert!(solutions.contains(&vec![1])); - assert!(!solutions.contains(&vec![0])); - } - - #[test] - fn test_simple_and_circuit() { - // c = x AND y - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - // Extract and verify solutions - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Should have valid AND configurations - // Variables are sorted: c, x, y - let valid_configs = vec![ - vec![0, 0, 0], // c=0, x=0, y=0: 0 AND 0 = 0 OK - vec![0, 0, 1], // c=0, x=0, y=1: 0 AND 1 = 0 OK - vec![0, 1, 0], // c=0, x=1, y=0: 1 AND 0 = 0 OK - vec![1, 1, 1], // c=1, x=1, y=1: 1 AND 1 = 1 OK - ]; - - for config in &valid_configs { - assert!( - extracted.contains(config), - "Expected valid config {:?} not found in {:?}", - config, - extracted - ); - } - } - - #[test] - fn test_simple_or_circuit() { - // c = x OR y - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::or(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Variables sorted: c, x, y - let valid_configs = vec![ - vec![0, 0, 0], // c=0, x=0, y=0: 0 OR 0 = 0 OK - vec![1, 0, 1], // c=1, x=0, y=1: 0 OR 1 = 1 OK - vec![1, 1, 0], // c=1, x=1, y=0: 1 OR 0 = 1 OK - vec![1, 1, 1], // c=1, x=1, y=1: 1 OR 1 = 1 OK - ]; - - for config in &valid_configs { - assert!( - extracted.contains(config), - "Expected valid 
config {:?} not found in {:?}", - config, - extracted - ); - } - } - - #[test] - fn test_not_circuit() { - // c = NOT x - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::not(BooleanExpr::var("x")), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Variables sorted: c, x - let valid_configs = vec![ - vec![1, 0], // c=1, x=0: NOT 0 = 1 OK - vec![0, 1], // c=0, x=1: NOT 1 = 0 OK - ]; - - for config in &valid_configs { - assert!( - extracted.contains(config), - "Expected valid config {:?} not found in {:?}", - config, - extracted - ); - } - } - - #[test] - fn test_xor_circuit() { - // c = x XOR y - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::xor(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Variables sorted: c, x, y - let valid_configs = vec![ - vec![0, 0, 0], // c=0, x=0, y=0: 0 XOR 0 = 0 OK - vec![1, 0, 1], // c=1, x=0, y=1: 0 XOR 1 = 1 OK - vec![1, 1, 0], // c=1, x=1, y=0: 1 XOR 0 = 1 OK - vec![0, 1, 1], // c=0, x=1, y=1: 1 XOR 1 = 0 OK - ]; - - for config in &valid_configs { - assert!( - extracted.contains(config), - "Expected valid config {:?} not found in {:?}", - config, - extracted - ); - } - } - - #[test] - fn test_constant_true() { - // c = true - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::constant(true), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // c should be 1 - assert!( - extracted.contains(&vec![1]), - "Expected c=1 in {:?}", - extracted - ); - } - - #[test] - fn test_constant_false() { - // c = false - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::constant(false), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // c should be 0 - assert!( - extracted.contains(&vec![0]), - "Expected c=0 in {:?}", - extracted - ); - } - - #[test] - fn test_multi_input_and() { - // c = x AND y AND z (3-input AND) - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![ - BooleanExpr::var("x"), - BooleanExpr::var("y"), - BooleanExpr::var("z"), - ]), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Variables sorted: c, x, y, z - 
// Only c=1 when all inputs are 1 - assert!( - extracted.contains(&vec![1, 1, 1, 1]), - "Expected (1,1,1,1) in {:?}", - extracted - ); - // c=0 for all other combinations - assert!( - extracted.contains(&vec![0, 0, 0, 0]), - "Expected (0,0,0,0) in {:?}", - extracted - ); - } - - #[test] - fn test_chained_circuit() { - // c = x AND y - // d = c OR z - let circuit = Circuit::new(vec![ - Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - ), - Assignment::new( - vec!["d".to_string()], - BooleanExpr::or(vec![BooleanExpr::var("c"), BooleanExpr::var("z")]), - ), - ]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Verify some valid configurations - // Variables sorted: c, d, x, y, z - // c = x AND y, d = c OR z - - // x=1, y=1 -> c=1, z=0 -> d=1 - assert!( - extracted.contains(&vec![1, 1, 1, 1, 0]), - "Expected (1,1,1,1,0) in {:?}", - extracted - ); - - // x=0, y=0 -> c=0, z=1 -> d=1 - assert!( - extracted.contains(&vec![0, 1, 0, 0, 1]), - "Expected (0,1,0,0,1) in {:?}", - extracted - ); - - // x=0, y=0 -> c=0, z=0 -> d=0 - assert!( - extracted.contains(&vec![0, 0, 0, 0, 0]), - "Expected (0,0,0,0,0) in {:?}", - extracted - ); - } - - #[test] - fn test_nested_expression() { - // c = (x AND y) OR z - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::or(vec![ - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - BooleanExpr::var("z"), - ]), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - let extracted: Vec> = solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Variables sorted: c, x, y, z - // c = (x AND y) OR z - - // x=1, y=1, z=0 -> c=1 - assert!( - extracted.contains(&vec![1, 1, 1, 0]), - "Expected (1,1,1,0) in {:?}", - extracted - ); - - // x=0, y=0, z=1 -> c=1 - assert!( - extracted.contains(&vec![1, 0, 0, 1]), - "Expected (1,0,0,1) in {:?}", - extracted - ); - - // x=0, y=0, z=0 -> c=0 - assert!( - extracted.contains(&vec![0, 0, 0, 0]), - "Expected (0,0,0,0) in {:?}", - extracted - ); - } - - #[test] - fn test_reduction_result_methods() { - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::var("x"), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - - // Test source_size and target_size - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert!(source_size.get("num_variables").is_some()); - assert!(target_size.get("num_spins").is_some()); - } - - #[test] - fn test_empty_circuit() { - let circuit = Circuit::new(vec![]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); - let sg = reduction.target_problem(); - - // Empty circuit should result in empty SpinGlass - assert_eq!(sg.num_spins(), 0); - } - - #[test] - fn test_solution_extraction() { - let circuit = Circuit::new(vec![Assignment::new( - vec!["c".to_string()], - BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), - )]); - let problem = CircuitSAT::::new(circuit); - let reduction = problem.reduce_to(); 
- - // The source variables are c, x, y (sorted) - assert_eq!(reduction.source_variables, vec!["c", "x", "y"]); - - // Test extraction with a mock target solution - // Need to know the mapping to construct proper test - let sg = reduction.target_problem(); - assert!(sg.num_spins() >= 3); // At least c, x, y - } -} +#[path = "../tests_unit/rules/circuit_spinglass.rs"] +mod tests; diff --git a/src/rules/clique_ilp.rs b/src/rules/clique_ilp.rs index ceb5c32..4b66719 100644 --- a/src/rules/clique_ilp.rs +++ b/src/rules/clique_ilp.rs @@ -95,303 +95,5 @@ impl ReduceTo for Clique { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::ILPSolver; - - /// Check if a configuration represents a valid clique in the graph. - /// A clique is valid if all selected vertices are pairwise adjacent. - fn is_valid_clique(problem: &Clique, config: &[usize]) -> bool { - let selected: Vec = config - .iter() - .enumerate() - .filter(|(_, &v)| v == 1) - .map(|(i, _)| i) - .collect(); - - // Check all pairs of selected vertices are adjacent - for i in 0..selected.len() { - for j in (i + 1)..selected.len() { - if !problem.has_edge(selected[i], selected[j]) { - return false; - } - } - } - true - } - - /// Compute the clique size (sum of weights of selected vertices). - fn clique_size(problem: &Clique, config: &[usize]) -> i32 { - let weights = problem.weights(); - config - .iter() - .enumerate() - .filter(|(_, &v)| v == 1) - .map(|(i, _)| weights[i]) - .sum() - } - - /// Find maximum clique size by brute force enumeration. - fn brute_force_max_clique(problem: &Clique) -> i32 { - let n = problem.num_vertices(); - let mut max_size = 0; - for mask in 0..(1 << n) { - let config: Vec = (0..n).map(|i| (mask >> i) & 1).collect(); - if is_valid_clique(problem, &config) { - let size = clique_size(problem, &config); - if size > max_size { - max_size = size; - } - } - } - max_size - } - - #[test] - fn test_reduction_creates_valid_ilp() { - // Triangle graph: 3 vertices, 3 edges (complete graph K3) - // All pairs are adjacent, so no constraints should be added - let problem: Clique = Clique::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); - assert_eq!( - ilp.constraints.len(), - 0, - "Complete graph has no non-edges, so no constraints" - ); - assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - } - - #[test] - fn test_reduction_with_non_edges() { - // Path graph 0-1-2: edges (0,1) and (1,2), non-edge (0,2) - let problem: Clique = Clique::new(3, vec![(0, 1), (1, 2)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Should have 1 constraint for non-edge (0, 2) - assert_eq!(ilp.constraints.len(), 1); - - // The constraint should be x_0 + x_2 <= 1 - let constraint = &ilp.constraints[0]; - assert_eq!(constraint.terms.len(), 2); - assert!((constraint.rhs - 1.0).abs() < 1e-9); - } - - #[test] - fn test_reduction_weighted() { - let problem: Clique = - Clique::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check that weights are correctly transferred to objective - let mut coeffs: Vec = vec![0.0; 3]; - for 
&(var, coef) in &ilp.objective { - coeffs[var] = coef; - } - assert!((coeffs[0] - 5.0).abs() < 1e-9); - assert!((coeffs[1] - 10.0).abs() < 1e-9); - assert!((coeffs[2] - 15.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_equals_brute_force_triangle() { - // Triangle graph (K3): max clique = 3 vertices - let problem: Clique = Clique::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - - // Solve with brute force for clique - let bf_size = brute_force_max_clique(&problem); - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Both should find optimal size = 3 (all vertices form a clique) - let ilp_size = clique_size(&problem, &extracted); - assert_eq!(bf_size, 3); - assert_eq!(ilp_size, 3); - - // Verify the ILP solution is a valid clique - assert!( - is_valid_clique(&problem, &extracted), - "Extracted solution should be a valid clique" - ); - } - - #[test] - fn test_ilp_solution_equals_brute_force_path() { - // Path graph 0-1-2-3: max clique = 2 (any adjacent pair) - let problem: Clique = Clique::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - - // Solve with brute force for clique - let bf_size = brute_force_max_clique(&problem); - - // Solve via ILP - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size = clique_size(&problem, &extracted); - - assert_eq!(bf_size, 2); - assert_eq!(ilp_size, 2); - - // Verify validity - assert!(is_valid_clique(&problem, &extracted)); - } - - #[test] - fn test_ilp_solution_equals_brute_force_weighted() { - // Triangle with one missing edge: 0-1, 1-2, but no 0-2 - // Weights: [1, 100, 1] - // Max clique by weight: {0, 1} (weight 101) or {1, 2} (weight 101), or just {1} (weight 100) - // Since 0-1 and 1-2 are edges, both {0,1} and {1,2} are valid cliques - let problem: Clique = - Clique::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - - let bf_obj = brute_force_max_clique(&problem); - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_obj = clique_size(&problem, &extracted); - - assert_eq!(bf_obj, 101); - assert_eq!(ilp_obj, 101); - - // Verify the solution is a valid clique - assert!(is_valid_clique(&problem, &extracted)); - } - - #[test] - fn test_solution_extraction() { - let problem: Clique = Clique::new(4, vec![(0, 1), (2, 3)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - - // Test that extraction works correctly (1:1 mapping) - let ilp_solution = vec![1, 1, 0, 0]; - let extracted = reduction.extract_solution(&ilp_solution); - assert_eq!(extracted, vec![1, 1, 0, 0]); - - // Verify this is a valid clique (0 and 1 are adjacent) - assert!(is_valid_clique(&problem, &extracted)); - } - - #[test] - fn test_source_and_target_size() { - let problem: Clique = - Clique::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction: ReductionCliqueToILP = 
ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(4)); - - assert_eq!(target_size.get("num_vars"), Some(5)); - // Number of non-edges in a path of 5 vertices: C(5,2) - 4 = 10 - 4 = 6 - assert_eq!(target_size.get("num_constraints"), Some(6)); - } - - #[test] - fn test_empty_graph() { - // Graph with no edges: max clique = 1 (any single vertex) - let problem: Clique = Clique::new(3, vec![]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // All pairs are non-edges, so 3 constraints - assert_eq!(ilp.constraints.len(), 3); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Only one vertex should be selected - assert_eq!(extracted.iter().sum::(), 1); - - assert!(is_valid_clique(&problem, &extracted)); - assert_eq!(clique_size(&problem, &extracted), 1); - } - - #[test] - fn test_complete_graph() { - // Complete graph K4: max clique = 4 (all vertices) - let problem: Clique = - Clique::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // No non-edges, so no constraints - assert_eq!(ilp.constraints.len(), 0); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // All vertices should be selected - assert_eq!(extracted, vec![1, 1, 1, 1]); - - assert!(is_valid_clique(&problem, &extracted)); - assert_eq!(clique_size(&problem, &extracted), 4); - } - - #[test] - fn test_bipartite_graph() { - // Bipartite graph: 0-2, 0-3, 1-2, 1-3 (two independent sets: {0,1} and {2,3}) - // Max clique = 2 (any edge, e.g., {0, 2}) - let problem: Clique = - Clique::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert!(is_valid_clique(&problem, &extracted)); - assert_eq!(clique_size(&problem, &extracted), 2); - - // Should select an adjacent pair - let sum: usize = extracted.iter().sum(); - assert_eq!(sum, 2); - } - - #[test] - fn test_star_graph() { - // Star graph: center 0 connected to 1, 2, 3 - // Max clique = 2 (center + any leaf) - let problem: Clique = Clique::new(4, vec![(0, 1), (0, 2), (0, 3)]); - let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Non-edges: (1,2), (1,3), (2,3) = 3 constraints - assert_eq!(ilp.constraints.len(), 3); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert!(is_valid_clique(&problem, &extracted)); - assert_eq!(clique_size(&problem, &extracted), 2); - } -} +#[path = "../tests_unit/rules/clique_ilp.rs"] +mod tests; diff --git a/src/rules/coloring_ilp.rs b/src/rules/coloring_ilp.rs index 0488415..33e0d5b 100644 --- a/src/rules/coloring_ilp.rs +++ b/src/rules/coloring_ilp.rs 
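For reference, the extraction pattern introduced by the `+` lines works because `#[path]` only changes which file a module body is loaded from: the moved tests are still compiled as a child module of the reduction they exercise, so `use super::*` resolves exactly as it did for the old inline `mod tests`. A minimal sketch (the file path comes from the patch; the file body shown is illustrative — the real file carries the removed tests verbatim):

// src/rules/clique_ilp.rs (after this patch)
#[cfg(test)]
#[path = "../tests_unit/rules/clique_ilp.rs"]
mod tests;

// src/tests_unit/rules/clique_ilp.rs (sketch)
use super::*;                  // `super` is still crate::rules::clique_ilp: #[path] moves
                               // only the source location, not the module-tree position.
use crate::solvers::ILPSolver; // same import the inline module used above.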
@@ -166,286 +166,5 @@ where pub type ReductionColoringToILP = ReductionKColoringToILP<3, SimpleGraph, i32>; #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Triangle graph with 3 colors - let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - // num_vars = 3 vertices * 3 colors = 9 - assert_eq!( - ilp.num_vars, 9, - "Should have 9 variables (3 vertices * 3 colors)" - ); - - // num_constraints = 3 (one per vertex for "exactly one color") - // + 3 edges * 3 colors = 9 (edge constraints) - // = 12 total - assert_eq!( - ilp.constraints.len(), - 12, - "Should have 12 constraints (3 vertex + 9 edge)" - ); - - assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - } - - #[test] - fn test_reduction_path_graph() { - // Path graph 0-1-2 with 2 colors (2-colorable) - let problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // num_vars = 3 * 2 = 6 - assert_eq!(ilp.num_vars, 6); - - // constraints = 3 (vertex) + 2 edges * 2 colors = 7 - assert_eq!(ilp.constraints.len(), 7); - } - - #[test] - fn test_ilp_solution_equals_brute_force_triangle() { - // Triangle needs 3 colors - let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force on original problem - let bf_solutions = bf.find_best(&problem); - assert!( - !bf_solutions.is_empty(), - "Brute force should find solutions" - ); - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Verify the extracted solution is valid for the original problem - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - - // All three vertices should have different colors - assert_ne!(extracted[0], extracted[1]); - assert_ne!(extracted[1], extracted[2]); - assert_ne!(extracted[0], extracted[2]); - } - - #[test] - fn test_ilp_solution_equals_brute_force_path() { - // Path graph 0-1-2-3 with 2 colors - let problem = KColoring::<2, SimpleGraph, i32>::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - - // Solve via ILP - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Verify validity - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - - // Check adjacent vertices have different colors - assert_ne!(extracted[0], extracted[1]); - assert_ne!(extracted[1], extracted[2]); - assert_ne!(extracted[2], extracted[3]); - } - - #[test] - fn test_ilp_infeasible_triangle_2_colors() { - // Triangle cannot be 2-colored - let problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let 
reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - - // ILP should be infeasible - let result = ilp_solver.solve(ilp); - assert!( - result.is_none(), - "Triangle with 2 colors should be infeasible" - ); - } - - #[test] - fn test_solution_extraction() { - let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1)]); - let reduction = ReduceTo::::reduce_to(&problem); - - // ILP solution where: - // vertex 0 has color 1 (x_{0,1} = 1) - // vertex 1 has color 2 (x_{1,2} = 1) - // vertex 2 has color 0 (x_{2,0} = 1) - // Variables are indexed as: v0c0, v0c1, v0c2, v1c0, v1c1, v1c2, v2c0, v2c1, v2c2 - let ilp_solution = vec![0, 1, 0, 0, 0, 1, 1, 0, 0]; - let extracted = reduction.extract_solution(&ilp_solution); - - assert_eq!(extracted, vec![1, 2, 0]); - - // Verify this is a valid coloring (vertex 0 and 1 have different colors) - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_source_and_target_size() { - let problem = KColoring::<3, SimpleGraph, i32>::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(4)); - assert_eq!(source_size.get("num_colors"), Some(3)); - - assert_eq!(target_size.get("num_vars"), Some(15)); // 5 * 3 - // constraints = 5 (vertex) + 4 * 3 (edge) = 17 - assert_eq!(target_size.get("num_constraints"), Some(17)); - } - - #[test] - fn test_empty_graph() { - // Graph with no edges: any coloring is valid - let problem = KColoring::<1, SimpleGraph, i32>::new(3, vec![]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Should only have vertex constraints (each vertex = one color) - assert_eq!(ilp.constraints.len(), 3); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_complete_graph_k4() { - // K4 needs 4 colors - let problem = KColoring::<4, SimpleGraph, i32>::new( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - - // All vertices should have different colors - let mut colors: Vec = extracted.clone(); - colors.sort(); - colors.dedup(); - assert_eq!(colors.len(), 4); - } - - #[test] - fn test_complete_graph_k4_with_3_colors_infeasible() { - // K4 cannot be 3-colored - let problem = KColoring::<3, SimpleGraph, i32>::new( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let result = ilp_solver.solve(ilp); - assert!(result.is_none(), "K4 with 3 colors should be infeasible"); - } - - #[test] - fn test_bipartite_graph() { - // Complete bipartite K_{2,2}: 0-2, 0-3, 1-2, 1-3 - // This is 2-colorable - let 
problem = KColoring::<2, SimpleGraph, i32>::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - - // Vertices 0,1 should have same color, vertices 2,3 should have same color - // And different from 0,1 - assert_eq!(extracted[0], extracted[1]); - assert_eq!(extracted[2], extracted[3]); - assert_ne!(extracted[0], extracted[2]); - } - - #[test] - fn test_solve_reduced() { - // Test the ILPSolver::solve_reduced method - let problem = KColoring::<2, SimpleGraph, i32>::new(4, vec![(0, 1), (1, 2), (2, 3)]); - - let ilp_solver = ILPSolver::new(); - let solution = ilp_solver - .solve_reduced(&problem) - .expect("solve_reduced should work"); - - let sol_result = problem.solution_size(&solution); - assert!(sol_result.is_valid); - } - - #[test] - fn test_single_vertex() { - // Single vertex graph: always 1-colorable - let problem = KColoring::<1, SimpleGraph, i32>::new(1, vec![]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.num_vars, 1); - assert_eq!(ilp.constraints.len(), 1); // Just the "exactly one color" constraint - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert_eq!(extracted, vec![0]); - } - - #[test] - fn test_single_edge() { - // Single edge: needs 2 colors - let problem = KColoring::<2, SimpleGraph, i32>::new(2, vec![(0, 1)]); - let reduction = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_ne!(extracted[0], extracted[1]); - } -} +#[path = "../tests_unit/rules/coloring_ilp.rs"] +mod tests; diff --git a/src/rules/cost.rs b/src/rules/cost.rs index c089978..9a198b4 100644 --- a/src/rules/cost.rs +++ b/src/rules/cost.rs @@ -79,98 +79,5 @@ impl f64> PathCostFn for CustomCost ReductionOverhead { - ReductionOverhead::new(vec![ - ("n", Polynomial::var("n").scale(2.0)), - ("m", Polynomial::var("m")), - ]) - } - - #[test] - fn test_minimize_single() { - let cost_fn = Minimize("n"); - let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); - let overhead = test_overhead(); - - assert_eq!(cost_fn.edge_cost(&overhead, &size), 20.0); // 2 * 10 - } - - #[test] - fn test_minimize_weighted() { - let cost_fn = MinimizeWeighted(vec![("n", 1.0), ("m", 2.0)]); - let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); - let overhead = test_overhead(); - - // output n = 20, output m = 5 - // cost = 1.0 * 20 + 2.0 * 5 = 30 - assert_eq!(cost_fn.edge_cost(&overhead, &size), 30.0); - } - - #[test] - fn test_minimize_steps() { - let cost_fn = MinimizeSteps; - let size = ProblemSize::new(vec![("n", 100)]); - let overhead = test_overhead(); - - assert_eq!(cost_fn.edge_cost(&overhead, &size), 1.0); - } - - #[test] - fn test_minimize_max() { - let cost_fn = MinimizeMax(vec!["n", "m"]); - let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); - let overhead = test_overhead(); - - // 
output n = 20, output m = 5 - // max(20, 5) = 20 - assert_eq!(cost_fn.edge_cost(&overhead, &size), 20.0); - } - - #[test] - fn test_minimize_lexicographic() { - let cost_fn = MinimizeLexicographic(vec!["n", "m"]); - let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); - let overhead = test_overhead(); - - // output n = 20, output m = 5 - // cost = 20 * 1.0 + 5 * 1e-10 = 20.0000000005 - let cost = cost_fn.edge_cost(&overhead, &size); - assert!(cost > 20.0 && cost < 20.001); - } - - #[test] - fn test_custom_cost() { - let cost_fn = CustomCost(|overhead: &ReductionOverhead, size: &ProblemSize| { - let output = overhead.evaluate_output_size(size); - (output.get("n").unwrap_or(0) + output.get("m").unwrap_or(0)) as f64 - }); - let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); - let overhead = test_overhead(); - - // output n = 20, output m = 5 - // custom = 20 + 5 = 25 - assert_eq!(cost_fn.edge_cost(&overhead, &size), 25.0); - } - - #[test] - fn test_minimize_missing_field() { - let cost_fn = Minimize("nonexistent"); - let size = ProblemSize::new(vec![("n", 10)]); - let overhead = test_overhead(); - - assert_eq!(cost_fn.edge_cost(&overhead, &size), 0.0); - } - - #[test] - fn test_minimize_max_empty() { - let cost_fn = MinimizeMax(vec![]); - let size = ProblemSize::new(vec![("n", 10)]); - let overhead = test_overhead(); - - assert_eq!(cost_fn.edge_cost(&overhead, &size), 0.0); - } -} +#[path = "../tests_unit/rules/cost.rs"] +mod tests; diff --git a/src/rules/dominatingset_ilp.rs b/src/rules/dominatingset_ilp.rs index fc3df03..f367af3 100644 --- a/src/rules/dominatingset_ilp.rs +++ b/src/rules/dominatingset_ilp.rs @@ -97,240 +97,5 @@ impl ReduceTo for DominatingSet { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Triangle graph: 3 vertices, 3 edges - let problem = DominatingSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); - assert_eq!( - ilp.constraints.len(), - 3, - "Should have one constraint per vertex" - ); - assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - - // Each constraint should be x_v + sum_{u in N(v)} x_u >= 1 - for constraint in &ilp.constraints { - assert!(!constraint.terms.is_empty()); - assert!((constraint.rhs - 1.0).abs() < 1e-9); - } - } - - #[test] - fn test_reduction_weighted() { - let problem = DominatingSet::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check that weights are correctly transferred to objective - let mut coeffs: Vec = vec![0.0; 3]; - for &(var, coef) in &ilp.objective { - coeffs[var] = coef; - } - assert!((coeffs[0] - 5.0).abs() < 1e-9); - assert!((coeffs[1] - 10.0).abs() < 1e-9); - assert!((coeffs[2] - 15.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_equals_brute_force_star() { - // Star graph: center vertex 0 connected to all others - // Minimum dominating set is just the center (weight 1) - let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = 
BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force on original problem - let bf_solutions = bf.find_best(&problem); - let bf_size = problem.solution_size(&bf_solutions[0]).size; - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size = problem.solution_size(&extracted).size; - - // Both should find optimal size = 1 (just the center) - assert_eq!(bf_size, 1); - assert_eq!(ilp_size, 1); - - // Verify the ILP solution is valid for the original problem - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - } - - #[test] - fn test_ilp_solution_equals_brute_force_path() { - // Path graph 0-1-2-3-4: min DS = 2 (e.g., vertices 1 and 3) - let problem = DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force - let bf_solutions = bf.find_best(&problem); - let bf_size = problem.solution_size(&bf_solutions[0]).size; - - // Solve via ILP - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size = problem.solution_size(&extracted).size; - - assert_eq!(bf_size, 2); - assert_eq!(ilp_size, 2); - - // Verify validity - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_ilp_solution_equals_brute_force_weighted() { - // Star with heavy center: prefer selecting all leaves (total weight 3) - // over center (weight 100) - let problem = - DominatingSet::with_weights(4, vec![(0, 1), (0, 2), (0, 3)], vec![100, 1, 1, 1]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_obj = problem.solution_size(&bf_solutions[0]).size; - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_obj = problem.solution_size(&extracted).size; - - assert_eq!(bf_obj, 3); - assert_eq!(ilp_obj, 3); - - // Verify the solution selects all leaves - assert_eq!(extracted, vec![0, 1, 1, 1]); - } - - #[test] - fn test_solution_extraction() { - let problem = DominatingSet::::new(4, vec![(0, 1), (2, 3)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - - // Test that extraction works correctly (1:1 mapping) - let ilp_solution = vec![1, 0, 1, 0]; - let extracted = reduction.extract_solution(&ilp_solution); - assert_eq!(extracted, vec![1, 0, 1, 0]); - - // Verify this is a valid DS (0 dominates 0,1 and 2 dominates 2,3) - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_source_and_target_size() { - let problem = DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(4)); - - assert_eq!(target_size.get("num_vars"), Some(5)); - 
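        // One covering constraint per vertex (x_v + sum_{u in N(v)} x_u >= 1, as checked
        // in test_reduction_creates_valid_ilp above), so a 5-vertex path gives 5: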
assert_eq!(target_size.get("num_constraints"), Some(5)); // one per vertex - } - - #[test] - fn test_isolated_vertices() { - // Graph with isolated vertex 2: it must be in the dominating set - let problem = DominatingSet::::new(3, vec![(0, 1)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Vertex 2 must be selected (isolated) - assert_eq!(extracted[2], 1); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_complete_graph() { - // Complete graph K4: min DS = 1 (any vertex dominates all) - let problem = - DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 1); - } - - #[test] - fn test_single_vertex() { - // Single vertex with no edges: must be in dominating set - let problem = DominatingSet::::new(1, vec![]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert_eq!(extracted, vec![1]); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 1); - } - - #[test] - fn test_cycle_graph() { - // Cycle C5: 0-1-2-3-4-0 - // Minimum dominating set size = 2 - let problem = DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]); - let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_size = problem.solution_size(&bf_solutions[0]).size; - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size = problem.solution_size(&extracted).size; - - assert_eq!(bf_size, ilp_size); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } -} +#[path = "../tests_unit/rules/dominatingset_ilp.rs"] +mod tests; diff --git a/src/rules/factoring_circuit.rs b/src/rules/factoring_circuit.rs index 3450a55..b86dfff 100644 --- a/src/rules/factoring_circuit.rs +++ b/src/rules/factoring_circuit.rs @@ -284,302 +284,5 @@ impl ReduceTo> for Factoring { } #[cfg(test)] -mod tests { - use super::*; - use std::collections::HashMap; - - #[test] - fn test_read_bit() { - // 6 = 110 in binary (little-endian: bit1=0, bit2=1, bit3=1) - assert!(!read_bit(6, 1)); // bit 1 (LSB) = 0 - assert!(read_bit(6, 2)); // bit 2 = 1 - assert!(read_bit(6, 3)); // bit 3 = 1 - assert!(!read_bit(6, 4)); // bit 4 = 0 - - // 15 = 1111 in binary - assert!(read_bit(15, 1)); - assert!(read_bit(15, 2)); - assert!(read_bit(15, 3)); - assert!(read_bit(15, 4)); - assert!(!read_bit(15, 5)); - } - - #[test] - fn 
test_reduction_structure() { - // Factor 6 = 2 * 3 with 2-bit factors - let factoring = Factoring::new(2, 2, 6); - let reduction = ReduceTo::>::reduce_to(&factoring); - - assert_eq!(reduction.p_vars().len(), 2); - assert_eq!(reduction.q_vars().len(), 2); - assert_eq!(reduction.m_vars().len(), 4); // 2 + 2 = 4 bits for product - } - - #[test] - fn test_reduction_structure_3x3() { - // Factor 15 = 3 * 5 with 3-bit factors - let factoring = Factoring::new(3, 3, 15); - let reduction = ReduceTo::>::reduce_to(&factoring); - - assert_eq!(reduction.p_vars().len(), 3); - assert_eq!(reduction.q_vars().len(), 3); - assert_eq!(reduction.m_vars().len(), 6); // 3 + 3 = 6 bits for product - } - - /// Helper function to evaluate a circuit with given inputs. - /// Returns a HashMap of all variable assignments after propagation. - fn evaluate_multiplier_circuit( - reduction: &ReductionFactoringToCircuit, - p_val: u64, - q_val: u64, - ) -> HashMap { - let circuit = reduction.target_problem().circuit(); - let mut assignments: HashMap = HashMap::new(); - - // Set input variables for p - for (i, var_name) in reduction.p_vars().iter().enumerate() { - let bit = ((p_val >> i) & 1) == 1; - assignments.insert(var_name.clone(), bit); - } - - // Set input variables for q - for (i, var_name) in reduction.q_vars().iter().enumerate() { - let bit = ((q_val >> i) & 1) == 1; - assignments.insert(var_name.clone(), bit); - } - - // Evaluate the circuit assignments in order - for assign in &circuit.assignments { - let result = assign.expr.evaluate(&assignments); - for out in &assign.outputs { - assignments.insert(out.clone(), result); - } - } - - assignments - } - - /// Check if inputs satisfying the circuit give correct factorization. - /// This tests the core functionality: given p and q, does the circuit - /// correctly identify when p * q = target? 
- fn check_factorization_satisfies( - factoring: &Factoring, - reduction: &ReductionFactoringToCircuit, - p_val: u64, - q_val: u64, - ) -> bool { - let assignments = evaluate_multiplier_circuit(reduction, p_val, q_val); - let circuit = reduction.target_problem().circuit(); - - // Check if all assignments are satisfied - for assign in &circuit.assignments { - if !assign.is_satisfied(&assignments) { - return false; - } - } - - // Also verify the product equals target (redundant but explicit) - p_val * q_val == factoring.target() - } - - #[test] - fn test_factorization_6_satisfies_circuit() { - let factoring = Factoring::new(2, 2, 6); - let reduction = ReduceTo::>::reduce_to(&factoring); - - // 2 * 3 = 6 should satisfy the circuit - assert!( - check_factorization_satisfies(&factoring, &reduction, 2, 3), - "2 * 3 = 6 should satisfy the circuit" - ); - - // 3 * 2 = 6 should also satisfy - assert!( - check_factorization_satisfies(&factoring, &reduction, 3, 2), - "3 * 2 = 6 should satisfy the circuit" - ); - - // 1 * 1 = 1 != 6 should NOT satisfy (product constraint fails) - assert!( - !check_factorization_satisfies(&factoring, &reduction, 1, 1), - "1 * 1 != 6 should not satisfy the circuit" - ); - - // 2 * 2 = 4 != 6 should NOT satisfy - assert!( - !check_factorization_satisfies(&factoring, &reduction, 2, 2), - "2 * 2 != 6 should not satisfy the circuit" - ); - } - - #[test] - fn test_factorization_15_satisfies_circuit() { - let factoring = Factoring::new(4, 4, 15); - let reduction = ReduceTo::>::reduce_to(&factoring); - - // Valid factorizations of 15 - assert!( - check_factorization_satisfies(&factoring, &reduction, 3, 5), - "3 * 5 = 15 should satisfy" - ); - assert!( - check_factorization_satisfies(&factoring, &reduction, 5, 3), - "5 * 3 = 15 should satisfy" - ); - assert!( - check_factorization_satisfies(&factoring, &reduction, 1, 15), - "1 * 15 = 15 should satisfy" - ); - assert!( - check_factorization_satisfies(&factoring, &reduction, 15, 1), - "15 * 1 = 15 should satisfy" - ); - - // Invalid: 2 * 7 = 14 != 15 - assert!( - !check_factorization_satisfies(&factoring, &reduction, 2, 7), - "2 * 7 != 15 should not satisfy" - ); - } - - #[test] - fn test_factorization_21_satisfies_circuit() { - let factoring = Factoring::new(3, 3, 21); - let reduction = ReduceTo::>::reduce_to(&factoring); - - // 3 * 7 = 21 - assert!( - check_factorization_satisfies(&factoring, &reduction, 3, 7), - "3 * 7 = 21 should satisfy" - ); - assert!( - check_factorization_satisfies(&factoring, &reduction, 7, 3), - "7 * 3 = 21 should satisfy" - ); - - // Invalid: 3 * 5 = 15 != 21 - assert!( - !check_factorization_satisfies(&factoring, &reduction, 3, 5), - "3 * 5 != 21 should not satisfy" - ); - } - - #[test] - fn test_source_and_target_size() { - let factoring = Factoring::new(3, 4, 15); - let reduction = ReduceTo::>::reduce_to(&factoring); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_bits_first"), Some(3)); - assert_eq!(source_size.get("num_bits_second"), Some(4)); - assert!(target_size.get("num_variables").unwrap() > 0); - assert!(target_size.get("num_assignments").unwrap() > 0); - } - - #[test] - fn test_extract_solution() { - let factoring = Factoring::new(2, 2, 6); - let reduction = ReduceTo::>::reduce_to(&factoring); - let circuit_sat = reduction.target_problem(); - - // Create a solution where p=2 (binary: 01) and q=3 (binary: 11) - // We need to find the indices of p1, p2, q1, q2 in the variable list - let var_names = 
circuit_sat.variable_names(); - let mut sol = vec![0usize; var_names.len()]; - - // Now evaluate the circuit to set all internal variables correctly - let assignments = evaluate_multiplier_circuit(&reduction, 2, 3); - for (i, name) in var_names.iter().enumerate() { - if let Some(&val) = assignments.get(name) { - sol[i] = if val { 1 } else { 0 }; - } - } - - let factoring_sol = reduction.extract_solution(&sol); - assert_eq!( - factoring_sol.len(), - 4, - "Should have 4 bits (2 for p, 2 for q)" - ); - - let (p, q) = factoring.read_factors(&factoring_sol); - assert_eq!(p, 2, "p should be 2"); - assert_eq!(q, 3, "q should be 3"); - assert_eq!(p * q, 6, "Product should equal target"); - } - - #[test] - fn test_prime_7_only_trivial_factorizations() { - let factoring = Factoring::new(3, 3, 7); - let reduction = ReduceTo::>::reduce_to(&factoring); - - // Check that only trivial factorizations satisfy - for p in 0..8u64 { - for q in 0..8u64 { - let satisfies = check_factorization_satisfies(&factoring, &reduction, p, q); - let is_valid_factorization = p * q == 7; - - if is_valid_factorization { - assert!(satisfies, "{}*{}=7 should satisfy the circuit", p, q); - // Check it's a trivial factorization (1*7 or 7*1) - assert!( - (p == 1 && q == 7) || (p == 7 && q == 1), - "7 is prime, so only 1*7 or 7*1 should work" - ); - } else if p > 0 && q > 0 { - // Non-zero products that don't equal 7 should not satisfy - assert!( - !satisfies, - "{}*{}={} != 7 should not satisfy the circuit", - p, - q, - p * q - ); - } - } - } - } - - #[test] - fn test_all_2bit_factorizations() { - // Test all possible 2-bit * 2-bit multiplications for target 6 - let factoring = Factoring::new(2, 2, 6); - let reduction = ReduceTo::>::reduce_to(&factoring); - - let mut valid_factorizations = Vec::new(); - for p in 0..4u64 { - for q in 0..4u64 { - if check_factorization_satisfies(&factoring, &reduction, p, q) { - valid_factorizations.push((p, q)); - } - } - } - - // Only 2*3 and 3*2 should satisfy (both give 6) - assert_eq!( - valid_factorizations.len(), - 2, - "Should find exactly 2 factorizations of 6" - ); - assert!(valid_factorizations.contains(&(2, 3)), "Should find 2*3"); - assert!(valid_factorizations.contains(&(3, 2)), "Should find 3*2"); - } - - #[test] - fn test_factorization_1_trivial() { - // Factor 1 = 1 * 1 - let factoring = Factoring::new(2, 2, 1); - let reduction = ReduceTo::>::reduce_to(&factoring); - - assert!( - check_factorization_satisfies(&factoring, &reduction, 1, 1), - "1 * 1 = 1 should satisfy" - ); - assert!( - !check_factorization_satisfies(&factoring, &reduction, 2, 1), - "2 * 1 = 2 != 1 should not satisfy" - ); - } -} +#[path = "../tests_unit/rules/factoring_circuit.rs"] +mod tests; diff --git a/src/rules/factoring_ilp.rs b/src/rules/factoring_ilp.rs index 1ff8151..8d6d971 100644 --- a/src/rules/factoring_ilp.rs +++ b/src/rules/factoring_ilp.rs @@ -275,307 +275,5 @@ impl ReduceTo for Factoring { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Factor 6 with 2-bit factors - let problem = Factoring::new(2, 2, 6); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check variable count: m + n + m*n + (m+n) = 2 + 2 + 4 + 4 = 12 - assert_eq!(ilp.num_vars, 12); - - // Check constraint count: 3*m*n + (m+n) + 1 = 12 + 4 + 1 = 17 - assert_eq!(ilp.constraints.len(), 17); - - assert_eq!(ilp.sense, ObjectiveSense::Minimize); - } - - 
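    // Where the 12 / 17 above come from (sketch of the bookkeeping; the formulas are the
    // ones asserted in test_variable_count_formula / test_constraint_count_formula below,
    // while the per-constraint breakdown is an assumption about the encoding):
    //   vars        = m + n + m*n + (m + n)  = 2 + 2 + 4 + 4 = 12
    //                 (factor bits p_i and q_j, products z_ij, carries per output column)
    //   constraints = 3*m*n + (m + n) + 1    = 12 + 4 + 1    = 17
    //                 (presumably 3 linearization constraints per z_ij = p_i AND q_j, one
    //                  column-sum constraint per output bit, plus one final constraint
    //                  tying the product to the target)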
#[test] - fn test_variable_layout() { - let problem = Factoring::new(3, 2, 6); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - - // p variables: [0, 1, 2] - assert_eq!(reduction.p_var(0), 0); - assert_eq!(reduction.p_var(2), 2); - - // q variables: [3, 4] - assert_eq!(reduction.q_var(0), 3); - assert_eq!(reduction.q_var(1), 4); - - // z variables: [5, 6, 7, 8, 9, 10] (3x2 = 6) - assert_eq!(reduction.z_var(0, 0), 5); - assert_eq!(reduction.z_var(0, 1), 6); - assert_eq!(reduction.z_var(1, 0), 7); - assert_eq!(reduction.z_var(2, 1), 10); - - // carry variables: [11, 12, 13, 14, 15] (m+n = 5) - assert_eq!(reduction.carry_var(0), 11); - assert_eq!(reduction.carry_var(4), 15); - } - - #[test] - fn test_factor_6() { - // 6 = 2 × 3 or 3 × 2 - let problem = Factoring::new(2, 2, 6); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Verify it's a valid factorization - assert!(problem.is_valid_factorization(&extracted)); - - let (a, b) = problem.read_factors(&extracted); - assert_eq!(a * b, 6); - } - - #[test] - fn test_factor_15() { - // Closed-loop test for factoring 15 = 3 × 5 (or 5 × 3, 1 × 15, 15 × 1) - - // 1. Create factoring instance: find p (4-bit) × q (4-bit) = 15 - let problem = Factoring::new(4, 4, 15); - - // 2. Reduce to ILP - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // 3. Solve ILP - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - - // 4. Extract factoring solution - let extracted = reduction.extract_solution(&ilp_solution); - - // 5. 
Verify: solution is valid and p × q = 15 - assert!(problem.is_valid_factorization(&extracted)); - let (p, q) = problem.read_factors(&extracted); - assert_eq!(p * q, 15); // e.g., (3, 5) or (5, 3) - } - - #[test] - fn test_factor_35() { - // 35 = 5 × 7 or 7 × 5 - let problem = Factoring::new(3, 3, 35); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert!(problem.is_valid_factorization(&extracted)); - - let (a, b) = problem.read_factors(&extracted); - assert_eq!(a * b, 35); - } - - #[test] - fn test_factor_one() { - // 1 = 1 × 1 - let problem = Factoring::new(2, 2, 1); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert!(problem.is_valid_factorization(&extracted)); - - let (a, b) = problem.read_factors(&extracted); - assert_eq!(a * b, 1); - } - - #[test] - fn test_factor_prime() { - // 7 is prime: 7 = 1 × 7 or 7 × 1 - let problem = Factoring::new(3, 3, 7); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert!(problem.is_valid_factorization(&extracted)); - - let (a, b) = problem.read_factors(&extracted); - assert_eq!(a * b, 7); - } - - #[test] - fn test_factor_square() { - // 9 = 3 × 3 - let problem = Factoring::new(3, 3, 9); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert!(problem.is_valid_factorization(&extracted)); - - let (a, b) = problem.read_factors(&extracted); - assert_eq!(a * b, 9); - } - - #[test] - fn test_infeasible_target_too_large() { - // Target 100 with 2-bit factors (max product is 3 × 3 = 9) - let problem = Factoring::new(2, 2, 100); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let result = ilp_solver.solve(ilp); - - assert!(result.is_none(), "Should be infeasible"); - } - - #[test] - fn test_ilp_matches_brute_force() { - let problem = Factoring::new(2, 2, 6); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Get ILP solution - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let ilp_factors = reduction.extract_solution(&ilp_solution); - - // Get brute force solutions - let bf = BruteForce::new(); - let bf_solutions = bf.find_best(&problem); - - // ILP solution should be among brute force solutions - let (a, b) = problem.read_factors(&ilp_factors); - let bf_pairs: Vec<(u64, u64)> = bf_solutions - .iter() - .map(|s| problem.read_factors(s)) - .collect(); - - assert!( - bf_pairs.contains(&(a, b)), - "ILP solution ({}, {}) should be in brute 
force solutions {:?}", - a, - b, - bf_pairs - ); - } - - #[test] - fn test_solution_extraction() { - let problem = Factoring::new(2, 2, 6); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - - // Manually construct ILP solution for 2 × 3 = 6 - // p = 2 = binary 10 -> p_0=0, p_1=1 - // q = 3 = binary 11 -> q_0=1, q_1=1 - // z_00 = p_0 * q_0 = 0, z_01 = p_0 * q_1 = 0 - // z_10 = p_1 * q_0 = 1, z_11 = p_1 * q_1 = 1 - // Variables: [p0, p1, q0, q1, z00, z01, z10, z11, c0, c1, c2, c3] - let ilp_solution = vec![0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0]; - let extracted = reduction.extract_solution(&ilp_solution); - - // Should extract [p0, p1, q0, q1] = [0, 1, 1, 1] - assert_eq!(extracted, vec![0, 1, 1, 1]); - - let (a, b) = problem.read_factors(&extracted); - assert_eq!(a, 2); - assert_eq!(b, 3); - assert_eq!(a * b, 6); - } - - #[test] - fn test_source_and_target_size() { - let problem = Factoring::new(3, 4, 12); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_bits_first"), Some(3)); - assert_eq!(source_size.get("num_bits_second"), Some(4)); - - // num_vars = 3 + 4 + 12 + 7 = 26 - assert_eq!(target_size.get("num_vars"), Some(26)); - - // num_constraints = 3*12 + 7 + 1 = 44 - assert_eq!(target_size.get("num_constraints"), Some(44)); - } - - #[test] - fn test_solve_reduced() { - let problem = Factoring::new(2, 2, 6); - - let ilp_solver = ILPSolver::new(); - let solution = ilp_solver - .solve_reduced(&problem) - .expect("solve_reduced should work"); - - assert!(problem.is_valid_factorization(&solution)); - } - - #[test] - fn test_asymmetric_bit_widths() { - // 12 = 3 × 4 or 4 × 3 or 2 × 6 or 6 × 2 or 1 × 12 or 12 × 1 - let problem = Factoring::new(2, 4, 12); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - assert!(problem.is_valid_factorization(&extracted)); - - let (a, b) = problem.read_factors(&extracted); - assert_eq!(a * b, 12); - } - - #[test] - fn test_constraint_count_formula() { - // Verify constraint count matches formula: 3*m*n + (m+n) + 1 - for (m, n) in [(2, 2), (3, 3), (2, 4), (4, 2)] { - let problem = Factoring::new(m, n, 1); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let expected = 3 * m * n + (m + n) + 1; - assert_eq!( - ilp.constraints.len(), - expected, - "Constraint count mismatch for m={}, n={}", - m, - n - ); - } - } - - #[test] - fn test_variable_count_formula() { - // Verify variable count matches formula: m + n + m*n + (m+n) - for (m, n) in [(2, 2), (3, 3), (2, 4), (4, 2)] { - let problem = Factoring::new(m, n, 1); - let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let expected = m + n + m * n + (m + n); - assert_eq!( - ilp.num_vars, expected, - "Variable count mismatch for m={}, n={}", - m, n - ); - } - } -} +#[path = "../tests_unit/rules/factoring_ilp.rs"] +mod tests; diff --git a/src/rules/graph.rs b/src/rules/graph.rs index 0005c08..9800df1 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -687,735 +687,5 @@ impl ReductionGraph { } #[cfg(test)] -mod tests { - use super::*; - use 
crate::models::graph::{IndependentSet, VertexCovering}; - use crate::models::set::SetPacking; - use crate::rules::cost::MinimizeSteps; - use crate::topology::SimpleGraph; - - #[test] - fn test_find_direct_path() { - let graph = ReductionGraph::new(); - let paths = graph.find_paths::, VertexCovering>(); - assert!(!paths.is_empty()); - assert_eq!(paths[0].type_names.len(), 2); - assert_eq!(paths[0].len(), 1); // One reduction step - } - - #[test] - fn test_find_indirect_path() { - let graph = ReductionGraph::new(); - // IS -> VC -> IS -> SP or IS -> SP directly - let paths = graph.find_paths::, SetPacking>(); - assert!(!paths.is_empty()); - } - - #[test] - fn test_find_shortest_path() { - let graph = ReductionGraph::new(); - let path = graph.find_shortest_path::, SetPacking>(); - assert!(path.is_some()); - let path = path.unwrap(); - assert_eq!(path.len(), 1); // Direct path exists - } - - #[test] - fn test_has_direct_reduction() { - let graph = ReductionGraph::new(); - assert!(graph.has_direct_reduction::, VertexCovering>()); - assert!(graph.has_direct_reduction::, IndependentSet>()); - } - - #[test] - fn test_no_path() { - let graph = ReductionGraph::new(); - // No path between IndependentSet and QUBO (disconnected in graph topology) - let paths = - graph.find_paths::, crate::models::optimization::QUBO>(); - assert!(paths.is_empty()); - } - - #[test] - fn test_type_erased_paths() { - let graph = ReductionGraph::new(); - - // Different weight types should find the same path (type-erased) - let paths_i32 = graph.find_paths::< - crate::models::graph::MaxCut, - crate::models::optimization::SpinGlass, - >(); - let paths_f64 = graph.find_paths::< - crate::models::graph::MaxCut, - crate::models::optimization::SpinGlass, - >(); - - // Both should find paths since we use type-erased names - assert!(!paths_i32.is_empty()); - assert!(!paths_f64.is_empty()); - assert_eq!(paths_i32[0].type_names, paths_f64[0].type_names); - } - - #[test] - fn test_find_paths_by_name() { - let graph = ReductionGraph::new(); - - let paths = graph.find_paths_by_name("MaxCut", "SpinGlass"); - assert!(!paths.is_empty()); - assert_eq!(paths[0].len(), 1); // Direct path - - let paths = graph.find_paths_by_name("Factoring", "SpinGlass"); - assert!(!paths.is_empty()); - assert_eq!(paths[0].len(), 2); // Factoring -> CircuitSAT -> SpinGlass - } - - #[test] - fn test_problem_types() { - let graph = ReductionGraph::new(); - let types = graph.problem_types(); - assert!(types.len() >= 5); - assert!(types.iter().any(|t| t.contains("IndependentSet"))); - assert!(types.iter().any(|t| t.contains("VertexCovering"))); - } - - #[test] - fn test_graph_statistics() { - let graph = ReductionGraph::new(); - assert!(graph.num_types() >= 5); - assert!(graph.num_reductions() >= 6); - } - - #[test] - fn test_reduction_path_methods() { - let graph = ReductionGraph::new(); - let path = graph - .find_shortest_path::, VertexCovering>() - .unwrap(); - - assert!(!path.is_empty()); - assert!(path.source().unwrap().contains("IndependentSet")); - assert!(path.target().unwrap().contains("VertexCovering")); - } - - #[test] - fn test_bidirectional_paths() { - let graph = ReductionGraph::new(); - - // Forward path - let forward = graph.find_paths::, VertexCovering>(); - assert!(!forward.is_empty()); - - // Backward path - let backward = graph.find_paths::, IndependentSet>(); - assert!(!backward.is_empty()); - } - - #[test] - fn test_to_json() { - let graph = ReductionGraph::new(); - let json = graph.to_json(); - - // Check nodes - assert!(json.nodes.len() >= 
10); - assert!(json.nodes.iter().any(|n| n.name == "IndependentSet")); - assert!(json.nodes.iter().any(|n| n.category == "graph")); - assert!(json.nodes.iter().any(|n| n.category == "optimization")); - - // Check edges - assert!(json.edges.len() >= 10); - - // Check that IS <-> VC is marked bidirectional - let is_vc_edge = json.edges.iter().find(|e| { - (e.source.name.contains("IndependentSet") && e.target.name.contains("VertexCovering")) - || (e.source.name.contains("VertexCovering") - && e.target.name.contains("IndependentSet")) - }); - assert!(is_vc_edge.is_some()); - assert!(is_vc_edge.unwrap().bidirectional); - } - - #[test] - fn test_to_json_string() { - let graph = ReductionGraph::new(); - let json_string = graph.to_json_string().unwrap(); - - // Should be valid JSON - assert!(json_string.contains("\"nodes\"")); - assert!(json_string.contains("\"edges\"")); - assert!(json_string.contains("IndependentSet")); - assert!(json_string.contains("\"category\"")); - assert!(json_string.contains("\"bidirectional\"")); - } - - #[test] - fn test_categorize_type() { - // Graph problems - assert_eq!( - ReductionGraph::categorize_type("IndependentSet"), - "graph" - ); - assert_eq!( - ReductionGraph::categorize_type("VertexCovering"), - "graph" - ); - assert_eq!(ReductionGraph::categorize_type("MaxCut"), "graph"); - assert_eq!(ReductionGraph::categorize_type("KColoring"), "graph"); - assert_eq!( - ReductionGraph::categorize_type("DominatingSet"), - "graph" - ); - assert_eq!(ReductionGraph::categorize_type("Matching"), "graph"); - - // Set problems - assert_eq!(ReductionGraph::categorize_type("SetPacking"), "set"); - assert_eq!(ReductionGraph::categorize_type("SetCovering"), "set"); - - // Optimization - assert_eq!( - ReductionGraph::categorize_type("SpinGlass"), - "optimization" - ); - assert_eq!(ReductionGraph::categorize_type("QUBO"), "optimization"); - - // Satisfiability - assert_eq!( - ReductionGraph::categorize_type("Satisfiability"), - "satisfiability" - ); - assert_eq!( - ReductionGraph::categorize_type("KSatisfiability<3, i32>"), - "satisfiability" - ); - assert_eq!( - ReductionGraph::categorize_type("CircuitSAT"), - "satisfiability" - ); - - // Specialized - assert_eq!(ReductionGraph::categorize_type("Factoring"), "specialized"); - - // Unknown - assert_eq!(ReductionGraph::categorize_type("UnknownProblem"), "other"); - } - - #[test] - fn test_sat_based_reductions() { - use crate::models::graph::KColoring; - use crate::models::graph::DominatingSet; - use crate::models::satisfiability::Satisfiability; - - let graph = ReductionGraph::new(); - - // SAT -> IS - assert!(graph.has_direct_reduction::, IndependentSet>()); - - // SAT -> KColoring - assert!(graph.has_direct_reduction::, KColoring<3, SimpleGraph, i32>>()); - - // SAT -> DominatingSet - assert!(graph.has_direct_reduction::, DominatingSet>()); - } - - #[test] - fn test_circuit_reductions() { - use crate::models::optimization::SpinGlass; - use crate::models::specialized::{CircuitSAT, Factoring}; - - let graph = ReductionGraph::new(); - - // Factoring -> CircuitSAT - assert!(graph.has_direct_reduction::>()); - - // CircuitSAT -> SpinGlass - assert!(graph.has_direct_reduction::, SpinGlass>()); - - // Find path from Factoring to SpinGlass - let paths = graph.find_paths::>(); - assert!(!paths.is_empty()); - let shortest = graph - .find_shortest_path::>() - .unwrap(); - assert_eq!(shortest.len(), 2); // Factoring -> CircuitSAT -> SpinGlass - } - - #[test] - fn test_optimization_reductions() { - use crate::models::graph::MaxCut; - use 
crate::models::optimization::{SpinGlass, QUBO}; - - let graph = ReductionGraph::new(); - - // SpinGlass <-> QUBO (bidirectional) - assert!(graph.has_direct_reduction::, QUBO>()); - assert!(graph.has_direct_reduction::, SpinGlass>()); - - // MaxCut <-> SpinGlass (bidirectional) - assert!(graph.has_direct_reduction::, SpinGlass>()); - assert!(graph.has_direct_reduction::, MaxCut>()); - } - - #[test] - fn test_ksat_reductions() { - use crate::models::satisfiability::{KSatisfiability, Satisfiability}; - - let graph = ReductionGraph::new(); - - // SAT <-> 3-SAT (bidirectional) - assert!(graph.has_direct_reduction::, KSatisfiability<3, i32>>()); - assert!(graph.has_direct_reduction::, Satisfiability>()); - } - - #[test] - fn test_all_categories_present() { - let graph = ReductionGraph::new(); - let json = graph.to_json(); - - let categories: std::collections::HashSet<&str> = - json.nodes.iter().map(|n| n.category.as_str()).collect(); - - assert!(categories.contains("graph")); - assert!(categories.contains("set")); - assert!(categories.contains("optimization")); - assert!(categories.contains("satisfiability")); - assert!(categories.contains("specialized")); - } - - #[test] - fn test_empty_path_source_target() { - let path = ReductionPath { type_names: vec![] }; - assert!(path.is_empty()); - assert_eq!(path.len(), 0); - assert!(path.source().is_none()); - assert!(path.target().is_none()); - } - - #[test] - fn test_single_node_path() { - let path = ReductionPath { - type_names: vec!["IndependentSet"], - }; - assert!(!path.is_empty()); - assert_eq!(path.len(), 0); // No reductions, just one type - assert_eq!(path.source(), Some("IndependentSet")); - assert_eq!(path.target(), Some("IndependentSet")); - } - - #[test] - fn test_default_implementation() { - let graph1 = ReductionGraph::new(); - let graph2 = ReductionGraph::default(); - - assert_eq!(graph1.num_types(), graph2.num_types()); - assert_eq!(graph1.num_reductions(), graph2.num_reductions()); - } - - #[test] - fn test_to_json_file() { - use std::env; - use std::fs; - - let graph = ReductionGraph::new(); - let file_path = env::temp_dir().join("problemreductions_test_graph.json"); - - // Write to file - graph.to_json_file(&file_path).unwrap(); - - // Read back and verify - let content = fs::read_to_string(&file_path).unwrap(); - assert!(content.contains("\"nodes\"")); - assert!(content.contains("\"edges\"")); - assert!(content.contains("IndependentSet")); - - // Parse as generic JSON to verify validity - let parsed: serde_json::Value = serde_json::from_str(&content).unwrap(); - assert!(!parsed["nodes"].as_array().unwrap().is_empty()); - assert!(!parsed["edges"].as_array().unwrap().is_empty()); - - // Clean up - let _ = fs::remove_file(&file_path); - } - - #[test] - fn test_has_direct_reduction_unregistered_types() { - // Test with a type that's not registered in the graph - struct UnregisteredType; - - let graph = ReductionGraph::new(); - - // Source type not registered - assert!(!graph.has_direct_reduction::>()); - - // Target type not registered - assert!(!graph.has_direct_reduction::, UnregisteredType>()); - - // Both types not registered - assert!(!graph.has_direct_reduction::()); - } - - #[test] - fn test_find_paths_unregistered_source() { - struct UnregisteredType; - - let graph = ReductionGraph::new(); - let paths = graph.find_paths::>(); - assert!(paths.is_empty()); - } - - #[test] - fn test_find_paths_unregistered_target() { - struct UnregisteredType; - - let graph = ReductionGraph::new(); - let paths = graph.find_paths::, 
UnregisteredType>(); - assert!(paths.is_empty()); - } - - #[test] - fn test_find_shortest_path_no_path() { - struct UnregisteredType; - - let graph = ReductionGraph::new(); - let path = graph.find_shortest_path::>(); - assert!(path.is_none()); - } - - #[test] - fn test_categorize_circuit_as_specialized() { - // CircuitSAT should be categorized as specialized (contains "Circuit") - assert_eq!( - ReductionGraph::categorize_type("CircuitSAT"), - "satisfiability" - ); - // But it contains "SAT" so it goes to satisfiability first - // Let's verify the actual behavior matches what the code does - } - - #[test] - fn test_edge_bidirectionality_detection() { - let graph = ReductionGraph::new(); - let json = graph.to_json(); - - // Count bidirectional and unidirectional edges - let bidirectional_count = json.edges.iter().filter(|e| e.bidirectional).count(); - let unidirectional_count = json.edges.iter().filter(|e| !e.bidirectional).count(); - - // We should have both types - assert!(bidirectional_count > 0, "Should have bidirectional edges"); - assert!(unidirectional_count > 0, "Should have unidirectional edges"); - - // Verify specific known bidirectional edges - let is_vc_bidir = json.edges.iter().any(|e| { - (e.source.name.contains("IndependentSet") && e.target.name.contains("VertexCovering") - || e.source.name.contains("VertexCovering") - && e.target.name.contains("IndependentSet")) - && e.bidirectional - }); - assert!(is_vc_bidir, "IS <-> VC should be bidirectional"); - - // Verify specific known unidirectional edge - let factoring_circuit_unidir = json.edges.iter().any(|e| { - e.source.name.contains("Factoring") - && e.target.name.contains("CircuitSAT") - && !e.bidirectional - }); - assert!( - factoring_circuit_unidir, - "Factoring -> CircuitSAT should be unidirectional" - ); - } - - // New tests for set-theoretic path finding - - #[test] - fn test_graph_hierarchy_built() { - let graph = ReductionGraph::new(); - let hierarchy = graph.graph_hierarchy(); - - // Should have relationships from GraphSubtypeEntry registrations - // UnitDiskGraph -> PlanarGraph -> SimpleGraph - // BipartiteGraph -> SimpleGraph - assert!( - hierarchy - .get("UnitDiskGraph") - .map(|s| s.contains("SimpleGraph")) - .unwrap_or(false), - "UnitDiskGraph should have SimpleGraph as supertype" - ); - assert!( - hierarchy - .get("PlanarGraph") - .map(|s| s.contains("SimpleGraph")) - .unwrap_or(false), - "PlanarGraph should have SimpleGraph as supertype" - ); - } - - #[test] - fn test_is_graph_subtype_reflexive() { - let graph = ReductionGraph::new(); - - // Every type is a subtype of itself - assert!(graph.is_graph_subtype("SimpleGraph", "SimpleGraph")); - assert!(graph.is_graph_subtype("PlanarGraph", "PlanarGraph")); - assert!(graph.is_graph_subtype("UnitDiskGraph", "UnitDiskGraph")); - } - - #[test] - fn test_is_graph_subtype_direct() { - let graph = ReductionGraph::new(); - - // Direct subtype relationships - assert!(graph.is_graph_subtype("PlanarGraph", "SimpleGraph")); - assert!(graph.is_graph_subtype("BipartiteGraph", "SimpleGraph")); - assert!(graph.is_graph_subtype("UnitDiskGraph", "PlanarGraph")); - } - - #[test] - fn test_is_graph_subtype_transitive() { - let graph = ReductionGraph::new(); - - // Transitive closure: UnitDiskGraph -> PlanarGraph -> SimpleGraph - assert!(graph.is_graph_subtype("UnitDiskGraph", "SimpleGraph")); - } - - #[test] - fn test_is_graph_subtype_not_supertype() { - let graph = ReductionGraph::new(); - - // SimpleGraph is NOT a subtype of PlanarGraph (only the reverse) - 
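// Illustrative sketch (not this crate's implementation): the subtype relation these
// tests describe is reflexive and follows the registered supertype edges
// transitively (UnitDiskGraph -> PlanarGraph -> SimpleGraph), but never the reverse.
// All names below are stand-ins for the sketch only.
use std::collections::{HashMap, HashSet};

fn is_subtype<'a>(hierarchy: &HashMap<&'a str, Vec<&'a str>>, sub: &'a str, sup: &str) -> bool {
    if sub == sup {
        return true; // reflexive: every graph type is a subtype of itself
    }
    let mut stack = vec![sub];
    let mut seen: HashSet<&str> = HashSet::new();
    while let Some(t) = stack.pop() {
        if !seen.insert(t) {
            continue; // already explored this type
        }
        for &parent in hierarchy.get(t).into_iter().flatten() {
            if parent == sup {
                return true;
            }
            stack.push(parent); // keep climbing the supertype chain
        }
    }
    false
}

#[test]
fn subtype_sketch_is_reflexive_and_transitive_but_not_symmetric() {
    let mut h: HashMap<&str, Vec<&str>> = HashMap::new();
    h.insert("UnitDiskGraph", vec!["PlanarGraph"]);
    h.insert("PlanarGraph", vec!["SimpleGraph"]);
    h.insert("BipartiteGraph", vec!["SimpleGraph"]);
    assert!(is_subtype(&h, "PlanarGraph", "PlanarGraph"));
    assert!(is_subtype(&h, "UnitDiskGraph", "SimpleGraph"));
    assert!(!is_subtype(&h, "SimpleGraph", "PlanarGraph"));
}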
assert!(!graph.is_graph_subtype("SimpleGraph", "PlanarGraph")); - assert!(!graph.is_graph_subtype("SimpleGraph", "UnitDiskGraph")); - } - - #[test] - fn test_rule_applicable_same_graphs() { - let graph = ReductionGraph::new(); - - // Rule for SimpleGraph -> SimpleGraph applies to same - assert!(graph.rule_applicable("SimpleGraph", "SimpleGraph", "SimpleGraph", "SimpleGraph")); - } - - #[test] - fn test_rule_applicable_subtype_source() { - let graph = ReductionGraph::new(); - - // Rule for SimpleGraph -> SimpleGraph applies when source is PlanarGraph - // (because PlanarGraph <= SimpleGraph) - assert!(graph.rule_applicable("PlanarGraph", "SimpleGraph", "SimpleGraph", "SimpleGraph")); - } - - #[test] - fn test_rule_applicable_subtype_target() { - let graph = ReductionGraph::new(); - - // Rule producing PlanarGraph applies when we want SimpleGraph - // (because PlanarGraph <= SimpleGraph) - assert!(graph.rule_applicable("SimpleGraph", "SimpleGraph", "SimpleGraph", "PlanarGraph")); - } - - #[test] - fn test_rule_not_applicable_wrong_source() { - let graph = ReductionGraph::new(); - - // Rule requiring PlanarGraph does NOT apply to SimpleGraph source - // (because SimpleGraph is NOT <= PlanarGraph) - assert!(!graph.rule_applicable("SimpleGraph", "SimpleGraph", "PlanarGraph", "SimpleGraph")); - } - - #[test] - fn test_rule_not_applicable_wrong_target() { - let graph = ReductionGraph::new(); - - // Rule producing SimpleGraph does NOT apply when we need PlanarGraph - // (because SimpleGraph is NOT <= PlanarGraph) - assert!(!graph.rule_applicable("SimpleGraph", "PlanarGraph", "SimpleGraph", "SimpleGraph")); - } - - #[test] - fn test_find_cheapest_path_minimize_steps() { - let graph = ReductionGraph::new(); - let cost_fn = MinimizeSteps; - let input_size = ProblemSize::new(vec![("n", 10), ("m", 20)]); - - // Find path from IndependentSet to VertexCovering on SimpleGraph - let path = graph.find_cheapest_path( - ("IndependentSet", "SimpleGraph"), - ("VertexCovering", "SimpleGraph"), - &input_size, - &cost_fn, - ); - - assert!(path.is_some()); - let path = path.unwrap(); - assert_eq!(path.len(), 1); // Direct path - } - - #[test] - fn test_find_cheapest_path_multi_step() { - let graph = ReductionGraph::new(); - let cost_fn = MinimizeSteps; - let input_size = ProblemSize::new(vec![("num_vertices", 10), ("num_edges", 20)]); - - // Find multi-step path where all edges use compatible graph types - // IndependentSet (SimpleGraph) -> SetPacking (SimpleGraph) - // This tests the algorithm can find paths with consistent graph types - let path = graph.find_cheapest_path( - ("IndependentSet", "SimpleGraph"), - ("SetPacking", "SimpleGraph"), - &input_size, - &cost_fn, - ); - - assert!(path.is_some()); - let path = path.unwrap(); - assert_eq!(path.len(), 1); // Direct path: IndependentSet -> SetPacking - } - - #[test] - fn test_find_cheapest_path_no_path() { - let graph = ReductionGraph::new(); - let cost_fn = MinimizeSteps; - let input_size = ProblemSize::new(vec![("n", 10)]); - - // No path from IndependentSet to QUBO - let path = graph.find_cheapest_path( - ("IndependentSet", "SimpleGraph"), - ("QUBO", "SimpleGraph"), - &input_size, - &cost_fn, - ); - - assert!(path.is_none()); - } - - #[test] - fn test_find_cheapest_path_unknown_source() { - let graph = ReductionGraph::new(); - let cost_fn = MinimizeSteps; - let input_size = ProblemSize::new(vec![("n", 10)]); - - let path = graph.find_cheapest_path( - ("UnknownProblem", "SimpleGraph"), - ("VertexCovering", "SimpleGraph"), - &input_size, - &cost_fn, - ); - - 
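// Illustrative sketch (assumed names, not this crate's code): under a cost function
// like MinimizeSteps every reduction edge costs the same, so the cheapest path in
// these tests degenerates to the fewest-hops path, i.e. a breadth-first search over
// problem-type names.
use std::collections::{HashMap, VecDeque};

fn fewest_steps<'a>(
    edges: &HashMap<&'a str, Vec<&'a str>>,
    source: &'a str,
    target: &'a str,
) -> Option<Vec<&'a str>> {
    let mut prev: HashMap<&'a str, &'a str> = HashMap::new();
    let mut queue: VecDeque<&'a str> = VecDeque::new();
    queue.push_back(source);
    while let Some(node) = queue.pop_front() {
        if node == target {
            // Reconstruct the path by walking the predecessor chain back to the source.
            let mut path = vec![node];
            let mut cur = node;
            while let Some(&p) = prev.get(cur) {
                path.push(p);
                cur = p;
            }
            path.reverse();
            return Some(path);
        }
        for &next in edges.get(node).into_iter().flatten() {
            if next != source && !prev.contains_key(next) {
                prev.insert(next, node);
                queue.push_back(next);
            }
        }
    }
    None // target unreachable, mirroring the `None` cases checked in these tests
}

#[test]
fn bfs_sketch_prefers_the_direct_edge() {
    let mut g: HashMap<&str, Vec<&str>> = HashMap::new();
    g.insert("IndependentSet", vec!["VertexCovering", "SetPacking"]);
    g.insert("VertexCovering", vec!["IndependentSet"]);
    let path = fewest_steps(&g, "IndependentSet", "SetPacking").unwrap();
    assert_eq!(path, vec!["IndependentSet", "SetPacking"]);
}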
assert!(path.is_none()); - } - - #[test] - fn test_find_cheapest_path_unknown_target() { - let graph = ReductionGraph::new(); - let cost_fn = MinimizeSteps; - let input_size = ProblemSize::new(vec![("n", 10)]); - - let path = graph.find_cheapest_path( - ("IndependentSet", "SimpleGraph"), - ("UnknownProblem", "SimpleGraph"), - &input_size, - &cost_fn, - ); - - assert!(path.is_none()); - } - - #[test] - fn test_reduction_edge_struct() { - let edge = ReductionEdge { - source_variant: &[("graph", "PlanarGraph"), ("weight", "Unweighted")], - target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - overhead: ReductionOverhead::default(), - }; - - assert_eq!(edge.source_graph(), "PlanarGraph"); - assert_eq!(edge.target_graph(), "SimpleGraph"); - } - - #[test] - fn test_reduction_edge_default_graph() { - // When no "graph" key is present, default to SimpleGraph - let edge = ReductionEdge { - source_variant: &[("weight", "Unweighted")], - target_variant: &[], - overhead: ReductionOverhead::default(), - }; - - assert_eq!(edge.source_graph(), "SimpleGraph"); - assert_eq!(edge.target_graph(), "SimpleGraph"); - } - - #[test] - fn test_variant_to_map() { - let variant: &[(&str, &str)] = &[("graph", "SimpleGraph"), ("weight", "i32")]; - let map = ReductionGraph::variant_to_map(variant); - assert_eq!(map.get("graph"), Some(&"SimpleGraph".to_string())); - assert_eq!(map.get("weight"), Some(&"i32".to_string())); - assert_eq!(map.len(), 2); - } - - #[test] - fn test_variant_to_map_empty() { - let variant: &[(&str, &str)] = &[]; - let map = ReductionGraph::variant_to_map(variant); - assert!(map.is_empty()); - } - - #[test] - fn test_make_variant_ref() { - let variant: &[(&str, &str)] = &[("graph", "PlanarGraph"), ("weight", "f64")]; - let variant_ref = ReductionGraph::make_variant_ref("IndependentSet", variant); - assert_eq!(variant_ref.name, "IndependentSet"); - assert_eq!( - variant_ref.variant.get("graph"), - Some(&"PlanarGraph".to_string()) - ); - assert_eq!(variant_ref.variant.get("weight"), Some(&"f64".to_string())); - } - - #[test] - fn test_to_json_nodes_have_variants() { - let graph = ReductionGraph::new(); - let json = graph.to_json(); - - // Check that nodes have variant information - for node in &json.nodes { - // Verify node has a name - assert!(!node.name.is_empty()); - // Verify node has a category - assert!(!node.category.is_empty()); - } - } - - #[test] - fn test_to_json_edges_have_variants() { - let graph = ReductionGraph::new(); - let json = graph.to_json(); - - // Check that edges have source and target variant refs - for edge in &json.edges { - assert!(!edge.source.name.is_empty()); - assert!(!edge.target.name.is_empty()); - } - } - - #[test] - fn test_json_variant_content() { - let graph = ReductionGraph::new(); - let json = graph.to_json(); - - // Find a node and verify its variant contains expected keys - let is_node = json.nodes.iter().find(|n| n.name == "IndependentSet"); - assert!(is_node.is_some(), "IndependentSet node should exist"); - - // Find an edge involving IndependentSet (could be source or target) - let is_edge = json - .edges - .iter() - .find(|e| e.source.name == "IndependentSet" || e.target.name == "IndependentSet"); - assert!( - is_edge.is_some(), - "Edge involving IndependentSet should exist" - ); - } -} +#[path = "../tests_unit/rules/graph.rs"] +mod tests; diff --git a/src/rules/independentset_ilp.rs b/src/rules/independentset_ilp.rs index 666eb84..488a497 100644 --- a/src/rules/independentset_ilp.rs +++ b/src/rules/independentset_ilp.rs @@ -90,239 
+90,5 @@ impl ReduceTo for IndependentSet { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Triangle graph: 3 vertices, 3 edges - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); - assert_eq!( - ilp.constraints.len(), - 3, - "Should have one constraint per edge" - ); - assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - - // Each constraint should be x_i + x_j <= 1 - for constraint in &ilp.constraints { - assert_eq!(constraint.terms.len(), 2); - assert!((constraint.rhs - 1.0).abs() < 1e-9); - } - } - - #[test] - fn test_reduction_weighted() { - let problem = IndependentSet::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check that weights are correctly transferred to objective - let mut coeffs: Vec = vec![0.0; 3]; - for &(var, coef) in &ilp.objective { - coeffs[var] = coef; - } - assert!((coeffs[0] - 5.0).abs() < 1e-9); - assert!((coeffs[1] - 10.0).abs() < 1e-9); - assert!((coeffs[2] - 15.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_equals_brute_force_triangle() { - // Triangle graph: max IS = 1 vertex - let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force on original problem - let bf_solutions = bf.find_best(&problem); - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Both should find optimal size = 1 - let bf_size: usize = bf_solutions[0].iter().sum(); - let ilp_size: usize = extracted.iter().sum(); - assert_eq!(bf_size, 1); - assert_eq!(ilp_size, 1); - - // Verify the ILP solution is valid for the original problem - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - } - - #[test] - fn test_ilp_solution_equals_brute_force_path() { - // Path graph 0-1-2-3: max IS = 2 (e.g., {0, 2} or {1, 3} or {0, 3}) - let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force - let bf_solutions = bf.find_best(&problem); - let bf_size: usize = bf_solutions[0].iter().sum(); - - // Solve via ILP - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size: usize = extracted.iter().sum(); - - assert_eq!(bf_size, 2); - assert_eq!(ilp_size, 2); - - // Verify validity - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_ilp_solution_equals_brute_force_weighted() { - // Weighted problem: vertex 1 has high weight but is connected to both 0 and 2 - 
// 0 -- 1 -- 2 - // Weights: [1, 100, 1] - // Max IS by weight: just vertex 1 (weight 100) beats 0+2 (weight 2) - let problem = IndependentSet::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_obj = problem.solution_size(&bf_solutions[0]).size; - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_obj = problem.solution_size(&extracted).size; - - assert_eq!(bf_obj, 100); - assert_eq!(ilp_obj, 100); - - // Verify the solution selects vertex 1 - assert_eq!(extracted, vec![0, 1, 0]); - } - - #[test] - fn test_solution_extraction() { - let problem = IndependentSet::::new(4, vec![(0, 1), (2, 3)]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - - // Test that extraction works correctly (1:1 mapping) - let ilp_solution = vec![1, 0, 0, 1]; - let extracted = reduction.extract_solution(&ilp_solution); - assert_eq!(extracted, vec![1, 0, 0, 1]); - - // Verify this is a valid IS (0 and 3 are not adjacent) - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_source_and_target_size() { - let problem = IndependentSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(4)); - - assert_eq!(target_size.get("num_vars"), Some(5)); - assert_eq!(target_size.get("num_constraints"), Some(4)); - } - - #[test] - fn test_empty_graph() { - // Graph with no edges: all vertices can be selected - let problem = IndependentSet::::new(3, vec![]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.constraints.len(), 0); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // All vertices should be selected - assert_eq!(extracted, vec![1, 1, 1]); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 3); - } - - #[test] - fn test_complete_graph() { - // Complete graph K4: max IS = 1 - let problem = - IndependentSet::::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.constraints.len(), 6); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 1); - } - - #[test] - fn test_solve_reduced() { - // Test the ILPSolver::solve_reduced method - let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - - let ilp_solver = ILPSolver::new(); - let solution = ilp_solver - .solve_reduced(&problem) - .expect("solve_reduced should work"); - - let sol_result = problem.solution_size(&solution); - 
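// Illustrative sketch of the encoding exercised by the tests in this hunk: one
// binary variable per vertex, the vertex weights as the (maximized) objective, and
// one constraint x_u + x_v <= 1 per edge. The types below are stand-ins, not the
// crate's ILP / LinearConstraint / VarBounds API.

/// "terms sum to at most rhs", with terms given as (variable index, coefficient).
struct LeqConstraint {
    terms: Vec<(usize, f64)>,
    rhs: f64,
}

fn independent_set_as_ilp(
    weights: &[f64],
    edges: &[(usize, usize)],
) -> (Vec<(usize, f64)>, Vec<LeqConstraint>) {
    // Maximize sum_i w_i * x_i over binary x_i.
    let objective: Vec<(usize, f64)> = weights.iter().copied().enumerate().collect();
    // For every edge {u, v}, at most one endpoint may be selected.
    let constraints = edges
        .iter()
        .map(|&(u, v)| LeqConstraint {
            terms: vec![(u, 1.0), (v, 1.0)],
            rhs: 1.0,
        })
        .collect();
    (objective, constraints)
}

#[test]
fn triangle_gives_three_vars_and_three_pairwise_constraints() {
    let (obj, cons) = independent_set_as_ilp(&[1.0, 1.0, 1.0], &[(0, 1), (1, 2), (0, 2)]);
    assert_eq!(obj.len(), 3);
    assert_eq!(cons.len(), 3);
}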
assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - } - - #[test] - fn test_bipartite_graph() { - // Bipartite graph: 0-2, 0-3, 1-2, 1-3 (two independent sets: {0,1} and {2,3}) - // With equal weights, max IS = 2 - let problem = IndependentSet::::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); - let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - - // Should select either {0, 1} or {2, 3} - let sum: usize = extracted.iter().sum(); - assert_eq!(sum, 2); - } -} +#[path = "../tests_unit/rules/independentset_ilp.rs"] +mod tests; diff --git a/src/rules/independentset_setpacking.rs b/src/rules/independentset_setpacking.rs index b603ca4..15554b7 100644 --- a/src/rules/independentset_setpacking.rs +++ b/src/rules/independentset_setpacking.rs @@ -156,139 +156,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_is_to_setpacking() { - // Triangle graph - let is_problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction = ReduceTo::>::reduce_to(&is_problem); - let sp_problem = reduction.target_problem(); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp_problem); - - // Extract back - let is_solutions: Vec<_> = sp_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Max IS in triangle = 1 - for sol in &is_solutions { - let size: usize = sol.iter().sum(); - assert_eq!(size, 1); - } - } - - #[test] - fn test_setpacking_to_is() { - // Two disjoint sets and one overlapping - let sets = vec![ - vec![0, 1], - vec![2, 3], - vec![1, 2], // overlaps with both - ]; - let sp_problem = SetPacking::::new(sets); - let reduction: ReductionSPToIS = - ReduceTo::>::reduce_to(&sp_problem); - let is_problem = reduction.target_problem(); - - let solver = BruteForce::new(); - let is_solutions = solver.find_best(is_problem); - - // Max packing = 2 (sets 0 and 1) - for sol in &is_solutions { - let size: usize = sol.iter().sum(); - assert_eq!(size, 2); - } - } - - #[test] - fn test_roundtrip_is_sp_is() { - let original = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let solver = BruteForce::new(); - let original_solutions = solver.find_best(&original); - - // IS -> SP -> IS - let reduction1 = ReduceTo::>::reduce_to(&original); - let sp = reduction1.target_problem().clone(); - let reduction2: ReductionSPToIS = ReduceTo::>::reduce_to(&sp); - let roundtrip = reduction2.target_problem(); - - let roundtrip_solutions = solver.find_best(roundtrip); - - // Solutions should have same objective value - let orig_size: usize = original_solutions[0].iter().sum(); - let rt_size: usize = roundtrip_solutions[0].iter().sum(); - assert_eq!(orig_size, rt_size); - } - - #[test] - fn test_weighted_reduction() { - let is_problem = IndependentSet::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 20, 30]); - let reduction = ReduceTo::>::reduce_to(&is_problem); - let sp_problem = reduction.target_problem(); - - // Weights should be preserved - assert_eq!(sp_problem.weights_ref(), &vec![10, 20, 30]); - } - - #[test] - fn test_empty_graph() { - // No edges means all sets are empty (or we need to handle it) - let is_problem = 
IndependentSet::::new(3, vec![]); - let reduction = ReduceTo::>::reduce_to(&is_problem); - let sp_problem = reduction.target_problem(); - - // All sets should be empty (no edges to include) - assert_eq!(sp_problem.num_sets(), 3); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sp_problem); - - // With no overlaps, we can select all sets - assert_eq!(solutions[0].iter().sum::(), 3); - } - - #[test] - fn test_disjoint_sets() { - // Completely disjoint sets - let sets = vec![vec![0], vec![1], vec![2]]; - let sp_problem = SetPacking::::new(sets); - let reduction: ReductionSPToIS = - ReduceTo::>::reduce_to(&sp_problem); - let is_problem = reduction.target_problem(); - - // No edges in the intersection graph - assert_eq!(is_problem.num_edges(), 0); - } - - #[test] - fn test_reduction_sizes() { - // Test source_size and target_size methods - let is_problem = IndependentSet::::new(4, vec![(0, 1), (1, 2)]); - let reduction = ReduceTo::>::reduce_to(&is_problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - // Source and target sizes should have components - assert!(!source_size.components.is_empty()); - assert!(!target_size.components.is_empty()); - - // Test SP to IS sizes - let sets = vec![vec![0, 1], vec![2, 3]]; - let sp_problem = SetPacking::::new(sets); - let reduction2: ReductionSPToIS = - ReduceTo::>::reduce_to(&sp_problem); - - let source_size2 = reduction2.source_size(); - let target_size2 = reduction2.target_size(); - - assert!(!source_size2.components.is_empty()); - assert!(!target_size2.components.is_empty()); - } -} +#[path = "../tests_unit/rules/independentset_setpacking.rs"] +mod tests; diff --git a/src/rules/matching_ilp.rs b/src/rules/matching_ilp.rs index 8776599..acf2281 100644 --- a/src/rules/matching_ilp.rs +++ b/src/rules/matching_ilp.rs @@ -95,257 +95,5 @@ impl ReduceTo for Matching { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Triangle graph: 3 vertices, 3 edges - let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - assert_eq!(ilp.num_vars, 3, "Should have one variable per edge"); - // Each vertex has degree 2, so 3 constraints (one per vertex) - assert_eq!( - ilp.constraints.len(), - 3, - "Should have one constraint per vertex" - ); - assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - - // Each constraint should be sum of incident edge vars <= 1 - for constraint in &ilp.constraints { - assert!((constraint.rhs - 1.0).abs() < 1e-9); - } - } - - #[test] - fn test_reduction_weighted() { - let problem = Matching::new(3, vec![(0, 1, 5), (1, 2, 10)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check that weights are correctly transferred to objective - let mut coeffs: Vec = vec![0.0; 2]; - for &(var, coef) in &ilp.objective { - coeffs[var] = coef; - } - assert!((coeffs[0] - 5.0).abs() < 1e-9); - assert!((coeffs[1] - 10.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_equals_brute_force_triangle() { - // Triangle graph: max matching = 1 edge - let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); - let 
reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force on original problem - let bf_solutions = bf.find_best(&problem); - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Both should find optimal size = 1 (one edge) - let bf_size = problem.solution_size(&bf_solutions[0]).size; - let ilp_size = problem.solution_size(&extracted).size; - assert_eq!(bf_size, 1); - assert_eq!(ilp_size, 1); - - // Verify the ILP solution is valid for the original problem - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - } - - #[test] - fn test_ilp_solution_equals_brute_force_path() { - // Path graph 0-1-2-3: max matching = 2 (edges {0-1, 2-3}) - let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force - let bf_solutions = bf.find_best(&problem); - let bf_size = problem.solution_size(&bf_solutions[0]).size; - - // Solve via ILP - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size = problem.solution_size(&extracted).size; - - assert_eq!(bf_size, 2); - assert_eq!(ilp_size, 2); - - // Verify validity - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_ilp_solution_equals_brute_force_weighted() { - // Weighted matching: edge 0-1 has high weight - // 0 -- 1 -- 2 - // Weights: [100, 1] - // Max matching by weight: just edge 0-1 (weight 100) beats edge 1-2 (weight 1) - let problem = Matching::new(3, vec![(0, 1, 100), (1, 2, 1)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_obj = problem.solution_size(&bf_solutions[0]).size; - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_obj = problem.solution_size(&extracted).size; - - assert_eq!(bf_obj, 100); - assert_eq!(ilp_obj, 100); - - // Verify the solution selects edge 0 (0-1) - assert_eq!(extracted, vec![1, 0]); - } - - #[test] - fn test_solution_extraction() { - let problem = Matching::::unweighted(4, vec![(0, 1), (2, 3)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - - // Test that extraction works correctly (1:1 mapping) - let ilp_solution = vec![1, 1]; - let extracted = reduction.extract_solution(&ilp_solution); - assert_eq!(extracted, vec![1, 1]); - - // Verify this is a valid matching (edges 0-1 and 2-3 are disjoint) - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_source_and_target_size() { - let problem = - Matching::::unweighted(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - 
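// Illustrative sketch of the encoding these matching tests exercise: one binary
// variable per edge and, for each vertex touched by at least one edge, a constraint
// limiting the sum of its incident edge variables to 1. Stand-in types only, not the
// crate's ILP API.
fn matching_as_ilp(
    num_vertices: usize,
    edges: &[(usize, usize, f64)], // (u, v, weight)
) -> (Vec<(usize, f64)>, Vec<(Vec<usize>, f64)>) {
    // Objective: maximize the total weight of the selected edges.
    let objective: Vec<(usize, f64)> = edges
        .iter()
        .enumerate()
        .map(|(e, &(_, _, w))| (e, w))
        .collect();
    // incident[v] lists the edge variables touching vertex v.
    let mut incident = vec![Vec::new(); num_vertices];
    for (e, &(u, v, _)) in edges.iter().enumerate() {
        incident[u].push(e);
        incident[v].push(e);
    }
    // One "incident edges sum to <= 1" row per vertex of degree >= 1, which is why
    // the surrounding test expects num_constraints == num_vertices for a path graph.
    let constraints = incident
        .into_iter()
        .filter(|vars: &Vec<usize>| !vars.is_empty())
        .map(|vars| (vars, 1.0))
        .collect();
    (objective, constraints)
}

#[test]
fn path_graph_has_one_variable_per_edge_and_one_row_per_vertex() {
    let (obj, cons) = matching_as_ilp(4, &[(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0)]);
    assert_eq!(obj.len(), 3);
    assert_eq!(cons.len(), 4);
}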
assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(4)); - - assert_eq!(target_size.get("num_vars"), Some(4)); - // Constraints: one per vertex with degree >= 1 - // Vertices 0,1,2,3,4 have degrees 1,2,2,2,1 respectively - assert_eq!(target_size.get("num_constraints"), Some(5)); - } - - #[test] - fn test_empty_graph() { - // Graph with no edges: empty matching - let problem = Matching::::unweighted(3, vec![]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.num_vars, 0); - assert_eq!(ilp.constraints.len(), 0); - - let sol_result = problem.solution_size(&[]); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 0); - } - - #[test] - fn test_k4_perfect_matching() { - // Complete graph K4: can have perfect matching (2 edges covering all 4 vertices) - let problem = Matching::::unweighted( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // 6 edges, 4 vertices with constraints - assert_eq!(ilp.num_vars, 6); - assert_eq!(ilp.constraints.len(), 4); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); // Perfect matching has 2 edges - - // Verify all vertices are matched - let sum: usize = extracted.iter().sum(); - assert_eq!(sum, 2); - } - - #[test] - fn test_star_graph() { - // Star graph with center vertex 0 connected to 1, 2, 3 - // Max matching = 1 (only one edge can be selected) - let problem = Matching::::unweighted(4, vec![(0, 1), (0, 2), (0, 3)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 1); - } - - #[test] - fn test_bipartite_graph() { - // Bipartite graph: {0,1} and {2,3} with all cross edges - // Max matching = 2 (one perfect matching) - let problem = - Matching::::unweighted(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); - let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - } - - #[test] - fn test_solve_reduced() { - // Test the ILPSolver::solve_reduced method - let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); - - let ilp_solver = ILPSolver::new(); - let solution = ilp_solver - .solve_reduced(&problem) - .expect("solve_reduced should work"); - - let sol_result = problem.solution_size(&solution); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - } -} +#[path = "../tests_unit/rules/matching_ilp.rs"] +mod tests; diff --git a/src/rules/matching_setpacking.rs b/src/rules/matching_setpacking.rs index 5614150..fd9148a 
100644 --- a/src/rules/matching_setpacking.rs +++ b/src/rules/matching_setpacking.rs @@ -85,198 +85,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - use crate::topology::SimpleGraph; - - #[test] - fn test_matching_to_setpacking_structure() { - // Path graph 0-1-2 - let matching = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - // Should have 2 sets (one for each edge) - assert_eq!(sp.num_sets(), 2); - - // Sets should contain edge endpoints - let sets = sp.sets(); - assert_eq!(sets[0], vec![0, 1]); - assert_eq!(sets[1], vec![1, 2]); - } - - #[test] - fn test_matching_to_setpacking_path() { - // Path 0-1-2-3 with unit weights - let matching = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp); - - // Extract back to Matching solutions - let _matching_solutions: Vec<_> = sp_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Verify against direct Matching solution - let direct_solutions = solver.find_best(&matching); - - // Solutions should have same objective value - let sp_size: usize = sp_solutions[0].iter().sum(); - let direct_size: usize = direct_solutions[0].iter().sum(); - assert_eq!(sp_size, direct_size); - assert_eq!(sp_size, 2); // Max matching in path graph has 2 edges - } - - #[test] - fn test_matching_to_setpacking_triangle() { - // Triangle graph - let matching = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp); - - // Max matching in triangle = 1 (any single edge) - for sol in &sp_solutions { - assert_eq!(sol.iter().sum::(), 1); - } - - // Should have 3 optimal solutions (one for each edge) - assert_eq!(sp_solutions.len(), 3); - } - - #[test] - fn test_matching_to_setpacking_weighted() { - // Weighted edges: heavy edge should win over multiple light edges - let matching = - Matching::::new(4, vec![(0, 1, 100), (0, 2, 1), (1, 3, 1)]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - // Weights should be preserved - assert_eq!(sp.weights_ref(), &vec![100, 1, 1]); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp); - - // Edge 0-1 (weight 100) alone beats edges 0-2 + 1-3 (weight 2) - assert!(sp_solutions.contains(&vec![1, 0, 0])); - - // Verify through direct Matching solution - let direct_solutions = solver.find_best(&matching); - assert_eq!(matching.solution_size(&sp_solutions[0]).size, 100); - assert_eq!(matching.solution_size(&direct_solutions[0]).size, 100); - } - - #[test] - fn test_matching_to_setpacking_solution_extraction() { - let matching = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); - let reduction = ReduceTo::>::reduce_to(&matching); - - // Test solution extraction is 1:1 - let sp_solution = vec![1, 0, 1]; - let matching_solution = reduction.extract_solution(&sp_solution); - assert_eq!(matching_solution, vec![1, 0, 1]); - - // Verify the extracted solution is valid for original Matching - assert!(matching.solution_size(&matching_solution).is_valid); - } - - #[test] - fn test_matching_to_setpacking_k4() { - // Complete graph K4: can have perfect matching (2 
edges covering all 4 vertices) - let matching = Matching::::unweighted( - 4, - vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], - ); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp); - let direct_solutions = solver.find_best(&matching); - - // Both should find matchings of size 2 - let sp_size: usize = sp_solutions[0].iter().sum(); - let direct_size: usize = direct_solutions[0].iter().sum(); - assert_eq!(sp_size, 2); - assert_eq!(direct_size, 2); - } - - #[test] - fn test_matching_to_setpacking_empty() { - // Graph with no edges - let matching = Matching::::unweighted(3, vec![]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - assert_eq!(sp.num_sets(), 0); - } - - #[test] - fn test_matching_to_setpacking_single_edge() { - let matching = Matching::::unweighted(2, vec![(0, 1)]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - assert_eq!(sp.num_sets(), 1); - assert_eq!(sp.sets()[0], vec![0, 1]); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp); - - // Should select the only set - assert_eq!(sp_solutions, vec![vec![1]]); - } - - #[test] - fn test_matching_to_setpacking_disjoint_edges() { - // Two disjoint edges: 0-1 and 2-3 - let matching = Matching::::unweighted(4, vec![(0, 1), (2, 3)]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp); - - // Both edges can be selected (they don't share vertices) - assert_eq!(sp_solutions, vec![vec![1, 1]]); - } - - #[test] - fn test_reduction_sizes() { - let matching = Matching::::unweighted(5, vec![(0, 1), (1, 2), (2, 3)]); - let reduction = ReduceTo::>::reduce_to(&matching); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(3)); - assert_eq!(target_size.get("num_sets"), Some(3)); - } - - #[test] - fn test_matching_to_setpacking_star() { - // Star graph: center vertex 0 connected to 1, 2, 3 - let matching = Matching::::unweighted(4, vec![(0, 1), (0, 2), (0, 3)]); - let reduction = ReduceTo::>::reduce_to(&matching); - let sp = reduction.target_problem(); - - let solver = BruteForce::new(); - let sp_solutions = solver.find_best(sp); - - // All edges share vertex 0, so max matching = 1 - for sol in &sp_solutions { - assert_eq!(sol.iter().sum::(), 1); - } - // Should have 3 optimal solutions - assert_eq!(sp_solutions.len(), 3); - } -} +#[path = "../tests_unit/rules/matching_setpacking.rs"] +mod tests; diff --git a/src/rules/registry.rs b/src/rules/registry.rs index 5983aa2..b5d01e2 100644 --- a/src/rules/registry.rs +++ b/src/rules/registry.rs @@ -87,129 +87,5 @@ impl std::fmt::Debug for ReductionEntry { inventory::collect!(ReductionEntry); #[cfg(test)] -mod tests { - use super::*; - use crate::poly; - - #[test] - fn test_reduction_overhead_evaluate() { - let overhead = ReductionOverhead::new(vec![("n", poly!(3 * m)), ("m", poly!(m ^ 2))]); - - let input = ProblemSize::new(vec![("m", 4)]); - let output = overhead.evaluate_output_size(&input); - - assert_eq!(output.get("n"), Some(12)); // 3 * 4 - assert_eq!(output.get("m"), Some(16)); // 4^2 - } - - #[test] - fn test_reduction_overhead_default() { - let overhead = ReductionOverhead::default(); - 
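// Illustrative sketch (assumed shapes, not the crate's Polynomial / ProblemSize
// types): a reduction overhead maps each output-size field to an expression over the
// input sizes, and evaluating it just applies those expressions to a name -> value
// map -- e.g. {m: 4} becomes {n: 3*4 = 12, m: 4^2 = 16}, as asserted above.
use std::collections::HashMap;

type SizeExpr = Box<dyn Fn(&HashMap<&str, u64>) -> u64>;

fn evaluate_output_size(
    overhead: &[(&'static str, SizeExpr)],
    input: &HashMap<&str, u64>,
) -> HashMap<&'static str, u64> {
    overhead.iter().map(|(name, expr)| (*name, expr(input))).collect()
}

#[test]
fn overhead_sketch_evaluates_like_the_registry_test_above() {
    let overhead: Vec<(&'static str, SizeExpr)> = vec![
        ("n", Box::new(|s| 3 * s["m"])),      // n = 3 * m
        ("m", Box::new(|s| s["m"] * s["m"])), // m = m^2
    ];
    let input = HashMap::from([("m", 4_u64)]);
    let out = evaluate_output_size(&overhead, &input);
    assert_eq!(out["n"], 12);
    assert_eq!(out["m"], 16);
}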
assert!(overhead.output_size.is_empty()); - } - - #[test] - fn test_reduction_entry_overhead() { - let entry = ReductionEntry { - source_name: "TestSource", - target_name: "TestTarget", - source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - overhead_fn: || ReductionOverhead::new(vec![("n", poly!(2 * n))]), - }; - - let overhead = entry.overhead(); - let input = ProblemSize::new(vec![("n", 5)]); - let output = overhead.evaluate_output_size(&input); - assert_eq!(output.get("n"), Some(10)); - } - - #[test] - fn test_reduction_entry_debug() { - let entry = ReductionEntry { - source_name: "A", - target_name: "B", - source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - overhead_fn: || ReductionOverhead::default(), - }; - - let debug_str = format!("{:?}", entry); - assert!(debug_str.contains("A")); - assert!(debug_str.contains("B")); - } - - #[test] - fn test_is_base_reduction_unweighted() { - let entry = ReductionEntry { - source_name: "A", - target_name: "B", - source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - overhead_fn: || ReductionOverhead::default(), - }; - assert!(entry.is_base_reduction()); - } - - #[test] - fn test_is_base_reduction_source_weighted() { - let entry = ReductionEntry { - source_name: "A", - target_name: "B", - source_variant: &[("graph", "SimpleGraph"), ("weight", "i32")], - target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - overhead_fn: || ReductionOverhead::default(), - }; - assert!(!entry.is_base_reduction()); - } - - #[test] - fn test_is_base_reduction_target_weighted() { - let entry = ReductionEntry { - source_name: "A", - target_name: "B", - source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], - target_variant: &[("graph", "SimpleGraph"), ("weight", "f64")], - overhead_fn: || ReductionOverhead::default(), - }; - assert!(!entry.is_base_reduction()); - } - - #[test] - fn test_is_base_reduction_both_weighted() { - let entry = ReductionEntry { - source_name: "A", - target_name: "B", - source_variant: &[("graph", "SimpleGraph"), ("weight", "i32")], - target_variant: &[("graph", "SimpleGraph"), ("weight", "f64")], - overhead_fn: || ReductionOverhead::default(), - }; - assert!(!entry.is_base_reduction()); - } - - #[test] - fn test_is_base_reduction_no_weight_key() { - // If no weight key is present, assume unweighted (base) - let entry = ReductionEntry { - source_name: "A", - target_name: "B", - source_variant: &[("graph", "SimpleGraph")], - target_variant: &[("graph", "SimpleGraph")], - overhead_fn: || ReductionOverhead::default(), - }; - assert!(entry.is_base_reduction()); - } - - #[test] - fn test_reduction_entries_registered() { - let entries: Vec<_> = inventory::iter::().collect(); - - // Should have at least some registered reductions - assert!(entries.len() >= 10); - - // Check specific reductions exist - assert!(entries - .iter() - .any(|e| e.source_name == "IndependentSet" && e.target_name == "VertexCovering")); - } -} +#[path = "../tests_unit/rules/registry.rs"] +mod tests; diff --git a/src/rules/sat_coloring.rs b/src/rules/sat_coloring.rs index 4fb58f1..78be439 100644 --- a/src/rules/sat_coloring.rs +++ b/src/rules/sat_coloring.rs @@ -353,309 +353,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::models::satisfiability::CNFClause; 
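// The "+#[path = ...] +mod tests;" lines added to each file in this patch use the
// standard #[path] attribute to keep the module declaration in place while moving
// its body to src/tests_unit/. A minimal sketch of the pattern, with hypothetical
// file and function names:
//
// --- src/rules/example_rule.rs (hypothetical) ---
pub fn double(x: i32) -> i32 {
    x * 2
}

// The cfg(test) gate stays on the declaration, so the external file is only compiled
// for `cargo test`; the path is resolved relative to this file's directory.
#[cfg(test)]
#[path = "../tests_unit/rules/example_rule.rs"]
mod tests;

// --- src/tests_unit/rules/example_rule.rs (hypothetical) ---
// The former inline `mod tests { ... }` body moves here unchanged; `super` still
// refers to the declaring module, because #[path] changes the file location only,
// not the module tree.
use super::*;

#[test]
fn doubles_values() {
    assert_eq!(double(21), 42);
}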
- use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_constructor_basic_structure() { - let constructor = SATColoringConstructor::new(2); - - // Should have 2*2 + 3 = 7 vertices - assert_eq!(constructor.num_vertices, 7); - - // Check pos_vertices and neg_vertices - assert_eq!(constructor.pos_vertices, vec![3, 4]); - assert_eq!(constructor.neg_vertices, vec![5, 6]); - - // Check vmap - assert_eq!(constructor.vmap[&(0, false)], 3); - assert_eq!(constructor.vmap[&(0, true)], 5); - assert_eq!(constructor.vmap[&(1, false)], 4); - assert_eq!(constructor.vmap[&(1, true)], 6); - } - - #[test] - fn test_special_vertex_accessors() { - let constructor = SATColoringConstructor::new(1); - assert_eq!(constructor.true_vertex(), 0); - assert_eq!(constructor.false_vertex(), 1); - assert_eq!(constructor.aux_vertex(), 2); - } - - #[test] - fn test_simple_sat_to_coloring() { - // Simple SAT: (x1) - one clause with one literal - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let coloring = reduction.target_problem(); - - // Should have 2*1 + 3 = 5 base vertices - // Plus edges to set x1 to TRUE (attached to AUX and FALSE) - assert!(coloring.num_vertices() >= 5); - } - - #[test] - fn test_reduction_structure() { - // Satisfiable formula: (x1 OR x2) AND (NOT x1 OR x2) - // Just verify the reduction builds the correct structure - let sat = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 2])], - ); - - let reduction = ReduceTo::>::reduce_to(&sat); - let coloring = reduction.target_problem(); - - // Base vertices: 3 (TRUE, FALSE, AUX) + 2*2 (pos and neg for each var) = 7 - // Each 2-literal clause adds 5 vertices for OR gadget = 2 * 5 = 10 - // Total: 7 + 10 = 17 vertices - assert_eq!(coloring.num_vertices(), 17); - assert_eq!(coloring.num_colors(), 3); - assert_eq!(reduction.pos_vertices().len(), 2); - assert_eq!(reduction.neg_vertices().len(), 2); - } - - #[test] - fn test_unsatisfiable_formula() { - // Unsatisfiable: (x1) AND (NOT x1) - let sat = - Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let coloring = reduction.target_problem(); - - // Solve the coloring problem - let solver = BruteForce::new(); - let solutions = solver.find_best(coloring); - - // For an unsatisfiable formula, the coloring should have no valid solutions - // OR no valid coloring exists that extracts to a satisfying SAT assignment - let mut found_satisfying = false; - for sol in &solutions { - if coloring.solution_size(sol).is_valid { - let sat_sol = reduction.extract_solution(sol); - let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); - if sat.is_satisfying(&assignment) { - found_satisfying = true; - break; - } - } - } - - // The coloring should not yield a satisfying SAT assignment - // because the formula is unsatisfiable - // Note: The coloring graph itself may still be colorable, - // but the constraints should make it impossible for both - // x1 and NOT x1 to be TRUE color simultaneously - // Actually, let's check if ANY coloring solution produces a valid SAT solution - // If the formula is unsat, no valid coloring should extract to a satisfying assignment - assert!( - !found_satisfying, - "Unsatisfiable formula should not produce satisfying assignment" - ); - } - - #[test] - fn test_three_literal_clause_structure() { - // (x1 OR x2 OR x3) - let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - 
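// Illustrative sketch of the vertex-count bookkeeping asserted in this hunk's
// structure tests: 3 special vertices (TRUE, FALSE, AUX) plus 2 per variable, plus a
// 5-vertex OR gadget for every literal beyond the first in each clause (a k-literal
// clause chains k - 1 gadgets). The constants restate the comments in the tests.
fn expected_coloring_vertices(num_vars: usize, clause_lens: &[usize]) -> usize {
    let base = 3 + 2 * num_vars;
    let gadget_vertices: usize = clause_lens
        .iter()
        .map(|&k| 5 * k.saturating_sub(1))
        .sum();
    base + gadget_vertices
}

#[test]
fn matches_the_vertex_counts_asserted_in_these_tests() {
    assert_eq!(expected_coloring_vertices(2, &[2, 2]), 17); // (x1 v x2) and (!x1 v x2)
    assert_eq!(expected_coloring_vertices(3, &[3]), 19);    // (x1 v x2 v x3)
    assert_eq!(expected_coloring_vertices(3, &[2, 2, 2]), 24);
    assert_eq!(expected_coloring_vertices(1, &[1]), 5);     // single unit clause
}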
- let reduction = ReduceTo::>::reduce_to(&sat); - let coloring = reduction.target_problem(); - - // Base vertices: 3 + 2*3 = 9 - // 3-literal clause needs 2 OR gadgets (x1 OR x2, then result OR x3) - // Each OR gadget adds 5 vertices, so 2*5 = 10 - // Total: 9 + 10 = 19 vertices - assert_eq!(coloring.num_vertices(), 19); - assert_eq!(coloring.num_colors(), 3); - assert_eq!(reduction.pos_vertices().len(), 3); - assert_eq!(reduction.neg_vertices().len(), 3); - } - - #[test] - fn test_source_and_target_size() { - let sat = Satisfiability::::new( - 3, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], - ); - let reduction = ReduceTo::>::reduce_to(&sat); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vars"), Some(3)); - assert_eq!(source_size.get("num_clauses"), Some(2)); - assert!(target_size.get("num_vertices").is_some()); - assert!(target_size.get("num_colors").unwrap() == 3); - } - - #[test] - fn test_extract_solution_basic() { - // Simple case: one variable, one clause (x1) - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - // Manually construct a valid coloring where x1 has TRUE color - // Vertices: 0=TRUE, 1=FALSE, 2=AUX, 3=x1, 4=NOT_x1 - // Colors: TRUE=0, FALSE=1, AUX=2 - // For x1 to be true, pos_vertex[0]=3 should have color 0 (TRUE) - - // A valid coloring that satisfies x1=TRUE: - // - Vertex 0 (TRUE): color 0 - // - Vertex 1 (FALSE): color 1 - // - Vertex 2 (AUX): color 2 - // - Vertex 3 (x1): color 0 (TRUE) - connected to AUX(2), NOT_x1(4) - // - Vertex 4 (NOT_x1): color 1 (FALSE) - connected to AUX(2), x1(3) - - // However, the actual coloring depends on the full graph structure - // Let's just verify the extraction logic works by checking type signatures - assert_eq!(reduction.pos_vertices().len(), 1); - assert_eq!(reduction.neg_vertices().len(), 1); - } - - #[test] - fn test_complex_formula_structure() { - // (x1 OR x2) AND (NOT x1 OR x3) AND (NOT x2 OR NOT x3) - let sat = Satisfiability::::new( - 3, - vec![ - CNFClause::new(vec![1, 2]), // x1 OR x2 - CNFClause::new(vec![-1, 3]), // NOT x1 OR x3 - CNFClause::new(vec![-2, -3]), // NOT x2 OR NOT x3 - ], - ); - - let reduction = ReduceTo::>::reduce_to(&sat); - let coloring = reduction.target_problem(); - - // Base vertices: 3 + 2*3 = 9 - // 3 clauses each with 2 literals, each needs 1 OR gadget = 3*5 = 15 - // Total: 9 + 15 = 24 vertices - assert_eq!(coloring.num_vertices(), 24); - assert_eq!(coloring.num_colors(), 3); - assert_eq!(reduction.num_clauses(), 3); - } - - #[test] - fn test_single_literal_clauses() { - // (x1) AND (x2) - both must be true - let sat = - Satisfiability::::new(2, vec![CNFClause::new(vec![1]), CNFClause::new(vec![2])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let coloring = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(coloring); - - let mut found_correct = false; - for sol in &solutions { - if coloring.solution_size(sol).is_valid { - let sat_sol = reduction.extract_solution(sol); - if sat_sol == vec![1, 1] { - found_correct = true; - break; - } - } - } - - assert!( - found_correct, - "Should find solution where both x1 and x2 are true" - ); - } - - #[test] - fn test_empty_sat() { - // Empty SAT (trivially satisfiable) - let sat = Satisfiability::::new(0, vec![]); - let reduction = ReduceTo::>::reduce_to(&sat); - - assert_eq!(reduction.num_clauses(), 0); - 
assert!(reduction.pos_vertices().is_empty()); - assert!(reduction.neg_vertices().is_empty()); - - let coloring = reduction.target_problem(); - // Just the 3 special vertices - assert_eq!(coloring.num_vertices(), 3); - } - - #[test] - fn test_num_clauses_accessor() { - let sat = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], - ); - let reduction = ReduceTo::>::reduce_to(&sat); - assert_eq!(reduction.num_clauses(), 2); - } - - #[test] - fn test_or_gadget_construction() { - // Test that OR gadget is correctly added - let mut constructor = SATColoringConstructor::new(2); - let initial_vertices = constructor.num_vertices; - - // Add an OR gadget - let input1 = constructor.pos_vertices[0]; // x1 - let input2 = constructor.pos_vertices[1]; // x2 - let output = constructor.add_or_gadget(input1, input2); - - // Should add 5 vertices - assert_eq!(constructor.num_vertices, initial_vertices + 5); - - // Output should be the last added vertex - assert_eq!(output, constructor.num_vertices - 1); - } - - #[test] - fn test_manual_coloring_extraction() { - // Test solution extraction with a manually constructed coloring solution - // for a simple 1-variable SAT problem: (x1) - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let coloring = reduction.target_problem(); - - // The graph structure for (x1) with set_true: - // - Vertices 0, 1, 2: TRUE, FALSE, AUX (triangle) - // - Vertex 3: x1 (pos) - // - Vertex 4: NOT x1 (neg) - // After set_true(3): x1 is connected to AUX and FALSE - // So x1 must have TRUE color - - // A valid 3-coloring where x1 has TRUE color: - // TRUE=0, FALSE=1, AUX=2 - // x1 must have color 0 (connected to 1 and 2) - // NOT_x1 must have color 1 (connected to 2 and x1=0) - let valid_coloring = vec![0, 1, 2, 0, 1]; - - assert_eq!(coloring.num_vertices(), 5); - let extracted = reduction.extract_solution(&valid_coloring); - // x1 should be true (1) because vertex 3 has color 0 which equals TRUE vertex's color - assert_eq!(extracted, vec![1]); - } - - #[test] - fn test_extraction_with_different_color_assignment() { - // Test that extraction works with different color assignments - // (colors may be permuted but semantics preserved) - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - // Different valid coloring: TRUE=2, FALSE=0, AUX=1 - // x1 must have color 2 (TRUE), NOT_x1 must have color 0 (FALSE) - let coloring_permuted = vec![2, 0, 1, 2, 0]; - let extracted = reduction.extract_solution(&coloring_permuted); - // x1 should still be true because its color equals TRUE vertex's color - assert_eq!(extracted, vec![1]); - - // Another permutation: TRUE=1, FALSE=2, AUX=0 - // x1 has color 1 (TRUE), NOT_x1 has color 2 (FALSE) - let coloring_permuted2 = vec![1, 2, 0, 1, 2]; - let extracted2 = reduction.extract_solution(&coloring_permuted2); - assert_eq!(extracted2, vec![1]); - } -} +#[path = "../tests_unit/rules/sat_coloring.rs"] +mod tests; diff --git a/src/rules/sat_dominatingset.rs b/src/rules/sat_dominatingset.rs index d265e50..0ef6339 100644 --- a/src/rules/sat_dominatingset.rs +++ b/src/rules/sat_dominatingset.rs @@ -199,325 +199,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::models::satisfiability::CNFClause; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_simple_sat_to_ds() { - // Simple SAT: (x1) - one variable, one clause - let sat = Satisfiability::::new(1, 
vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - // Should have 3 vertices (variable gadget) + 1 clause vertex = 4 vertices - assert_eq!(ds_problem.num_vertices(), 4); - - // Edges: 3 for triangle + 1 from positive literal to clause = 4 - // Triangle edges: (0,1), (0,2), (1,2) - // Clause edge: (0, 3) since x1 positive connects to clause vertex - assert_eq!(ds_problem.num_edges(), 4); - } - - #[test] - fn test_two_variable_sat_to_ds() { - // SAT: (x1 OR x2) - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - // 2 variables * 3 = 6 gadget vertices + 1 clause vertex = 7 - assert_eq!(ds_problem.num_vertices(), 7); - - // Edges: - // - 3 edges for first triangle: (0,1), (0,2), (1,2) - // - 3 edges for second triangle: (3,4), (3,5), (4,5) - // - 2 edges from literals to clause: (0,6), (3,6) - assert_eq!(ds_problem.num_edges(), 8); - } - - #[test] - fn test_satisfiable_formula() { - // SAT: (x1 OR x2) AND (NOT x1 OR x2) - // Satisfiable with x2 = true - let sat = Satisfiability::::new( - 2, - vec![ - CNFClause::new(vec![1, 2]), // x1 OR x2 - CNFClause::new(vec![-1, 2]), // NOT x1 OR x2 - ], - ); - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - // Solve the dominating set problem - let solver = BruteForce::new(); - let solutions = solver.find_best(ds_problem); - - // Minimum dominating set should be of size 2 (one per variable) - let min_size = solutions[0].iter().sum::(); - assert_eq!(min_size, 2, "Minimum dominating set should have 2 vertices"); - - // Extract and verify at least one solution satisfies SAT - let mut found_satisfying = false; - for sol in &solutions { - let sat_sol = reduction.extract_solution(sol); - let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); - if sat.is_satisfying(&assignment) { - found_satisfying = true; - break; - } - } - assert!(found_satisfying, "Should find a satisfying assignment"); - } - - #[test] - fn test_unsatisfiable_formula() { - // SAT: (x1) AND (NOT x1) - unsatisfiable - let sat = - Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - // Vertices: 3 (gadget) + 2 (clauses) = 5 - assert_eq!(ds_problem.num_vertices(), 5); - - let solver = BruteForce::new(); - let solutions = solver.find_best(ds_problem); - - // For unsatisfiable formula, the minimum dominating set will need - // more than num_variables vertices OR won't produce a valid assignment - // Actually, in this case we can still dominate with just selecting - // one literal vertex (it dominates its gadget AND one clause), - // but then the other clause isn't dominated. - // So we need at least 2 vertices: one for each clause's requirement. - - // The key insight is that both clauses share the same variable gadget - // but require opposite literals. To dominate both clause vertices, - // we need to select BOTH literal vertices (0 and 1) or the dummy + - // something else. 
- - // Verify no extracted solution satisfies the formula - for sol in &solutions { - let sat_sol = reduction.extract_solution(sol); - let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); - // This unsatisfiable formula should not have a satisfying assignment - assert!( - !sat.is_satisfying(&assignment), - "Unsatisfiable formula should not be satisfied" - ); - } - } - - #[test] - fn test_three_sat_example() { - // 3-SAT: (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR x3) AND (x1 OR NOT x2 OR NOT x3) - let sat = Satisfiability::::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), // x1 OR x2 OR x3 - CNFClause::new(vec![-1, -2, 3]), // NOT x1 OR NOT x2 OR x3 - CNFClause::new(vec![1, -2, -3]), // x1 OR NOT x2 OR NOT x3 - ], - ); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - // 3 variables * 3 = 9 gadget vertices + 3 clauses = 12 - assert_eq!(ds_problem.num_vertices(), 12); - - let solver = BruteForce::new(); - let solutions = solver.find_best(ds_problem); - - // Minimum should be 3 (one per variable) - let min_size = solutions[0].iter().sum::(); - assert_eq!(min_size, 3, "Minimum dominating set should have 3 vertices"); - - // Verify extracted solutions - let mut found_satisfying = false; - for sol in &solutions { - let sat_sol = reduction.extract_solution(sol); - let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); - if sat.is_satisfying(&assignment) { - found_satisfying = true; - break; - } - } - assert!( - found_satisfying, - "Should find a satisfying assignment for 3-SAT" - ); - } - - #[test] - fn test_extract_solution_positive_literal() { - // (x1) - select positive literal - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - // Solution: select vertex 0 (positive literal x1) - // This dominates vertices 1, 2 (gadget) and vertex 3 (clause) - let ds_sol = vec![1, 0, 0, 0]; - let sat_sol = reduction.extract_solution(&ds_sol); - assert_eq!(sat_sol, vec![1]); // x1 = true - } - - #[test] - fn test_extract_solution_negative_literal() { - // (NOT x1) - select negative literal - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - // Solution: select vertex 1 (negative literal NOT x1) - // This dominates vertices 0, 2 (gadget) and vertex 3 (clause) - let ds_sol = vec![0, 1, 0, 0]; - let sat_sol = reduction.extract_solution(&ds_sol); - assert_eq!(sat_sol, vec![0]); // x1 = false - } - - #[test] - fn test_extract_solution_dummy() { - // (x1 OR x2) where only x1 matters - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - // Select: vertex 0 (x1 positive) and vertex 5 (x2 dummy) - // Vertex 0 dominates: itself, 1, 2, and clause 6 - // Vertex 5 dominates: 3, 4, and itself - let ds_sol = vec![1, 0, 0, 0, 0, 1, 0]; - let sat_sol = reduction.extract_solution(&ds_sol); - assert_eq!(sat_sol, vec![1, 0]); // x1 = true, x2 = false (from dummy) - } - - #[test] - fn test_source_and_target_size() { - let sat = Satisfiability::::new( - 3, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], - ); - let reduction = ReduceTo::>::reduce_to(&sat); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vars"), Some(3)); - assert_eq!(source_size.get("num_clauses"), Some(2)); - // 3 vars * 3 = 9 gadget vertices + 2 clause vertices = 11 - 
assert_eq!(target_size.get("num_vertices"), Some(11)); - } - - #[test] - fn test_empty_sat() { - // Empty SAT (trivially satisfiable) - let sat = Satisfiability::::new(0, vec![]); - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - assert_eq!(ds_problem.num_vertices(), 0); - assert_eq!(ds_problem.num_edges(), 0); - assert_eq!(reduction.num_clauses(), 0); - assert_eq!(reduction.num_literals(), 0); - } - - #[test] - fn test_multiple_literals_same_variable() { - // Clause with repeated variable: (x1 OR NOT x1) - tautology - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1, -1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - // 3 gadget vertices + 1 clause vertex = 4 - assert_eq!(ds_problem.num_vertices(), 4); - - // Edges: - // - 3 for triangle - // - 2 from literals to clause (both positive and negative literals connect) - assert_eq!(ds_problem.num_edges(), 5); - } - - #[test] - fn test_sat_ds_solution_correspondence() { - // Comprehensive test: verify that solutions extracted from DS satisfy SAT - let sat = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], - ); - - // Solve SAT directly - let sat_solver = BruteForce::new(); - let direct_sat_solutions = sat_solver.find_best(&sat); - - // Solve via reduction - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - let ds_solutions = sat_solver.find_best(ds_problem); - - // Direct SAT solutions should all be valid - for sol in &direct_sat_solutions { - let assignment: Vec = sol.iter().map(|&v| v == 1).collect(); - assert!(sat.is_satisfying(&assignment)); - } - - // DS solutions with minimum size should correspond to valid SAT solutions - let min_size = ds_solutions[0].iter().sum::(); - if min_size == 2 { - // Only if min dominating set = num_vars - let mut found_satisfying = false; - for sol in &ds_solutions { - if sol.iter().sum::() == 2 { - let sat_sol = reduction.extract_solution(sol); - let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); - if sat.is_satisfying(&assignment) { - found_satisfying = true; - break; - } - } - } - assert!( - found_satisfying, - "At least one DS solution should give a SAT solution" - ); - } - } - - #[test] - fn test_accessors() { - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, -2])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - assert_eq!(reduction.num_literals(), 2); - assert_eq!(reduction.num_clauses(), 1); - } - - #[test] - fn test_extract_solution_too_many_selected() { - // Test that extract_solution handles invalid (non-minimal) dominating sets - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - // Select all 4 vertices (more than num_literals=1) - let ds_sol = vec![1, 1, 1, 1]; - let sat_sol = reduction.extract_solution(&ds_sol); - // Should return default (all false) - assert_eq!(sat_sol, vec![0]); - } - - #[test] - fn test_negated_variable_connection() { - // (NOT x1 OR NOT x2) - both negated - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![-1, -2])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let ds_problem = reduction.target_problem(); - - // 2 * 3 = 6 gadget vertices + 1 clause = 7 - assert_eq!(ds_problem.num_vertices(), 7); - - // Edges: - // - 3 for first triangle: (0,1), (0,2), (1,2) - // - 3 for second triangle: (3,4), (3,5), (4,5) - // - 2 from negated literals to clause: 
(1,6), (4,6) - assert_eq!(ds_problem.num_edges(), 8); - } -} +#[path = "../tests_unit/rules/sat_dominatingset.rs"] +mod tests; diff --git a/src/rules/sat_independentset.rs b/src/rules/sat_independentset.rs index bbd0275..0b06a4c 100644 --- a/src/rules/sat_independentset.rs +++ b/src/rules/sat_independentset.rs @@ -189,317 +189,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::models::satisfiability::CNFClause; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_boolvar_creation() { - let var = BoolVar::new(0, false); - assert_eq!(var.name, 0); - assert!(!var.neg); - - let neg_var = BoolVar::new(1, true); - assert_eq!(neg_var.name, 1); - assert!(neg_var.neg); - } - - #[test] - fn test_boolvar_from_literal() { - // Positive literal: variable 1 (1-indexed) -> variable 0 (0-indexed), not negated - let var = BoolVar::from_literal(1); - assert_eq!(var.name, 0); - assert!(!var.neg); - - // Negative literal: variable 2 (1-indexed) -> variable 1 (0-indexed), negated - let neg_var = BoolVar::from_literal(-2); - assert_eq!(neg_var.name, 1); - assert!(neg_var.neg); - } - - #[test] - fn test_boolvar_complement() { - let x = BoolVar::new(0, false); - let not_x = BoolVar::new(0, true); - let y = BoolVar::new(1, false); - - assert!(x.is_complement(¬_x)); - assert!(not_x.is_complement(&x)); - assert!(!x.is_complement(&y)); - assert!(!x.is_complement(&x)); - } - - #[test] - fn test_simple_sat_to_is() { - // Simple SAT: (x1) - one clause with one literal - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - // Should have 1 vertex (one literal) - assert_eq!(is_problem.num_vertices(), 1); - // No edges (single vertex can't form a clique) - assert_eq!(is_problem.num_edges(), 0); - } - - #[test] - fn test_two_clause_sat_to_is() { - // SAT: (x1) AND (NOT x1) - // This is unsatisfiable - let sat = - Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - // Should have 2 vertices - assert_eq!(is_problem.num_vertices(), 2); - // Should have 1 edge (between x1 and NOT x1) - assert_eq!(is_problem.num_edges(), 1); - - // Maximum IS should have size 1 (can't select both) - let solver = BruteForce::new(); - let solutions = solver.find_best(is_problem); - for sol in &solutions { - assert_eq!(sol.iter().sum::(), 1); - } - } - - #[test] - fn test_satisfiable_formula() { - // SAT: (x1 OR x2) AND (NOT x1 OR x2) AND (x1 OR NOT x2) - // Satisfiable with x1=true, x2=true or x1=false, x2=true - let sat = Satisfiability::::new( - 2, - vec![ - CNFClause::new(vec![1, 2]), // x1 OR x2 - CNFClause::new(vec![-1, 2]), // NOT x1 OR x2 - CNFClause::new(vec![1, -2]), // x1 OR NOT x2 - ], - ); - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - // Should have 6 vertices (2 literals per clause, 3 clauses) - assert_eq!(is_problem.num_vertices(), 6); - - // Count edges: - // - 3 edges within clauses (one per clause, since each clause has 2 literals) - // - Edges between complementary literals across clauses: - // - x1 (clause 0, vertex 0) and NOT x1 (clause 1, vertex 2) - // - x2 (clause 0, vertex 1) and NOT x2 (clause 2, vertex 5) - // - x2 (clause 1, vertex 3) and NOT x2 (clause 2, vertex 5) - // - x1 (clause 2, vertex 4) and NOT x1 (clause 1, vertex 2) - // Total: 3 (clique) + 4 (complement) = 7 edges - - // Solve the IS 
problem - let solver = BruteForce::new(); - let is_solutions = solver.find_best(is_problem); - - // Max IS should be 3 (one literal per clause) - for sol in &is_solutions { - assert_eq!(sol.iter().sum::(), 3); - } - - // Extract SAT solutions and verify they satisfy the original formula - for sol in &is_solutions { - let sat_sol = reduction.extract_solution(sol); - let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); - assert!( - sat.is_satisfying(&assignment), - "Extracted solution {:?} should satisfy the SAT formula", - assignment - ); - } - } - - #[test] - fn test_unsatisfiable_formula() { - // SAT: (x1) AND (NOT x1) - unsatisfiable - let sat = - Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - let solver = BruteForce::new(); - let is_solutions = solver.find_best(is_problem); - - // Max IS can only be 1 (not 2 = num_clauses) - // This indicates the formula is unsatisfiable - for sol in &is_solutions { - assert!( - sol.iter().sum::() < reduction.num_clauses(), - "For unsatisfiable formula, IS size should be less than num_clauses" - ); - } - } - - #[test] - fn test_three_sat_example() { - // 3-SAT: (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR x3) AND (x1 OR NOT x2 OR NOT x3) - let sat = Satisfiability::::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), // x1 OR x2 OR x3 - CNFClause::new(vec![-1, -2, 3]), // NOT x1 OR NOT x2 OR x3 - CNFClause::new(vec![1, -2, -3]), // x1 OR NOT x2 OR NOT x3 - ], - ); - - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - // Should have 9 vertices (3 literals per clause, 3 clauses) - assert_eq!(is_problem.num_vertices(), 9); - - let solver = BruteForce::new(); - let is_solutions = solver.find_best(is_problem); - - // Check that max IS has size 3 (satisfiable) - let max_size = is_solutions[0].iter().sum::(); - assert_eq!(max_size, 3, "3-SAT should be satisfiable with IS size = 3"); - - // Verify extracted solutions - for sol in &is_solutions { - let sat_sol = reduction.extract_solution(sol); - let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); - assert!(sat.is_satisfying(&assignment)); - } - } - - #[test] - fn test_extract_solution_basic() { - // Simple case: (x1 OR x2) - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - // Select vertex 0 (literal x1) - let is_sol = vec![1, 0]; - let sat_sol = reduction.extract_solution(&is_sol); - assert_eq!(sat_sol, vec![1, 0]); // x1=true, x2=false - - // Select vertex 1 (literal x2) - let is_sol = vec![0, 1]; - let sat_sol = reduction.extract_solution(&is_sol); - assert_eq!(sat_sol, vec![0, 1]); // x1=false, x2=true - } - - #[test] - fn test_extract_solution_with_negation() { - // (NOT x1) - selecting NOT x1 means x1 should be false - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![-1])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - let is_sol = vec![1]; - let sat_sol = reduction.extract_solution(&is_sol); - assert_eq!(sat_sol, vec![0]); // x1=false (so NOT x1 is true) - } - - #[test] - fn test_clique_edges_in_clause() { - // A clause with 3 literals should form a clique (3 edges) - let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - // 3 vertices, 3 edges (complete graph K3) - 
assert_eq!(is_problem.num_vertices(), 3); - assert_eq!(is_problem.num_edges(), 3); - } - - #[test] - fn test_complement_edges_across_clauses() { - // (x1) AND (NOT x1) AND (x2) - three clauses - // Vertices: 0 (x1), 1 (NOT x1), 2 (x2) - // Edges: (0,1) for complement x1 and NOT x1 - let sat = Satisfiability::::new( - 2, - vec![ - CNFClause::new(vec![1]), - CNFClause::new(vec![-1]), - CNFClause::new(vec![2]), - ], - ); - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - assert_eq!(is_problem.num_vertices(), 3); - assert_eq!(is_problem.num_edges(), 1); // Only the complement edge - } - - #[test] - fn test_source_and_target_size() { - let sat = Satisfiability::::new( - 3, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], - ); - let reduction = ReduceTo::>::reduce_to(&sat); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vars"), Some(3)); - assert_eq!(source_size.get("num_clauses"), Some(2)); - assert_eq!(target_size.get("num_vertices"), Some(4)); // 2 + 2 literals - } - - #[test] - fn test_empty_sat() { - // Empty SAT (trivially satisfiable) - let sat = Satisfiability::::new(0, vec![]); - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - - assert_eq!(is_problem.num_vertices(), 0); - assert_eq!(is_problem.num_edges(), 0); - assert_eq!(reduction.num_clauses(), 0); - } - - #[test] - fn test_sat_is_solution_correspondence() { - // Comprehensive test: solve both SAT and IS, compare solutions - let sat = Satisfiability::::new( - 2, - vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], - ); - - // Solve SAT directly - let sat_solver = BruteForce::new(); - let direct_sat_solutions = sat_solver.find_best(&sat); - - // Solve via reduction - let reduction = ReduceTo::>::reduce_to(&sat); - let is_problem = reduction.target_problem(); - let is_solutions = sat_solver.find_best(is_problem); - - // Extract SAT solutions from IS - let extracted_sat_solutions: Vec<_> = is_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // All extracted solutions should be valid SAT solutions - for sol in &extracted_sat_solutions { - let assignment: Vec = sol.iter().map(|&v| v == 1).collect(); - assert!(sat.is_satisfying(&assignment)); - } - - // Direct SAT solutions and extracted solutions should be compatible - // (same satisfying assignments, though representation might differ) - for sol in &direct_sat_solutions { - let assignment: Vec = sol.iter().map(|&v| v == 1).collect(); - assert!(sat.is_satisfying(&assignment)); - } - } - - #[test] - fn test_literals_accessor() { - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, -2])]); - let reduction = ReduceTo::>::reduce_to(&sat); - - let literals = reduction.literals(); - assert_eq!(literals.len(), 2); - assert_eq!(literals[0], BoolVar::new(0, false)); // x1 - assert_eq!(literals[1], BoolVar::new(1, true)); // NOT x2 - } -} +#[path = "../tests_unit/rules/sat_independentset.rs"] +mod tests; diff --git a/src/rules/sat_ksat.rs b/src/rules/sat_ksat.rs index 95b13e7..967be35 100644 --- a/src/rules/sat_ksat.rs +++ b/src/rules/sat_ksat.rs @@ -224,338 +224,8 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_sat_to_3sat_exact_size() { - // Clause already has 3 literals - should remain unchanged - let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - - let 
reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - assert_eq!(ksat.num_vars(), 3); - assert_eq!(ksat.num_clauses(), 1); - assert_eq!(ksat.clauses()[0].literals, vec![1, 2, 3]); - } - - #[test] - fn test_sat_to_3sat_padding() { - // Clause has 2 literals - should be padded to 3 - // (a v b) becomes (a v b v x) AND (a v b v -x) - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // Should have 2 clauses (positive and negative ancilla) - assert_eq!(ksat.num_clauses(), 2); - // All clauses should have exactly 3 literals - for clause in ksat.clauses() { - assert_eq!(clause.len(), 3); - } - } - - #[test] - fn test_sat_to_3sat_splitting() { - // Clause has 4 literals - should be split - // (a v b v c v d) becomes (a v b v x) AND (-x v c v d) - let sat = Satisfiability::::new(4, vec![CNFClause::new(vec![1, 2, 3, 4])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // Should have 2 clauses after splitting - assert_eq!(ksat.num_clauses(), 2); - // All clauses should have exactly 3 literals - for clause in ksat.clauses() { - assert_eq!(clause.len(), 3); - } - - // Verify structure: first clause has positive ancilla, second has negative - let c1 = &ksat.clauses()[0]; - let c2 = &ksat.clauses()[1]; - // First clause: [1, 2, 5] (ancilla is var 5) - assert_eq!(c1.literals[0], 1); - assert_eq!(c1.literals[1], 2); - let ancilla = c1.literals[2]; - assert!(ancilla > 0); - // Second clause: [-5, 3, 4] - assert_eq!(c2.literals[0], -ancilla); - assert_eq!(c2.literals[1], 3); - assert_eq!(c2.literals[2], 4); - } - - #[test] - fn test_sat_to_3sat_large_clause() { - // Clause has 5 literals - requires multiple splits - // (a v b v c v d v e) -> (a v b v x1) AND (-x1 v c v x2) AND (-x2 v d v e) - let sat = Satisfiability::::new(5, vec![CNFClause::new(vec![1, 2, 3, 4, 5])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // Should have 3 clauses after splitting - assert_eq!(ksat.num_clauses(), 3); - // All clauses should have exactly 3 literals - for clause in ksat.clauses() { - assert_eq!(clause.len(), 3); - } - } - - #[test] - fn test_sat_to_3sat_single_literal() { - // Single literal clause - needs padding twice - // (a) becomes (a v x v y) where we pad twice - let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // With recursive padding: (a) -> (a v x) AND (a v -x) - // Then each of those gets padded again - // (a v x) -> (a v x v y) AND (a v x v -y) - // (a v -x) -> (a v -x v z) AND (a v -x v -z) - // Total: 4 clauses - assert_eq!(ksat.num_clauses(), 4); - for clause in ksat.clauses() { - assert_eq!(clause.len(), 3); - } - } - - #[test] - fn test_sat_to_3sat_preserves_satisfiability() { - // Create a SAT formula and verify the 3-SAT version is equisatisfiable - let sat = Satisfiability::::new( - 3, - vec![ - CNFClause::new(vec![1, 2]), // Needs padding - CNFClause::new(vec![-1, 2, 3]), // Already 3 literals - CNFClause::new(vec![1, -2, 3, -3]), // Needs splitting (tautology for testing) - ], - ); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // Solve both problems - let solver = BruteForce::new(); - - let sat_solutions = solver.find_best(&sat); - let ksat_solutions = solver.find_best(ksat); - - // If SAT is 
satisfiable, K-SAT should be too - let sat_satisfiable = sat_solutions.iter().any(|s| sat.solution_size(s).is_valid); - let ksat_satisfiable = ksat_solutions - .iter() - .any(|s| ksat.solution_size(s).is_valid); - - assert_eq!(sat_satisfiable, ksat_satisfiable); - - // Extract solutions should map back correctly - if ksat_satisfiable { - for ksat_sol in &ksat_solutions { - if ksat.solution_size(ksat_sol).is_valid { - let sat_sol = reduction.extract_solution(ksat_sol); - assert_eq!(sat_sol.len(), 3); // Original variable count - } - } - } - } - - #[test] - fn test_sat_to_3sat_solution_extraction() { - let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // Solve K-SAT - let solver = BruteForce::new(); - let ksat_solutions = solver.find_best(ksat); - - // Extract and verify solutions - for ksat_sol in &ksat_solutions { - if ksat.solution_size(ksat_sol).is_valid { - let sat_sol = reduction.extract_solution(ksat_sol); - // Should only have original 2 variables - assert_eq!(sat_sol.len(), 2); - // Should satisfy original problem - assert!(sat.solution_size(&sat_sol).is_valid); - } - } - } - - #[test] - fn test_3sat_to_sat() { - let ksat = KSatisfiability::<3, i32>::new( - 3, - vec![ - CNFClause::new(vec![1, 2, 3]), - CNFClause::new(vec![-1, -2, 3]), - ], - ); - - let reduction = ReduceTo::>::reduce_to(&ksat); - let sat = reduction.target_problem(); - - assert_eq!(sat.num_vars(), 3); - assert_eq!(sat.num_clauses(), 2); - - // Verify clauses are preserved - assert_eq!(sat.clauses()[0].literals, vec![1, 2, 3]); - assert_eq!(sat.clauses()[1].literals, vec![-1, -2, 3]); - } - - #[test] - fn test_3sat_to_sat_solution_extraction() { - let ksat = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); - - let reduction = ReduceTo::>::reduce_to(&ksat); - - let sol = vec![1, 0, 1]; - let extracted = reduction.extract_solution(&sol); - assert_eq!(extracted, vec![1, 0, 1]); - } - - #[test] - fn test_roundtrip_sat_3sat_sat() { - // SAT -> 3-SAT -> SAT roundtrip - let original_sat = Satisfiability::::new( - 3, - vec![CNFClause::new(vec![1, -2]), CNFClause::new(vec![2, 3])], - ); - - // SAT -> 3-SAT - let to_ksat = ReduceTo::>::reduce_to(&original_sat); - let ksat = to_ksat.target_problem(); - - // 3-SAT -> SAT - let to_sat = ReduceTo::>::reduce_to(ksat); - let final_sat = to_sat.target_problem(); - - // Solve all three - let solver = BruteForce::new(); - - let orig_solutions = solver.find_best(&original_sat); - let ksat_solutions = solver.find_best(ksat); - let final_solutions = solver.find_best(final_sat); - - // All should be satisfiable - assert!(orig_solutions - .iter() - .any(|s| original_sat.solution_size(s).is_valid)); - assert!(ksat_solutions - .iter() - .any(|s| ksat.solution_size(s).is_valid)); - assert!(final_solutions - .iter() - .any(|s| final_sat.solution_size(s).is_valid)); - } - - #[test] - fn test_sat_to_4sat() { - let sat = Satisfiability::::new( - 4, - vec![ - CNFClause::new(vec![1, 2]), // Needs padding - CNFClause::new(vec![1, 2, 3, 4]), // Exact - CNFClause::new(vec![1, 2, 3, 4, -1]), // Needs splitting - ], - ); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // All clauses should have exactly 4 literals - for clause in ksat.clauses() { - assert_eq!(clause.len(), 4); - } - } - - #[test] - fn test_problem_sizes() { - let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3, 4])]); - - let reduction = 
ReduceTo::>::reduce_to(&sat); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vars"), Some(3)); - assert_eq!(target_size.get("k"), Some(3)); - } - - #[test] - fn test_empty_sat_to_3sat() { - let sat = Satisfiability::::new(3, vec![]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - assert_eq!(ksat.num_clauses(), 0); - assert_eq!(ksat.num_vars(), 3); - } - - #[test] - fn test_mixed_clause_sizes() { - let sat = Satisfiability::::new( - 5, - vec![ - CNFClause::new(vec![1]), // 1 literal - CNFClause::new(vec![2, 3]), // 2 literals - CNFClause::new(vec![1, 2, 3]), // 3 literals - CNFClause::new(vec![1, 2, 3, 4]), // 4 literals - CNFClause::new(vec![1, 2, 3, 4, 5]), // 5 literals - ], - ); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - // All clauses should have exactly 3 literals - for clause in ksat.clauses() { - assert_eq!(clause.len(), 3); - } - - // Verify satisfiability is preserved - let solver = BruteForce::new(); - let sat_solutions = solver.find_best(&sat); - let ksat_solutions = solver.find_best(ksat); - - let sat_satisfiable = sat_solutions.iter().any(|s| sat.solution_size(s).is_valid); - let ksat_satisfiable = ksat_solutions - .iter() - .any(|s| ksat.solution_size(s).is_valid); - assert_eq!(sat_satisfiable, ksat_satisfiable); - } - - #[test] - fn test_unsatisfiable_formula() { - // (x) AND (-x) is unsatisfiable - let sat = - Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); - - let reduction = ReduceTo::>::reduce_to(&sat); - let ksat = reduction.target_problem(); - - let solver = BruteForce::new(); - - // Both should be unsatisfiable - let sat_solutions = solver.find_best(&sat); - let ksat_solutions = solver.find_best(ksat); - - let sat_satisfiable = sat_solutions.iter().any(|s| sat.solution_size(s).is_valid); - let ksat_satisfiable = ksat_solutions - .iter() - .any(|s| ksat.solution_size(s).is_valid); - - assert!(!sat_satisfiable); - assert!(!ksat_satisfiable); - } -} +#[path = "../tests_unit/rules/sat_ksat.rs"] +mod tests; // Register SAT -> KSAT reduction manually (generated by macro, can't use #[reduction]) inventory::submit! 
{ diff --git a/src/rules/setcovering_ilp.rs b/src/rules/setcovering_ilp.rs index 63d8fa4..08702fb 100644 --- a/src/rules/setcovering_ilp.rs +++ b/src/rules/setcovering_ilp.rs @@ -98,239 +98,5 @@ impl ReduceTo for SetCovering { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Universe: {0, 1, 2}, Sets: S0={0,1}, S1={1,2} - let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - assert_eq!(ilp.num_vars, 2, "Should have one variable per set"); - assert_eq!( - ilp.constraints.len(), - 3, - "Should have one constraint per element" - ); - assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - - // Each constraint should be sum >= 1 - for constraint in &ilp.constraints { - assert!((constraint.rhs - 1.0).abs() < 1e-9); - } - } - - #[test] - fn test_reduction_weighted() { - let problem = SetCovering::with_weights(3, vec![vec![0, 1], vec![1, 2]], vec![5, 10]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check that weights are correctly transferred to objective - let mut coeffs: Vec = vec![0.0; 2]; - for &(var, coef) in &ilp.objective { - coeffs[var] = coef; - } - assert!((coeffs[0] - 5.0).abs() < 1e-9); - assert!((coeffs[1] - 10.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_equals_brute_force_simple() { - // Universe: {0, 1, 2}, Sets: S0={0,1}, S1={1,2}, S2={0,2} - // Minimum cover: any 2 sets work - let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2], vec![0, 2]]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force on original problem - let bf_solutions = bf.find_best(&problem); - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Both should find optimal size = 2 - let bf_size: usize = bf_solutions[0].iter().sum(); - let ilp_size: usize = extracted.iter().sum(); - assert_eq!(bf_size, 2); - assert_eq!(ilp_size, 2); - - // Verify the ILP solution is valid for the original problem - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - } - - #[test] - fn test_ilp_solution_equals_brute_force_weighted() { - // Weighted problem: prefer lighter sets - // Universe: {0,1,2}, Sets: S0={0,1,2}, S1={0,1}, S2={2} - // Weights: [10, 3, 3] - // Optimal: select S1 and S2 (weight 6) instead of S0 (weight 10) - let problem = - SetCovering::with_weights(3, vec![vec![0, 1, 2], vec![0, 1], vec![2]], vec![10, 3, 3]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_obj = problem.solution_size(&bf_solutions[0]).size; - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_obj = problem.solution_size(&extracted).size; - - 
assert_eq!(bf_obj, 6); - assert_eq!(ilp_obj, 6); - - // Verify the solution selects S1 and S2 - assert_eq!(extracted, vec![0, 1, 1]); - } - - #[test] - fn test_solution_extraction() { - let problem = SetCovering::::new(4, vec![vec![0, 1], vec![2, 3]]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - - // Test that extraction works correctly (1:1 mapping) - let ilp_solution = vec![1, 1]; - let extracted = reduction.extract_solution(&ilp_solution); - assert_eq!(extracted, vec![1, 1]); - - // Verify this is a valid set cover - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_source_and_target_size() { - let problem = - SetCovering::::new(5, vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![3, 4]]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("universe_size"), Some(5)); - assert_eq!(source_size.get("num_sets"), Some(4)); - - assert_eq!(target_size.get("num_vars"), Some(4)); - assert_eq!(target_size.get("num_constraints"), Some(5)); - } - - #[test] - fn test_single_set_covers_all() { - // Single set covers entire universe - let problem = SetCovering::::new(3, vec![vec![0, 1, 2], vec![0], vec![1], vec![2]]); - - let ilp_solver = ILPSolver::new(); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // First set alone covers everything with weight 1 - assert_eq!(extracted, vec![1, 0, 0, 0]); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 1); - } - - #[test] - fn test_overlapping_sets() { - // All sets overlap on element 1 - let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); - - let ilp_solver = ILPSolver::new(); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Need both sets to cover all elements - assert_eq!(extracted, vec![1, 1]); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - } - - #[test] - fn test_empty_universe() { - // Empty universe is trivially covered - let problem = SetCovering::::new(0, vec![]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.num_vars, 0); - assert_eq!(ilp.constraints.len(), 0); - } - - #[test] - fn test_solve_reduced() { - // Test the ILPSolver::solve_reduced method - let problem = - SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![0, 3]]); - - let ilp_solver = ILPSolver::new(); - let solution = ilp_solver - .solve_reduced(&problem) - .expect("solve_reduced should work"); - - let sol_result = problem.solution_size(&solution); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - } - - #[test] - fn test_constraint_structure() { - // Universe: {0, 1, 2} - // Sets: S0={0}, S1={0,1}, S2={1,2} - // Element 0 is in S0, S1 -> constraint: x0 + x1 >= 1 - // Element 1 is in S1, S2 -> constraint: x1 + x2 >= 1 - // Element 2 is in S2 -> constraint: x2 >= 1 - let problem = 
SetCovering::::new(3, vec![vec![0], vec![0, 1], vec![1, 2]]); - let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.constraints.len(), 3); - - // Check constraint for element 0: should involve sets 0 and 1 - let c0 = &ilp.constraints[0]; - let vars0: Vec = c0.terms.iter().map(|&(v, _)| v).collect(); - assert!(vars0.contains(&0)); - assert!(vars0.contains(&1)); - assert!(!vars0.contains(&2)); - - // Check constraint for element 1: should involve sets 1 and 2 - let c1 = &ilp.constraints[1]; - let vars1: Vec = c1.terms.iter().map(|&(v, _)| v).collect(); - assert!(!vars1.contains(&0)); - assert!(vars1.contains(&1)); - assert!(vars1.contains(&2)); - - // Check constraint for element 2: should involve only set 2 - let c2 = &ilp.constraints[2]; - let vars2: Vec = c2.terms.iter().map(|&(v, _)| v).collect(); - assert!(!vars2.contains(&0)); - assert!(!vars2.contains(&1)); - assert!(vars2.contains(&2)); - } -} +#[path = "../tests_unit/rules/setcovering_ilp.rs"] +mod tests; diff --git a/src/rules/setpacking_ilp.rs b/src/rules/setpacking_ilp.rs index 606ea20..000a2eb 100644 --- a/src/rules/setpacking_ilp.rs +++ b/src/rules/setpacking_ilp.rs @@ -89,227 +89,5 @@ impl ReduceTo for SetPacking { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Three sets with two overlapping pairs - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - assert_eq!(ilp.num_vars, 3, "Should have one variable per set"); - assert_eq!( - ilp.constraints.len(), - 2, - "Should have one constraint per overlapping pair" - ); - assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - - // Each constraint should be x_i + x_j <= 1 - for constraint in &ilp.constraints { - assert_eq!(constraint.terms.len(), 2); - assert!((constraint.rhs - 1.0).abs() < 1e-9); - } - } - - #[test] - fn test_reduction_weighted() { - let problem = SetPacking::with_weights(vec![vec![0, 1], vec![2, 3]], vec![5, 10]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check that weights are correctly transferred to objective - let mut coeffs: Vec = vec![0.0; 2]; - for &(var, coef) in &ilp.objective { - coeffs[var] = coef; - } - assert!((coeffs[0] - 5.0).abs() < 1e-9); - assert!((coeffs[1] - 10.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_equals_brute_force_chain() { - // Chain: {0,1}, {1,2}, {2,3} - can select at most 2 non-adjacent sets - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force on original problem - let bf_solutions = bf.find_best(&problem); - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Both should find optimal size = 2 - let bf_size: usize = bf_solutions[0].iter().sum(); - let ilp_size: usize = extracted.iter().sum(); - assert_eq!(bf_size, 2); - 
assert_eq!(ilp_size, 2); - - // Verify the ILP solution is valid for the original problem - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - } - - #[test] - fn test_ilp_solution_equals_brute_force_all_overlap() { - // All sets share element 0: can only select one - let problem = SetPacking::::new(vec![vec![0, 1], vec![0, 2], vec![0, 3]]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_size: usize = bf_solutions[0].iter().sum(); - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size: usize = extracted.iter().sum(); - - assert_eq!(bf_size, 1); - assert_eq!(ilp_size, 1); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_ilp_solution_equals_brute_force_weighted() { - // Weighted problem: single heavy set vs multiple light sets - // Set 0 covers all elements but has weight 5 - // Sets 1 and 2 are disjoint and together have weight 6 - let problem = SetPacking::with_weights( - vec![vec![0, 1, 2, 3], vec![0, 1], vec![2, 3]], - vec![5, 3, 3], - ); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_obj = problem.solution_size(&bf_solutions[0]).size; - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_obj = problem.solution_size(&extracted).size; - - assert_eq!(bf_obj, 6); - assert_eq!(ilp_obj, 6); - - // Should select sets 1 and 2 - assert_eq!(extracted, vec![0, 1, 1]); - } - - #[test] - fn test_solution_extraction() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![2, 3], vec![4, 5], vec![6, 7]]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - - // Test that extraction works correctly (1:1 mapping) - let ilp_solution = vec![1, 0, 1, 0]; - let extracted = reduction.extract_solution(&ilp_solution); - assert_eq!(extracted, vec![1, 0, 1, 0]); - - // Verify this is a valid packing (sets 0 and 2 are disjoint) - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_source_and_target_size() { - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![3, 4]]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_sets"), Some(4)); - - assert_eq!(target_size.get("num_vars"), Some(4)); - // 3 overlapping pairs: (0,1), (1,2), (2,3) - assert_eq!(target_size.get("num_constraints"), Some(3)); - } - - #[test] - fn test_disjoint_sets() { - // All sets are disjoint: no overlapping pairs - let problem = SetPacking::::new(vec![vec![0], vec![1], vec![2], vec![3]]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.constraints.len(), 0); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = 
reduction.extract_solution(&ilp_solution); - - // All sets should be selected - assert_eq!(extracted, vec![1, 1, 1, 1]); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 4); - } - - #[test] - fn test_empty_sets() { - let problem = SetPacking::::new(vec![]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.num_vars, 0); - assert_eq!(ilp.constraints.len(), 0); - } - - #[test] - fn test_solve_reduced() { - // Test the ILPSolver::solve_reduced method - let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); - - let ilp_solver = ILPSolver::new(); - let solution = ilp_solver - .solve_reduced(&problem) - .expect("solve_reduced should work"); - - let sol_result = problem.solution_size(&solution); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - } - - #[test] - fn test_all_sets_overlap_pairwise() { - // All pairs overlap: can only select one set - // Sets: {0,1}, {0,2}, {1,2} - each pair shares one element - let problem = SetPacking::::new(vec![vec![0, 1], vec![0, 2], vec![1, 2]]); - let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // 3 overlapping pairs - assert_eq!(ilp.constraints.len(), 3); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 1); - } -} +#[path = "../tests_unit/rules/setpacking_ilp.rs"] +mod tests; diff --git a/src/rules/spinglass_maxcut.rs b/src/rules/spinglass_maxcut.rs index 88e47f1..b0460eb 100644 --- a/src/rules/spinglass_maxcut.rs +++ b/src/rules/spinglass_maxcut.rs @@ -204,102 +204,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_maxcut_to_spinglass() { - // Simple triangle MaxCut - let mc = MaxCut::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction = ReduceTo::>::reduce_to(&mc); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(sg); - - assert!(!solutions.is_empty()); - } - - #[test] - fn test_spinglass_to_maxcut_no_onsite() { - // SpinGlass without onsite terms - let sg = SpinGlass::::new(3, vec![((0, 1), 1), ((1, 2), 1)], vec![0, 0, 0]); - let reduction = ReduceTo::>::reduce_to(&sg); - let mc = reduction.target_problem(); - - assert_eq!(mc.num_vertices(), 3); // No ancilla needed - assert!(reduction.ancilla.is_none()); - } - - #[test] - fn test_spinglass_to_maxcut_with_onsite() { - // SpinGlass with onsite terms - let sg = SpinGlass::::new(2, vec![((0, 1), 1)], vec![1, 0]); - let reduction = ReduceTo::>::reduce_to(&sg); - let mc = reduction.target_problem(); - - assert_eq!(mc.num_vertices(), 3); // Ancilla added - assert_eq!(reduction.ancilla, Some(2)); - } - - #[test] - fn test_solution_extraction_no_ancilla() { - let sg = SpinGlass::::new(2, vec![((0, 1), 1)], vec![0, 0]); - let reduction = ReduceTo::>::reduce_to(&sg); - - let mc_sol = vec![0, 1]; - let extracted = reduction.extract_solution(&mc_sol); - assert_eq!(extracted, vec![0, 1]); - } - - #[test] - fn test_solution_extraction_with_ancilla() { - let sg = SpinGlass::::new(2, vec![((0, 1), 1)], vec![1, 0]); - let reduction = ReduceTo::>::reduce_to(&sg); - - // If ancilla is 0, don't flip - 
let mc_sol = vec![0, 1, 0]; - let extracted = reduction.extract_solution(&mc_sol); - assert_eq!(extracted, vec![0, 1]); - - // If ancilla is 1, flip all - let mc_sol = vec![0, 1, 1]; - let extracted = reduction.extract_solution(&mc_sol); - assert_eq!(extracted, vec![1, 0]); // flipped and ancilla removed - } - - #[test] - fn test_weighted_maxcut() { - let mc = MaxCut::::new(3, vec![(0, 1, 10), (1, 2, 20)]); - let reduction = ReduceTo::>::reduce_to(&mc); - let sg = reduction.target_problem(); - - // Verify interactions have correct weights - let interactions = sg.interactions(); - assert_eq!(interactions.len(), 2); - } - - #[test] - fn test_reduction_sizes() { - // Test source_size and target_size methods - let mc = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); - let reduction = ReduceTo::>::reduce_to(&mc); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert!(!source_size.components.is_empty()); - assert!(!target_size.components.is_empty()); - - // Test SG to MaxCut sizes - let sg = SpinGlass::::new(3, vec![((0, 1), 1)], vec![0, 0, 0]); - let reduction2 = ReduceTo::>::reduce_to(&sg); - - let source_size2 = reduction2.source_size(); - let target_size2 = reduction2.target_size(); - - assert!(!source_size2.components.is_empty()); - assert!(!target_size2.components.is_empty()); - } -} +#[path = "../tests_unit/rules/spinglass_maxcut.rs"] +mod tests; diff --git a/src/rules/spinglass_qubo.rs b/src/rules/spinglass_qubo.rs index db64103..543397c 100644 --- a/src/rules/spinglass_qubo.rs +++ b/src/rules/spinglass_qubo.rs @@ -180,140 +180,5 @@ impl ReduceTo> for SpinGlass { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_qubo_to_spinglass() { - // Simple 2-variable QUBO: minimize x0 + x1 - 2*x0*x1 - // Optimal at x = [0, 0] (value 0) or x = [1, 1] (value 0) - let qubo = QUBO::from_matrix(vec![vec![1.0, -2.0], vec![0.0, 1.0]]); - let reduction = ReduceTo::>::reduce_to(&qubo); - let sg = reduction.target_problem(); - - let solver = BruteForce::new(); - let sg_solutions = solver.find_best(sg); - let qubo_solutions: Vec<_> = sg_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Verify solutions are valid - assert!(!qubo_solutions.is_empty()); - - // Original QUBO at [0,0]: 0, at [1,1]: 1 + 1 - 2 = 0, at [0,1]: 1, at [1,0]: 1 - // So [0,0] and [1,1] are optimal with value 0 - for sol in &qubo_solutions { - let val = qubo.solution_size(sol).size; - assert!( - val <= 0.0 + 1e-6, - "Expected optimal value near 0, got {}", - val - ); - } - } - - #[test] - fn test_spinglass_to_qubo() { - // Simple SpinGlass: J_01 = -1 (ferromagnetic: prefers aligned spins) - // Energy: J_01 * s0 * s1 = -s0 * s1 - // Aligned spins give -1, anti-aligned give +1 - // Minimum is -1 at [0,0] or [1,1] (both give s=-1,-1 or s=+1,+1) - let sg = SpinGlass::::new(2, vec![((0, 1), -1.0)], vec![0.0, 0.0]); - let reduction = ReduceTo::>::reduce_to(&sg); - let qubo = reduction.target_problem(); - - let solver = BruteForce::new(); - let qubo_solutions = solver.find_best(qubo); - - // Ferromagnetic: aligned spins are optimal - for sol in &qubo_solutions { - assert_eq!(sol[0], sol[1], "Ferromagnetic should have aligned spins"); - } - } - - #[test] - fn test_roundtrip_qubo_sg_qubo() { - let original = QUBO::from_matrix(vec![vec![-1.0, 2.0], vec![0.0, -1.0]]); - let solver = BruteForce::new(); - let original_solutions = solver.find_best(&original); - let _original_val = 
original.solution_size(&original_solutions[0]).size; - - // QUBO -> SG -> QUBO - let reduction1 = ReduceTo::>::reduce_to(&original); - let sg = reduction1.target_problem().clone(); - let reduction2 = ReduceTo::>::reduce_to(&sg); - let roundtrip = reduction2.target_problem(); - - let roundtrip_solutions = solver.find_best(roundtrip); - let _roundtrip_val = roundtrip.solution_size(&roundtrip_solutions[0]).size; - - // The solutions should have the same configuration - // (optimal configs should match) - let orig_configs: std::collections::HashSet<_> = original_solutions.iter().collect(); - let rt_configs: std::collections::HashSet<_> = roundtrip_solutions.iter().collect(); - assert!( - orig_configs.intersection(&rt_configs).count() > 0, - "At least one optimal solution should match" - ); - } - - #[test] - fn test_antiferromagnetic() { - // Antiferromagnetic: J > 0, prefers anti-aligned spins - let sg = SpinGlass::::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]); - let reduction = ReduceTo::>::reduce_to(&sg); - let qubo = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(qubo); - - // Anti-ferromagnetic: opposite spins are optimal - for sol in &solutions { - assert_ne!( - sol[0], sol[1], - "Antiferromagnetic should have opposite spins" - ); - } - } - - #[test] - fn test_with_onsite_fields() { - // SpinGlass with only on-site field h_0 = 1 - // Energy = h_0 * s_0 = s_0 - // Minimum at s_0 = -1, i.e., x_0 = 0 - let sg = SpinGlass::::new(1, vec![], vec![1.0]); - let reduction = ReduceTo::>::reduce_to(&sg); - let qubo = reduction.target_problem(); - - let solver = BruteForce::new(); - let solutions = solver.find_best(qubo); - - assert_eq!(solutions.len(), 1); - assert_eq!(solutions[0], vec![0], "Should prefer x=0 (s=-1)"); - } - - #[test] - fn test_reduction_sizes() { - // Test source_size and target_size methods - let qubo = QUBO::from_matrix(vec![vec![1.0, -2.0], vec![0.0, 1.0]]); - let reduction = ReduceTo::>::reduce_to(&qubo); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert!(!source_size.components.is_empty()); - assert!(!target_size.components.is_empty()); - - // Test SG to QUBO sizes - let sg = SpinGlass::::new(3, vec![((0, 1), -1.0)], vec![0.0, 0.0, 0.0]); - let reduction2 = ReduceTo::>::reduce_to(&sg); - - let source_size2 = reduction2.source_size(); - let target_size2 = reduction2.target_size(); - - assert!(!source_size2.components.is_empty()); - assert!(!target_size2.components.is_empty()); - } -} +#[path = "../tests_unit/rules/spinglass_qubo.rs"] +mod tests; diff --git a/src/rules/traits.rs b/src/rules/traits.rs index 2432d47..d832826 100644 --- a/src/rules/traits.rs +++ b/src/rules/traits.rs @@ -68,9 +68,5 @@ pub trait ReduceTo: Problem { } #[cfg(test)] -mod tests { - #[test] - fn test_traits_compile() { - // Traits should compile - actual tests in reduction implementations - } -} +#[path = "../tests_unit/rules/traits.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/alpha_tensor.rs b/src/rules/unitdiskmapping/alpha_tensor.rs index 00685da..35e701a 100644 --- a/src/rules/unitdiskmapping/alpha_tensor.rs +++ b/src/rules/unitdiskmapping/alpha_tensor.rs @@ -365,168 +365,5 @@ pub fn verify_triangular_gadget( } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_simple_path_alpha_tensor() { - // Path graph: 0-1-2, all weight 1, pins = [0, 2] - let edges = vec![(0, 1), (1, 2)]; - let weights = vec![1, 1, 1]; - let pins = vec![0, 2]; - - let tensor = compute_alpha_tensor(3, 
&edges, &weights, &pins); - - // Config 0b00: neither pin in IS -> MIS can include vertex 1 -> MIS = 1 - // Config 0b01: pin 0 (vertex 0) in -> vertex 1 blocked -> MIS = 1 - // Config 0b10: pin 1 (vertex 2) in -> vertex 1 blocked -> MIS = 1 - // Config 0b11: both pins in -> vertices 0,2 in IS, vertex 1 blocked -> MIS = 2 - assert_eq!(tensor, vec![1, 1, 1, 2]); - } - - #[test] - fn test_triangle_alpha_tensor() { - // Triangle: 0-1, 1-2, 0-2, all weight 1, pins = [0, 1, 2] - let edges = vec![(0, 1), (1, 2), (0, 2)]; - let weights = vec![1, 1, 1]; - let pins = vec![0, 1, 2]; - - let tensor = compute_alpha_tensor(3, &edges, &weights, &pins); - - // When all vertices are pins: - // 0b000: all pins forced OUT -> no vertices available -> MIS = 0 - // 0b001: vertex 0 in, others forced out -> MIS = 1 - // 0b010: vertex 1 in, others forced out -> MIS = 1 - // 0b011: vertices 0,1 in -> INVALID (adjacent) -> i32::MIN - // 0b100: vertex 2 in, others forced out -> MIS = 1 - // 0b101: vertices 0,2 in -> INVALID (adjacent) -> i32::MIN - // 0b110: vertices 1,2 in -> INVALID (adjacent) -> i32::MIN - // 0b111: all in -> INVALID (all adjacent) -> i32::MIN - assert_eq!( - tensor, - vec![0, 1, 1, i32::MIN, 1, i32::MIN, i32::MIN, i32::MIN] - ); - } - - #[test] - fn test_mis_compactify_simple() { - // From path graph test - let mut tensor = vec![1, 1, 1, 2]; - mis_compactify(&mut tensor); - - // Entry 0b00 (val=1): is it dominated? - // - By 0b01 (val=1)? (0b01 & 0b00) == 0b00 != 0b01, NO - // - By 0b10 (val=1)? (0b10 & 0b00) == 0b00 != 0b10, NO - // - By 0b11 (val=2)? (0b11 & 0b00) == 0b00 != 0b11, NO - // Entry 0b01 (val=1): - // - By 0b11 (val=2)? (0b11 & 0b01) == 0b01, but val=1 <= val=2, YES dominated - // Entry 0b10 (val=1): - // - By 0b11 (val=2)? (0b11 & 0b10) == 0b10, but val=1 <= val=2, YES dominated - - // After compactify: entries 0b01 and 0b10 should be i32::MIN - assert_eq!(tensor[0], 1); // 0b00 not dominated - assert_eq!(tensor[1], i32::MIN); // 0b01 dominated by 0b11 - assert_eq!(tensor[2], i32::MIN); // 0b10 dominated by 0b11 - assert_eq!(tensor[3], 2); // 0b11 not dominated - } - - #[test] - fn test_is_diff_by_const() { - let t1 = vec![3, i32::MIN, i32::MIN, 5]; - let t2 = vec![2, i32::MIN, i32::MIN, 4]; - - let (is_equiv, diff) = is_diff_by_const(&t1, &t2); - assert!(is_equiv); - assert_eq!(diff, 1); // 3-2 = 1, 5-4 = 1 - - let t3 = vec![3, i32::MIN, i32::MIN, 6]; - let (is_equiv2, _) = is_diff_by_const(&t1, &t3); - assert!(!is_equiv2); // 3-3=0, 5-6=-1, not constant - } - - #[test] - fn test_weighted_mis_exhaustive() { - // Path: 0-1-2, weights [3, 1, 3] - let edges = vec![(0, 1), (1, 2)]; - let weights = vec![3, 1, 3]; - - let mis = weighted_mis_exhaustive(3, &edges, &weights); - assert_eq!(mis, 6); // Select vertices 0 and 2 - } - - #[test] - fn test_triangular_unit_disk_edges() { - // Simple case: two adjacent nodes on triangular lattice - // Nodes at (1, 1) and (1, 2) should be connected (distance ~0.866) - let locs = vec![(1, 1), (1, 2)]; - let edges = build_triangular_unit_disk_edges(&locs); - assert_eq!(edges.len(), 1); - assert_eq!(edges[0], (0, 1)); - - // Nodes at (1, 1) and (3, 1) should NOT be connected (distance = 2) - let locs2 = vec![(1, 1), (3, 1)]; - let edges2 = build_triangular_unit_disk_edges(&locs2); - assert_eq!(edges2.len(), 0); - } - - #[test] - fn test_verify_tri_turn() { - use super::super::triangular::TriTurn; - - let gadget = TriTurn; - let result = verify_triangular_gadget(&gadget); - assert!(result.is_ok(), "TriTurn verification failed: {:?}", result); - } - - 
#[test] - fn test_verify_tri_cross_false() { - use super::super::triangular::TriCross; - - let gadget = TriCross::; - let result = verify_triangular_gadget(&gadget); - assert!( - result.is_ok(), - "TriCross verification failed: {:?}", - result - ); - } - - #[test] - fn test_verify_tri_cross_true() { - use super::super::triangular::TriCross; - - let gadget = TriCross::; - let result = verify_triangular_gadget(&gadget); - assert!( - result.is_ok(), - "TriCross verification failed: {:?}", - result - ); - } - - #[test] - fn test_verify_tri_branch() { - use super::super::triangular::TriBranch; - - let gadget = TriBranch; - let result = verify_triangular_gadget(&gadget); - assert!( - result.is_ok(), - "TriBranch verification failed: {:?}", - result - ); - } - - #[test] - fn test_verify_tri_tcon_left() { - use super::super::triangular::TriTConLeft; - - let gadget = TriTConLeft; - let result = verify_triangular_gadget(&gadget); - assert!( - result.is_ok(), - "TriTConLeft verification failed: {:?}", - result - ); - } -} +#[path = "../../tests_unit/rules/unitdiskmapping/alpha_tensor.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/copyline.rs b/src/rules/unitdiskmapping/copyline.rs index de596a4..7d60378 100644 --- a/src/rules/unitdiskmapping/copyline.rs +++ b/src/rules/unitdiskmapping/copyline.rs @@ -525,348 +525,5 @@ pub fn mis_overhead_copyline_triangular(line: &CopyLine, spacing: usize) -> i32 } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_create_copylines_path() { - // Path graph: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let order = vec![0, 1, 2]; - let lines = create_copylines(3, &edges, &order); - - assert_eq!(lines.len(), 3); - // Each vertex gets a copy line - assert_eq!(lines[0].vertex, 0); - assert_eq!(lines[1].vertex, 1); - assert_eq!(lines[2].vertex, 2); - } - - #[test] - fn test_copyline_locations() { - let line = CopyLine { - vertex: 0, - vslot: 1, - hslot: 1, - vstart: 1, - vstop: 1, - hstop: 3, - }; - let locs = line.locations(2, 4); // padding=2, spacing=4 - assert!(!locs.is_empty()); - } - - #[test] - fn test_create_copylines_empty() { - let edges: Vec<(usize, usize)> = vec![]; - let order: Vec = vec![]; - let lines = create_copylines(0, &edges, &order); - assert!(lines.is_empty()); - } - - #[test] - fn test_create_copylines_single_vertex() { - let edges: Vec<(usize, usize)> = vec![]; - let order = vec![0]; - let lines = create_copylines(1, &edges, &order); - - assert_eq!(lines.len(), 1); - assert_eq!(lines[0].vertex, 0); - assert_eq!(lines[0].vslot, 1); - } - - #[test] - fn test_create_copylines_triangle() { - // Triangle: 0-1, 1-2, 0-2 - let edges = vec![(0, 1), (1, 2), (0, 2)]; - let order = vec![0, 1, 2]; - let lines = create_copylines(3, &edges, &order); - - assert_eq!(lines.len(), 3); - // Vertex 0 should have hstop reaching to vertex 2's slot - assert!(lines[0].hstop >= 2); - } - - #[test] - fn test_copyline_center_location() { - let line = CopyLine::new(0, 2, 3, 1, 3, 4); - let (row, col) = line.center_location(1, 4); - // Julia 1-indexed: row = 4 * (3-1) + 1 + 2 = 11, col = 4 * (2-1) + 1 + 1 = 6 - // Rust 0-indexed: row = 11 - 1 = 10, col = 6 - 1 = 5 - assert_eq!(row, 10); - assert_eq!(col, 5); - } - - #[test] - fn test_remove_order_path() { - // Path: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let order = vec![0, 1, 2]; - let removal = remove_order(3, &edges, &order); - - // Vertex 2 has no later neighbors, so it can be removed at step 2 - // Vertex 1's latest neighbor is 2, so can be removed at step 2 - // Vertex 0's latest neighbor is 1, 
so can be removed at step 1 - assert_eq!(removal.len(), 3); - } - - #[test] - fn test_mis_overhead_copyline() { - let line = CopyLine::new(0, 1, 2, 1, 2, 3); - let spacing = 4; - let padding = 2; - let locs = line.copyline_locations(padding, spacing); - let overhead = mis_overhead_copyline(&line, spacing, padding); - // Julia formula for UnWeighted mode: length(locs) / 2 - assert_eq!(overhead, locs.len() / 2); - } - - #[test] - fn test_copyline_serialization() { - let line = CopyLine::new(0, 1, 2, 1, 2, 3); - let json = serde_json::to_string(&line).unwrap(); - let deserialized: CopyLine = serde_json::from_str(&json).unwrap(); - assert_eq!(line, deserialized); - } - - #[test] - fn test_create_copylines_star() { - // Star graph: 0 connected to 1, 2, 3 - let edges = vec![(0, 1), (0, 2), (0, 3)]; - let order = vec![0, 1, 2, 3]; - let lines = create_copylines(4, &edges, &order); - - assert_eq!(lines.len(), 4); - // Vertex 0 (center) should have hstop reaching the last neighbor - assert_eq!(lines[0].hstop, 4); - } - - #[test] - fn test_copyline_locations_detailed() { - let line = CopyLine::new(0, 1, 2, 1, 2, 2); - let locs = line.locations(0, 2); - - // With padding=0, spacing=2 (0-indexed output): - // Julia 1-indexed: col = 2*(1-1) + 0 + 1 = 1 -> Rust 0-indexed: col = 0 - // Julia 1-indexed: row = 2*(2-1) + 0 + 2 = 4 -> Rust 0-indexed: row = 3 - // Vertical segment covers rows around the center - - assert!(!locs.is_empty()); - // Check that we have vertical positions (col = 0 in 0-indexed) - let has_vertical = locs.iter().any(|&(_r, c, _)| c == 0); - assert!(has_vertical); - } - - #[test] - fn test_copyline_locations_simple() { - // Simple L-shape: vslot=1, hslot=1, vstart=1, vstop=2, hstop=2 - let line = CopyLine::new(0, 1, 1, 1, 2, 2); - let locs = line.copyline_locations(2, 4); // padding=2, spacing=4 - - // Center: I = 4*(1-1) + 2 + 2 = 4, J = 4*(1-1) + 2 + 1 = 3 - // vstart=1, hslot=1: no "up" segment - // vstop=2, hslot=1: "down" segment from I to I + 4*(2-1) - 1 = 4 to 7 - // hstop=2, vslot=1: "right" segment from J+2=5 to J + 4*(2-1) - 1 = 6 - - assert!(!locs.is_empty()); - // Should have nodes at every cell, not just at spacing intervals - // Check we have more than just the sparse waypoints - let node_count = locs.len(); - println!("Dense locations for simple L-shape: {:?}", locs); - println!("Node count: {}", node_count); - - // Dense should have many more nodes than sparse (which has ~3-4) - assert!( - node_count > 4, - "Dense locations should have more than sparse" - ); - } - - #[test] - fn test_copyline_locations_matches_julia() { - // Test case that can be verified against Julia's UnitDiskMapping - // Using vslot=1, hslot=2, vstart=1, vstop=2, hstop=3, padding=2, spacing=4 - let line = CopyLine::new(0, 1, 2, 1, 2, 3); - let locs = line.copyline_locations(2, 4); - - // Julia 1-indexed: I = 4*(2-1) + 2 + 2 = 8, J = 4*(1-1) + 2 + 1 = 3 - // Rust 0-indexed: row = 7, col = 2 - // Center node at (I, J+1) in Julia = (8, 4) -> Rust 0-indexed = (7, 3) - let has_center = locs.iter().any(|&(r, c, _)| r == 7 && c == 3); - assert!( - has_center, - "Center node at (7, 3) should be present. 
Locs: {:?}", - locs - ); - - // All positions should be valid (0-indexed, so >= 0) - for &(_row, _col, weight) in &locs { - assert!(weight >= 1, "Weight should be >= 1"); - } - - println!("Dense locations: {:?}", locs); - } - - // === Julia comparison tests === - // These test cases are derived from Julia's UnitDiskMapping tests - - #[test] - fn test_mis_overhead_julia_cases() { - // Test cases using UnWeighted formula: length(copyline_locations) / 2 - // Using vslot=5, hslot=5 as the base configuration - let spacing = 4; - let padding = 2; - - let test_cases = [ - // (vstart, vstop, hstop) - (3, 7, 8), - (3, 5, 8), - (5, 9, 8), - (5, 5, 8), - (1, 7, 5), - (5, 8, 5), - (1, 5, 5), - (5, 5, 5), - ]; - - for (vstart, vstop, hstop) in test_cases { - let line = CopyLine::new(1, 5, 5, vstart, vstop, hstop); - let locs = line.copyline_locations(padding, spacing); - let overhead = mis_overhead_copyline(&line, spacing, padding); - - // UnWeighted formula: length(locs) / 2 - let expected = locs.len() / 2; - - assert_eq!( - overhead, expected, - "MIS overhead mismatch for (vstart={}, vstop={}, hstop={}): got {}, expected {}", - vstart, vstop, hstop, overhead, expected - ); - } - } - - #[test] - fn test_create_copylines_petersen() { - // Petersen graph edges (0-indexed) - let edges = vec![ - (0, 1), - (1, 2), - (2, 3), - (3, 4), - (4, 0), // outer pentagon - (5, 7), - (7, 9), - (9, 6), - (6, 8), - (8, 5), // inner star - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), // connections - ]; - let order: Vec = (0..10).collect(); - - let lines = create_copylines(10, &edges, &order); - - // Verify all lines are created - assert_eq!(lines.len(), 10); - - // Verify basic invariants - for (i, &v) in order.iter().enumerate() { - let line = &lines[v]; - assert_eq!(line.vertex, v, "Vertex mismatch"); - assert_eq!(line.vslot, i + 1, "vslot should be position + 1"); - assert!( - line.vstart <= line.hslot && line.hslot <= line.vstop, - "hslot should be between vstart and vstop for vertex {}", - v - ); - assert!( - line.hstop >= line.vslot, - "hstop should be >= vslot for vertex {}", - v - ); - } - - // Verify that neighboring vertices have overlapping L-shapes - for &(u, v) in &edges { - let line_u = &lines[u]; - let line_v = &lines[v]; - // Two lines cross if one's vslot is in the other's hslot range - // and one's hslot is in the other's vslot range - let u_pos = order.iter().position(|&x| x == u).unwrap() + 1; - let v_pos = order.iter().position(|&x| x == v).unwrap() + 1; - // For a valid embedding, connected vertices should have crossing copy lines - assert!( - line_u.hstop >= v_pos || line_v.hstop >= u_pos, - "Connected vertices {} and {} should have overlapping L-shapes", - u, - v - ); - } - } - - #[test] - fn test_remove_order_detailed() { - // Path graph: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let order = vec![0, 1, 2]; - let removal = remove_order(3, &edges, &order); - - // Trace through Julia's algorithm: - // Step 0: add vertex 0, counts = [0, 1, 0], totalcounts = [1, 2, 1] - // vertex 0: counts[0]=0 != totalcounts[0]=1, not removed - // vertex 1: counts[1]=1 != totalcounts[1]=2, not removed - // vertex 2: counts[2]=0 != totalcounts[2]=1, not removed - // removal[0] = [] - // Step 1: add vertex 1, counts = [1, 2, 1], totalcounts = [1, 2, 1] - // vertex 0: counts[0]=1 == totalcounts[0]=1, remove at max(1, 0)=1 - // vertex 1: counts[1]=2 == totalcounts[1]=2, remove at max(1, 1)=1 - // vertex 2: counts[2]=1 == totalcounts[2]=1, remove at max(1, 2)=2 - // removal[1] = [0, 1] - // Step 2: add vertex 2, 
counts = [1, 3, 2] - // vertex 2 already marked removed at step 2 - // removal[2] = [2] - - assert_eq!(removal.len(), 3); - // At step 1, vertices 0 and 1 can be removed - assert!(removal[1].contains(&0) || removal[1].contains(&1)); - // At step 2, vertex 2 can be removed - assert!(removal[2].contains(&2)); - } - - #[test] - fn test_copyline_locations_node_count() { - // For a copy line, copyline_locations should produce nodes at every cell - // The number of nodes should be odd (ends + center) - let spacing = 4; - - let test_cases = [(1, 1, 1, 2), (1, 2, 1, 3), (1, 1, 2, 3), (3, 7, 5, 8)]; - - for (vslot, hslot, vstart, hstop) in test_cases { - let vstop = hslot; // Simplified: vstop = hslot - let line = CopyLine::new(0, vslot, hslot, vstart, vstop, hstop); - let locs = line.copyline_locations(2, spacing); - - // Node count should be odd (property of copy line construction) - // This is verified in Julia's test: @assert length(locs) % 2 == 1 - println!( - "vslot={}, hslot={}, vstart={}, vstop={}, hstop={}: {} nodes", - vslot, - hslot, - vstart, - vstop, - hstop, - locs.len() - ); - - // All weights should be 1 or 2 (for non-center nodes) - // except center node which has weight = nline (number of line segments) - for &(row, col, weight) in &locs { - assert!(row > 0 && col > 0, "Coordinates should be positive"); - assert!(weight >= 1, "Weight should be >= 1"); - } - } - } -} +#[path = "../../tests_unit/rules/unitdiskmapping/copyline.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/grid.rs b/src/rules/unitdiskmapping/grid.rs index b7a9f5c..63f6848 100644 --- a/src/rules/unitdiskmapping/grid.rs +++ b/src/rules/unitdiskmapping/grid.rs @@ -314,216 +314,5 @@ impl fmt::Display for MappingGrid { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_mapping_grid_create() { - let grid = MappingGrid::new(10, 10, 4); - assert_eq!(grid.size(), (10, 10)); - assert_eq!(grid.spacing(), 4); - } - - #[test] - fn test_mapping_grid_with_padding() { - let grid = MappingGrid::with_padding(8, 12, 3, 5); - assert_eq!(grid.size(), (8, 12)); - assert_eq!(grid.spacing(), 3); - assert_eq!(grid.padding(), 5); - } - - #[test] - fn test_mapping_grid_add_node() { - let mut grid = MappingGrid::new(10, 10, 4); - grid.add_node(2, 3, 1); - assert!(grid.is_occupied(2, 3)); - assert!(!grid.is_occupied(2, 4)); - } - - #[test] - fn test_mapping_grid_get_out_of_bounds() { - let grid = MappingGrid::new(5, 5, 2); - assert!(grid.get(0, 0).is_some()); - assert!(grid.get(4, 4).is_some()); - assert!(grid.get(5, 0).is_none()); - assert!(grid.get(0, 5).is_none()); - assert!(grid.get(10, 10).is_none()); - } - - #[test] - fn test_mapping_grid_add_node_doubled() { - let mut grid = MappingGrid::new(10, 10, 4); - grid.add_node(2, 3, 5); - assert_eq!(grid.get(2, 3), Some(&CellState::Occupied { weight: 5 })); - // Julia requires weights to match when doubling: - // @assert m[i,j].weight == node.weight - // Result keeps the same weight (not summed) - grid.add_node(2, 3, 5); - assert_eq!(grid.get(2, 3), Some(&CellState::Doubled { weight: 5 })); - } - - #[test] - fn test_mapping_grid_connect() { - let mut grid = MappingGrid::new(10, 10, 4); - grid.add_node(3, 4, 7); - assert_eq!(grid.get(3, 4), Some(&CellState::Occupied { weight: 7 })); - grid.connect(3, 4); - assert_eq!(grid.get(3, 4), Some(&CellState::Connected { weight: 7 })); - } - - #[test] - fn test_mapping_grid_connect_empty_cell() { - let mut grid = MappingGrid::new(10, 10, 4); - grid.connect(3, 4); - assert_eq!(grid.get(3, 4), Some(&CellState::Empty)); - } - - #[test] 
- fn test_mapping_grid_matches_pattern() { - let mut grid = MappingGrid::new(10, 10, 4); - grid.add_node(2, 2, 1); - grid.add_node(2, 3, 1); - grid.add_node(3, 2, 1); - - let pattern = vec![(0, 0), (0, 1), (1, 0)]; - assert!(grid.matches_pattern(&pattern, 2, 2)); - assert!(!grid.matches_pattern(&pattern, 0, 0)); - } - - #[test] - fn test_mapping_grid_matches_pattern_out_of_bounds() { - let grid = MappingGrid::new(5, 5, 2); - let pattern = vec![(0, 0), (1, 1)]; - assert!(!grid.matches_pattern(&pattern, 10, 10)); - } - - #[test] - fn test_mapping_grid_cross_at() { - let grid = MappingGrid::new(20, 20, 4); - // Julia's crossat uses larger position for col calculation (1-indexed) - // Julia: row = (hslot - 1) * spacing + 2 + padding = 4 + 2 + 2 = 8 - // Julia: col = (larger_vslot - 1) * spacing + 1 + padding = 8 + 1 + 2 = 11 - // Rust 0-indexed: row = 8 - 1 = 7, col = 11 - 1 = 10 - let (row, col) = grid.cross_at(1, 3, 2); - assert_eq!(row, 7); // 0-indexed - assert_eq!(col, 10); // 0-indexed - - let (row2, col2) = grid.cross_at(3, 1, 2); - assert_eq!((row, col), (row2, col2)); - } - - #[test] - fn test_cell_state_weight() { - assert_eq!(CellState::Empty.weight(), 0); - assert_eq!(CellState::Occupied { weight: 5 }.weight(), 5); - assert_eq!(CellState::Doubled { weight: 10 }.weight(), 10); - assert_eq!(CellState::Connected { weight: 3 }.weight(), 3); - } - - #[test] - fn test_cell_state_is_empty() { - assert!(CellState::Empty.is_empty()); - assert!(!CellState::Occupied { weight: 1 }.is_empty()); - assert!(!CellState::Doubled { weight: 2 }.is_empty()); - assert!(!CellState::Connected { weight: 1 }.is_empty()); - } - - #[test] - fn test_cell_state_is_occupied() { - assert!(!CellState::Empty.is_occupied()); - assert!(CellState::Occupied { weight: 1 }.is_occupied()); - assert!(CellState::Doubled { weight: 2 }.is_occupied()); - assert!(CellState::Connected { weight: 1 }.is_occupied()); - } - - #[test] - fn test_mapping_grid_set() { - let mut grid = MappingGrid::new(5, 5, 2); - grid.set(2, 3, CellState::Occupied { weight: 7 }); - assert_eq!(grid.get(2, 3), Some(&CellState::Occupied { weight: 7 })); - - // Out of bounds set should be ignored - grid.set(10, 10, CellState::Occupied { weight: 1 }); - assert!(grid.get(10, 10).is_none()); - } - - #[test] - fn test_mapping_grid_get_mut() { - let mut grid = MappingGrid::new(5, 5, 2); - grid.add_node(1, 1, 3); - - if let Some(cell) = grid.get_mut(1, 1) { - *cell = CellState::Connected { weight: 5 }; - } - assert_eq!(grid.get(1, 1), Some(&CellState::Connected { weight: 5 })); - - // Out of bounds get_mut should return None - assert!(grid.get_mut(10, 10).is_none()); - } - - #[test] - fn test_mapping_grid_occupied_coords() { - let mut grid = MappingGrid::new(5, 5, 2); - grid.add_node(1, 2, 1); - grid.add_node(3, 4, 2); - grid.add_node(0, 0, 1); - - let coords = grid.occupied_coords(); - assert_eq!(coords.len(), 3); - assert!(coords.contains(&(0, 0))); - assert!(coords.contains(&(1, 2))); - assert!(coords.contains(&(3, 4))); - } - - #[test] - fn test_mapping_grid_add_node_out_of_bounds() { - let mut grid = MappingGrid::new(5, 5, 2); - // Should silently ignore out of bounds - grid.add_node(10, 10, 1); - assert!(grid.get(10, 10).is_none()); - } - - #[test] - fn test_mapping_grid_connect_out_of_bounds() { - let mut grid = MappingGrid::new(5, 5, 2); - // Should silently ignore out of bounds - grid.connect(10, 10); - } - - #[test] - fn test_cell_state_display() { - assert_eq!(format!("{}", CellState::Empty), "⋅"); - assert_eq!(format!("{}", CellState::Occupied { 
weight: 1 }), "●"); - assert_eq!(format!("{}", CellState::Doubled { weight: 2 }), "◉"); - assert_eq!(format!("{}", CellState::Connected { weight: 1 }), "◇"); - } - - #[test] - fn test_mapping_grid_display() { - let mut grid = MappingGrid::new(3, 3, 2); - grid.add_node(0, 0, 1); - grid.add_node(1, 1, 1); - let display = format!("{}", grid); - assert!(display.contains("●")); // Has occupied nodes - assert!(display.contains("⋅")); // Has empty cells - } - - #[test] - fn test_mapping_grid_format_with_config_none() { - let mut grid = MappingGrid::new(3, 3, 2); - grid.add_node(1, 1, 1); - let output = grid.format_with_config(None); - assert!(output.contains("●")); // Occupied nodes - } - - #[test] - fn test_mapping_grid_format_with_config_some() { - let mut grid = MappingGrid::new(3, 3, 2); - grid.add_node(1, 1, 1); - // Config with node at (1,1) selected - let config = vec![0, 0, 0, 0, 1, 0, 0, 0, 0]; // 3x3 = 9 cells - let output = grid.format_with_config(Some(&config)); - // Should have some output - assert!(!output.is_empty()); - } -} +#[path = "../../tests_unit/rules/unitdiskmapping/grid.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs b/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs index dea4343..0b64a9f 100644 --- a/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs +++ b/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs @@ -1368,50 +1368,5 @@ pub fn map_config_back_pattern( } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_weighted_ksg_cross_false_mis_overhead() { - assert_eq!(WeightedKsgCross::<false>.mis_overhead(), -2); - } - - #[test] - fn test_weighted_ksg_cross_true_mis_overhead() { - assert_eq!(WeightedKsgCross::<true>.mis_overhead(), -2); - } - - #[test] - fn test_weighted_ksg_turn_mis_overhead() { - assert_eq!(WeightedKsgTurn.mis_overhead(), -2); - } - - #[test] - fn test_weighted_ksg_branch_weights() { - let branch = WeightedKsgBranch; - assert_eq!(branch.source_weights(), vec![2, 2, 2, 3, 2, 2, 2, 2]); - assert_eq!(branch.mapped_weights(), vec![2, 3, 2, 2, 2, 2]); - } - - #[test] - fn test_weighted_ksg_tcon_weights() { - let tcon = WeightedKsgTCon; - assert_eq!(tcon.source_weights(), vec![2, 1, 2, 2]); - assert_eq!(tcon.mapped_weights(), vec![2, 1, 2, 2]); - } - - #[test] - fn test_weighted_ksg_trivial_turn_weights() { - let turn = WeightedKsgTrivialTurn; - assert_eq!(turn.source_weights(), vec![1, 1]); - assert_eq!(turn.mapped_weights(), vec![1, 1]); - } - - #[test] - fn test_weighted_ksg_pattern_from_tape_idx() { - assert!(WeightedKsgPattern::from_tape_idx(0).is_some()); - assert!(WeightedKsgPattern::from_tape_idx(12).is_some()); - assert!(WeightedKsgPattern::from_tape_idx(100).is_some()); - assert!(WeightedKsgPattern::from_tape_idx(200).is_none()); - } -} +#[path = "../../../tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/ksg/mapping.rs b/src/rules/unitdiskmapping/ksg/mapping.rs index ea1c9b3..d7b3990 100644 --- a/src/rules/unitdiskmapping/ksg/mapping.rs +++ b/src/rules/unitdiskmapping/ksg/mapping.rs @@ -624,106 +624,5 @@ pub fn map_weighted_with_order( } #[cfg(test)] -mod tests { - use super::*; - use crate::topology::Graph; - - #[test] - fn test_embed_graph_path() { - // Path graph: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let result = embed_graph(3, &edges, &[0, 1, 2]); - - assert!(result.is_some()); - let grid = result.unwrap(); - assert!(!grid.occupied_coords().is_empty()); - } - - #[test] - fn test_map_unweighted_triangle() { - // Triangle graph - let edges = vec![(0, 
1), (1, 2), (0, 2)]; - let result = map_unweighted(3, &edges); - - assert!(result.grid_graph.num_vertices() > 0); - // mis_overhead can be negative due to gadgets, so we just verify the function completes - } - - #[test] - fn test_map_weighted_triangle() { - // Triangle graph - let edges = vec![(0, 1), (1, 2), (0, 2)]; - let result = map_weighted(3, &edges); - - assert!(result.grid_graph.num_vertices() > 0); - } - - #[test] - fn test_mapping_result_config_back_unweighted() { - let edges = vec![(0, 1)]; - let result = map_unweighted(2, &edges); - - // Create a dummy config - let config: Vec = vec![0; result.grid_graph.num_vertices()]; - let original = result.map_config_back(&config); - - assert_eq!(original.len(), 2); - } - - #[test] - fn test_mapping_result_config_back_weighted() { - let edges = vec![(0, 1)]; - let result = map_weighted(2, &edges); - - // Create a dummy config - let config: Vec = vec![0; result.grid_graph.num_vertices()]; - let original = result.map_config_back(&config); - - assert_eq!(original.len(), 2); - } - - #[test] - fn test_map_config_copyback_simple() { - // Create a simple copyline - let line = CopyLine::new(0, 1, 1, 1, 1, 3); - let lines = vec![line]; - - // Create config with some nodes selected - let locs = lines[0].copyline_locations(PADDING, SPACING); - let (rows, cols) = (20, 20); - let mut config = vec![vec![0; cols]; rows]; - - // Select all nodes in copyline - for &(row, col, _) in &locs { - if row < rows && col < cols { - config[row][col] = 1; - } - } - - let doubled_cells = HashSet::new(); - let result = map_config_copyback(&lines, PADDING, SPACING, &config, &doubled_cells); - - // count = len(locs) (all selected with ci=1), overhead = len/2 - // result = count - overhead = n - n/2 = n/2 - let n = locs.len(); - let overhead = n / 2; - let expected = n - overhead; - assert_eq!(result[0], expected); - } - - #[test] - fn test_map_unweighted_with_method() { - let edges = vec![(0, 1), (1, 2)]; - let result = map_unweighted_with_method(3, &edges, PathDecompositionMethod::greedy()); - - assert!(result.grid_graph.num_vertices() > 0); - } - - #[test] - fn test_map_weighted_with_method() { - let edges = vec![(0, 1), (1, 2)]; - let result = map_weighted_with_method(3, &edges, PathDecompositionMethod::greedy()); - - assert!(result.grid_graph.num_vertices() > 0); - } -} +#[path = "../../../tests_unit/rules/unitdiskmapping/ksg/mapping.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/pathdecomposition.rs b/src/rules/unitdiskmapping/pathdecomposition.rs index 43dbad0..ae19558 100644 --- a/src/rules/unitdiskmapping/pathdecomposition.rs +++ b/src/rules/unitdiskmapping/pathdecomposition.rs @@ -465,182 +465,5 @@ pub fn vertex_order_from_layout(layout: &Layout) -> Vec { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_layout_empty() { - let layout = Layout::empty(5); - assert_eq!(layout.vertices.len(), 0); - assert_eq!(layout.vsep(), 0); - assert_eq!(layout.disconnected.len(), 5); - assert_eq!(layout.neighbors.len(), 0); - } - - #[test] - fn test_layout_new() { - // Path graph: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let layout = Layout::new(3, &edges, vec![0, 1, 2]); - assert_eq!(layout.vertices, vec![0, 1, 2]); - assert_eq!(layout.vsep(), 1); // Path has pathwidth 1 - } - - #[test] - fn test_vsep_and_neighbors_path() { - // Path: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let (vsep, _) = vsep_and_neighbors(3, &edges, &[0, 1, 2]); - assert_eq!(vsep, 1); - } - - #[test] - fn test_vsep_and_neighbors_star() { - // Star: 0 connected to 1, 2, 3 - 
let edges = vec![(0, 1), (0, 2), (0, 3)]; - // Order: 0, 1, 2, 3 - after adding 0, all others become neighbors - let (vsep, _) = vsep_and_neighbors(4, &edges, &[0, 1, 2, 3]); - assert_eq!(vsep, 3); // After adding 0, neighbors = {1, 2, 3} - } - - #[test] - fn test_extend() { - // Path: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let layout = Layout::empty(3); - let layout = extend(3, &edges, &layout, 0); - assert_eq!(layout.vertices, vec![0]); - assert!(layout.neighbors.contains(&1)); - assert!(layout.disconnected.contains(&2)); - } - - #[test] - fn test_greedy_decompose_path() { - // Path: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let layout = greedy_decompose(3, &edges); - assert_eq!(layout.vertices.len(), 3); - assert_eq!(layout.vsep(), 1); - } - - #[test] - fn test_greedy_decompose_triangle() { - // Triangle: 0-1, 1-2, 0-2 - let edges = vec![(0, 1), (1, 2), (0, 2)]; - let layout = greedy_decompose(3, &edges); - assert_eq!(layout.vertices.len(), 3); - assert_eq!(layout.vsep(), 2); // Triangle has pathwidth 2 - } - - #[test] - fn test_greedy_decompose_k4() { - // Complete graph K4 - let edges = vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]; - let layout = greedy_decompose(4, &edges); - assert_eq!(layout.vertices.len(), 4); - assert_eq!(layout.vsep(), 3); // K4 has pathwidth 3 - } - - #[test] - fn test_branch_and_bound_path() { - // Path: 0-1-2 - let edges = vec![(0, 1), (1, 2)]; - let layout = branch_and_bound(3, &edges); - assert_eq!(layout.vertices.len(), 3); - assert_eq!(layout.vsep(), 1); - } - - #[test] - fn test_branch_and_bound_triangle() { - // Triangle - let edges = vec![(0, 1), (1, 2), (0, 2)]; - let layout = branch_and_bound(3, &edges); - assert_eq!(layout.vertices.len(), 3); - assert_eq!(layout.vsep(), 2); - } - - #[test] - fn test_pathwidth_greedy() { - let edges = vec![(0, 1), (1, 2)]; - let layout = pathwidth(3, &edges, PathDecompositionMethod::greedy()); - assert_eq!(layout.vertices.len(), 3); - assert_eq!(layout.vsep(), 1); - } - - #[test] - fn test_pathwidth_minhthi() { - let edges = vec![(0, 1), (1, 2)]; - let layout = pathwidth(3, &edges, PathDecompositionMethod::MinhThiTrick); - assert_eq!(layout.vertices.len(), 3); - assert_eq!(layout.vsep(), 1); - } - - #[test] - fn test_vertex_order_from_layout() { - let layout = Layout { - vertices: vec![0, 1, 2], - vsep: 1, - neighbors: vec![], - disconnected: vec![], - }; - let order = vertex_order_from_layout(&layout); - // Returns vertices in same order as layout (matching Julia's behavior) - assert_eq!(order, vec![0, 1, 2]); - } - - #[test] - fn test_petersen_graph_pathwidth() { - // Petersen graph edges - let edges = vec![ - (0, 1), - (1, 2), - (2, 3), - (3, 4), - (4, 0), // outer pentagon - (5, 7), - (7, 9), - (9, 6), - (6, 8), - (8, 5), // inner star - (0, 5), - (1, 6), - (2, 7), - (3, 8), - (4, 9), // connections - ]; - - let layout = pathwidth(10, &edges, PathDecompositionMethod::MinhThiTrick); - assert_eq!(layout.vertices.len(), 10); - // Petersen graph has pathwidth 5 - assert_eq!(layout.vsep(), 5); - } - - #[test] - fn test_cycle_graph_pathwidth() { - // Cycle C5: 0-1-2-3-4-0 - let edges = vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]; - let layout = pathwidth(5, &edges, PathDecompositionMethod::MinhThiTrick); - assert_eq!(layout.vertices.len(), 5); - // Cycle has pathwidth 2 - assert_eq!(layout.vsep(), 2); - } - - #[test] - fn test_disconnected_graph() { - // Two disconnected edges: 0-1, 2-3 - let edges = vec![(0, 1), (2, 3)]; - let layout = pathwidth(4, &edges, PathDecompositionMethod::MinhThiTrick); - 
assert_eq!(layout.vertices.len(), 4); - // Pathwidth is 1 (each component has pathwidth 1) - assert_eq!(layout.vsep(), 1); - } - - #[test] - fn test_empty_graph() { - // No edges - let edges: Vec<(usize, usize)> = vec![]; - let layout = pathwidth(5, &edges, PathDecompositionMethod::MinhThiTrick); - assert_eq!(layout.vertices.len(), 5); - assert_eq!(layout.vsep(), 0); // No edges means pathwidth 0 - } -} +#[path = "../../tests_unit/rules/unitdiskmapping/pathdecomposition.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/triangular/mapping.rs b/src/rules/unitdiskmapping/triangular/mapping.rs index f2203d1..9221143 100644 --- a/src/rules/unitdiskmapping/triangular/mapping.rs +++ b/src/rules/unitdiskmapping/triangular/mapping.rs @@ -292,79 +292,5 @@ pub fn map_weights(result: &MappingResult, source_weights: &[f64]) -> Vec<f64> { } #[cfg(test)] -mod tests { - use super::*; - use crate::topology::Graph; - - #[test] - fn test_map_weighted_basic() { - let edges = vec![(0, 1), (1, 2)]; - let result = map_weighted(3, &edges); - - assert!(result.grid_graph.num_vertices() > 0); - assert!(matches!( - result.grid_graph.grid_type(), - GridType::Triangular { .. } - )); - } - - #[test] - fn test_map_weighted_with_method() { - let edges = vec![(0, 1), (1, 2)]; - let result = map_weighted_with_method(3, &edges, PathDecompositionMethod::MinhThiTrick); - - assert!(result.grid_graph.num_vertices() > 0); - } - - #[test] - fn test_map_weighted_with_order() { - let edges = vec![(0, 1), (1, 2)]; - let vertex_order = vec![0, 1, 2]; - let result = map_weighted_with_order(3, &edges, &vertex_order); - - assert!(result.grid_graph.num_vertices() > 0); - } - - #[test] - fn test_trace_centers() { - let edges = vec![(0, 1), (1, 2)]; - let result = map_weighted(3, &edges); - - let centers = trace_centers(&result); - assert_eq!(centers.len(), 3); - - // Centers should be valid grid positions - for (row, col) in &centers { - assert!(*row > 0); - assert!(*col > 0); - } - } - - #[test] - fn test_map_weights() { - let edges = vec![(0, 1), (1, 2)]; - let result = map_weighted(3, &edges); - - let source_weights = vec![0.5, 0.3, 0.7]; - let grid_weights = map_weights(&result, &source_weights); - - // Should have same length as grid nodes - assert_eq!(grid_weights.len(), result.grid_graph.num_vertices()); - - // All weights should be positive - assert!(grid_weights.iter().all(|&w| w > 0.0)); - } - - #[test] - fn test_weighted_ruleset() { - let ruleset = weighted_ruleset(); - assert_eq!(ruleset.len(), 13); - } - - #[test] - #[should_panic(expected = "num_vertices must be > 0")] - fn test_map_weighted_panics_on_zero_vertices() { - let edges: Vec<(usize, usize)> = vec![]; - map_weighted(0, &edges); - } -} +#[path = "../../../tests_unit/rules/unitdiskmapping/triangular/mapping.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/triangular/mod.rs b/src/rules/unitdiskmapping/triangular/mod.rs index 6f84661..d20eff2 100644 --- a/src/rules/unitdiskmapping/triangular/mod.rs +++ b/src/rules/unitdiskmapping/triangular/mod.rs @@ -1627,126 +1627,5 @@ pub fn map_graph_triangular_with_order( } #[cfg(test)] -mod tests { - use super::*; - use crate::topology::Graph; - - #[test] - fn test_triangular_cross_gadget() { - // Julia: Base.size(::TriCross{true}) = (6, 4) - let cross = TriCross::<true>; - assert_eq!(cross.size(), (6, 4)); - } - - #[test] - fn test_map_graph_triangular() { - let edges = vec![(0, 1), (1, 2)]; - let result = map_graph_triangular(3, &edges); - - assert!(result.grid_graph.num_vertices() > 0); - assert!(matches!( - 
result.grid_graph.grid_type(), - GridType::Triangular { .. } - )); - } - - #[test] - fn test_triangular_cross_connected_gadget() { - // Julia: TriCross{true} - size (6,4), cross (2,2), overhead 1 - let cross = TriCross::<true>; - assert_eq!(TriangularGadget::size(&cross), (6, 4)); - assert_eq!(TriangularGadget::cross_location(&cross), (2, 2)); - assert!(TriangularGadget::is_connected(&cross)); - assert_eq!(TriangularGadget::mis_overhead(&cross), 1); - } - - #[test] - fn test_triangular_cross_disconnected_gadget() { - // Julia: TriCross{false} - size (6,6), cross (2,4), overhead 3 - let cross = TriCross::<false>; - assert_eq!(TriangularGadget::size(&cross), (6, 6)); - assert_eq!(TriangularGadget::cross_location(&cross), (2, 4)); - assert!(!TriangularGadget::is_connected(&cross)); - assert_eq!(TriangularGadget::mis_overhead(&cross), 3); - } - - #[test] - fn test_triangular_turn_gadget() { - // Julia: TriTurn - size (3,4), cross (2,2), overhead 0 - let turn = TriTurn; - assert_eq!(TriangularGadget::size(&turn), (3, 4)); - assert_eq!(TriangularGadget::mis_overhead(&turn), 0); - let (_, _, pins) = TriangularGadget::source_graph(&turn); - assert_eq!(pins.len(), 2); - } - - #[test] - fn test_triangular_branch_gadget() { - // Julia: TriBranch - size (6,4), cross (2,2), overhead 0 - let branch = TriBranch; - assert_eq!(TriangularGadget::size(&branch), (6, 4)); - assert_eq!(TriangularGadget::mis_overhead(&branch), 0); - let (_, _, pins) = TriangularGadget::source_graph(&branch); - assert_eq!(pins.len(), 3); - } - - #[test] - fn test_map_graph_triangular_with_order() { - let edges = vec![(0, 1), (1, 2)]; - let order = vec![2, 1, 0]; - let result = map_graph_triangular_with_order(3, &edges, &order); - - assert!(result.grid_graph.num_vertices() > 0); - assert_eq!(result.spacing, TRIANGULAR_SPACING); - assert_eq!(result.padding, TRIANGULAR_PADDING); - } - - #[test] - fn test_map_graph_triangular_single_vertex() { - let edges: Vec<(usize, usize)> = vec![]; - let result = map_graph_triangular(1, &edges); - - assert!(result.grid_graph.num_vertices() > 0); - } - - #[test] - #[should_panic(expected = "num_vertices must be > 0")] - fn test_map_graph_triangular_zero_vertices_panics() { - let edges: Vec<(usize, usize)> = vec![]; - map_graph_triangular(0, &edges); - } - - #[test] - fn test_triangular_gadgets_have_valid_pins() { - // Verify pin indices are within bounds for each gadget - fn check_gadget<G: TriangularGadget>(gadget: &G, name: &str) { - let (source_locs, _, source_pins) = gadget.source_graph(); - let (mapped_locs, mapped_pins) = gadget.mapped_graph(); - - for &pin in &source_pins { - assert!( - pin < source_locs.len(), - "{}: Source pin {} out of bounds (len={})", - name, - pin, - source_locs.len() - ); - } - - for &pin in &mapped_pins { - assert!( - pin < mapped_locs.len(), - "{}: Mapped pin {} out of bounds (len={})", - name, - pin, - mapped_locs.len() - ); - } - } - - check_gadget(&TriCross::<true>, "TriCross"); - check_gadget(&TriCross::<false>, "TriCross"); - check_gadget(&TriTurn, "TriTurn"); - check_gadget(&TriBranch, "TriBranch"); - } -} +#[path = "../../../tests_unit/rules/unitdiskmapping/triangular/mod.rs"] +mod tests; diff --git a/src/rules/unitdiskmapping/weighted.rs b/src/rules/unitdiskmapping/weighted.rs index 356f432..3d8ee3f 100644 --- a/src/rules/unitdiskmapping/weighted.rs +++ b/src/rules/unitdiskmapping/weighted.rs @@ -485,136 +485,5 @@ pub fn map_weights(result: &MappingResult, source_weights: &[f64]) -> Vec<f64> { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_triturn_weighted() { - let weighted = 
TriTurn.weighted(); - assert_eq!(weighted.source_weights, vec![2, 2, 2, 2]); - assert_eq!(weighted.mapped_weights, vec![2, 2, 2, 2]); - } - - #[test] - fn test_tribranch_weighted() { - let weighted = TriBranch.weighted(); - // Julia: sw = [2,2,3,2,2,2,2,2,2], mw = [2,2,2,3,2,2,2,2,2] - assert_eq!(weighted.source_weights, vec![2, 2, 3, 2, 2, 2, 2, 2, 2]); - assert_eq!(weighted.mapped_weights, vec![2, 2, 2, 3, 2, 2, 2, 2, 2]); - } - - #[test] - fn test_tricross_true_weighted() { - let weighted = TriCross::<true>.weighted(); - // Julia: sw = [2,2,2,2,2,2,2,2,2,2], mw = [3,2,3,3,2,2,2,2,2,2,2] - assert_eq!(weighted.source_weights, vec![2, 2, 2, 2, 2, 2, 2, 2, 2, 2]); - assert_eq!( - weighted.mapped_weights, - vec![3, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2] - ); - } - - #[test] - fn test_tricross_false_weighted() { - let weighted = TriCross::<false>.weighted(); - // Julia: sw = [2,2,2,2,2,2,2,2,2,2,2,2], mw = [3,3,2,4,2,2,2,4,3,2,2,2,2,2,2,2] - assert_eq!( - weighted.source_weights, - vec![2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] - ); - assert_eq!( - weighted.mapped_weights, - vec![3, 3, 2, 4, 2, 2, 2, 4, 3, 2, 2, 2, 2, 2, 2, 2] - ); - } - - #[test] - fn test_all_weighted_gadgets_have_correct_lengths() { - use super::super::triangular::TriangularGadget; - - fn check(g: G, name: &str) { - let weighted = g.clone().weighted(); - let (src_locs, _, _) = g.source_graph(); - let (map_locs, _) = g.mapped_graph(); - assert_eq!( - weighted.source_weights.len(), - src_locs.len(), - "{}: source weights length mismatch", - name - ); - assert_eq!( - weighted.mapped_weights.len(), - map_locs.len(), - "{}: mapped weights length mismatch", - name - ); - } - - check(TriTurn, "TriTurn"); - check(TriBranch, "TriBranch"); - check(TriCross::<true>, "TriCross"); - check(TriCross::<false>, "TriCross"); - check(TriTConLeft, "TriTConLeft"); - check(TriTConDown, "TriTConDown"); - check(TriTConUp, "TriTConUp"); - check(TriTrivialTurnLeft, "TriTrivialTurnLeft"); - check(TriTrivialTurnRight, "TriTrivialTurnRight"); - check(TriEndTurn, "TriEndTurn"); - check(TriWTurn, "TriWTurn"); - check(TriBranchFix, "TriBranchFix"); - check(TriBranchFixB, "TriBranchFixB"); - } - - #[test] - fn test_triangular_weighted_ruleset_has_13_gadgets() { - let ruleset = super::triangular_weighted_ruleset(); - assert_eq!(ruleset.len(), 13); - } - - #[test] - fn test_trace_centers_basic() { - use crate::rules::unitdiskmapping::map_graph_triangular; - - let edges = vec![(0, 1), (1, 2)]; - let result = map_graph_triangular(3, &edges); - - let centers = super::trace_centers(&result); - assert_eq!(centers.len(), 3); - - // Centers should be valid grid positions - for (row, col) in &centers { - assert!(*row > 0); - assert!(*col > 0); - } - } - - #[test] - fn test_map_weights_basic() { - use crate::rules::unitdiskmapping::map_graph_triangular; - use crate::topology::Graph; - - let edges = vec![(0, 1), (1, 2)]; - let result = map_graph_triangular(3, &edges); - - let source_weights = vec![0.5, 0.3, 0.7]; - let grid_weights = super::map_weights(&result, &source_weights); - - // Should have same length as grid nodes - assert_eq!(grid_weights.len(), result.grid_graph.num_vertices()); - - // All weights should be positive - assert!(grid_weights.iter().all(|&w| w > 0.0)); - } - - #[test] - #[should_panic(expected = "all weights must be in range")] - fn test_map_weights_rejects_invalid() { - use crate::rules::unitdiskmapping::map_graph_triangular; - - let edges = vec![(0, 1)]; - let result = map_graph_triangular(2, &edges); - - let source_weights = vec![1.5, 0.3]; // Invalid: > 1 - 
super::map_weights(&result, &source_weights); - } -} +#[path = "../../tests_unit/rules/unitdiskmapping/weighted.rs"] +mod tests; diff --git a/src/rules/vertexcovering_ilp.rs b/src/rules/vertexcovering_ilp.rs index cee6580..68eab15 100644 --- a/src/rules/vertexcovering_ilp.rs +++ b/src/rules/vertexcovering_ilp.rs @@ -90,285 +90,5 @@ impl ReduceTo for VertexCovering { } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, ILPSolver, Solver}; - - #[test] - fn test_reduction_creates_valid_ilp() { - // Triangle graph: 3 vertices, 3 edges - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check ILP structure - assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); - assert_eq!( - ilp.constraints.len(), - 3, - "Should have one constraint per edge" - ); - assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); - - // All variables should be binary - for bound in &ilp.bounds { - assert_eq!(*bound, VarBounds::binary()); - } - - // Each constraint should be x_i + x_j >= 1 - for constraint in &ilp.constraints { - assert_eq!(constraint.terms.len(), 2); - assert!((constraint.rhs - 1.0).abs() < 1e-9); - } - } - - #[test] - fn test_reduction_weighted() { - let problem = VertexCovering::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - // Check that weights are correctly transferred to objective - let mut coeffs: Vec = vec![0.0; 3]; - for &(var, coef) in &ilp.objective { - coeffs[var] = coef; - } - assert!((coeffs[0] - 5.0).abs() < 1e-9); - assert!((coeffs[1] - 10.0).abs() < 1e-9); - assert!((coeffs[2] - 15.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solution_equals_brute_force_triangle() { - // Triangle graph: min VC = 2 vertices - let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force on original problem - let bf_solutions = bf.find_best(&problem); - - // Solve via ILP reduction - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // Both should find optimal size = 2 - let bf_size: usize = bf_solutions[0].iter().sum(); - let ilp_size: usize = extracted.iter().sum(); - assert_eq!(bf_size, 2); - assert_eq!(ilp_size, 2); - - // Verify the ILP solution is valid for the original problem - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid, "Extracted solution should be valid"); - } - - #[test] - fn test_ilp_solution_equals_brute_force_path() { - // Path graph 0-1-2-3: min VC = 2 (e.g., {1, 2} or {0, 2} or {1, 3}) - let problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - // Solve with brute force - let bf_solutions = bf.find_best(&problem); - let bf_size: usize = bf_solutions[0].iter().sum(); - - // Solve via ILP - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size: usize = 
extracted.iter().sum(); - - assert_eq!(bf_size, 2); - assert_eq!(ilp_size, 2); - - // Verify validity - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_ilp_solution_equals_brute_force_weighted() { - // Weighted problem: vertex 1 has low weight and covers both edges - // 0 -- 1 -- 2 - // Weights: [100, 1, 100] - // Min VC by weight: just vertex 1 (weight 1) beats 0+2 (weight 200) - let problem = VertexCovering::with_weights(3, vec![(0, 1), (1, 2)], vec![100, 1, 100]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_obj = problem.solution_size(&bf_solutions[0]).size; - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_obj = problem.solution_size(&extracted).size; - - assert_eq!(bf_obj, 1); - assert_eq!(ilp_obj, 1); - - // Verify the solution selects vertex 1 - assert_eq!(extracted, vec![0, 1, 0]); - } - - #[test] - fn test_solution_extraction() { - let problem = VertexCovering::::new(4, vec![(0, 1), (2, 3)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - - // Test that extraction works correctly (1:1 mapping) - let ilp_solution = vec![1, 0, 0, 1]; - let extracted = reduction.extract_solution(&ilp_solution); - assert_eq!(extracted, vec![1, 0, 0, 1]); - - // Verify this is a valid VC (covers edges 0-1 and 2-3) - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - } - - #[test] - fn test_source_and_target_size() { - let problem = VertexCovering::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(4)); - - assert_eq!(target_size.get("num_vars"), Some(5)); - assert_eq!(target_size.get("num_constraints"), Some(4)); - } - - #[test] - fn test_empty_graph() { - // Graph with no edges: empty cover is valid - let problem = VertexCovering::::new(3, vec![]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.constraints.len(), 0); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - // No vertices should be selected - assert_eq!(extracted, vec![0, 0, 0]); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 0); - } - - #[test] - fn test_complete_graph() { - // Complete graph K4: min VC = 3 (all but one vertex) - let problem = - VertexCovering::::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - assert_eq!(ilp.constraints.len(), 6); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 3); - } - - #[test] - fn 
test_solve_reduced() { - // Test the ILPSolver::solve_reduced method - let problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - - let ilp_solver = ILPSolver::new(); - let solution = ilp_solver - .solve_reduced(&problem) - .expect("solve_reduced should work"); - - let sol_result = problem.solution_size(&solution); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - } - - #[test] - fn test_bipartite_graph() { - // Bipartite graph: 0-2, 0-3, 1-2, 1-3 (complete bipartite K_{2,2}) - // Min VC = 2 (either side of the bipartition) - let problem = VertexCovering::::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - - let sol_result = problem.solution_size(&extracted); - assert!(sol_result.is_valid); - assert_eq!(sol_result.size, 2); - - // Should select either {0, 1} or {2, 3} - let sum: usize = extracted.iter().sum(); - assert_eq!(sum, 2); - } - - #[test] - fn test_single_edge() { - // Single edge: min VC = 1 - let problem = VertexCovering::::new(2, vec![(0, 1)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_size: usize = bf_solutions[0].iter().sum(); - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size: usize = extracted.iter().sum(); - - assert_eq!(bf_size, 1); - assert_eq!(ilp_size, 1); - } - - #[test] - fn test_star_graph() { - // Star graph: center vertex 0 connected to all others - // Min VC = 1 (just the center) - let problem = VertexCovering::::new(5, vec![(0, 1), (0, 2), (0, 3), (0, 4)]); - let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); - let ilp = reduction.target_problem(); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&problem); - let bf_size: usize = bf_solutions[0].iter().sum(); - - let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_size: usize = extracted.iter().sum(); - - assert_eq!(bf_size, 1); - assert_eq!(ilp_size, 1); - - // The optimal solution should select vertex 0 - assert_eq!(extracted[0], 1); - } -} +#[path = "../tests_unit/rules/vertexcovering_ilp.rs"] +mod tests; diff --git a/src/rules/vertexcovering_independentset.rs b/src/rules/vertexcovering_independentset.rs index 7a3270c..2b97c75 100644 --- a/src/rules/vertexcovering_independentset.rs +++ b/src/rules/vertexcovering_independentset.rs @@ -137,96 +137,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - - #[test] - fn test_is_to_vc_reduction() { - // Triangle graph: max IS = 1, min VC = 2 - let is_problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction = ReduceTo::>::reduce_to(&is_problem); - let vc_problem = reduction.target_problem(); - - // Solve the VC problem - let solver = BruteForce::new(); - let vc_solutions = solver.find_best(vc_problem); - - // Extract back to IS solutions - let is_solutions: Vec<_> = vc_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - 
.collect(); - - // Verify IS solutions are valid and optimal - for sol in &is_solutions { - let size: usize = sol.iter().sum(); - assert_eq!(size, 1, "Max IS in triangle should be 1"); - } - } - - #[test] - fn test_vc_to_is_reduction() { - // Path graph 0-1-2: min VC = 1 (just vertex 1), max IS = 2 (vertices 0 and 2) - let vc_problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let is_problem = reduction.target_problem(); - - let solver = BruteForce::new(); - let is_solutions = solver.find_best(is_problem); - - let vc_solutions: Vec<_> = is_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Verify VC solutions - for sol in &vc_solutions { - let size: usize = sol.iter().sum(); - assert_eq!(size, 1, "Min VC in path should be 1"); - } - } - - #[test] - fn test_roundtrip_is_vc_is() { - let original = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let solver = BruteForce::new(); - let original_solutions = solver.find_best(&original); - - // IS -> VC -> IS - let reduction1 = ReduceTo::>::reduce_to(&original); - let vc = reduction1.target_problem().clone(); - let reduction2 = ReduceTo::>::reduce_to(&vc); - let roundtrip = reduction2.target_problem(); - - let roundtrip_solutions = solver.find_best(roundtrip); - - // Solutions should have same objective value - let orig_size: usize = original_solutions[0].iter().sum(); - let rt_size: usize = roundtrip_solutions[0].iter().sum(); - assert_eq!(orig_size, rt_size); - } - - #[test] - fn test_weighted_reduction() { - // Test with weighted problems - let is_problem = IndependentSet::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 20, 30]); - let reduction = ReduceTo::>::reduce_to(&is_problem); - let vc_problem = reduction.target_problem(); - - // Weights should be preserved - assert_eq!(vc_problem.weights_ref(), &vec![10, 20, 30]); - } - - #[test] - fn test_source_and_target_size() { - let is_problem = IndependentSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction = ReduceTo::>::reduce_to(&is_problem); - - let source_size = reduction.source_size(); - let target_size = reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(target_size.get("num_vertices"), Some(5)); - } -} +#[path = "../tests_unit/rules/vertexcovering_independentset.rs"] +mod tests; diff --git a/src/rules/vertexcovering_setcovering.rs b/src/rules/vertexcovering_setcovering.rs index 7ca41f4..9a0cd6b 100644 --- a/src/rules/vertexcovering_setcovering.rs +++ b/src/rules/vertexcovering_setcovering.rs @@ -91,189 +91,5 @@ where } #[cfg(test)] -mod tests { - use super::*; - use crate::solvers::{BruteForce, Solver}; - use crate::traits::ConstraintSatisfactionProblem; - - #[test] - fn test_vc_to_sc_basic() { - // Path graph 0-1-2 with edges (0,1) and (1,2) - // Vertex 0 covers edge 0 - // Vertex 1 covers edges 0 and 1 - // Vertex 2 covers edge 1 - let vc_problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_problem = reduction.target_problem(); - - // Check the sets are constructed correctly - assert_eq!(sc_problem.universe_size(), 2); // 2 edges - assert_eq!(sc_problem.num_sets(), 3); // 3 vertices - - // Set 0 (vertex 0): should contain edge 0 - assert_eq!(sc_problem.get_set(0), Some(&vec![0])); - // Set 1 (vertex 1): should contain edges 0 and 1 - assert_eq!(sc_problem.get_set(1), Some(&vec![0, 1])); - // Set 2 (vertex 2): should contain edge 1 - 
assert_eq!(sc_problem.get_set(2), Some(&vec![1])); - } - - #[test] - fn test_vc_to_sc_triangle() { - // Triangle graph: 3 vertices, 3 edges - // Edge indices: (0,1)->0, (1,2)->1, (0,2)->2 - let vc_problem = VertexCovering::::new(3, vec![(0, 1), (1, 2), (0, 2)]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_problem = reduction.target_problem(); - - assert_eq!(sc_problem.universe_size(), 3); - assert_eq!(sc_problem.num_sets(), 3); - - // Verify each vertex covers exactly 2 edges - for i in 0..3 { - let set = sc_problem.get_set(i).unwrap(); - assert_eq!(set.len(), 2); - } - } - - #[test] - fn test_vc_to_sc_solution_extraction() { - let vc_problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_problem = reduction.target_problem(); - - // Solve the SetCovering problem - let solver = BruteForce::new(); - let sc_solutions = solver.find_best(sc_problem); - - // Extract solutions back to VertexCovering - let vc_solutions: Vec<_> = sc_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - - // Verify extracted solutions are valid vertex covers - for sol in &vc_solutions { - assert!(vc_problem.solution_size(sol).is_valid); - } - - // The minimum should be selecting just vertex 1 (covers both edges) - let min_size: usize = vc_solutions[0].iter().sum(); - assert_eq!(min_size, 1); - } - - #[test] - fn test_vc_to_sc_optimality_preservation() { - // Test that optimal solutions are preserved through reduction - let vc_problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (2, 3)]); - let solver = BruteForce::new(); - - // Solve VC directly - let direct_solutions = solver.find_best(&vc_problem); - let direct_size = direct_solutions[0].iter().sum::(); - - // Solve via reduction - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_solutions = solver.find_best(reduction.target_problem()); - let reduced_solutions: Vec<_> = sc_solutions - .iter() - .map(|s| reduction.extract_solution(s)) - .collect(); - let reduced_size = reduced_solutions[0].iter().sum::(); - - // Optimal sizes should match - assert_eq!(direct_size, reduced_size); - } - - #[test] - fn test_vc_to_sc_weighted() { - // Weighted problem: weights should be preserved - let vc_problem = VertexCovering::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 1, 10]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_problem = reduction.target_problem(); - - // Weights should be preserved - assert_eq!(sc_problem.weights(), vec![10, 1, 10]); - - // Solve both ways - let solver = BruteForce::new(); - let vc_solutions = solver.find_best(&vc_problem); - let sc_solutions = solver.find_best(sc_problem); - - // Both should select vertex 1 (weight 1) - assert_eq!(vc_solutions[0], vec![0, 1, 0]); - assert_eq!(sc_solutions[0], vec![0, 1, 0]); - } - - #[test] - fn test_vc_to_sc_empty_graph() { - // Graph with no edges - let vc_problem = VertexCovering::::new(3, vec![]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_problem = reduction.target_problem(); - - assert_eq!(sc_problem.universe_size(), 0); - assert_eq!(sc_problem.num_sets(), 3); - - // All sets should be empty - for i in 0..3 { - assert!(sc_problem.get_set(i).unwrap().is_empty()); - } - } - - #[test] - fn test_vc_to_sc_source_target_size() { - let vc_problem = VertexCovering::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - - let source_size = reduction.source_size(); - let target_size = 
reduction.target_size(); - - assert_eq!(source_size.get("num_vertices"), Some(5)); - assert_eq!(source_size.get("num_edges"), Some(4)); - assert_eq!(target_size.get("universe_size"), Some(4)); // edges become universe - assert_eq!(target_size.get("num_sets"), Some(5)); // vertices become sets - } - - #[test] - fn test_vc_to_sc_star_graph() { - // Star graph: center vertex 0 connected to all others - // Edges: (0,1), (0,2), (0,3) - let vc_problem = VertexCovering::::new(4, vec![(0, 1), (0, 2), (0, 3)]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_problem = reduction.target_problem(); - - // Vertex 0 should cover all 3 edges - assert_eq!(sc_problem.get_set(0), Some(&vec![0, 1, 2])); - // Other vertices cover only 1 edge each - assert_eq!(sc_problem.get_set(1), Some(&vec![0])); - assert_eq!(sc_problem.get_set(2), Some(&vec![1])); - assert_eq!(sc_problem.get_set(3), Some(&vec![2])); - - // Minimum cover should be just vertex 0 - let solver = BruteForce::new(); - let solutions = solver.find_best(&vc_problem); - assert_eq!(solutions[0], vec![1, 0, 0, 0]); - } - - #[test] - fn test_vc_to_sc_all_solutions_valid() { - // Ensure all solutions extracted from SC are valid VC solutions - let vc_problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (0, 2), (2, 3)]); - let reduction = ReduceTo::>::reduce_to(&vc_problem); - let sc_problem = reduction.target_problem(); - - let solver = BruteForce::new(); - let sc_solutions = solver.find_best(sc_problem); - - for sc_sol in &sc_solutions { - let vc_sol = reduction.extract_solution(sc_sol); - let sol_size = vc_problem.solution_size(&vc_sol); - assert!( - sol_size.is_valid, - "Extracted solution {:?} should be valid", - vc_sol - ); - } - } -} +#[path = "../tests_unit/rules/vertexcovering_setcovering.rs"] +mod tests; diff --git a/src/solvers/brute_force.rs b/src/solvers/brute_force.rs index 419ff74..7aa8689 100644 --- a/src/solvers/brute_force.rs +++ b/src/solvers/brute_force.rs @@ -176,372 +176,5 @@ impl BruteForceFloat for BruteForce { } #[cfg(test)] -mod tests { - use super::*; - use crate::types::{EnergyMode, ProblemSize}; - - // Simple maximization problem: maximize sum of selected weights - #[derive(Clone)] - struct MaxSumProblem { - weights: Vec, - } - - impl Problem for MaxSumProblem { - const NAME: &'static str = "MaxSumProblem"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "i32")] - } - - type Size = i32; - - fn num_variables(&self) -> usize { - self.weights.len() - } - - fn num_flavors(&self) -> usize { - 2 - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![("variables", self.weights.len())]) - } - - fn energy_mode(&self) -> EnergyMode { - EnergyMode::LargerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - let sum: i32 = config - .iter() - .zip(&self.weights) - .map(|(&c, &w)| if c == 1 { w } else { 0 }) - .sum(); - SolutionSize::valid(sum) - } - } - - // Simple minimization problem: minimize sum of selected weights - #[derive(Clone)] - struct MinSumProblem { - weights: Vec, - } - - impl Problem for MinSumProblem { - const NAME: &'static str = "MinSumProblem"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "i32")] - } - - type Size = i32; - - fn num_variables(&self) -> usize { - self.weights.len() - } - - fn num_flavors(&self) -> usize { - 2 - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![("variables", self.weights.len())]) - } - - fn 
energy_mode(&self) -> EnergyMode { - EnergyMode::SmallerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - let sum: i32 = config - .iter() - .zip(&self.weights) - .map(|(&c, &w)| if c == 1 { w } else { 0 }) - .sum(); - SolutionSize::valid(sum) - } - } - - // Problem with validity constraint: select at most one - #[derive(Clone)] - struct SelectAtMostOneProblem { - weights: Vec, - } - - impl Problem for SelectAtMostOneProblem { - const NAME: &'static str = "SelectAtMostOneProblem"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "i32")] - } - - type Size = i32; - - fn num_variables(&self) -> usize { - self.weights.len() - } - - fn num_flavors(&self) -> usize { - 2 - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![("variables", self.weights.len())]) - } - - fn energy_mode(&self) -> EnergyMode { - EnergyMode::LargerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - let selected: usize = config.iter().sum(); - let sum: i32 = config - .iter() - .zip(&self.weights) - .map(|(&c, &w)| if c == 1 { w } else { 0 }) - .sum(); - SolutionSize::new(sum, selected <= 1) - } - } - - #[test] - fn test_variant_for_test_problems() { - // Test that variant() works for all test problems - let v = MaxSumProblem::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0], ("graph", "SimpleGraph")); - assert_eq!(v[1], ("weight", "i32")); - - let v = MinSumProblem::variant(); - assert_eq!(v.len(), 2); - - let v = SelectAtMostOneProblem::variant(); - assert_eq!(v.len(), 2); - - let v = FloatProblem::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[1], ("weight", "f64")); - } - - #[test] - fn test_brute_force_maximization() { - let problem = MaxSumProblem { - weights: vec![1, 2, 3], - }; - let solver = BruteForce::new(); - - let best = solver.find_best(&problem); - assert_eq!(best.len(), 1); - assert_eq!(best[0], vec![1, 1, 1]); // Select all for max sum = 6 - } - - #[test] - fn test_brute_force_minimization() { - let problem = MinSumProblem { - weights: vec![1, 2, 3], - }; - let solver = BruteForce::new(); - - let best = solver.find_best(&problem); - assert_eq!(best.len(), 1); - assert_eq!(best[0], vec![0, 0, 0]); // Select none for min sum = 0 - } - - #[test] - fn test_brute_force_with_validity() { - let problem = SelectAtMostOneProblem { - weights: vec![1, 5, 3], - }; - let solver = BruteForce::new(); - - let best = solver.find_best(&problem); - assert_eq!(best.len(), 1); - assert_eq!(best[0], vec![0, 1, 0]); // Select weight 5 (max single) - } - - #[test] - fn test_brute_force_multiple_optimal() { - let problem = MaxSumProblem { - weights: vec![1, 1, 1], - }; - let solver = BruteForce::new(); - - let best = solver.find_best(&problem); - assert_eq!(best.len(), 1); - assert_eq!(best[0], vec![1, 1, 1]); // All equal, so only one optimal - - // Problem with multiple optimal solutions - let problem2 = SelectAtMostOneProblem { - weights: vec![5, 5, 3], - }; - let best2 = solver.find_best(&problem2); - assert_eq!(best2.len(), 2); // Both [1,0,0] and [0,1,0] give weight 5 - } - - #[test] - fn test_brute_force_with_size() { - let problem = MaxSumProblem { - weights: vec![1, 2, 3], - }; - let solver = BruteForce::new(); - - let best = solver.find_best_with_size(&problem); - assert_eq!(best.len(), 1); - assert_eq!(best[0].0, vec![1, 1, 1]); - assert_eq!(best[0].1.size, 6); - assert!(best[0].1.is_valid); - } - - #[test] - fn test_brute_force_empty_problem() { - let problem = MaxSumProblem { 
weights: vec![] }; - let solver = BruteForce::new(); - - let best = solver.find_best(&problem); - assert!(best.is_empty()); - } - - #[test] - fn test_brute_force_valid_only_false() { - let problem = SelectAtMostOneProblem { - weights: vec![1, 2, 3], - }; - let solver = BruteForce::new().valid_only(false); - - let best = solver.find_best(&problem); - // With valid_only=false, the best is selecting all (sum=6) even though invalid - assert_eq!(best.len(), 1); - assert_eq!(best[0], vec![1, 1, 1]); - } - - #[test] - fn test_brute_force_with_tolerance() { - let solver = BruteForce::with_tolerance(0.01, 0.01); - assert_eq!(solver.atol, 0.01); - assert_eq!(solver.rtol, 0.01); - } - - // Float problem for testing BruteForceFloat - #[derive(Clone)] - struct FloatProblem { - weights: Vec, - } - - impl Problem for FloatProblem { - const NAME: &'static str = "FloatProblem"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "f64")] - } - - type Size = f64; - - fn num_variables(&self) -> usize { - self.weights.len() - } - - fn num_flavors(&self) -> usize { - 2 - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![("variables", self.weights.len())]) - } - - fn energy_mode(&self) -> EnergyMode { - EnergyMode::LargerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - let sum: f64 = config - .iter() - .zip(&self.weights) - .map(|(&c, &w)| if c == 1 { w } else { 0.0 }) - .sum(); - SolutionSize::valid(sum) - } - } - - #[test] - fn test_brute_force_float() { - use super::BruteForceFloat; - - let problem = FloatProblem { - weights: vec![1.0, 2.0, 3.0], - }; - let solver = BruteForce::new(); - - let best = solver.find_best_float(&problem); - assert_eq!(best.len(), 1); - assert_eq!(best[0].0, vec![1, 1, 1]); - assert!((best[0].1.size - 6.0).abs() < 1e-10); - } - - #[test] - fn test_brute_force_float_tolerance() { - use super::BruteForceFloat; - - // Problem where multiple solutions have nearly equal values - #[derive(Clone)] - struct NearlyEqualProblem; - - impl Problem for NearlyEqualProblem { - const NAME: &'static str = "NearlyEqualProblem"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "f64")] - } - - type Size = f64; - - fn num_variables(&self) -> usize { - 2 - } - - fn num_flavors(&self) -> usize { - 2 - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![("variables", 2)]) - } - - fn energy_mode(&self) -> EnergyMode { - EnergyMode::LargerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - let size = match (config.first(), config.get(1)) { - (Some(1), Some(0)) => 10.0, - (Some(0), Some(1)) => 10.0 + 1e-12, // Nearly equal - _ => 0.0, - }; - SolutionSize::valid(size) - } - } - - let problem = NearlyEqualProblem; - let solver = BruteForce::with_tolerance(1e-10, 1e-10); - - let best = solver.find_best_float(&problem); - // Both should be considered optimal due to tolerance - assert_eq!(best.len(), 2); - - // Test variant for NearlyEqualProblem - let v = NearlyEqualProblem::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0], ("graph", "SimpleGraph")); - assert_eq!(v[1], ("weight", "f64")); - } - - #[test] - fn test_brute_force_float_empty() { - use super::BruteForceFloat; - - let problem = FloatProblem { weights: vec![] }; - let solver = BruteForce::new(); - - let best = solver.find_best_float(&problem); - assert!(best.is_empty()); - } -} +#[path = "../tests_unit/solvers/brute_force.rs"] +mod tests; diff --git 
a/src/solvers/ilp/solver.rs b/src/solvers/ilp/solver.rs index c50b1d4..4ef1313 100644 --- a/src/solvers/ilp/solver.rs +++ b/src/solvers/ilp/solver.rs @@ -167,251 +167,5 @@ impl ILPSolver { } #[cfg(test)] -mod tests { - use super::*; - use crate::models::optimization::{LinearConstraint, VarBounds}; - use crate::solvers::{BruteForce, Solver}; - use crate::traits::Problem; - - #[test] - fn test_ilp_solver_basic_maximize() { - // Maximize x0 + 2*x1 subject to x0 + x1 <= 1, binary vars - let ilp = ILP::binary( - 2, - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 2.0)], - ObjectiveSense::Maximize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp); - - assert!(solution.is_some()); - let sol = solution.unwrap(); - - // Solution should be valid - let result = ilp.solution_size(&sol); - assert!(result.is_valid, "ILP solution should be valid"); - - // Optimal: x1=1, x0=0 => objective = 2 - assert!((result.size - 2.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solver_basic_minimize() { - // Minimize x0 + x1 subject to x0 + x1 >= 1, binary vars - let ilp = ILP::binary( - 2, - vec![LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Minimize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp); - - assert!(solution.is_some()); - let sol = solution.unwrap(); - - // Solution should be valid - let result = ilp.solution_size(&sol); - assert!(result.is_valid, "ILP solution should be valid"); - - // Optimal: one variable = 1, other = 0 => objective = 1 - assert!((result.size - 1.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_solver_matches_brute_force() { - // Maximize x0 + x1 + x2 subject to: - // x0 + x1 <= 1 - // x1 + x2 <= 1 - let ilp = ILP::binary( - 3, - vec![ - LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0), - LinearConstraint::le(vec![(1, 1.0), (2, 1.0)], 1.0), - ], - vec![(0, 1.0), (1, 1.0), (2, 1.0)], - ObjectiveSense::Maximize, - ); - - let bf = BruteForce::new(); - let ilp_solver = ILPSolver::new(); - - let bf_solutions = bf.find_best(&ilp); - let ilp_solution = ilp_solver.solve(&ilp).unwrap(); - - // Both should find optimal value (2) - let bf_size = ilp.solution_size(&bf_solutions[0]).size; - let ilp_size = ilp.solution_size(&ilp_solution).size; - assert!( - (bf_size - ilp_size).abs() < 1e-9, - "ILP should find optimal solution" - ); - } - - #[test] - fn test_ilp_empty_problem() { - let ilp = ILP::empty(); - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp); - assert_eq!(solution, Some(vec![])); - } - - #[test] - fn test_ilp_equality_constraint() { - // Minimize x0 subject to x0 + x1 == 1, binary vars - let ilp = ILP::binary( - 2, - vec![LinearConstraint::eq(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0)], - ObjectiveSense::Minimize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp).unwrap(); - - let result = ilp.solution_size(&solution); - assert!(result.is_valid); - // Optimal: x0=0, x1=1 => objective = 0 - assert!((result.size - 0.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_non_binary_bounds() { - // Variables with larger ranges - // x0 in [0, 3], x1 in [0, 2] - // Maximize x0 + x1 subject to x0 + x1 <= 4 - let ilp = ILP::new( - 2, - vec![VarBounds::bounded(0, 3), VarBounds::bounded(0, 2)], - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 4.0)], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp).unwrap(); - - let result = 
ilp.solution_size(&solution); - assert!(result.is_valid); - // Optimal: x0=3, x1=2 => objective = 5 (3 + 2 = 5 <= 4 is false!) - // Wait, 3+2=5 > 4, so constraint is violated. Let's check actual optimal: - // x0=2, x1=2 => 4 <= 4 valid, obj=4 - // x0=3, x1=1 => 4 <= 4 valid, obj=4 - assert!((result.size - 4.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_negative_lower_bounds() { - // Variables with negative lower bounds - // x0 in [-2, 2], x1 in [-1, 1] - // Maximize x0 + x1 (no constraints) - let ilp = ILP::new( - 2, - vec![VarBounds::bounded(-2, 2), VarBounds::bounded(-1, 1)], - vec![], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp).unwrap(); - - let result = ilp.solution_size(&solution); - assert!(result.is_valid); - // Optimal: x0=2, x1=1 => objective = 3 - assert!((result.size - 3.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_config_to_values_roundtrip() { - // Ensure the config encoding/decoding works correctly - let ilp = ILP::new( - 2, - vec![VarBounds::bounded(-2, 2), VarBounds::bounded(1, 3)], - vec![], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp).unwrap(); - - // The solution should be valid - let result = ilp.solution_size(&solution); - assert!(result.is_valid); - // Optimal: x0=2, x1=3 => objective = 5 - assert!((result.size - 5.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_multiple_constraints() { - // Maximize 2*x0 + 3*x1 + x2 subject to: - // x0 + x1 + x2 <= 2 - // x0 + x1 >= 1 - // Binary vars - let ilp = ILP::binary( - 3, - vec![ - LinearConstraint::le(vec![(0, 1.0), (1, 1.0), (2, 1.0)], 2.0), - LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 1.0), - ], - vec![(0, 2.0), (1, 3.0), (2, 1.0)], - ObjectiveSense::Maximize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp).unwrap(); - - let result = ilp.solution_size(&solution); - assert!(result.is_valid); - - // Check against brute force - let bf = BruteForce::new(); - let bf_solutions = bf.find_best(&ilp); - let bf_size = ilp.solution_size(&bf_solutions[0]).size; - - assert!( - (bf_size - result.size).abs() < 1e-9, - "ILP should match brute force" - ); - } - - #[test] - fn test_ilp_unconstrained() { - // Maximize x0 + x1, no constraints, binary vars - let ilp = ILP::binary( - 2, - vec![], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - - let solver = ILPSolver::new(); - let solution = solver.solve(&ilp).unwrap(); - - let result = ilp.solution_size(&solution); - assert!(result.is_valid); - // Optimal: both = 1 - assert!((result.size - 2.0).abs() < 1e-9); - } - - #[test] - fn test_ilp_with_time_limit() { - let solver = ILPSolver::with_time_limit(10.0); - assert_eq!(solver.time_limit, Some(10.0)); - - // Should still work for simple problems - let ilp = ILP::binary( - 2, - vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], - vec![(0, 1.0), (1, 1.0)], - ObjectiveSense::Maximize, - ); - - let solution = solver.solve(&ilp); - assert!(solution.is_some()); - } -} +#[path = "../../tests_unit/solvers/ilp/solver.rs"] +mod tests; diff --git a/src/testing/macros.rs b/src/testing/macros.rs index b1f380a..cea4e68 100644 --- a/src/testing/macros.rs +++ b/src/testing/macros.rs @@ -243,38 +243,5 @@ macro_rules! 
quick_problem_test { } #[cfg(test)] -mod tests { - use crate::prelude::*; - use crate::topology::SimpleGraph; - - // Test the quick_problem_test macro - #[test] - fn test_quick_problem_test_macro() { - quick_problem_test!( - IndependentSet, - new(3, vec![(0, 1), (1, 2)]), - solution: [1, 0, 1], - expected_size: 2, - is_valid: true - ); - - quick_problem_test!( - IndependentSet, - new(3, vec![(0, 1), (1, 2)]), - solution: [1, 1, 0], - expected_size: 2, - is_valid: false - ); - } - - // Test the complement_test macro - complement_test! { - name: test_is_vc_complement, - problem_a: IndependentSet, - problem_b: VertexCovering, - test_graphs: [ - (3, [(0, 1), (1, 2)]), - (4, [(0, 1), (1, 2), (2, 3), (0, 3)]), - ] - } -} +#[path = "../tests_unit/testing/macros.rs"] +mod tests; diff --git a/src/testing/mod.rs b/src/testing/mod.rs index 796ee6b..0559901 100644 --- a/src/testing/mod.rs +++ b/src/testing/mod.rs @@ -181,42 +181,5 @@ impl SatTestCase { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_graph_test_case() { - let case = GraphTestCase::new(3, vec![(0, 1), (1, 2)], vec![1, 0, 1], 2); - assert_eq!(case.num_vertices, 3); - assert_eq!(case.edges.len(), 2); - assert!(case.weights.is_none()); - assert!(case.optimal_size.is_none()); - } - - #[test] - fn test_graph_test_case_with_weights() { - let case = GraphTestCase::with_weights(3, vec![(0, 1)], vec![1, 2, 3], vec![0, 0, 1], 3); - assert!(case.weights.is_some()); - assert_eq!(case.weights.as_ref().unwrap(), &vec![1, 2, 3]); - } - - #[test] - fn test_graph_test_case_with_optimal() { - let case = GraphTestCase::new(3, vec![(0, 1)], vec![0, 0, 1], 1).with_optimal(2); - assert_eq!(case.optimal_size, Some(2)); - } - - #[test] - fn test_sat_test_case_satisfiable() { - let case = SatTestCase::satisfiable(2, vec![vec![1, 2], vec![-1]], vec![0, 1]); - assert!(case.is_satisfiable); - assert!(case.satisfying_assignment.is_some()); - } - - #[test] - fn test_sat_test_case_unsatisfiable() { - let case = SatTestCase::unsatisfiable(1, vec![vec![1], vec![-1]]); - assert!(!case.is_satisfiable); - assert!(case.satisfying_assignment.is_none()); - } -} +#[path = "../tests_unit/testing/mod.rs"] +mod tests; diff --git a/src/tests_unit/config.rs b/src/tests_unit/config.rs new file mode 100644 index 0000000..2620c74 --- /dev/null +++ b/src/tests_unit/config.rs @@ -0,0 +1,104 @@ +use super::*; + +#[test] +fn test_config_iterator_binary() { + let iter = ConfigIterator::new(3, 2); + assert_eq!(iter.total(), 8); + + let configs: Vec<_> = iter.collect(); + assert_eq!(configs.len(), 8); + assert_eq!(configs[0], vec![0, 0, 0]); + assert_eq!(configs[1], vec![0, 0, 1]); + assert_eq!(configs[2], vec![0, 1, 0]); + assert_eq!(configs[3], vec![0, 1, 1]); + assert_eq!(configs[4], vec![1, 0, 0]); + assert_eq!(configs[5], vec![1, 0, 1]); + assert_eq!(configs[6], vec![1, 1, 0]); + assert_eq!(configs[7], vec![1, 1, 1]); +} + +#[test] +fn test_config_iterator_ternary() { + let iter = ConfigIterator::new(2, 3); + assert_eq!(iter.total(), 9); + + let configs: Vec<_> = iter.collect(); + assert_eq!(configs.len(), 9); + assert_eq!(configs[0], vec![0, 0]); + assert_eq!(configs[1], vec![0, 1]); + assert_eq!(configs[2], vec![0, 2]); + assert_eq!(configs[3], vec![1, 0]); + assert_eq!(configs[8], vec![2, 2]); +} + +#[test] +fn test_config_iterator_empty() { + let iter = ConfigIterator::new(0, 2); + assert_eq!(iter.total(), 1); + let configs: Vec<_> = iter.collect(); + assert_eq!(configs.len(), 0); // Empty because num_variables is 0 +} + +#[test] +fn 
test_config_iterator_single_variable() { + let iter = ConfigIterator::new(1, 4); + assert_eq!(iter.total(), 4); + + let configs: Vec<_> = iter.collect(); + assert_eq!(configs, vec![vec![0], vec![1], vec![2], vec![3]]); +} + +#[test] +fn test_index_to_config() { + assert_eq!(index_to_config(0, 3, 2), vec![0, 0, 0]); + assert_eq!(index_to_config(1, 3, 2), vec![0, 0, 1]); + assert_eq!(index_to_config(7, 3, 2), vec![1, 1, 1]); + assert_eq!(index_to_config(5, 3, 2), vec![1, 0, 1]); +} + +#[test] +fn test_config_to_index() { + assert_eq!(config_to_index(&[0, 0, 0], 2), 0); + assert_eq!(config_to_index(&[0, 0, 1], 2), 1); + assert_eq!(config_to_index(&[1, 1, 1], 2), 7); + assert_eq!(config_to_index(&[1, 0, 1], 2), 5); +} + +#[test] +fn test_index_config_roundtrip() { + for i in 0..27 { + let config = index_to_config(i, 3, 3); + let back = config_to_index(&config, 3); + assert_eq!(i, back); + } +} + +#[test] +fn test_config_to_bits() { + assert_eq!( + config_to_bits(&[0, 1, 0, 1]), + vec![false, true, false, true] + ); + assert_eq!(config_to_bits(&[0, 0, 0]), vec![false, false, false]); + assert_eq!(config_to_bits(&[1, 1, 1]), vec![true, true, true]); +} + +#[test] +fn test_bits_to_config() { + assert_eq!( + bits_to_config(&[false, true, false, true]), + vec![0, 1, 0, 1] + ); + assert_eq!(bits_to_config(&[true, true, true]), vec![1, 1, 1]); +} + +#[test] +fn test_exact_size_iterator() { + let mut iter = ConfigIterator::new(3, 2); + assert_eq!(iter.len(), 8); + iter.next(); + assert_eq!(iter.len(), 7); + iter.next(); + iter.next(); + assert_eq!(iter.len(), 5); +} diff --git a/tests/unit_graph_tests.rs b/src/tests_unit/graph_models.rs similarity index 99% rename from tests/unit_graph_tests.rs rename to src/tests_unit/graph_models.rs index b7171cd..7e11ef0 100644 --- a/tests/unit_graph_tests.rs +++ b/src/tests_unit/graph_models.rs @@ -3,12 +3,12 @@ //! Tests extracted from source files for better compilation times //! and clearer separation of concerns. 
-use problemreductions::models::graph::{ +use crate::models::graph::{ is_independent_set, is_valid_coloring, is_vertex_cover, IndependentSet, KColoring, VertexCovering, }; -use problemreductions::prelude::*; -use problemreductions::topology::SimpleGraph; +use crate::prelude::*; +use crate::topology::SimpleGraph; // ============================================================================= // Independent Set Tests diff --git a/src/tests_unit/graph_types.rs b/src/tests_unit/graph_types.rs new file mode 100644 index 0000000..fa3b524 --- /dev/null +++ b/src/tests_unit/graph_types.rs @@ -0,0 +1,70 @@ +use super::*; + +#[test] +fn test_reflexive_subtype() { + fn assert_subtype, B: GraphMarker>() {} + + // Every type is a subtype of itself + assert_subtype::(); + assert_subtype::(); + assert_subtype::(); +} + +#[test] +fn test_subtype_entries_registered() { + let entries: Vec<_> = inventory::iter::().collect(); + + // Should have at least 4 entries + assert!(entries.len() >= 4); + + // Check specific relationships + assert!(entries + .iter() + .any(|e| e.subtype == "UnitDiskGraph" && e.supertype == "SimpleGraph")); + assert!(entries + .iter() + .any(|e| e.subtype == "PlanarGraph" && e.supertype == "SimpleGraph")); +} + +#[test] +fn test_declared_subtypes() { + fn assert_subtype, B: GraphMarker>() {} + + // Declared relationships + assert_subtype::(); + assert_subtype::(); + assert_subtype::(); + assert_subtype::(); +} + +#[test] +fn test_graph_type_traits() { + // Test Default + let _: SimpleGraph = Default::default(); + let _: PlanarGraph = Default::default(); + let _: UnitDiskGraph = Default::default(); + let _: BipartiteGraph = Default::default(); + + // Test Copy (SimpleGraph implements Copy, so no need to clone) + let g = SimpleGraph; + let _g2 = g; // Copy + let g = SimpleGraph; + let _g2 = g; + let _g3 = g; // still usable +} + +#[test] +fn test_bipartite_entry_registered() { + let entries: Vec<_> = inventory::iter::().collect(); + assert!(entries + .iter() + .any(|e| e.subtype == "BipartiteGraph" && e.supertype == "SimpleGraph")); +} + +#[test] +fn test_unit_disk_to_planar_registered() { + let entries: Vec<_> = inventory::iter::().collect(); + assert!(entries + .iter() + .any(|e| e.subtype == "UnitDiskGraph" && e.supertype == "PlanarGraph")); +} diff --git a/src/tests_unit/io.rs b/src/tests_unit/io.rs new file mode 100644 index 0000000..c59398e --- /dev/null +++ b/src/tests_unit/io.rs @@ -0,0 +1,81 @@ +use super::*; +use crate::models::graph::IndependentSet; +use crate::topology::SimpleGraph; +use std::fs; + +#[test] +fn test_to_json() { + let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); + let json = to_json(&problem); + assert!(json.is_ok()); + let json = json.unwrap(); + assert!(json.contains("graph")); +} + +#[test] +fn test_from_json() { + let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2)]); + let json = to_json(&problem).unwrap(); + let restored: IndependentSet = from_json(&json).unwrap(); + assert_eq!(restored.num_vertices(), 3); + assert_eq!(restored.num_edges(), 2); +} + +#[test] +fn test_json_compact() { + let problem = IndependentSet::::new(3, vec![(0, 1)]); + let compact = to_json_compact(&problem).unwrap(); + let pretty = to_json(&problem).unwrap(); + // Compact should be shorter + assert!(compact.len() < pretty.len()); +} + +#[test] +fn test_file_roundtrip() { + let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let path = "/tmp/test_problem.json"; + + // Write + write_problem(&problem, path, FileFormat::Json).unwrap(); + + // 
Read back + let restored: IndependentSet = read_problem(path, FileFormat::Json).unwrap(); + assert_eq!(restored.num_vertices(), 4); + assert_eq!(restored.num_edges(), 3); + + // Cleanup + fs::remove_file(path).ok(); +} + +#[test] +fn test_file_format_from_extension() { + assert_eq!( + FileFormat::from_extension(Path::new("test.json")), + Some(FileFormat::Json) + ); + assert_eq!( + FileFormat::from_extension(Path::new("test.JSON")), + Some(FileFormat::Json) + ); + assert_eq!(FileFormat::from_extension(Path::new("test.txt")), None); + assert_eq!(FileFormat::from_extension(Path::new("noext")), None); +} + +#[test] +fn test_read_write_file() { + let path = "/tmp/test_io.txt"; + let contents = "Hello, World!"; + + write_file(path, contents).unwrap(); + let read_back = read_file(path).unwrap(); + + assert_eq!(read_back, contents); + + fs::remove_file(path).ok(); +} + +#[test] +fn test_invalid_json() { + let result: Result> = from_json("not valid json"); + assert!(result.is_err()); +} diff --git a/src/tests_unit/models/graph/clique.rs b/src/tests_unit/models/graph/clique.rs new file mode 100644 index 0000000..1e2d838 --- /dev/null +++ b/src/tests_unit/models/graph/clique.rs @@ -0,0 +1,271 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_clique_creation() { + let problem = Clique::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.num_flavors(), 2); +} + +#[test] +fn test_clique_with_weights() { + let problem = + Clique::::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); + assert_eq!(problem.weights(), vec![1, 2, 3]); + assert!(problem.is_weighted()); +} + +#[test] +fn test_clique_unweighted() { + let problem = Clique::::new(3, vec![(0, 1)]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_has_edge() { + let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); + assert!(problem.has_edge(0, 1)); + assert!(problem.has_edge(1, 0)); // Undirected + assert!(problem.has_edge(1, 2)); + assert!(!problem.has_edge(0, 2)); +} + +#[test] +fn test_solution_size_valid() { + // Complete graph K3 (triangle) + let problem = Clique::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + + // Valid: all three form a clique + let sol = problem.solution_size(&[1, 1, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 3); + + // Valid: any pair + let sol = problem.solution_size(&[1, 1, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 2); +} + +#[test] +fn test_solution_size_invalid() { + // Path graph: 0-1-2 (no edge between 0 and 2) + let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); + + // Invalid: 0 and 2 are not adjacent + let sol = problem.solution_size(&[1, 0, 1]); + assert!(!sol.is_valid); + assert_eq!(sol.size, 2); + + // Invalid: all three selected but not a clique + let sol = problem.solution_size(&[1, 1, 1]); + assert!(!sol.is_valid); +} + +#[test] +fn test_solution_size_empty() { + let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); + let sol = problem.solution_size(&[0, 0, 0]); + assert!(sol.is_valid); // Empty set is a valid clique + assert_eq!(sol.size, 0); +} + +#[test] +fn test_weighted_solution() { + let problem = + Clique::::with_weights(3, vec![(0, 1), (1, 2), (0, 2)], vec![10, 20, 30]); + + // Select vertex 2 (weight 30) + let sol = problem.solution_size(&[0, 0, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 30); + + // Select all three (weights 10 + 20 + 30 = 60) + let sol = problem.solution_size(&[1, 1, 1]); + 
assert!(sol.is_valid); + assert_eq!(sol.size, 60); +} + +#[test] +fn test_constraints() { + // Path graph: 0-1-2 (non-edge between 0 and 2) + let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); + let constraints = problem.constraints(); + assert_eq!(constraints.len(), 1); // One constraint for non-edge (0, 2) +} + +#[test] +fn test_objectives() { + let problem = + Clique::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 3); // One per vertex +} + +#[test] +fn test_brute_force_triangle() { + // Triangle graph (K3): max clique is all 3 vertices + let problem = + Clique::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 1, 1]); +} + +#[test] +fn test_brute_force_path() { + // Path graph 0-1-2: max clique is any adjacent pair + let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Maximum size is 2 + for sol in &solutions { + let size: usize = sol.iter().sum(); + assert_eq!(size, 2); + // Verify it's valid + let sol_result = problem.solution_size(sol); + assert!(sol_result.is_valid); + } +} + +#[test] +fn test_brute_force_weighted() { + // Path with weights: vertex 1 has high weight + let problem = + Clique::::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Should select {0, 1} (weight 101) or {1, 2} (weight 101) + assert!(solutions.len() == 2); + for sol in &solutions { + assert!(problem.solution_size(sol).is_valid); + assert_eq!(problem.solution_size(sol).size, 101); + } +} + +#[test] +fn test_is_clique_function() { + // Triangle + assert!(is_clique(3, &[(0, 1), (1, 2), (0, 2)], &[true, true, true])); + assert!(is_clique(3, &[(0, 1), (1, 2), (0, 2)], &[true, true, false])); + + // Path - not all pairs adjacent + assert!(!is_clique(3, &[(0, 1), (1, 2)], &[true, false, true])); + assert!(is_clique(3, &[(0, 1), (1, 2)], &[true, true, false])); // Adjacent pair +} + +#[test] +fn test_problem_size() { + let problem = Clique::::new(5, vec![(0, 1), (1, 2), (2, 3)]); + let size = problem.problem_size(); + assert_eq!(size.get("num_vertices"), Some(5)); + assert_eq!(size.get("num_edges"), Some(3)); +} + +#[test] +fn test_energy_mode() { + let problem = Clique::::new(3, vec![(0, 1)]); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_edges() { + let problem = Clique::::new(4, vec![(0, 1), (2, 3)]); + let edges = problem.edges(); + assert_eq!(edges.len(), 2); +} + +#[test] +fn test_set_weights() { + let mut problem = Clique::::new(3, vec![(0, 1)]); + problem.set_weights(vec![5, 10, 15]); + assert_eq!(problem.weights(), vec![5, 10, 15]); +} + +#[test] +fn test_empty_graph() { + // No edges means any single vertex is a max clique + let problem = Clique::::new(3, vec![]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + assert_eq!(solutions.len(), 3); + // Each solution should have exactly one vertex selected + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 1); + } +} + +#[test] +fn test_is_satisfied() { + let problem = Clique::::new(3, vec![(0, 1), (1, 2)]); + + assert!(problem.is_satisfied(&[1, 1, 0])); // Valid clique + assert!(problem.is_satisfied(&[0, 1, 1])); // Valid clique + assert!(!problem.is_satisfied(&[1, 0, 1])); // Invalid: 0-2 not 
adjacent +} + +#[test] +fn test_from_graph() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = Clique::::from_graph(graph.clone(), vec![1, 2, 3]); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.weights(), vec![1, 2, 3]); +} + +#[test] +fn test_from_graph_unit_weights() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = Clique::::from_graph_unit_weights(graph); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.weights(), vec![1, 1, 1]); +} + +#[test] +fn test_graph_accessor() { + let problem = Clique::::new(3, vec![(0, 1)]); + let graph = problem.graph(); + assert_eq!(graph.num_vertices(), 3); + assert_eq!(graph.num_edges(), 1); +} + +#[test] +fn test_variant() { + let variant = Clique::::variant(); + assert_eq!(variant.len(), 2); + assert_eq!(variant[0], ("graph", "SimpleGraph")); + assert_eq!(variant[1], ("weight", "i32")); +} + +#[test] +fn test_weights_ref() { + let problem = + Clique::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + assert_eq!(problem.weights_ref(), &vec![5, 10, 15]); +} + +#[test] +fn test_is_clique_wrong_len() { + // Wrong length should return false + assert!(!is_clique(3, &[(0, 1)], &[true, false])); +} + +#[test] +fn test_complete_graph() { + // K4 - complete graph with 4 vertices + let problem = Clique::::new( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 1, 1, 1]); // All vertices form a clique +} diff --git a/src/tests_unit/models/graph/dominating_set.rs b/src/tests_unit/models/graph/dominating_set.rs new file mode 100644 index 0000000..1ee68c2 --- /dev/null +++ b/src/tests_unit/models/graph/dominating_set.rs @@ -0,0 +1,245 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_dominating_set_creation() { + let problem = DominatingSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); +} + +#[test] +fn test_dominating_set_with_weights() { + let problem = + DominatingSet::::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); + assert_eq!(problem.weights(), vec![1, 2, 3]); +} + +#[test] +fn test_neighbors() { + let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (1, 2)]); + let nbrs = problem.neighbors(0); + assert!(nbrs.contains(&1)); + assert!(nbrs.contains(&2)); + assert!(!nbrs.contains(&3)); +} + +#[test] +fn test_closed_neighborhood() { + let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2)]); + let cn = problem.closed_neighborhood(0); + assert!(cn.contains(&0)); + assert!(cn.contains(&1)); + assert!(cn.contains(&2)); + assert!(!cn.contains(&3)); +} + +#[test] +fn test_solution_size_valid() { + // Star graph: center dominates all + let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); + + // Select center + let sol = problem.solution_size(&[1, 0, 0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); + + // Select all leaves + let sol = problem.solution_size(&[0, 1, 1, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 3); +} + +#[test] +fn test_solution_size_invalid() { + let problem = DominatingSet::::new(4, vec![(0, 1), (2, 3)]); + + // Select none + let sol = problem.solution_size(&[0, 0, 0, 0]); + assert!(!sol.is_valid); + + // Select only vertex 0 (doesn't dominate 2, 3) + let sol = problem.solution_size(&[1, 0, 0, 0]); + assert!(!sol.is_valid); +} + +#[test] +fn test_brute_force_star() 
{ + // Star graph: minimum dominating set is the center + let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + assert!(solutions.contains(&vec![1, 0, 0, 0])); + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 1); + } +} + +#[test] +fn test_brute_force_path() { + // Path 0-1-2-3-4: need to dominate all 5 vertices + let problem = + DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Minimum is 2 (e.g., vertices 1 and 3) + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 2); + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_brute_force_weighted() { + // Star with heavy center + let problem = DominatingSet::::with_weights( + 4, + vec![(0, 1), (0, 2), (0, 3)], + vec![100, 1, 1, 1], + ); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Prefer selecting all leaves (3) over center (100) + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0, 1, 1, 1]); +} + +#[test] +fn test_is_dominating_set_function() { + let edges = vec![(0, 1), (0, 2), (0, 3)]; + + // Center dominates all + assert!(is_dominating_set(4, &edges, &[true, false, false, false])); + // All leaves dominate (leaf dominates center which dominates others) + assert!(is_dominating_set(4, &edges, &[false, true, true, true])); + // Single leaf doesn't dominate other leaves + assert!(!is_dominating_set(4, &edges, &[false, true, false, false])); + // Empty doesn't dominate + assert!(!is_dominating_set(4, &edges, &[false, false, false, false])); +} + +#[test] +fn test_constraints() { + let problem = DominatingSet::::new(3, vec![(0, 1), (1, 2)]); + let constraints = problem.constraints(); + assert_eq!(constraints.len(), 3); // One per vertex +} + +#[test] +fn test_energy_mode() { + let problem = DominatingSet::::new(2, vec![(0, 1)]); + assert!(problem.energy_mode().is_minimization()); +} + +#[test] +fn test_isolated_vertex() { + // Isolated vertex must be in dominating set + let problem = DominatingSet::::new(3, vec![(0, 1)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Vertex 2 is isolated, must be selected + for sol in &solutions { + assert_eq!(sol[2], 1); + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_is_satisfied() { + let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); + + assert!(problem.is_satisfied(&[1, 0, 0, 0])); // Center dominates all + assert!(problem.is_satisfied(&[0, 1, 1, 1])); // Leaves dominate + assert!(!problem.is_satisfied(&[0, 1, 0, 0])); // Missing 2 and 3 +} + +#[test] +fn test_objectives() { + let problem = + DominatingSet::::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 3); +} + +#[test] +fn test_set_weights() { + let mut problem = DominatingSet::::new(3, vec![(0, 1)]); + assert!(!problem.is_weighted()); // Initially uniform + problem.set_weights(vec![1, 2, 3]); + assert!(problem.is_weighted()); + assert_eq!(problem.weights(), vec![1, 2, 3]); +} + +#[test] +fn test_is_weighted_empty() { + let problem = DominatingSet::::with_weights(0, vec![], vec![]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_is_dominating_set_wrong_len() { + assert!(!is_dominating_set(3, &[(0, 1)], &[true, false])); +} + +#[test] +fn test_problem_size() { + let 
problem = DominatingSet::<SimpleGraph, i32>::new(5, vec![(0, 1), (1, 2), (2, 3)]); + let size = problem.problem_size(); + assert_eq!(size.get("num_vertices"), Some(5)); + assert_eq!(size.get("num_edges"), Some(3)); +} + +#[test] +fn test_from_graph() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = DominatingSet::<SimpleGraph, i32>::from_graph(graph.clone(), vec![1, 2, 3]); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.weights(), vec![1, 2, 3]); + + let problem2 = DominatingSet::<SimpleGraph, i32>::from_graph_unit_weights(graph); + assert_eq!(problem2.num_vertices(), 3); + assert_eq!(problem2.weights(), vec![1, 1, 1]); +} + +#[test] +fn test_graph_accessor() { + let problem = DominatingSet::<SimpleGraph, i32>::new(3, vec![(0, 1)]); + let graph = problem.graph(); + assert_eq!(graph.num_vertices(), 3); + assert_eq!(graph.num_edges(), 1); +} + +#[test] +fn test_weights_ref() { + let problem = + DominatingSet::<SimpleGraph, i32>::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + assert_eq!(problem.weights_ref(), &vec![5, 10, 15]); +} + +#[test] +fn test_variant() { + let variant = DominatingSet::<SimpleGraph, i32>::variant(); + assert_eq!(variant.len(), 2); + assert_eq!(variant[0], ("graph", "SimpleGraph")); + assert_eq!(variant[1], ("weight", "i32")); +} + +#[test] +fn test_edges() { + let problem = DominatingSet::<SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); + let edges = problem.edges(); + assert_eq!(edges.len(), 2); +} + +#[test] +fn test_has_edge() { + let problem = DominatingSet::<SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); + assert!(problem.has_edge(0, 1)); + assert!(problem.has_edge(1, 0)); // Undirected + assert!(problem.has_edge(1, 2)); + assert!(!problem.has_edge(0, 2)); +} diff --git a/tests/unit/graph/independent_set_tests.rs b/src/tests_unit/models/graph/independent_set.rs similarity index 77% rename from tests/unit/graph/independent_set_tests.rs rename to src/tests_unit/models/graph/independent_set.rs index 47f2721..9ee41a4 100644 --- a/tests/unit/graph/independent_set_tests.rs +++ b/src/tests_unit/models/graph/independent_set.rs @@ -1,8 +1,5 @@ -//! Unit tests for the Independent Set problem.
- -use problemreductions::models::graph::{is_independent_set, IndependentSet}; -use problemreductions::prelude::*; -use problemreductions::topology::SimpleGraph; +use super::*; +use crate::solvers::{BruteForce, Solver}; #[test] fn test_independent_set_creation() { @@ -15,7 +12,8 @@ fn test_independent_set_creation() { #[test] fn test_independent_set_with_weights() { - let problem = IndependentSet::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); + let problem = + IndependentSet::<SimpleGraph, i32>::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); assert_eq!(problem.weights(), vec![1, 2, 3]); assert!(problem.is_weighted()); } @@ -74,7 +72,8 @@ fn test_solution_size_empty() { #[test] fn test_weighted_solution() { - let problem = IndependentSet::with_weights(3, vec![(0, 1)], vec![10, 20, 30]); + let problem = + IndependentSet::<SimpleGraph, i32>::with_weights(3, vec![(0, 1)], vec![10, 20, 30]); // Select vertex 2 (weight 30) let sol = problem.solution_size(&[0, 0, 1]); @@ -96,7 +95,8 @@ fn test_constraints() { #[test] fn test_objectives() { - let problem = IndependentSet::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + let problem = + IndependentSet::<SimpleGraph, i32>::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); let objectives = problem.objectives(); assert_eq!(objectives.len(), 3); // One per vertex } @@ -104,7 +104,8 @@ fn test_objectives() { #[test] fn test_brute_force_triangle() { // Triangle graph: maximum IS has size 1 - let problem = IndependentSet::<SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let problem = + IndependentSet::<SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); let solver = BruteForce::new(); let solutions = solver.find_best(&problem); @@ -135,7 +136,8 @@ fn test_brute_force_path() { #[test] fn test_brute_force_weighted() { // Graph with weights: vertex 1 has high weight but is connected to both 0 and 2 - let problem = IndependentSet::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); + let problem = + IndependentSet::<SimpleGraph, i32>::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); let solver = BruteForce::new(); let solutions = solver.find_best(&problem); @@ -211,3 +213,42 @@ fn test_is_satisfied() { assert!(!problem.is_satisfied(&[1, 1, 0])); // Invalid: 0-1 adjacent assert!(!problem.is_satisfied(&[0, 1, 1])); // Invalid: 1-2 adjacent } + +#[test] +fn test_from_graph() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = IndependentSet::<SimpleGraph, i32>::from_graph(graph.clone(), vec![1, 2, 3]); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.weights(), vec![1, 2, 3]); +} + +#[test] +fn test_from_graph_unit_weights() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = IndependentSet::<SimpleGraph, i32>::from_graph_unit_weights(graph); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.weights(), vec![1, 1, 1]); +} + +#[test] +fn test_graph_accessor() { + let problem = IndependentSet::<SimpleGraph, i32>::new(3, vec![(0, 1)]); + let graph = problem.graph(); + assert_eq!(graph.num_vertices(), 3); + assert_eq!(graph.num_edges(), 1); +} + +#[test] +fn test_variant() { + let variant = IndependentSet::<SimpleGraph, i32>::variant(); + assert_eq!(variant.len(), 2); + assert_eq!(variant[0], ("graph", "SimpleGraph")); + assert_eq!(variant[1], ("weight", "i32")); +} + +#[test] +fn test_weights_ref() { + let problem = + IndependentSet::<SimpleGraph, i32>::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + assert_eq!(problem.weights_ref(), &vec![5, 10, 15]); +} diff --git a/tests/unit/graph/coloring_tests.rs b/src/tests_unit/models/graph/kcoloring.rs similarity index 90% rename from tests/unit/graph/coloring_tests.rs rename to src/tests_unit/models/graph/kcoloring.rs
index 15fb2b5..8e6b585 100644 --- a/tests/unit/graph/coloring_tests.rs +++ b/src/tests_unit/models/graph/kcoloring.rs @@ -1,8 +1,5 @@ -//! Unit tests for the Graph K-Coloring problem. - -use problemreductions::models::graph::{is_valid_coloring, KColoring}; -use problemreductions::prelude::*; -use problemreductions::topology::SimpleGraph; +use super::*; +use crate::solvers::{BruteForce, Solver}; #[test] fn test_kcoloring_creation() { @@ -176,3 +173,20 @@ fn test_set_weights() { problem.set_weights(vec![1, 2, 3]); assert!(!problem.is_weighted()); } + +#[test] +fn test_from_graph() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = KColoring::<3, SimpleGraph, i32>::from_graph(graph); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.num_edges(), 2); +} + +#[test] +fn test_variant() { + let v = KColoring::<3, SimpleGraph, i32>::variant(); + assert_eq!(v.len(), 3); + assert_eq!(v[0], ("k", "3")); + assert_eq!(v[1], ("graph", "SimpleGraph")); + assert_eq!(v[2], ("weight", "i32")); +} diff --git a/src/tests_unit/models/graph/matching.rs b/src/tests_unit/models/graph/matching.rs new file mode 100644 index 0000000..9a10b63 --- /dev/null +++ b/src/tests_unit/models/graph/matching.rs @@ -0,0 +1,232 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_matching_creation() { + let problem = Matching::::new(4, vec![(0, 1, 1), (1, 2, 2), (2, 3, 3)]); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); + assert_eq!(problem.num_variables(), 3); +} + +#[test] +fn test_matching_unweighted() { + let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); + assert_eq!(problem.num_edges(), 2); +} + +#[test] +fn test_edge_endpoints() { + let problem = Matching::::new(3, vec![(0, 1, 1), (1, 2, 2)]); + assert_eq!(problem.edge_endpoints(0), Some((0, 1))); + assert_eq!(problem.edge_endpoints(1), Some((1, 2))); + assert_eq!(problem.edge_endpoints(2), None); +} + +#[test] +fn test_is_valid_matching() { + let problem = Matching::::new(4, vec![(0, 1, 1), (1, 2, 1), (2, 3, 1)]); + + // Valid: select edge 0 only + assert!(problem.is_valid_matching(&[1, 0, 0])); + + // Valid: select edges 0 and 2 (disjoint) + assert!(problem.is_valid_matching(&[1, 0, 1])); + + // Invalid: edges 0 and 1 share vertex 1 + assert!(!problem.is_valid_matching(&[1, 1, 0])); +} + +#[test] +fn test_solution_size() { + let problem = Matching::::new(4, vec![(0, 1, 5), (1, 2, 10), (2, 3, 3)]); + + let sol = problem.solution_size(&[1, 0, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 8); // 5 + 3 + + let sol = problem.solution_size(&[0, 1, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 10); +} + +#[test] +fn test_brute_force_path() { + // Path 0-1-2-3 with unit weights + let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Maximum matching has 2 edges: {0-1, 2-3} + assert!(solutions.contains(&vec![1, 0, 1])); + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 2); + } +} + +#[test] +fn test_brute_force_triangle() { + let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Maximum matching has 1 edge (any of the 3) + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 1); + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_brute_force_weighted() { + // Prefer heavy edge even if it excludes more edges 
+ let problem = Matching::::new(4, vec![(0, 1, 100), (0, 2, 1), (1, 3, 1)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Edge 0-1 (weight 100) alone beats edges 0-2 + 1-3 (weight 2) + assert!(solutions.contains(&vec![1, 0, 0])); +} + +#[test] +fn test_is_matching_function() { + let edges = vec![(0, 1), (1, 2), (2, 3)]; + + assert!(is_matching(4, &edges, &[true, false, true])); // Disjoint + assert!(is_matching(4, &edges, &[false, true, false])); // Single edge + assert!(!is_matching(4, &edges, &[true, true, false])); // Share vertex 1 + assert!(is_matching(4, &edges, &[false, false, false])); // Empty is valid +} + +#[test] +fn test_energy_mode() { + let problem = Matching::::unweighted(2, vec![(0, 1)]); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_empty_graph() { + let problem = Matching::::unweighted(3, vec![]); + let sol = problem.solution_size(&[]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); +} + +#[test] +fn test_constraints() { + let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); + let constraints = problem.constraints(); + // Vertex 1 has degree 2, so 1 constraint + assert_eq!(constraints.len(), 1); +} + +#[test] +fn test_edges() { + let problem = Matching::::new(3, vec![(0, 1, 5), (1, 2, 10)]); + let edges = problem.edges(); + assert_eq!(edges.len(), 2); +} + +#[test] +fn test_perfect_matching() { + // K4: can have perfect matching (2 edges covering all 4 vertices) + let problem = Matching::::unweighted( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Perfect matching has 2 edges + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 2); + // Check it's a valid matching using 4 vertices + let mut used = [false; 4]; + for (idx, &sel) in sol.iter().enumerate() { + if sel == 1 { + if let Some((u, v)) = problem.edge_endpoints(idx) { + used[u] = true; + used[v] = true; + } + } + } + assert!(used.iter().all(|&u| u)); // All vertices matched + } +} + +#[test] +fn test_is_satisfied() { + let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); + + assert!(problem.is_satisfied(&[1, 0, 1])); // Valid matching + assert!(problem.is_satisfied(&[0, 1, 0])); // Valid matching + assert!(!problem.is_satisfied(&[1, 1, 0])); // Share vertex 1 +} + +#[test] +fn test_objectives() { + let problem = Matching::::new(3, vec![(0, 1, 5), (1, 2, 10)]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 2); +} + +#[test] +fn test_set_weights() { + let mut problem = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); + assert!(!problem.is_weighted()); // Initially uniform + problem.set_weights(vec![1, 2]); + assert!(problem.is_weighted()); + assert_eq!(problem.weights(), vec![1, 2]); +} + +#[test] +fn test_is_weighted_empty() { + let problem = Matching::::unweighted(2, vec![]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_is_matching_wrong_len() { + let edges = vec![(0, 1), (1, 2)]; + assert!(!is_matching(3, &edges, &[true])); // Wrong length +} + +#[test] +fn test_is_matching_out_of_bounds() { + let edges = vec![(0, 5)]; // Vertex 5 doesn't exist + assert!(!is_matching(3, &edges, &[true])); +} + +#[test] +fn test_problem_size() { + let problem = Matching::::unweighted(5, vec![(0, 1), (1, 2), (2, 3)]); + let size = problem.problem_size(); + assert_eq!(size.get("num_vertices"), Some(5)); + assert_eq!(size.get("num_edges"), Some(3)); +} + +#[test] +fn 
test_from_graph() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = Matching::::from_graph(graph, vec![5, 10]); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.num_edges(), 2); + assert_eq!(problem.weights(), vec![5, 10]); +} + +#[test] +fn test_from_graph_unit_weights() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = Matching::::from_graph_unit_weights(graph); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.num_edges(), 2); + assert_eq!(problem.weights(), vec![1, 1]); +} + +#[test] +fn test_graph_accessor() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = Matching::::from_graph_unit_weights(graph); + assert_eq!(problem.graph().num_vertices(), 3); + assert_eq!(problem.graph().num_edges(), 2); +} diff --git a/src/tests_unit/models/graph/max_cut.rs b/src/tests_unit/models/graph/max_cut.rs new file mode 100644 index 0000000..5437ab6 --- /dev/null +++ b/src/tests_unit/models/graph/max_cut.rs @@ -0,0 +1,225 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_maxcut_creation() { + let problem = MaxCut::::new(4, vec![(0, 1, 1), (1, 2, 2), (2, 3, 3)]); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.num_flavors(), 2); +} + +#[test] +fn test_maxcut_unweighted() { + let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); + assert_eq!(problem.num_edges(), 2); +} + +#[test] +fn test_solution_size() { + let problem = MaxCut::::new(3, vec![(0, 1, 1), (1, 2, 2), (0, 2, 3)]); + + // All same partition: no cut + let sol = problem.solution_size(&[0, 0, 0]); + assert_eq!(sol.size, 0); + assert!(sol.is_valid); + + // 0 vs {1,2}: cuts edges 0-1 (1) and 0-2 (3) = 4 + let sol = problem.solution_size(&[0, 1, 1]); + assert_eq!(sol.size, 4); + + // {0,2} vs {1}: cuts edges 0-1 (1) and 1-2 (2) = 3 + let sol = problem.solution_size(&[0, 1, 0]); + assert_eq!(sol.size, 3); +} + +#[test] +fn test_brute_force_triangle() { + // Triangle with unit weights: max cut is 2 + let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + let size = problem.solution_size(sol); + assert_eq!(size.size, 2); + } +} + +#[test] +fn test_brute_force_path() { + // Path 0-1-2: max cut is 2 (partition {0,2} vs {1}) + let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + let size = problem.solution_size(sol); + assert_eq!(size.size, 2); + } +} + +#[test] +fn test_brute_force_weighted() { + // Edge with weight 10 should always be cut + let problem = MaxCut::::new(3, vec![(0, 1, 10), (1, 2, 1)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Max is 11 (cut both edges) with partition like [0,1,0] or [1,0,1] + for sol in &solutions { + let size = problem.solution_size(sol); + assert_eq!(size.size, 11); + } +} + +#[test] +fn test_cut_size_function() { + let edges = vec![(0, 1, 1), (1, 2, 2), (0, 2, 3)]; + + // Partition {0} vs {1, 2} + assert_eq!(cut_size(&edges, &[false, true, true]), 4); // 1 + 3 + + // Partition {0, 1} vs {2} + assert_eq!(cut_size(&edges, &[false, false, true]), 5); // 2 + 3 + + // All same partition + assert_eq!(cut_size(&edges, &[false, false, false]), 0); +} + +#[test] +fn test_edge_weight() { + let problem = 
MaxCut::::new(3, vec![(0, 1, 5), (1, 2, 10)]); + assert_eq!(problem.edge_weight(0, 1), Some(&5)); + assert_eq!(problem.edge_weight(1, 2), Some(&10)); + assert_eq!(problem.edge_weight(0, 2), None); +} + +#[test] +fn test_edges() { + let problem = MaxCut::::new(3, vec![(0, 1, 1), (1, 2, 2)]); + let edges = problem.edges(); + assert_eq!(edges.len(), 2); +} + +#[test] +fn test_energy_mode() { + let problem = MaxCut::::unweighted(2, vec![(0, 1)]); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_empty_graph() { + let problem = MaxCut::::unweighted(3, vec![]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Any partition gives cut size 0 + assert!(!solutions.is_empty()); + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 0); + } +} + +#[test] +fn test_single_edge() { + let problem = MaxCut::::new(2, vec![(0, 1, 5)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Putting vertices in different sets maximizes cut + assert_eq!(solutions.len(), 2); // [0,1] and [1,0] + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 5); + } +} + +#[test] +fn test_complete_graph_k4() { + // K4: every partition cuts exactly 4 edges (balanced) or less + let problem = MaxCut::::unweighted( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Max cut in K4 is 4 (2-2 partition) + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 4); + } +} + +#[test] +fn test_bipartite_graph() { + // Complete bipartite K_{2,2}: max cut is all 4 edges + let problem = + MaxCut::::unweighted(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Bipartite graph can achieve max cut = all edges + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, 4); + } +} + +#[test] +fn test_symmetry() { + // Complementary partitions should give same cut + let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); + + let sol1 = problem.solution_size(&[0, 1, 1]); + let sol2 = problem.solution_size(&[1, 0, 0]); // complement + assert_eq!(sol1.size, sol2.size); +} + +#[test] +fn test_from_graph() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaxCut::::from_graph(graph, vec![5, 10]); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.num_edges(), 2); + assert_eq!(problem.edge_weights(), vec![5, 10]); +} + +#[test] +fn test_from_graph_unweighted() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaxCut::::from_graph_unweighted(graph); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.num_edges(), 2); + assert_eq!(problem.edge_weights(), vec![1, 1]); +} + +#[test] +fn test_graph_accessor() { + let problem = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); + let graph = problem.graph(); + assert_eq!(graph.num_vertices(), 3); + assert_eq!(graph.num_edges(), 2); +} + +#[test] +fn test_with_weights() { + let problem = + MaxCut::::with_weights(3, vec![(0, 1), (1, 2)], vec![7, 3]); + assert_eq!(problem.edge_weights(), vec![7, 3]); +} + +#[test] +fn test_edge_weight_by_index() { + let problem = MaxCut::::new(3, vec![(0, 1, 5), (1, 2, 10)]); + assert_eq!(problem.edge_weight_by_index(0), Some(&5)); + assert_eq!(problem.edge_weight_by_index(1), Some(&10)); + assert_eq!(problem.edge_weight_by_index(2), None); +} + +#[test] +fn 
test_variant() { + let variant = MaxCut::::variant(); + assert_eq!(variant.len(), 2); + assert_eq!(variant[0], ("graph", "SimpleGraph")); + assert_eq!(variant[1], ("weight", "i32")); +} diff --git a/src/tests_unit/models/graph/maximal_is.rs b/src/tests_unit/models/graph/maximal_is.rs new file mode 100644 index 0000000..90ef66e --- /dev/null +++ b/src/tests_unit/models/graph/maximal_is.rs @@ -0,0 +1,249 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_maximal_is_creation() { + let problem = MaximalIS::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); +} + +#[test] +fn test_maximal_is_with_weights() { + let problem = MaximalIS::::with_weights(3, vec![(0, 1)], vec![1, 2, 3]); + assert_eq!(problem.weights(), vec![1, 2, 3]); + assert!(problem.is_weighted()); +} + +#[test] +fn test_maximal_is_from_graph() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximalIS::::from_graph(graph, vec![1, 2, 3]); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.weights(), vec![1, 2, 3]); +} + +#[test] +fn test_maximal_is_from_graph_unit_weights() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximalIS::::from_graph_unit_weights(graph); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.weights(), vec![1, 1, 1]); +} + +#[test] +fn test_is_independent() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + + assert!(problem.is_independent(&[1, 0, 1])); + assert!(problem.is_independent(&[0, 1, 0])); + assert!(!problem.is_independent(&[1, 1, 0])); +} + +#[test] +fn test_is_maximal() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + + // {0, 2} is maximal (cannot add 1) + assert!(problem.is_maximal(&[1, 0, 1])); + + // {1} is maximal (cannot add 0 or 2) + assert!(problem.is_maximal(&[0, 1, 0])); + + // {0} is not maximal (can add 2) + assert!(!problem.is_maximal(&[1, 0, 0])); + + // {} is not maximal (can add any vertex) + assert!(!problem.is_maximal(&[0, 0, 0])); +} + +#[test] +fn test_solution_size() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + + // Maximal: {0, 2} + let sol = problem.solution_size(&[1, 0, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 2); + + // Maximal: {1} + let sol = problem.solution_size(&[0, 1, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); + + // Not maximal: {0} + let sol = problem.solution_size(&[1, 0, 0]); + assert!(!sol.is_valid); +} + +#[test] +fn test_brute_force_path() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Largest maximal IS is {0, 2} with size 2 + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 0, 1]); +} + +#[test] +fn test_brute_force_triangle() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // All maximal IS have size 1 (any single vertex) + assert_eq!(solutions.len(), 3); + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 1); + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_is_maximal_independent_set_function() { + let edges = vec![(0, 1), (1, 2)]; + + assert!(is_maximal_independent_set(3, &edges, &[true, false, true])); + assert!(is_maximal_independent_set(3, &edges, &[false, true, false])); + assert!(!is_maximal_independent_set( + 3, + &edges, + &[true, false, false] + )); // Can 
add 2 + assert!(!is_maximal_independent_set(3, &edges, &[true, true, false])); // Not independent +} + +#[test] +fn test_energy_mode() { + let problem = MaximalIS::::new(2, vec![(0, 1)]); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_empty_graph() { + let problem = MaximalIS::::new(3, vec![]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Only maximal IS is all vertices + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 1, 1]); +} + +#[test] +fn test_constraints() { + let problem = MaximalIS::::new(3, vec![(0, 1)]); + let constraints = problem.constraints(); + // 1 edge constraint + 3 maximality constraints + assert_eq!(constraints.len(), 4); +} + +#[test] +fn test_is_satisfied() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + + assert!(problem.is_satisfied(&[1, 0, 1])); // Maximal + assert!(problem.is_satisfied(&[0, 1, 0])); // Maximal + // Note: is_satisfied checks constraints, which may be more complex +} + +#[test] +fn test_objectives() { + let problem = MaximalIS::::new(3, vec![(0, 1)]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 3); // One per vertex +} + +#[test] +fn test_weights() { + let problem = MaximalIS::::new(3, vec![(0, 1)]); + let weights = problem.weights(); + assert_eq!(weights, vec![1, 1, 1]); // Unit weights +} + +#[test] +fn test_set_weights() { + let mut problem = MaximalIS::::new(3, vec![(0, 1)]); + problem.set_weights(vec![1, 2, 3]); + assert_eq!(problem.weights(), vec![1, 2, 3]); +} + +#[test] +fn test_is_weighted() { + let problem = MaximalIS::::new(3, vec![(0, 1)]); + assert!(!problem.is_weighted()); // Initially uniform +} + +#[test] +fn test_is_weighted_empty() { + let problem = MaximalIS::::with_weights(0, vec![], vec![]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_is_maximal_independent_set_wrong_len() { + assert!(!is_maximal_independent_set(3, &[(0, 1)], &[true, false])); +} + +#[test] +fn test_problem_size() { + let problem = MaximalIS::::new(5, vec![(0, 1), (1, 2), (2, 3)]); + let size = problem.problem_size(); + assert_eq!(size.get("num_vertices"), Some(5)); + assert_eq!(size.get("num_edges"), Some(3)); +} + +#[test] +fn test_variant() { + let variant = MaximalIS::::variant(); + assert_eq!(variant.len(), 2); + assert_eq!(variant[0], ("graph", "SimpleGraph")); + assert_eq!(variant[1], ("weight", "i32")); +} + +#[test] +fn test_graph_ref() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + let graph = problem.graph(); + assert_eq!(graph.num_vertices(), 3); + assert_eq!(graph.num_edges(), 2); +} + +#[test] +fn test_edges() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + let edges = problem.edges(); + assert_eq!(edges.len(), 2); +} + +#[test] +fn test_has_edge() { + let problem = MaximalIS::::new(3, vec![(0, 1), (1, 2)]); + assert!(problem.has_edge(0, 1)); + assert!(problem.has_edge(1, 0)); // Undirected + assert!(problem.has_edge(1, 2)); + assert!(!problem.has_edge(0, 2)); +} + +#[test] +fn test_weights_ref() { + let problem = MaximalIS::::new(3, vec![(0, 1)]); + assert_eq!(problem.weights_ref(), &vec![1, 1, 1]); +} + +#[test] +fn test_weighted_solution() { + let problem = + MaximalIS::::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 100, 10]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Should prefer {1} with weight 100 over {0, 2} with weight 20 + // But {0, 2} is also maximal... 
and has more vertices
+    // Solution size here is the total weight, not the vertex count:
+    // with LargerSizeIsBetter, {1} (weight 100) beats {0, 2} (weight 20)
+    assert_eq!(solutions.len(), 1);
+    assert_eq!(solutions[0], vec![0, 1, 0]);
+}
diff --git a/tests/unit/graph/vertex_covering_tests.rs b/src/tests_unit/models/graph/vertex_covering.rs
similarity index 75%
rename from tests/unit/graph/vertex_covering_tests.rs
rename to src/tests_unit/models/graph/vertex_covering.rs
index e19477f..b83c068 100644
--- a/tests/unit/graph/vertex_covering_tests.rs
+++ b/src/tests_unit/models/graph/vertex_covering.rs
@@ -1,8 +1,5 @@
-//! Unit tests for the Vertex Covering problem.
-
-use problemreductions::models::graph::{is_vertex_cover, IndependentSet, VertexCovering};
-use problemreductions::prelude::*;
-use problemreductions::topology::SimpleGraph;
+use super::*;
+use crate::solvers::{BruteForce, Solver};

 #[test]
 fn test_vertex_cover_creation() {
@@ -15,7 +12,8 @@ fn test_vertex_cover_creation() {

 #[test]
 fn test_vertex_cover_with_weights() {
-    let problem = VertexCovering::with_weights(3, vec![(0, 1)], vec![1, 2, 3]);
+    let problem =
+        VertexCovering::<SimpleGraph, i32>::with_weights(3, vec![(0, 1)], vec![1, 2, 3]);
     assert_eq!(problem.weights(), vec![1, 2, 3]);
     assert!(problem.is_weighted());
 }
@@ -77,7 +75,11 @@
 #[test]
 fn test_brute_force_weighted() {
     // Weighted: prefer selecting low-weight vertices
-    let problem = VertexCovering::with_weights(3, vec![(0, 1), (1, 2)], vec![100, 1, 100]);
+    let problem = VertexCovering::<SimpleGraph, i32>::with_weights(
+        3,
+        vec![(0, 1), (1, 2)],
+        vec![100, 1, 100],
+    );
     let solver = BruteForce::new();

     let solutions = solver.find_best(&problem);
@@ -149,6 +151,8 @@
 #[test]
 fn test_complement_relationship() {
     // For a graph, if S is an independent set, then V\S is a vertex cover
+    use crate::models::graph::IndependentSet;
+
     let edges = vec![(0, 1), (1, 2), (2, 3)];
     let is_problem = IndependentSet::<SimpleGraph, i32>::new(4, edges.clone());
     let vc_problem = VertexCovering::<SimpleGraph, i32>::new(4, edges);
@@ -165,7 +169,8 @@

 #[test]
 fn test_objectives() {
-    let problem = VertexCovering::with_weights(3, vec![(0, 1)], vec![5, 10, 15]);
+    let problem =
+        VertexCovering::<SimpleGraph, i32>::with_weights(3, vec![(0, 1)], vec![5, 10, 15]);
     let objectives = problem.objectives();
     assert_eq!(objectives.len(), 3);
 }
@@ -190,3 +195,44 @@ fn test_is_vertex_cover_wrong_len() {
     // Wrong length should return false
     assert!(!is_vertex_cover(3, &[(0, 1)], &[true, false]));
 }
+
+#[test]
+fn test_from_graph() {
+    let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]);
+    let problem = VertexCovering::<SimpleGraph, i32>::from_graph_unit_weights(graph);
+    assert_eq!(problem.num_vertices(), 3);
+    assert_eq!(problem.num_edges(), 2);
+}
+
+#[test]
+fn test_from_graph_with_weights() {
+    let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]);
+    let problem = VertexCovering::<SimpleGraph, i32>::from_graph(graph, vec![1, 2, 3]);
+    assert_eq!(problem.weights(), vec![1, 2, 3]);
+    assert!(problem.is_weighted());
+}
+
+#[test]
+fn test_graph_accessor() {
+    let problem = VertexCovering::<SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]);
+    let graph = problem.graph();
+    assert_eq!(graph.num_vertices(), 3);
+    assert_eq!(graph.num_edges(), 2);
+}
+
+#[test]
+fn test_has_edge() {
+    let problem = VertexCovering::<SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]);
+    assert!(problem.has_edge(0, 1));
+    assert!(problem.has_edge(1, 0)); // Undirected
+    assert!(problem.has_edge(1, 2));
+    assert!(!problem.has_edge(0, 2));
+}
+
+#[test]
+fn test_variant() {
+    let variant =
VertexCovering::::variant(); + assert_eq!(variant.len(), 2); + assert_eq!(variant[0], ("graph", "SimpleGraph")); + assert_eq!(variant[1], ("weight", "i32")); +} diff --git a/src/tests_unit/models/optimization/ilp.rs b/src/tests_unit/models/optimization/ilp.rs new file mode 100644 index 0000000..f0948e3 --- /dev/null +++ b/src/tests_unit/models/optimization/ilp.rs @@ -0,0 +1,584 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +// ============================================================ +// VarBounds tests +// ============================================================ + +#[test] +fn test_varbounds_binary() { + let bounds = VarBounds::binary(); + assert_eq!(bounds.lower, Some(0)); + assert_eq!(bounds.upper, Some(1)); + assert!(bounds.contains(0)); + assert!(bounds.contains(1)); + assert!(!bounds.contains(-1)); + assert!(!bounds.contains(2)); + assert_eq!(bounds.num_values(), Some(2)); +} + +#[test] +fn test_varbounds_non_negative() { + let bounds = VarBounds::non_negative(); + assert_eq!(bounds.lower, Some(0)); + assert_eq!(bounds.upper, None); + assert!(bounds.contains(0)); + assert!(bounds.contains(100)); + assert!(!bounds.contains(-1)); + assert_eq!(bounds.num_values(), None); +} + +#[test] +fn test_varbounds_unbounded() { + let bounds = VarBounds::unbounded(); + assert_eq!(bounds.lower, None); + assert_eq!(bounds.upper, None); + assert!(bounds.contains(-1000)); + assert!(bounds.contains(0)); + assert!(bounds.contains(1000)); + assert_eq!(bounds.num_values(), None); +} + +#[test] +fn test_varbounds_bounded() { + let bounds = VarBounds::bounded(-5, 10); + assert_eq!(bounds.lower, Some(-5)); + assert_eq!(bounds.upper, Some(10)); + assert!(bounds.contains(-5)); + assert!(bounds.contains(0)); + assert!(bounds.contains(10)); + assert!(!bounds.contains(-6)); + assert!(!bounds.contains(11)); + assert_eq!(bounds.num_values(), Some(16)); // -5 to 10 inclusive +} + +#[test] +fn test_varbounds_default() { + let bounds = VarBounds::default(); + assert_eq!(bounds.lower, None); + assert_eq!(bounds.upper, None); +} + +#[test] +fn test_varbounds_empty_range() { + let bounds = VarBounds::bounded(5, 3); // Invalid: lo > hi + assert_eq!(bounds.num_values(), Some(0)); +} + +// ============================================================ +// Comparison tests +// ============================================================ + +#[test] +fn test_comparison_le() { + let cmp = Comparison::Le; + assert!(cmp.holds(5.0, 10.0)); + assert!(cmp.holds(10.0, 10.0)); + assert!(!cmp.holds(11.0, 10.0)); +} + +#[test] +fn test_comparison_ge() { + let cmp = Comparison::Ge; + assert!(cmp.holds(10.0, 5.0)); + assert!(cmp.holds(10.0, 10.0)); + assert!(!cmp.holds(4.0, 5.0)); +} + +#[test] +fn test_comparison_eq() { + let cmp = Comparison::Eq; + assert!(cmp.holds(10.0, 10.0)); + assert!(!cmp.holds(10.0, 10.1)); + assert!(!cmp.holds(9.9, 10.0)); + // Test tolerance + assert!(cmp.holds(10.0, 10.0 + 1e-10)); +} + +// ============================================================ +// LinearConstraint tests +// ============================================================ + +#[test] +fn test_linear_constraint_le() { + // x0 + 2*x1 <= 5 + let constraint = LinearConstraint::le(vec![(0, 1.0), (1, 2.0)], 5.0); + assert_eq!(constraint.cmp, Comparison::Le); + assert_eq!(constraint.rhs, 5.0); + + // x0=1, x1=2 => 1 + 4 = 5 <= 5 (satisfied) + assert!(constraint.is_satisfied(&[1, 2])); + // x0=2, x1=2 => 2 + 4 = 6 > 5 (not satisfied) + assert!(!constraint.is_satisfied(&[2, 2])); +} + +#[test] +fn test_linear_constraint_ge() { + 
// x0 + x1 >= 3 + let constraint = LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 3.0); + assert_eq!(constraint.cmp, Comparison::Ge); + + assert!(constraint.is_satisfied(&[2, 2])); // 4 >= 3 + assert!(constraint.is_satisfied(&[1, 2])); // 3 >= 3 + assert!(!constraint.is_satisfied(&[1, 1])); // 2 < 3 +} + +#[test] +fn test_linear_constraint_eq() { + // x0 + x1 == 2 + let constraint = LinearConstraint::eq(vec![(0, 1.0), (1, 1.0)], 2.0); + assert_eq!(constraint.cmp, Comparison::Eq); + + assert!(constraint.is_satisfied(&[1, 1])); // 2 == 2 + assert!(!constraint.is_satisfied(&[1, 2])); // 3 != 2 + assert!(!constraint.is_satisfied(&[0, 1])); // 1 != 2 +} + +#[test] +fn test_linear_constraint_evaluate_lhs() { + let constraint = LinearConstraint::le(vec![(0, 3.0), (2, -1.0)], 10.0); + // 3*x0 - 1*x2 with x=[2, 5, 7] => 3*2 - 1*7 = -1 + assert!((constraint.evaluate_lhs(&[2, 5, 7]) - (-1.0)).abs() < 1e-9); +} + +#[test] +fn test_linear_constraint_variables() { + let constraint = LinearConstraint::le(vec![(0, 1.0), (3, 2.0), (5, -1.0)], 10.0); + assert_eq!(constraint.variables(), vec![0, 3, 5]); +} + +#[test] +fn test_linear_constraint_out_of_bounds() { + // Constraint references variable 5, but values only has 3 elements + let constraint = LinearConstraint::le(vec![(5, 1.0)], 10.0); + // Missing variable defaults to 0, so 0 <= 10 is satisfied + assert!(constraint.is_satisfied(&[1, 2, 3])); +} + +// ============================================================ +// ObjectiveSense tests +// ============================================================ + +#[test] +fn test_objective_sense_from_energy_mode() { + assert_eq!( + ObjectiveSense::from(EnergyMode::LargerSizeIsBetter), + ObjectiveSense::Maximize + ); + assert_eq!( + ObjectiveSense::from(EnergyMode::SmallerSizeIsBetter), + ObjectiveSense::Minimize + ); +} + +#[test] +fn test_energy_mode_from_objective_sense() { + assert_eq!( + EnergyMode::from(ObjectiveSense::Maximize), + EnergyMode::LargerSizeIsBetter + ); + assert_eq!( + EnergyMode::from(ObjectiveSense::Minimize), + EnergyMode::SmallerSizeIsBetter + ); +} + +// ============================================================ +// ILP tests +// ============================================================ + +#[test] +fn test_ilp_new() { + let ilp = ILP::new( + 2, + vec![VarBounds::binary(), VarBounds::binary()], + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 2.0)], + ObjectiveSense::Maximize, + ); + assert_eq!(ilp.num_vars, 2); + assert_eq!(ilp.bounds.len(), 2); + assert_eq!(ilp.constraints.len(), 1); + assert_eq!(ilp.objective.len(), 2); + assert_eq!(ilp.sense, ObjectiveSense::Maximize); +} + +#[test] +#[should_panic(expected = "bounds length must match num_vars")] +fn test_ilp_new_mismatched_bounds() { + ILP::new( + 3, + vec![VarBounds::binary(), VarBounds::binary()], // Only 2 bounds for 3 vars + vec![], + vec![], + ObjectiveSense::Minimize, + ); +} + +#[test] +fn test_ilp_binary() { + let ilp = ILP::binary( + 3, + vec![], + vec![(0, 1.0), (1, 1.0), (2, 1.0)], + ObjectiveSense::Minimize, + ); + assert_eq!(ilp.num_vars, 3); + assert!(ilp.bounds.iter().all(|b| *b == VarBounds::binary())); +} + +#[test] +fn test_ilp_empty() { + let ilp = ILP::empty(); + assert_eq!(ilp.num_vars, 0); + assert!(ilp.bounds.is_empty()); + assert!(ilp.constraints.is_empty()); + assert!(ilp.objective.is_empty()); +} + +#[test] +fn test_ilp_evaluate_objective() { + let ilp = ILP::binary( + 3, + vec![], + vec![(0, 2.0), (1, 3.0), (2, -1.0)], + ObjectiveSense::Maximize, + ); + // 2*1 + 3*1 + 
(-1)*0 = 5 + assert!((ilp.evaluate_objective(&[1, 1, 0]) - 5.0).abs() < 1e-9); + // 2*0 + 3*0 + (-1)*1 = -1 + assert!((ilp.evaluate_objective(&[0, 0, 1]) - (-1.0)).abs() < 1e-9); +} + +#[test] +fn test_ilp_bounds_satisfied() { + let ilp = ILP::new( + 2, + vec![VarBounds::bounded(0, 5), VarBounds::bounded(-2, 2)], + vec![], + vec![], + ObjectiveSense::Minimize, + ); + assert!(ilp.bounds_satisfied(&[0, 0])); + assert!(ilp.bounds_satisfied(&[5, 2])); + assert!(ilp.bounds_satisfied(&[3, -2])); + assert!(!ilp.bounds_satisfied(&[6, 0])); // x0 > 5 + assert!(!ilp.bounds_satisfied(&[0, 3])); // x1 > 2 + assert!(!ilp.bounds_satisfied(&[0])); // Wrong length +} + +#[test] +fn test_ilp_constraints_satisfied() { + let ilp = ILP::binary( + 3, + vec![ + LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0), // x0 + x1 <= 1 + LinearConstraint::ge(vec![(2, 1.0)], 0.0), // x2 >= 0 + ], + vec![], + ObjectiveSense::Minimize, + ); + assert!(ilp.constraints_satisfied(&[0, 0, 1])); + assert!(ilp.constraints_satisfied(&[1, 0, 0])); + assert!(ilp.constraints_satisfied(&[0, 1, 1])); + assert!(!ilp.constraints_satisfied(&[1, 1, 0])); // x0 + x1 = 2 > 1 +} + +#[test] +fn test_ilp_is_feasible() { + let ilp = ILP::binary( + 2, + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + assert!(ilp.is_feasible(&[0, 0])); + assert!(ilp.is_feasible(&[1, 0])); + assert!(ilp.is_feasible(&[0, 1])); + assert!(!ilp.is_feasible(&[1, 1])); // Constraint violated + assert!(!ilp.is_feasible(&[2, 0])); // Bounds violated +} + +// ============================================================ +// Problem trait tests +// ============================================================ + +#[test] +fn test_ilp_num_variables() { + let ilp = ILP::binary(5, vec![], vec![], ObjectiveSense::Minimize); + assert_eq!(ilp.num_variables(), 5); +} + +#[test] +fn test_ilp_num_flavors_binary() { + let ilp = ILP::binary(3, vec![], vec![], ObjectiveSense::Minimize); + assert_eq!(ilp.num_flavors(), 2); +} + +#[test] +fn test_ilp_num_flavors_mixed() { + let ilp = ILP::new( + 3, + vec![ + VarBounds::binary(), + VarBounds::bounded(0, 5), + VarBounds::bounded(-1, 1), + ], + vec![], + vec![], + ObjectiveSense::Minimize, + ); + assert_eq!(ilp.num_flavors(), 6); // Max is 6 (from 0-5) +} + +#[test] +fn test_ilp_num_flavors_unbounded() { + let ilp = ILP::new( + 2, + vec![VarBounds::binary(), VarBounds::unbounded()], + vec![], + vec![], + ObjectiveSense::Minimize, + ); + assert_eq!(ilp.num_flavors(), usize::MAX); +} + +#[test] +fn test_ilp_num_flavors_empty() { + let ilp = ILP::empty(); + assert_eq!(ilp.num_flavors(), 2); // Default when empty +} + +#[test] +fn test_ilp_problem_size() { + let ilp = ILP::binary( + 4, + vec![ + LinearConstraint::le(vec![(0, 1.0)], 1.0), + LinearConstraint::le(vec![(1, 1.0)], 1.0), + ], + vec![], + ObjectiveSense::Minimize, + ); + let size = ilp.problem_size(); + assert_eq!(size.get("num_vars"), Some(4)); + assert_eq!(size.get("num_constraints"), Some(2)); +} + +#[test] +fn test_ilp_energy_mode() { + let max_ilp = ILP::binary(2, vec![], vec![], ObjectiveSense::Maximize); + let min_ilp = ILP::binary(2, vec![], vec![], ObjectiveSense::Minimize); + + assert!(max_ilp.energy_mode().is_maximization()); + assert!(min_ilp.energy_mode().is_minimization()); +} + +#[test] +fn test_ilp_solution_size_valid() { + // Maximize x0 + 2*x1 subject to x0 + x1 <= 1 + let ilp = ILP::binary( + 2, + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 2.0)], + 
ObjectiveSense::Maximize, + ); + + // Config [0, 1] means x0=0, x1=1 => obj = 2, valid + let sol = ilp.solution_size(&[0, 1]); + assert!(sol.is_valid); + assert!((sol.size - 2.0).abs() < 1e-9); + + // Config [1, 0] means x0=1, x1=0 => obj = 1, valid + let sol = ilp.solution_size(&[1, 0]); + assert!(sol.is_valid); + assert!((sol.size - 1.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_size_invalid() { + // x0 + x1 <= 1 + let ilp = ILP::binary( + 2, + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 2.0)], + ObjectiveSense::Maximize, + ); + + // Config [1, 1] means x0=1, x1=1 => obj = 3, but invalid (1+1 > 1) + let sol = ilp.solution_size(&[1, 1]); + assert!(!sol.is_valid); + assert!((sol.size - 3.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_size_with_offset_bounds() { + // Variables with non-zero lower bounds + let ilp = ILP::new( + 2, + vec![VarBounds::bounded(1, 3), VarBounds::bounded(-1, 1)], + vec![], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + // Config [0, 0] maps to x0=1, x1=-1 => obj = 0 + let sol = ilp.solution_size(&[0, 0]); + assert!(sol.is_valid); + assert!((sol.size - 0.0).abs() < 1e-9); + + // Config [2, 2] maps to x0=3, x1=1 => obj = 4 + let sol = ilp.solution_size(&[2, 2]); + assert!(sol.is_valid); + assert!((sol.size - 4.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_brute_force_maximization() { + // Maximize x0 + 2*x1 subject to x0 + x1 <= 1, x0, x1 binary + let ilp = ILP::binary( + 2, + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 2.0)], + ObjectiveSense::Maximize, + ); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&ilp); + + // Optimal: x1=1, x0=0 => objective = 2 + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0, 1]); +} + +#[test] +fn test_ilp_brute_force_minimization() { + // Minimize x0 + x1 subject to x0 + x1 >= 1, x0, x1 binary + let ilp = ILP::binary( + 2, + vec![LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Minimize, + ); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&ilp); + + // Optimal: x0=1,x1=0 or x0=0,x1=1 => objective = 1 + assert_eq!(solutions.len(), 2); + for sol in &solutions { + let size = ilp.solution_size(sol); + assert!(size.is_valid); + assert!((size.size - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_ilp_brute_force_no_feasible() { + // x0 >= 1 AND x0 <= 0 (infeasible) + let ilp = ILP::binary( + 1, + vec![ + LinearConstraint::ge(vec![(0, 1.0)], 1.0), + LinearConstraint::le(vec![(0, 1.0)], 0.0), + ], + vec![(0, 1.0)], + ObjectiveSense::Minimize, + ); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&ilp); + + // No feasible solutions + assert!(solutions.is_empty()); +} + +#[test] +fn test_ilp_unconstrained() { + // Maximize x0 + x1, no constraints, binary vars + let ilp = ILP::binary( + 2, + vec![], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&ilp); + + // Optimal: both = 1 + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 1]); +} + +#[test] +fn test_ilp_equality_constraint() { + // Minimize x0 subject to x0 + x1 == 1, binary vars + let ilp = ILP::binary( + 2, + vec![LinearConstraint::eq(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0)], + ObjectiveSense::Minimize, + ); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&ilp); + + // Optimal: x0=0, x1=1 => objective = 0 + 
assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0, 1]); +} + +#[test] +fn test_ilp_multiple_constraints() { + // Maximize x0 + x1 + x2 subject to: + // x0 + x1 <= 1 + // x1 + x2 <= 1 + // Binary vars + let ilp = ILP::binary( + 3, + vec![ + LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0), + LinearConstraint::le(vec![(1, 1.0), (2, 1.0)], 1.0), + ], + vec![(0, 1.0), (1, 1.0), (2, 1.0)], + ObjectiveSense::Maximize, + ); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&ilp); + + // Optimal: x0=1, x1=0, x2=1 => objective = 2 + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 0, 1]); +} + +#[test] +fn test_ilp_config_to_values() { + let ilp = ILP::new( + 3, + vec![ + VarBounds::bounded(0, 2), // 0,1,2 + VarBounds::bounded(-1, 1), // -1,0,1 + VarBounds::bounded(5, 7), // 5,6,7 + ], + vec![], + vec![], + ObjectiveSense::Minimize, + ); + + // Config [0,0,0] => [0, -1, 5] + assert_eq!(ilp.config_to_values(&[0, 0, 0]), vec![0, -1, 5]); + // Config [2,2,2] => [2, 1, 7] + assert_eq!(ilp.config_to_values(&[2, 2, 2]), vec![2, 1, 7]); + // Config [1,1,1] => [1, 0, 6] + assert_eq!(ilp.config_to_values(&[1, 1, 1]), vec![1, 0, 6]); +} + +#[test] +fn test_ilp_variant() { + let v = ILP::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0], ("graph", "SimpleGraph")); + assert_eq!(v[1], ("weight", "f64")); +} diff --git a/src/tests_unit/models/optimization/qubo.rs b/src/tests_unit/models/optimization/qubo.rs new file mode 100644 index 0000000..78cc1df --- /dev/null +++ b/src/tests_unit/models/optimization/qubo.rs @@ -0,0 +1,136 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_qubo_from_matrix() { + let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); + assert_eq!(problem.num_vars(), 2); + assert_eq!(problem.get(0, 0), Some(&1.0)); + assert_eq!(problem.get(0, 1), Some(&2.0)); + assert_eq!(problem.get(1, 1), Some(&3.0)); +} + +#[test] +fn test_qubo_new() { + let problem = QUBO::new(vec![1.0, 2.0], vec![((0, 1), 3.0)]); + assert_eq!(problem.get(0, 0), Some(&1.0)); + assert_eq!(problem.get(1, 1), Some(&2.0)); + assert_eq!(problem.get(0, 1), Some(&3.0)); +} + +#[test] +fn test_evaluate() { + // Q = [[1, 2], [0, 3]] + // f(x) = x0 + 3*x1 + 2*x0*x1 + let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); + + assert_eq!(problem.evaluate(&[0, 0]), 0.0); + assert_eq!(problem.evaluate(&[1, 0]), 1.0); + assert_eq!(problem.evaluate(&[0, 1]), 3.0); + assert_eq!(problem.evaluate(&[1, 1]), 6.0); // 1 + 3 + 2 = 6 +} + +#[test] +fn test_solution_size() { + let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); + + let sol = problem.solution_size(&[0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0.0); + + let sol = problem.solution_size(&[1, 1]); + assert_eq!(sol.size, 6.0); +} + +#[test] +fn test_brute_force_minimize() { + // Q = [[1, 0], [0, -2]] + // f(x) = x0 - 2*x1 + // Minimum at x = [0, 1] with value -2 + let problem = QUBO::from_matrix(vec![vec![1.0, 0.0], vec![0.0, -2.0]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0, 1]); + assert_eq!(problem.solution_size(&solutions[0]).size, -2.0); +} + +#[test] +fn test_brute_force_with_interaction() { + // Q = [[-1, 2], [0, -1]] + // f(x) = -x0 - x1 + 2*x0*x1 + // x=[0,0] -> 0, x=[1,0] -> -1, x=[0,1] -> -1, x=[1,1] -> 0 + let problem = QUBO::from_matrix(vec![vec![-1.0, 2.0], vec![0.0, -1.0]]); + let solver = BruteForce::new(); + + 
let solutions = solver.find_best(&problem); + // Minimum is -1 at [1,0] or [0,1] + assert_eq!(solutions.len(), 2); + for sol in &solutions { + assert_eq!(problem.solution_size(sol).size, -1.0); + } +} + +#[test] +fn test_energy_mode() { + let problem = QUBO::::from_matrix(vec![vec![1.0]]); + assert!(problem.energy_mode().is_minimization()); +} + +#[test] +fn test_num_variables_flavors() { + let problem = QUBO::::from_matrix(vec![vec![0.0; 5]; 5]); + assert_eq!(problem.num_variables(), 5); + assert_eq!(problem.num_flavors(), 2); +} + +#[test] +fn test_problem_size() { + let problem = QUBO::::from_matrix(vec![vec![0.0; 3]; 3]); + let size = problem.problem_size(); + assert_eq!(size.get("num_vars"), Some(3)); +} + +#[test] +fn test_matrix_access() { + let problem = QUBO::from_matrix(vec![ + vec![1.0, 2.0, 3.0], + vec![0.0, 4.0, 5.0], + vec![0.0, 0.0, 6.0], + ]); + let matrix = problem.matrix(); + assert_eq!(matrix.len(), 3); + assert_eq!(matrix[0], vec![1.0, 2.0, 3.0]); +} + +#[test] +fn test_empty_qubo() { + let problem = QUBO::::from_matrix(vec![]); + assert_eq!(problem.num_vars(), 0); + assert_eq!(problem.evaluate(&[]), 0.0); +} + +#[test] +fn test_single_variable() { + let problem = QUBO::from_matrix(vec![vec![-5.0]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1]); // x=1 gives -5, x=0 gives 0 +} + +#[test] +fn test_qubo_new_reverse_indices() { + // Test the case where (j, i) is provided with i < j + let problem = QUBO::new(vec![1.0, 2.0], vec![((1, 0), 3.0)]); // j > i + assert_eq!(problem.get(0, 1), Some(&3.0)); // Should be stored at (0, 1) +} + +#[test] +fn test_get_out_of_bounds() { + let problem = QUBO::from_matrix(vec![vec![1.0, 2.0], vec![0.0, 3.0]]); + assert_eq!(problem.get(5, 5), None); + assert_eq!(problem.get(0, 5), None); +} diff --git a/src/tests_unit/models/optimization/spin_glass.rs b/src/tests_unit/models/optimization/spin_glass.rs new file mode 100644 index 0000000..ecb70ca --- /dev/null +++ b/src/tests_unit/models/optimization/spin_glass.rs @@ -0,0 +1,193 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_spin_glass_creation() { + let problem = SpinGlass::::new( + 3, + vec![((0, 1), 1.0), ((1, 2), -1.0)], + vec![0.0, 0.0, 0.0], + ); + assert_eq!(problem.num_spins(), 3); + assert_eq!(problem.interactions().len(), 2); + assert_eq!(problem.fields().len(), 3); +} + +#[test] +fn test_spin_glass_without_fields() { + let problem = SpinGlass::::without_fields(3, vec![((0, 1), 1.0)]); + assert_eq!(problem.fields(), &[0.0, 0.0, 0.0]); +} + +#[test] +fn test_config_to_spins() { + assert_eq!( + SpinGlass::::config_to_spins(&[0, 0]), + vec![-1, -1] + ); + assert_eq!( + SpinGlass::::config_to_spins(&[1, 1]), + vec![1, 1] + ); + assert_eq!( + SpinGlass::::config_to_spins(&[0, 1]), + vec![-1, 1] + ); + assert_eq!( + SpinGlass::::config_to_spins(&[1, 0]), + vec![1, -1] + ); +} + +#[test] +fn test_compute_energy() { + // Two spins with J = 1 (ferromagnetic prefers aligned) + let problem = + SpinGlass::::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]); + + // Aligned spins: energy = J * s1 * s2 = 1 * 1 * 1 = 1 or 1 * (-1) * (-1) = 1 + assert_eq!(problem.compute_energy(&[1, 1]), 1.0); + assert_eq!(problem.compute_energy(&[-1, -1]), 1.0); + + // Anti-aligned spins: energy = J * s1 * s2 = 1 * 1 * (-1) = -1 + assert_eq!(problem.compute_energy(&[1, -1]), -1.0); + assert_eq!(problem.compute_energy(&[-1, 1]), -1.0); +} + +#[test] +fn 
test_compute_energy_with_fields() {
+    let problem = SpinGlass::<SimpleGraph, f64>::new(2, vec![], vec![1.0, -1.0]);
+
+    // Energy = h1*s1 + h2*s2 = 1*s1 + (-1)*s2
+    assert_eq!(problem.compute_energy(&[1, 1]), 0.0); // 1 - 1 = 0
+    assert_eq!(problem.compute_energy(&[-1, -1]), 0.0); // -1 + 1 = 0
+    assert_eq!(problem.compute_energy(&[1, -1]), 2.0); // 1 + 1 = 2
+    assert_eq!(problem.compute_energy(&[-1, 1]), -2.0); // -1 - 1 = -2
+}
+
+#[test]
+fn test_solution_size() {
+    let problem =
+        SpinGlass::<SimpleGraph, f64>::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]);
+
+    // config [0,0] -> spins [-1,-1] -> energy = 1
+    let sol = problem.solution_size(&[0, 0]);
+    assert!(sol.is_valid);
+    assert_eq!(sol.size, 1.0);
+
+    // config [0,1] -> spins [-1,1] -> energy = -1
+    let sol = problem.solution_size(&[0, 1]);
+    assert_eq!(sol.size, -1.0);
+}
+
+#[test]
+fn test_brute_force_ferromagnetic() {
+    // Here energy = J*s1*s2 with no minus sign, so J > 0 gives +1 for aligned spins
+    // and -1 for anti-aligned spins; the minimizer should anti-align them.
+    let problem =
+        SpinGlass::<SimpleGraph, f64>::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]);
+    let solver = BruteForce::new();
+
+    let solutions = solver.find_best(&problem);
+    // Minimum energy is -1 (anti-aligned)
+    for sol in &solutions {
+        assert_ne!(sol[0], sol[1]);
+        assert_eq!(problem.solution_size(sol).size, -1.0);
+    }
+}
+
+#[test]
+fn test_brute_force_antiferromagnetic() {
+    // Antiferromagnetic: J < 0, energy = J*s1*s2
+    // J < 0 with aligned spins gives negative energy (good for minimization)
+    let problem =
+        SpinGlass::<SimpleGraph, f64>::new(2, vec![((0, 1), -1.0)], vec![0.0, 0.0]);
+    let solver = BruteForce::new();
+
+    let solutions = solver.find_best(&problem);
+    // Minimum energy is -1 (aligned)
+    for sol in &solutions {
+        assert_eq!(sol[0], sol[1]);
+        assert_eq!(problem.solution_size(sol).size, -1.0);
+    }
+}
+
+#[test]
+fn test_energy_mode() {
+    let problem = SpinGlass::<SimpleGraph, f64>::without_fields(2, vec![]);
+    assert!(problem.energy_mode().is_minimization());
+}
+
+#[test]
+fn test_num_variables_flavors() {
+    let problem = SpinGlass::<SimpleGraph, f64>::without_fields(5, vec![]);
+    assert_eq!(problem.num_variables(), 5);
+    assert_eq!(problem.num_flavors(), 2);
+}
+
+#[test]
+fn test_problem_size() {
+    let problem = SpinGlass::<SimpleGraph, f64>::new(
+        3,
+        vec![((0, 1), 1.0), ((1, 2), 1.0)],
+        vec![0.0, 0.0, 0.0],
+    );
+    let size = problem.problem_size();
+    assert_eq!(size.get("num_spins"), Some(3));
+    assert_eq!(size.get("num_interactions"), Some(2));
+}
+
+#[test]
+fn test_triangle_frustration() {
+    // Triangle where every coupling is J = +1 (antiferromagnetic under this convention) - a frustrated system
+    let problem = SpinGlass::<SimpleGraph, f64>::new(
+        3,
+        vec![((0, 1), 1.0), ((1, 2), 1.0), ((0, 2), 1.0)],
+        vec![0.0, 0.0, 0.0],
+    );
+    let solver = BruteForce::new();
+
+    let solutions = solver.find_best(&problem);
+    // Best we can do is satisfy 2 out of 3 interactions
+    // Energy = -1 - 1 + 1 = -1 (one frustrated)
+    for sol in &solutions {
+        assert_eq!(problem.solution_size(sol).size, -1.0);
+    }
+}
+
+#[test]
+fn test_from_graph() {
+    let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]);
+    let problem =
+        SpinGlass::<SimpleGraph, f64>::from_graph(graph, vec![1.0, 2.0], vec![0.0, 0.0, 0.0]);
+    assert_eq!(problem.num_spins(), 3);
+    assert_eq!(problem.couplings(), &[1.0, 2.0]);
+    assert_eq!(problem.fields(), &[0.0, 0.0, 0.0]);
+}
+
+#[test]
+fn test_from_graph_without_fields() {
+    let graph = SimpleGraph::new(2, vec![(0, 1)]);
+    let problem = SpinGlass::<SimpleGraph, f64>::from_graph_without_fields(graph, vec![1.5]);
+    assert_eq!(problem.num_spins(), 2);
+    assert_eq!(problem.couplings(), &[1.5]);
+
assert_eq!(problem.fields(), &[0.0, 0.0]); +} + +#[test] +fn test_graph_accessor() { + let problem = + SpinGlass::::new(3, vec![((0, 1), 1.0)], vec![0.0, 0.0, 0.0]); + let graph = problem.graph(); + assert_eq!(graph.num_vertices(), 3); + assert_eq!(graph.num_edges(), 1); +} + +#[test] +fn test_variant() { + let variant = SpinGlass::::variant(); + assert_eq!(variant.len(), 2); + assert_eq!(variant[0], ("graph", "SimpleGraph")); + assert_eq!(variant[1], ("weight", "f64")); +} diff --git a/src/tests_unit/models/satisfiability/ksat.rs b/src/tests_unit/models/satisfiability/ksat.rs new file mode 100644 index 0000000..d24c6aa --- /dev/null +++ b/src/tests_unit/models/satisfiability/ksat.rs @@ -0,0 +1,168 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_3sat_creation() { + let problem = KSatisfiability::<3, i32>::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + assert_eq!(problem.num_vars(), 3); + assert_eq!(problem.num_clauses(), 2); +} + +#[test] +#[should_panic(expected = "Clause 0 has 2 literals, expected 3")] +fn test_3sat_wrong_clause_size() { + let _ = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2])]); +} + +#[test] +fn test_2sat_creation() { + let problem = KSatisfiability::<2, i32>::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], + ); + assert_eq!(problem.num_vars(), 2); + assert_eq!(problem.num_clauses(), 2); +} + +#[test] +fn test_3sat_is_satisfying() { + // (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR NOT x3) + let problem = KSatisfiability::<3, i32>::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ); + + // x1=T, x2=F, x3=F satisfies both + assert!(problem.is_satisfying(&[true, false, false])); + // x1=T, x2=T, x3=T fails second clause + assert!(!problem.is_satisfying(&[true, true, true])); +} + +#[test] +fn test_3sat_brute_force() { + let problem = KSatisfiability::<3, i32>::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + let solver = BruteForce::new(); + let solutions = solver.find_best(&problem); + + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_ksat_problem_size() { + let problem = KSatisfiability::<3, i32>::new(4, vec![CNFClause::new(vec![1, 2, 3])]); + let size = problem.problem_size(); + assert_eq!(size.get("k"), Some(3)); + assert_eq!(size.get("num_vars"), Some(4)); + assert_eq!(size.get("num_clauses"), Some(1)); +} + +#[test] +fn test_ksat_with_weights() { + let problem = KSatisfiability::<3>::with_weights( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + vec![5, 10], + ); + assert_eq!(problem.weights(), vec![5, 10]); + assert!(problem.is_weighted()); +} + +#[test] +fn test_ksat_allow_less() { + // This should work - clause has 2 literals which is <= 3 + let problem = + KSatisfiability::<3, i32>::new_allow_less(2, vec![CNFClause::new(vec![1, 2])]); + assert_eq!(problem.num_clauses(), 1); +} + +#[test] +#[should_panic(expected = "Clause 0 has 4 literals, expected at most 3")] +fn test_ksat_allow_less_too_many() { + let _ = + KSatisfiability::<3, i32>::new_allow_less(4, vec![CNFClause::new(vec![1, 2, 3, 4])]); +} + +#[test] +fn test_ksat_constraints() { + let problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + let constraints = problem.constraints(); + assert_eq!(constraints.len(), 1); +} + +#[test] +fn 
test_ksat_objectives() { + let problem = + KSatisfiability::<3>::with_weights(3, vec![CNFClause::new(vec![1, 2, 3])], vec![5]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 1); +} + +#[test] +fn test_ksat_energy_mode() { + let problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_ksat_get_clause() { + let problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + assert_eq!(problem.get_clause(0), Some(&CNFClause::new(vec![1, 2, 3]))); + assert_eq!(problem.get_clause(1), None); +} + +#[test] +fn test_ksat_count_satisfied() { + let problem = KSatisfiability::<3, i32>::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ); + // x1=T, x2=T, x3=T: first satisfied, second not + assert_eq!(problem.count_satisfied(&[true, true, true]), 1); + // x1=T, x2=F, x3=F: both satisfied + assert_eq!(problem.count_satisfied(&[true, false, false]), 2); +} + +#[test] +fn test_ksat_set_weights() { + let mut problem = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + assert!(!problem.is_weighted()); + problem.set_weights(vec![10]); + assert_eq!(problem.weights(), vec![10]); +} + +#[test] +fn test_ksat_is_satisfied_csp() { + let problem = KSatisfiability::<3, i32>::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ); + assert!(problem.is_satisfied(&[1, 0, 0])); // x1=T, x2=F, x3=F + assert!(!problem.is_satisfied(&[1, 1, 1])); // x1=T, x2=T, x3=T +} diff --git a/src/tests_unit/models/satisfiability/sat.rs b/src/tests_unit/models/satisfiability/sat.rs new file mode 100644 index 0000000..aa094f7 --- /dev/null +++ b/src/tests_unit/models/satisfiability/sat.rs @@ -0,0 +1,310 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_cnf_clause_creation() { + let clause = CNFClause::new(vec![1, -2, 3]); + assert_eq!(clause.len(), 3); + assert!(!clause.is_empty()); + assert_eq!(clause.variables(), vec![0, 1, 2]); +} + +#[test] +fn test_cnf_clause_satisfaction() { + let clause = CNFClause::new(vec![1, 2]); // x1 OR x2 + + assert!(clause.is_satisfied(&[true, false])); // x1 = T + assert!(clause.is_satisfied(&[false, true])); // x2 = T + assert!(clause.is_satisfied(&[true, true])); // Both T + assert!(!clause.is_satisfied(&[false, false])); // Both F +} + +#[test] +fn test_cnf_clause_negation() { + let clause = CNFClause::new(vec![-1, 2]); // NOT x1 OR x2 + + assert!(clause.is_satisfied(&[false, false])); // NOT x1 = T + assert!(clause.is_satisfied(&[false, true])); // Both true + assert!(clause.is_satisfied(&[true, true])); // x2 = T + assert!(!clause.is_satisfied(&[true, false])); // Both false +} + +#[test] +fn test_sat_creation() { + let problem = Satisfiability::::new( + 3, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], + ); + assert_eq!(problem.num_vars(), 3); + assert_eq!(problem.num_clauses(), 2); + assert_eq!(problem.num_variables(), 3); +} + +#[test] +fn test_sat_with_weights() { + let problem = Satisfiability::with_weights( + 2, + vec![CNFClause::new(vec![1]), CNFClause::new(vec![2])], + vec![5, 10], + ); + assert_eq!(problem.weights(), vec![5, 10]); + assert!(problem.is_weighted()); +} + +#[test] +fn test_is_satisfying() { + // (x1 OR x2) AND (NOT x1 OR NOT x2) + let problem = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], + ); + + 
assert!(problem.is_satisfying(&[true, false])); // Satisfies both + assert!(problem.is_satisfying(&[false, true])); // Satisfies both + assert!(!problem.is_satisfying(&[true, true])); // Fails second clause + assert!(!problem.is_satisfying(&[false, false])); // Fails first clause +} + +#[test] +fn test_count_satisfied() { + let problem = Satisfiability::::new( + 2, + vec![ + CNFClause::new(vec![1]), + CNFClause::new(vec![2]), + CNFClause::new(vec![-1, -2]), + ], + ); + + assert_eq!(problem.count_satisfied(&[true, true]), 2); // x1, x2 satisfied + assert_eq!(problem.count_satisfied(&[false, false]), 1); // Only last + assert_eq!(problem.count_satisfied(&[true, false]), 2); // x1 and last +} + +#[test] +fn test_solution_size() { + let problem = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], + ); + + let sol = problem.solution_size(&[1, 0]); // true, false + assert!(sol.is_valid); + assert_eq!(sol.size, 2); // Both clauses satisfied + + let sol = problem.solution_size(&[1, 1]); // true, true + assert!(!sol.is_valid); + assert_eq!(sol.size, 1); // Only first clause satisfied +} + +#[test] +fn test_brute_force_satisfiable() { + // (x1) AND (x2) AND (NOT x1 OR NOT x2) - UNSAT + let problem = Satisfiability::::new( + 2, + vec![ + CNFClause::new(vec![1]), + CNFClause::new(vec![2]), + CNFClause::new(vec![-1, -2]), + ], + ); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // This is unsatisfiable, so no valid solutions + // BruteForce will return configs with max satisfied clauses + for sol in &solutions { + // Best we can do is satisfy 2 out of 3 clauses + assert!(!problem.solution_size(sol).is_valid); + assert_eq!(problem.solution_size(sol).size, 2); + } +} + +#[test] +fn test_brute_force_simple_sat() { + // (x1 OR x2) - many solutions + let problem = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // 3 satisfying assignments + assert_eq!(solutions.len(), 3); + for sol in &solutions { + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_max_sat() { + // Weighted: clause 1 has weight 10, clause 2 has weight 1 + // They conflict, so we prefer satisfying clause 1 + let problem = Satisfiability::with_weights( + 1, + vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])], + vec![10, 1], + ); + let solver = BruteForce::new().valid_only(false); // Allow invalid (partial) solutions + + let solutions = solver.find_best(&problem); + // Should select x1 = true (weight 10) + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1]); +} + +#[test] +fn test_is_satisfying_assignment() { + let clauses = vec![vec![1, 2], vec![-1, 3]]; + + assert!(is_satisfying_assignment(3, &clauses, &[true, false, true])); + assert!(is_satisfying_assignment(3, &clauses, &[false, true, false])); + assert!(!is_satisfying_assignment( + 3, + &clauses, + &[true, false, false] + )); +} + +#[test] +fn test_constraints() { + let problem = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], + ); + let constraints = problem.constraints(); + assert_eq!(constraints.len(), 2); +} + +#[test] +fn test_energy_mode() { + let problem = Satisfiability::::new(2, vec![CNFClause::new(vec![1])]); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_empty_formula() { + let problem = Satisfiability::::new(2, vec![]); + let sol = problem.solution_size(&[0, 0]); + assert!(sol.is_valid); // 
Empty formula is trivially satisfied +} + +#[test] +fn test_single_literal_clauses() { + // Unit propagation scenario: x1 AND NOT x2 + let problem = + Satisfiability::::new(2, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-2])]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 0]); // x1=T, x2=F +} + +#[test] +fn test_get_clause() { + let problem = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], + ); + assert_eq!(problem.get_clause(0), Some(&CNFClause::new(vec![1, 2]))); + assert_eq!(problem.get_clause(2), None); +} + +#[test] +fn test_three_sat_example() { + // (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR x3) AND (x1 OR NOT x2 OR NOT x3) + let problem = Satisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + CNFClause::new(vec![1, -2, -3]), + ], + ); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_is_satisfied_csp() { + let problem = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], + ); + + assert!(problem.is_satisfied(&[1, 0])); + assert!(problem.is_satisfied(&[0, 1])); + assert!(!problem.is_satisfied(&[1, 1])); + assert!(!problem.is_satisfied(&[0, 0])); +} + +#[test] +fn test_objectives() { + let problem = Satisfiability::with_weights(2, vec![CNFClause::new(vec![1, 2])], vec![5]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 1); +} + +#[test] +fn test_set_weights() { + let mut problem = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], + ); + assert!(!problem.is_weighted()); // Initially uniform + problem.set_weights(vec![1, 2]); + assert!(problem.is_weighted()); + assert_eq!(problem.weights(), vec![1, 2]); +} + +#[test] +fn test_is_weighted_empty() { + let problem = Satisfiability::::new(2, vec![]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_is_satisfying_assignment_defaults() { + // When assignment is shorter than needed, missing vars default to false + let clauses = vec![vec![1, 2]]; + // assignment is [true], var 0 = true satisfies literal 1 + assert!(is_satisfying_assignment(3, &clauses, &[true])); + // assignment is [false], var 0 = false, var 1 defaults to false + // Neither literal 1 (var0=false) nor literal 2 (var1=false) satisfied + assert!(!is_satisfying_assignment(3, &clauses, &[false])); +} + +#[test] +fn test_problem_size() { + let problem = Satisfiability::::new( + 3, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], + ); + let size = problem.problem_size(); + assert_eq!(size.get("num_vars"), Some(3)); + assert_eq!(size.get("num_clauses"), Some(2)); +} + +#[test] +fn test_num_variables_flavors() { + let problem = Satisfiability::::new(5, vec![CNFClause::new(vec![1])]); + assert_eq!(problem.num_variables(), 5); + assert_eq!(problem.num_flavors(), 2); +} + +#[test] +fn test_clause_variables() { + let clause = CNFClause::new(vec![1, -2, 3]); + let vars = clause.variables(); + assert_eq!(vars, vec![0, 1, 2]); // 0-indexed +} + +#[test] +fn test_clause_debug() { + let clause = CNFClause::new(vec![1, -2, 3]); + let debug = format!("{:?}", clause); + assert!(debug.contains("CNFClause")); +} diff --git a/src/tests_unit/models/set/set_covering.rs b/src/tests_unit/models/set/set_covering.rs new file mode 100644 index 
0000000..0313693 --- /dev/null +++ b/src/tests_unit/models/set/set_covering.rs @@ -0,0 +1,196 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_set_covering_creation() { + let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + assert_eq!(problem.universe_size(), 4); + assert_eq!(problem.num_sets(), 3); + assert_eq!(problem.num_variables(), 3); +} + +#[test] +fn test_set_covering_with_weights() { + let problem = SetCovering::with_weights(3, vec![vec![0, 1], vec![1, 2]], vec![5, 10]); + assert_eq!(problem.weights(), vec![5, 10]); + assert!(problem.is_weighted()); +} + +#[test] +fn test_covered_elements() { + let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + + let covered = problem.covered_elements(&[1, 0, 0]); + assert!(covered.contains(&0)); + assert!(covered.contains(&1)); + assert!(!covered.contains(&2)); + + let covered = problem.covered_elements(&[1, 0, 1]); + assert!(covered.contains(&0)); + assert!(covered.contains(&1)); + assert!(covered.contains(&2)); + assert!(covered.contains(&3)); +} + +#[test] +fn test_solution_size_valid() { + let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + + // Select first and third sets: covers {0,1} ∪ {2,3} = {0,1,2,3} + let sol = problem.solution_size(&[1, 0, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 2); + + // Select all sets + let sol = problem.solution_size(&[1, 1, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 3); +} + +#[test] +fn test_solution_size_invalid() { + let problem = SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + + // Select only first set: missing 2, 3 + let sol = problem.solution_size(&[1, 0, 0]); + assert!(!sol.is_valid); + + // Select none + let sol = problem.solution_size(&[0, 0, 0]); + assert!(!sol.is_valid); +} + +#[test] +fn test_brute_force_simple() { + // Universe {0,1,2}, sets: {0,1}, {1,2}, {0,2} + // Minimum cover: any 2 sets work + let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2], vec![0, 2]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 2); + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_brute_force_weighted() { + // Prefer lighter sets + let problem = + SetCovering::with_weights(3, vec![vec![0, 1, 2], vec![0, 1], vec![2]], vec![10, 3, 3]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Should select sets 1 and 2 (total 6) instead of set 0 (total 10) + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0, 1, 1]); +} + +#[test] +fn test_is_set_cover_function() { + let sets = vec![vec![0, 1], vec![1, 2], vec![2, 3]]; + + assert!(is_set_cover(4, &sets, &[true, false, true])); + assert!(is_set_cover(4, &sets, &[true, true, true])); + assert!(!is_set_cover(4, &sets, &[true, false, false])); + assert!(!is_set_cover(4, &sets, &[false, false, false])); +} + +#[test] +fn test_get_set() { + let problem = SetCovering::::new(4, vec![vec![0, 1], vec![2, 3]]); + assert_eq!(problem.get_set(0), Some(&vec![0, 1])); + assert_eq!(problem.get_set(1), Some(&vec![2, 3])); + assert_eq!(problem.get_set(2), None); +} + +#[test] +fn test_energy_mode() { + let problem = SetCovering::::new(2, vec![vec![0, 1]]); + assert!(problem.energy_mode().is_minimization()); +} + +#[test] +fn test_constraints() { + let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); + let constraints = problem.constraints(); 
+ // One constraint per element + assert_eq!(constraints.len(), 3); +} + +#[test] +fn test_single_set_covers_all() { + let problem = SetCovering::::new(3, vec![vec![0, 1, 2], vec![0], vec![1], vec![2]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // First set alone covers everything + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 0, 0, 0]); +} + +#[test] +fn test_overlapping_sets() { + // All sets overlap on element 1 + let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2], vec![1]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Minimum is selecting first two sets + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 2); + } +} + +#[test] +fn test_is_satisfied() { + let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); + + assert!(problem.is_satisfied(&[1, 1, 0])); // Note: 3 vars needed + assert!(!problem.is_satisfied(&[1, 0])); +} + +#[test] +fn test_empty_universe() { + let problem = SetCovering::::new(0, vec![]); + let sol = problem.solution_size(&[]); + assert!(sol.is_valid); // Empty universe is trivially covered + assert_eq!(sol.size, 0); +} + +#[test] +fn test_objectives() { + let problem = SetCovering::with_weights(3, vec![vec![0, 1], vec![1, 2]], vec![5, 10]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 2); +} + +#[test] +fn test_set_weights() { + let mut problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); + assert!(!problem.is_weighted()); // Initially uniform + problem.set_weights(vec![1, 2]); + assert!(problem.is_weighted()); + assert_eq!(problem.weights(), vec![1, 2]); +} + +#[test] +fn test_is_weighted_empty() { + let problem = SetCovering::::new(0, vec![]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_is_set_cover_wrong_len() { + let sets = vec![vec![0, 1], vec![1, 2]]; + assert!(!is_set_cover(3, &sets, &[true])); // Wrong length +} + +#[test] +fn test_problem_size() { + let problem = SetCovering::::new(5, vec![vec![0, 1], vec![1, 2], vec![3, 4]]); + let size = problem.problem_size(); + assert_eq!(size.get("universe_size"), Some(5)); + assert_eq!(size.get("num_sets"), Some(3)); +} diff --git a/src/tests_unit/models/set/set_packing.rs b/src/tests_unit/models/set/set_packing.rs new file mode 100644 index 0000000..416b8b1 --- /dev/null +++ b/src/tests_unit/models/set/set_packing.rs @@ -0,0 +1,220 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_set_packing_creation() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); + assert_eq!(problem.num_sets(), 3); + assert_eq!(problem.num_variables(), 3); +} + +#[test] +fn test_set_packing_with_weights() { + let problem = SetPacking::with_weights(vec![vec![0, 1], vec![2, 3]], vec![5, 10]); + assert_eq!(problem.weights(), vec![5, 10]); + assert!(problem.is_weighted()); +} + +#[test] +fn test_sets_overlap() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); + + assert!(problem.sets_overlap(0, 1)); // Share element 1 + assert!(!problem.sets_overlap(0, 2)); // No overlap + assert!(!problem.sets_overlap(1, 2)); // No overlap +} + +#[test] +fn test_overlapping_pairs() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + + let pairs = problem.overlapping_pairs(); + assert_eq!(pairs.len(), 2); + assert!(pairs.contains(&(0, 1))); + assert!(pairs.contains(&(1, 2))); +} + +#[test] +fn test_solution_size_valid() { + let problem = 
SetPacking::::new(vec![vec![0, 1], vec![2, 3], vec![4, 5]]); + + // All disjoint, can select all + let sol = problem.solution_size(&[1, 1, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 3); + + // Select none + let sol = problem.solution_size(&[0, 0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); +} + +#[test] +fn test_solution_size_invalid() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); + + // Sets 0 and 1 overlap + let sol = problem.solution_size(&[1, 1, 0]); + assert!(!sol.is_valid); +} + +#[test] +fn test_brute_force_chain() { + // Chain: {0,1}, {1,2}, {2,3} - can select at most 2 non-adjacent sets + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Max is 2: select {0,1} and {2,3} + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 2); + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_brute_force_weighted() { + // Weighted: single heavy set vs multiple light sets + let problem = SetPacking::with_weights( + vec![vec![0, 1, 2, 3], vec![0, 1], vec![2, 3]], + vec![5, 3, 3], + ); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Should select sets 1 and 2 (total 6) over set 0 (total 5) + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0, 1, 1]); +} + +#[test] +fn test_is_set_packing_function() { + let sets = vec![vec![0, 1], vec![1, 2], vec![3, 4]]; + + assert!(is_set_packing(&sets, &[true, false, true])); // Disjoint + assert!(is_set_packing(&sets, &[false, true, true])); // Disjoint + assert!(!is_set_packing(&sets, &[true, true, false])); // Overlap on 1 + assert!(is_set_packing(&sets, &[false, false, false])); // Empty is valid +} + +#[test] +fn test_constraints() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); + let constraints = problem.constraints(); + // Only one overlapping pair + assert_eq!(constraints.len(), 1); +} + +#[test] +fn test_energy_mode() { + let problem = SetPacking::::new(vec![vec![0, 1]]); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_disjoint_sets() { + let problem = SetPacking::::new(vec![vec![0], vec![1], vec![2], vec![3]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // All sets are disjoint, so select all + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![1, 1, 1, 1]); +} + +#[test] +fn test_all_overlapping() { + // All sets share element 0 + let problem = SetPacking::::new(vec![vec![0, 1], vec![0, 2], vec![0, 3]]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Can only select one set + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 1); + } +} + +#[test] +fn test_is_satisfied() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); + + assert!(problem.is_satisfied(&[1, 0, 1])); // Disjoint selection + assert!(problem.is_satisfied(&[0, 1, 1])); // Disjoint selection + assert!(!problem.is_satisfied(&[1, 1, 0])); // Overlapping selection +} + +#[test] +fn test_empty_sets() { + let problem = SetPacking::::new(vec![]); + let sol = problem.solution_size(&[]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); +} + +#[test] +fn test_get_set() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![2, 3]]); + assert_eq!(problem.get_set(0), Some(&vec![0, 1])); + assert_eq!(problem.get_set(1), Some(&vec![2, 3])); + 
assert_eq!(problem.get_set(2), None); +} + +#[test] +fn test_relationship_to_independent_set() { + // SetPacking on sets is equivalent to IndependentSet on the intersection graph + use crate::models::graph::IndependentSet; + use crate::topology::SimpleGraph; + + let sets = vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![3, 4]]; + let sp_problem = SetPacking::::new(sets.clone()); + + // Build intersection graph + let edges = sp_problem.overlapping_pairs(); + let is_problem = IndependentSet::::new(sets.len(), edges); + + let solver = BruteForce::new(); + + let sp_solutions = solver.find_best(&sp_problem); + let is_solutions = solver.find_best(&is_problem); + + // Should have same optimal value + let sp_size: usize = sp_solutions[0].iter().sum(); + let is_size: usize = is_solutions[0].iter().sum(); + assert_eq!(sp_size, is_size); +} + +#[test] +fn test_objectives() { + let problem = SetPacking::with_weights(vec![vec![0, 1], vec![1, 2]], vec![5, 10]); + let objectives = problem.objectives(); + assert_eq!(objectives.len(), 2); +} + +#[test] +fn test_set_weights() { + let mut problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2]]); + assert!(!problem.is_weighted()); // Initially uniform + problem.set_weights(vec![1, 2]); + assert!(problem.is_weighted()); + assert_eq!(problem.weights(), vec![1, 2]); +} + +#[test] +fn test_is_weighted_empty() { + let problem = SetPacking::::new(vec![]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_is_set_packing_wrong_len() { + let sets = vec![vec![0, 1], vec![1, 2]]; + assert!(!is_set_packing(&sets, &[true])); // Wrong length +} + +#[test] +fn test_problem_size() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![3, 4]]); + let size = problem.problem_size(); + assert_eq!(size.get("num_sets"), Some(3)); +} diff --git a/src/tests_unit/models/specialized/biclique_cover.rs b/src/tests_unit/models/specialized/biclique_cover.rs new file mode 100644 index 0000000..8f68913 --- /dev/null +++ b/src/tests_unit/models/specialized/biclique_cover.rs @@ -0,0 +1,151 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_biclique_cover_creation() { + let problem = BicliqueCover::new(2, 2, vec![(0, 2), (0, 3), (1, 2)], 2); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); + assert_eq!(problem.k(), 2); + assert_eq!(problem.num_variables(), 8); // 4 vertices * 2 bicliques +} + +#[test] +fn test_from_matrix() { + // Matrix: + // [[1, 1], + // [1, 0]] + // Edges: (0,2), (0,3), (1,2) + let matrix = vec![vec![1, 1], vec![1, 0]]; + let problem = BicliqueCover::from_matrix(&matrix, 2); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); +} + +#[test] +fn test_get_biclique_memberships() { + let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); + // Config: vertex 0 in biclique 0, vertex 2 in biclique 0 + // Variables: [v0_b0, v1_b0, v2_b0, v3_b0] + let config = vec![1, 0, 1, 0]; + let (left, right) = problem.get_biclique_memberships(&config); + assert!(left[0].contains(&0)); + assert!(!left[0].contains(&1)); + assert!(right[0].contains(&2)); + assert!(!right[0].contains(&3)); +} + +#[test] +fn test_is_edge_covered() { + let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); + // Put vertex 0 and 2 in biclique 0 + let config = vec![1, 0, 1, 0]; + assert!(problem.is_edge_covered(0, 2, &config)); + + // Don't put vertex 2 in biclique + let config = vec![1, 0, 0, 0]; + assert!(!problem.is_edge_covered(0, 2, &config)); +} + +#[test] +fn test_is_valid_cover() { + let problem = 
BicliqueCover::new(2, 2, vec![(0, 2), (0, 3)], 1); + // Put 0, 2, 3 in biclique 0 -> covers both edges + let config = vec![1, 0, 1, 1]; + assert!(problem.is_valid_cover(&config)); + + // Only put 0, 2 -> doesn't cover (0,3) + let config = vec![1, 0, 1, 0]; + assert!(!problem.is_valid_cover(&config)); +} + +#[test] +fn test_solution_size() { + let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); + + // Valid cover with size 2 + let sol = problem.solution_size(&[1, 0, 1, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 2); + + // Invalid cover + let sol = problem.solution_size(&[1, 0, 0, 0]); + assert!(!sol.is_valid); + assert_eq!(sol.size, 1); +} + +#[test] +fn test_brute_force_simple() { + // Single edge (0, 2) with k=1 + let problem = BicliqueCover::new(2, 2, vec![(0, 2)], 1); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + assert!(problem.is_valid_cover(sol)); + // Minimum size is 2 (one left, one right vertex) + assert_eq!(problem.total_biclique_size(sol), 2); + } +} + +#[test] +fn test_brute_force_two_bicliques() { + // Edges that need 2 bicliques to cover efficiently + // (0,2), (1,3) - these don't share vertices + let problem = BicliqueCover::new(2, 2, vec![(0, 2), (1, 3)], 2); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + assert!(problem.is_valid_cover(sol)); + } +} + +#[test] +fn test_count_covered_edges() { + let problem = BicliqueCover::new(2, 2, vec![(0, 2), (0, 3), (1, 2)], 1); + // Cover only (0,2): put 0 and 2 in biclique + let config = vec![1, 0, 1, 0]; + assert_eq!(problem.count_covered_edges(&config), 1); + + // Cover (0,2) and (0,3): put 0, 2, 3 in biclique + let config = vec![1, 0, 1, 1]; + assert_eq!(problem.count_covered_edges(&config), 2); +} + +#[test] +fn test_is_biclique_cover_function() { + let edges = vec![(0, 2), (1, 3)]; + let left = vec![vec![0].into_iter().collect(), vec![1].into_iter().collect()]; + let right = vec![vec![2].into_iter().collect(), vec![3].into_iter().collect()]; + assert!(is_biclique_cover(&edges, &left, &right)); + + // Missing coverage + let left = vec![vec![0].into_iter().collect()]; + let right = vec![vec![2].into_iter().collect()]; + assert!(!is_biclique_cover(&edges, &left, &right)); +} + +#[test] +fn test_energy_mode() { + let problem = BicliqueCover::new(1, 1, vec![(0, 1)], 1); + assert!(problem.energy_mode().is_minimization()); +} + +#[test] +fn test_problem_size() { + let problem = BicliqueCover::new(3, 4, vec![(0, 3), (1, 4)], 2); + let size = problem.problem_size(); + assert_eq!(size.get("left_size"), Some(3)); + assert_eq!(size.get("right_size"), Some(4)); + assert_eq!(size.get("num_edges"), Some(2)); + assert_eq!(size.get("k"), Some(2)); +} + +#[test] +fn test_empty_edges() { + let problem = BicliqueCover::new(2, 2, vec![], 1); + let sol = problem.solution_size(&[0, 0, 0, 0]); + assert!(sol.is_valid); // No edges to cover + assert_eq!(sol.size, 0); +} diff --git a/src/tests_unit/models/specialized/bmf.rs b/src/tests_unit/models/specialized/bmf.rs new file mode 100644 index 0000000..5ab53db --- /dev/null +++ b/src/tests_unit/models/specialized/bmf.rs @@ -0,0 +1,184 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_bmf_creation() { + let matrix = vec![vec![true, false], vec![false, true]]; + let problem = BMF::new(matrix, 2); + assert_eq!(problem.rows(), 2); + assert_eq!(problem.cols(), 2); + assert_eq!(problem.rank(), 2); + assert_eq!(problem.num_variables(), 8); // 
2*2 + 2*2 +} + +#[test] +fn test_extract_factors() { + let matrix = vec![vec![true]]; + let problem = BMF::new(matrix, 1); + // Config: [b00, c00] = [1, 1] + let (b, c) = problem.extract_factors(&[1, 1]); + assert_eq!(b, vec![vec![true]]); + assert_eq!(c, vec![vec![true]]); +} + +#[test] +fn test_extract_factors_larger() { + // 2x2 matrix with rank 1 + let matrix = vec![vec![true, true], vec![true, true]]; + let problem = BMF::new(matrix, 1); + // B: 2x1, C: 1x2 + // Config: [b00, b10, c00, c01] = [1, 1, 1, 1] + let (b, c) = problem.extract_factors(&[1, 1, 1, 1]); + assert_eq!(b, vec![vec![true], vec![true]]); + assert_eq!(c, vec![vec![true, true]]); +} + +#[test] +fn test_boolean_product() { + // B = [[1], [1]], C = [[1, 1]] + // B ⊙ C = [[1,1], [1,1]] + let b = vec![vec![true], vec![true]]; + let c = vec![vec![true, true]]; + let product = BMF::boolean_product(&b, &c); + assert_eq!(product, vec![vec![true, true], vec![true, true]]); +} + +#[test] +fn test_boolean_product_rank2() { + // B = [[1,0], [0,1]], C = [[1,0], [0,1]] + // B ⊙ C = [[1,0], [0,1]] (identity) + let b = vec![vec![true, false], vec![false, true]]; + let c = vec![vec![true, false], vec![false, true]]; + let product = BMF::boolean_product(&b, &c); + assert_eq!(product, vec![vec![true, false], vec![false, true]]); +} + +#[test] +fn test_hamming_distance() { + // Target: [[1,0], [0,1]] + let matrix = vec![vec![true, false], vec![false, true]]; + let problem = BMF::new(matrix, 2); + + // B = [[1,0], [0,1]], C = [[1,0], [0,1]] -> exact match + // Config: [1,0,0,1, 1,0,0,1] + let config = vec![1, 0, 0, 1, 1, 0, 0, 1]; + assert_eq!(problem.hamming_distance(&config), 0); + + // All zeros -> product is all zeros, distance = 2 + let config = vec![0, 0, 0, 0, 0, 0, 0, 0]; + assert_eq!(problem.hamming_distance(&config), 2); +} + +#[test] +fn test_solution_size() { + let matrix = vec![vec![true, false], vec![false, true]]; + let problem = BMF::new(matrix, 2); + + // Exact factorization + let config = vec![1, 0, 0, 1, 1, 0, 0, 1]; + let sol = problem.solution_size(&config); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); + + // Non-exact + let config = vec![0, 0, 0, 0, 0, 0, 0, 0]; + let sol = problem.solution_size(&config); + assert!(!sol.is_valid); + assert_eq!(sol.size, 2); +} + +#[test] +fn test_brute_force_ones() { + // All ones matrix can be factored with rank 1 + let matrix = vec![vec![true, true], vec![true, true]]; + let problem = BMF::new(matrix, 1); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + let sol_size = problem.solution_size(sol); + assert_eq!(sol_size.size, 0); + assert!(sol_size.is_valid); + } +} + +#[test] +fn test_brute_force_identity() { + // Identity matrix needs rank 2 + let matrix = vec![vec![true, false], vec![false, true]]; + let problem = BMF::new(matrix, 2); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Should find exact factorization + for sol in &solutions { + assert!(problem.is_exact(sol)); + } +} + +#[test] +fn test_brute_force_insufficient_rank() { + // Identity matrix with rank 1 cannot be exact + let matrix = vec![vec![true, false], vec![false, true]]; + let problem = BMF::new(matrix, 1); + let solver = BruteForce::new().valid_only(false); + + let solutions = solver.find_best(&problem); + // Best approximation has distance > 0 + let best_distance = problem.hamming_distance(&solutions[0]); + // With rank 1, best we can do is distance 1 (all ones or all zeros except one) + assert!(best_distance 
>= 1); +} + +#[test] +fn test_boolean_matrix_product_function() { + let b = vec![vec![true], vec![true]]; + let c = vec![vec![true, true]]; + let product = boolean_matrix_product(&b, &c); + assert_eq!(product, vec![vec![true, true], vec![true, true]]); +} + +#[test] +fn test_matrix_hamming_distance_function() { + let a = vec![vec![true, false], vec![false, true]]; + let b = vec![vec![true, true], vec![true, true]]; + assert_eq!(matrix_hamming_distance(&a, &b), 2); + + let c = vec![vec![true, false], vec![false, true]]; + assert_eq!(matrix_hamming_distance(&a, &c), 0); +} + +#[test] +fn test_energy_mode() { + let matrix = vec![vec![true]]; + let problem = BMF::new(matrix, 1); + assert!(problem.energy_mode().is_minimization()); +} + +#[test] +fn test_problem_size() { + let matrix = vec![vec![true, false, true], vec![false, true, false]]; + let problem = BMF::new(matrix, 2); + let size = problem.problem_size(); + assert_eq!(size.get("rows"), Some(2)); + assert_eq!(size.get("cols"), Some(3)); + assert_eq!(size.get("rank"), Some(2)); +} + +#[test] +fn test_empty_matrix() { + let matrix: Vec> = vec![]; + let problem = BMF::new(matrix, 1); + assert_eq!(problem.num_variables(), 0); + let sol = problem.solution_size(&[]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); +} + +#[test] +fn test_is_exact() { + let matrix = vec![vec![true]]; + let problem = BMF::new(matrix, 1); + assert!(problem.is_exact(&[1, 1])); + assert!(!problem.is_exact(&[0, 0])); +} diff --git a/src/tests_unit/models/specialized/circuit.rs b/src/tests_unit/models/specialized/circuit.rs new file mode 100644 index 0000000..07f73cc --- /dev/null +++ b/src/tests_unit/models/specialized/circuit.rs @@ -0,0 +1,270 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_boolean_expr_var() { + let expr = BooleanExpr::var("x"); + let mut assignments = HashMap::new(); + assignments.insert("x".to_string(), true); + assert!(expr.evaluate(&assignments)); + + assignments.insert("x".to_string(), false); + assert!(!expr.evaluate(&assignments)); +} + +#[test] +fn test_boolean_expr_const() { + let t = BooleanExpr::constant(true); + let f = BooleanExpr::constant(false); + let assignments = HashMap::new(); + assert!(t.evaluate(&assignments)); + assert!(!f.evaluate(&assignments)); +} + +#[test] +fn test_boolean_expr_not() { + let expr = BooleanExpr::not(BooleanExpr::var("x")); + let mut assignments = HashMap::new(); + assignments.insert("x".to_string(), true); + assert!(!expr.evaluate(&assignments)); + + assignments.insert("x".to_string(), false); + assert!(expr.evaluate(&assignments)); +} + +#[test] +fn test_boolean_expr_and() { + let expr = BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]); + let mut assignments = HashMap::new(); + + assignments.insert("x".to_string(), true); + assignments.insert("y".to_string(), true); + assert!(expr.evaluate(&assignments)); + + assignments.insert("y".to_string(), false); + assert!(!expr.evaluate(&assignments)); +} + +#[test] +fn test_boolean_expr_or() { + let expr = BooleanExpr::or(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]); + let mut assignments = HashMap::new(); + + assignments.insert("x".to_string(), false); + assignments.insert("y".to_string(), false); + assert!(!expr.evaluate(&assignments)); + + assignments.insert("y".to_string(), true); + assert!(expr.evaluate(&assignments)); +} + +#[test] +fn test_boolean_expr_xor() { + let expr = BooleanExpr::xor(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]); + let mut assignments = HashMap::new(); + + 
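As a reference for the BMF tests above, the Boolean matrix product they rely on is (B ⊙ C)[i][j] = OR_k (B[i][k] AND C[k][j]); a minimal standalone sketch (the function name is illustrative, not the crate's API):

fn bool_product_sketch(b: &[Vec<bool>], c: &[Vec<bool>]) -> Vec<Vec<bool>> {
    let rows = b.len();
    let inner = c.len();
    let cols = if c.is_empty() { 0 } else { c[0].len() };
    (0..rows)
        .map(|i| {
            (0..cols)
                // Entry (i, j) is true iff some k has b[i][k] and c[k][j] both true.
                .map(|j| (0..inner).any(|k| b[i][k] && c[k][j]))
                .collect()
        })
        .collect()
}

For B = [[1],[1]] and C = [[1,1]] this yields [[1,1],[1,1]], the same result checked in `test_boolean_product`.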
assignments.insert("x".to_string(), true); + assignments.insert("y".to_string(), true); + assert!(!expr.evaluate(&assignments)); // XOR(T, T) = F + + assignments.insert("y".to_string(), false); + assert!(expr.evaluate(&assignments)); // XOR(T, F) = T +} + +#[test] +fn test_boolean_expr_variables() { + let expr = BooleanExpr::and(vec![ + BooleanExpr::var("x"), + BooleanExpr::or(vec![BooleanExpr::var("y"), BooleanExpr::var("z")]), + ]); + let vars = expr.variables(); + assert_eq!(vars, vec!["x", "y", "z"]); +} + +#[test] +fn test_assignment_satisfied() { + let assign = Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + ); + + let mut assignments = HashMap::new(); + assignments.insert("x".to_string(), true); + assignments.insert("y".to_string(), true); + assignments.insert("c".to_string(), true); + assert!(assign.is_satisfied(&assignments)); + + assignments.insert("c".to_string(), false); + assert!(!assign.is_satisfied(&assignments)); +} + +#[test] +fn test_circuit_variables() { + let circuit = Circuit::new(vec![ + Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + ), + Assignment::new( + vec!["d".to_string()], + BooleanExpr::or(vec![BooleanExpr::var("c"), BooleanExpr::var("z")]), + ), + ]); + let vars = circuit.variables(); + assert_eq!(vars, vec!["c", "d", "x", "y", "z"]); +} + +#[test] +fn test_circuit_sat_creation() { + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + let problem = CircuitSAT::::new(circuit); + assert_eq!(problem.num_variables(), 3); // c, x, y + assert_eq!(problem.num_flavors(), 2); +} + +#[test] +fn test_circuit_sat_solution_size() { + // c = x AND y + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + let problem = CircuitSAT::::new(circuit); + + // Variables sorted: c, x, y + // c=1, x=1, y=1 -> c = 1 AND 1 = 1, valid + let sol = problem.solution_size(&[1, 1, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); + + // c=0, x=0, y=0 -> c = 0 AND 0 = 0, valid + let sol = problem.solution_size(&[0, 0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); + + // c=1, x=0, y=0 -> c should be 0, but c=1, invalid + let sol = problem.solution_size(&[1, 0, 0]); + assert!(!sol.is_valid); + assert_eq!(sol.size, 0); +} + +#[test] +fn test_circuit_sat_brute_force() { + // c = x AND y + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + let problem = CircuitSAT::::new(circuit); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // All satisfying: c matches x AND y + // 4 valid configs: (0,0,0), (0,0,1), (0,1,0), (1,1,1) + assert_eq!(solutions.len(), 4); + for sol in &solutions { + assert!(problem.solution_size(sol).is_valid); + } +} + +#[test] +fn test_circuit_sat_complex() { + // c = x AND y + // d = c OR z + let circuit = Circuit::new(vec![ + Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + ), + Assignment::new( + vec!["d".to_string()], + BooleanExpr::or(vec![BooleanExpr::var("c"), BooleanExpr::var("z")]), + ), + ]); + let problem = CircuitSAT::::new(circuit); + let solver = BruteForce::new(); + + let solutions = 
solver.find_best(&problem); + // All valid solutions satisfy both assignments + for sol in &solutions { + let sol_size = problem.solution_size(sol); + assert!(sol_size.is_valid); + assert_eq!(sol_size.size, 2); + } +} + +#[test] +fn test_is_circuit_satisfying() { + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + + let mut assignments = HashMap::new(); + assignments.insert("x".to_string(), true); + assignments.insert("y".to_string(), true); + assignments.insert("c".to_string(), true); + assert!(is_circuit_satisfying(&circuit, &assignments)); + + assignments.insert("c".to_string(), false); + assert!(!is_circuit_satisfying(&circuit, &assignments)); +} + +#[test] +fn test_problem_size() { + let circuit = Circuit::new(vec![ + Assignment::new(vec!["c".to_string()], BooleanExpr::var("x")), + Assignment::new(vec!["d".to_string()], BooleanExpr::var("y")), + ]); + let problem = CircuitSAT::::new(circuit); + let size = problem.problem_size(); + assert_eq!(size.get("num_variables"), Some(4)); + assert_eq!(size.get("num_assignments"), Some(2)); +} + +#[test] +fn test_energy_mode() { + let circuit = Circuit::new(vec![]); + let problem = CircuitSAT::::new(circuit); + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_empty_circuit() { + let circuit = Circuit::new(vec![]); + let problem = CircuitSAT::::new(circuit); + let sol = problem.solution_size(&[]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); +} + +#[test] +fn test_weighted_circuit_sat() { + let circuit = Circuit::new(vec![ + Assignment::new(vec!["c".to_string()], BooleanExpr::var("x")), + Assignment::new(vec!["d".to_string()], BooleanExpr::var("y")), + ]); + let problem = CircuitSAT::with_weights(circuit, vec![10, 1]); + + // Variables sorted: c, d, x, y + // Config [1, 0, 1, 0]: c=1, d=0, x=1, y=0 + // c=x (1=1) satisfied (weight 10), d=y (0=0) satisfied (weight 1) + let sol = problem.solution_size(&[1, 0, 1, 0]); + assert_eq!(sol.size, 11); // Both satisfied: 10 + 1 + assert!(sol.is_valid); + + // Config [1, 0, 0, 0]: c=1, d=0, x=0, y=0 + // c=x (1!=0) not satisfied, d=y (0=0) satisfied (weight 1) + let sol = problem.solution_size(&[1, 0, 0, 0]); + assert_eq!(sol.size, 1); // Only d=y satisfied + assert!(!sol.is_valid); + + // Config [0, 1, 0, 0]: c=0, d=1, x=0, y=0 + // c=x (0=0) satisfied (weight 10), d=y (1!=0) not satisfied + let sol = problem.solution_size(&[0, 1, 0, 0]); + assert_eq!(sol.size, 10); // Only c=x satisfied + assert!(!sol.is_valid); +} diff --git a/src/tests_unit/models/specialized/factoring.rs b/src/tests_unit/models/specialized/factoring.rs new file mode 100644 index 0000000..a4a766b --- /dev/null +++ b/src/tests_unit/models/specialized/factoring.rs @@ -0,0 +1,152 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_factoring_creation() { + let problem = Factoring::new(3, 3, 15); + assert_eq!(problem.m(), 3); + assert_eq!(problem.n(), 3); + assert_eq!(problem.target(), 15); + assert_eq!(problem.num_variables(), 6); + assert_eq!(problem.num_flavors(), 2); +} + +#[test] +fn test_bits_to_int() { + assert_eq!(bits_to_int(&[0, 0, 0]), 0); + assert_eq!(bits_to_int(&[1, 0, 0]), 1); + assert_eq!(bits_to_int(&[0, 1, 0]), 2); + assert_eq!(bits_to_int(&[1, 1, 0]), 3); + assert_eq!(bits_to_int(&[0, 0, 1]), 4); + assert_eq!(bits_to_int(&[1, 1, 1]), 7); +} + +#[test] +fn test_int_to_bits() { + assert_eq!(int_to_bits(0, 3), vec![0, 0, 0]); + assert_eq!(int_to_bits(1, 3), vec![1, 
0, 0]); + assert_eq!(int_to_bits(2, 3), vec![0, 1, 0]); + assert_eq!(int_to_bits(3, 3), vec![1, 1, 0]); + assert_eq!(int_to_bits(7, 3), vec![1, 1, 1]); +} + +#[test] +fn test_read_factors() { + let problem = Factoring::new(2, 2, 6); + // bits: [a0, a1, b0, b1] + // a=2 (binary 10), b=3 (binary 11) -> config = [0,1,1,1] + let (a, b) = problem.read_factors(&[0, 1, 1, 1]); + assert_eq!(a, 2); + assert_eq!(b, 3); +} + +#[test] +fn test_solution_size_valid() { + let problem = Factoring::new(2, 2, 6); + // 2 * 3 = 6 + let sol = problem.solution_size(&[0, 1, 1, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); // Exact match + + // 3 * 2 = 6 + let sol = problem.solution_size(&[1, 1, 0, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); +} + +#[test] +fn test_solution_size_invalid() { + let problem = Factoring::new(2, 2, 6); + // 2 * 2 = 4 != 6 + let sol = problem.solution_size(&[0, 1, 0, 1]); + assert!(!sol.is_valid); + assert_eq!(sol.size, 2); // Distance from 6 + + // 1 * 1 = 1 != 6 + let sol = problem.solution_size(&[1, 0, 1, 0]); + assert!(!sol.is_valid); + assert_eq!(sol.size, 5); // Distance from 6 +} + +#[test] +fn test_brute_force_factor_6() { + let problem = Factoring::new(2, 2, 6); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Should find 2*3 and 3*2 + assert!(!solutions.is_empty()); + for sol in &solutions { + let (a, b) = problem.read_factors(sol); + assert_eq!(a * b, 6); + } +} + +#[test] +fn test_brute_force_factor_15() { + let problem = Factoring::new(3, 3, 15); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Should find 3*5, 5*3, 1*15, 15*1 + for sol in &solutions { + let (a, b) = problem.read_factors(sol); + assert_eq!(a * b, 15); + } +} + +#[test] +fn test_brute_force_prime() { + // 7 is prime, only 1*7 and 7*1 work + let problem = Factoring::new(3, 3, 7); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + let factor_pairs: Vec<_> = solutions.iter().map(|s| problem.read_factors(s)).collect(); + + // Should find (1,7) and (7,1) + assert!(factor_pairs.contains(&(1, 7)) || factor_pairs.contains(&(7, 1))); +} + +#[test] +fn test_is_factoring_function() { + assert!(is_factoring(6, 2, 3)); + assert!(is_factoring(6, 3, 2)); + assert!(is_factoring(15, 3, 5)); + assert!(!is_factoring(6, 2, 2)); +} + +#[test] +fn test_energy_mode() { + let problem = Factoring::new(2, 2, 6); + assert!(problem.energy_mode().is_minimization()); +} + +#[test] +fn test_problem_size() { + let problem = Factoring::new(3, 4, 12); + let size = problem.problem_size(); + assert_eq!(size.get("num_bits_first"), Some(3)); + assert_eq!(size.get("num_bits_second"), Some(4)); + assert_eq!(size.get("target"), Some(12)); +} + +#[test] +fn test_is_valid_factorization() { + let problem = Factoring::new(2, 2, 6); + assert!(problem.is_valid_factorization(&[0, 1, 1, 1])); // 2*3=6 + assert!(!problem.is_valid_factorization(&[0, 1, 0, 1])); // 2*2=4 +} + +#[test] +fn test_factor_one() { + // Factor 1: only 1*1 works + let problem = Factoring::new(2, 2, 1); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + for sol in &solutions { + let (a, b) = problem.read_factors(sol); + assert_eq!(a * b, 1); + } +} diff --git a/src/tests_unit/models/specialized/paintshop.rs b/src/tests_unit/models/specialized/paintshop.rs new file mode 100644 index 0000000..b1638a1 --- /dev/null +++ b/src/tests_unit/models/specialized/paintshop.rs @@ -0,0 +1,147 @@ +use super::*; +use 
crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_paintshop_creation() { + let problem = PaintShop::new(vec!["a", "b", "a", "b"]); + assert_eq!(problem.num_cars(), 2); + assert_eq!(problem.sequence_len(), 4); + assert_eq!(problem.num_variables(), 2); + assert_eq!(problem.num_flavors(), 2); +} + +#[test] +fn test_is_first() { + let problem = PaintShop::new(vec!["a", "b", "a", "b"]); + // First occurrence: a at 0, b at 1 + // Second occurrence: a at 2, b at 3 + assert_eq!(problem.is_first, vec![true, true, false, false]); +} + +#[test] +fn test_get_coloring() { + let problem = PaintShop::new(vec!["a", "b", "a", "b"]); + // Config: a=0, b=1 + // Sequence: a(0), b(1), a(1-opposite), b(0-opposite) + let coloring = problem.get_coloring(&[0, 1]); + assert_eq!(coloring, vec![0, 1, 1, 0]); + + // Config: a=1, b=0 + let coloring = problem.get_coloring(&[1, 0]); + assert_eq!(coloring, vec![1, 0, 0, 1]); +} + +#[test] +fn test_count_switches() { + let problem = PaintShop::new(vec!["a", "b", "a", "b"]); + + // Config [0, 1] -> coloring [0, 1, 1, 0] -> 2 switches + assert_eq!(problem.count_switches(&[0, 1]), 2); + + // Config [0, 0] -> coloring [0, 0, 1, 1] -> 1 switch + assert_eq!(problem.count_switches(&[0, 0]), 1); + + // Config [1, 1] -> coloring [1, 1, 0, 0] -> 1 switch + assert_eq!(problem.count_switches(&[1, 1]), 1); +} + +#[test] +fn test_solution_size() { + let problem = PaintShop::new(vec!["a", "b", "a", "b"]); + + let sol = problem.solution_size(&[0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); + + let sol = problem.solution_size(&[0, 1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 2); +} + +#[test] +fn test_brute_force_simple() { + let problem = PaintShop::new(vec!["a", "b", "a", "b"]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Optimal has 1 switch: [0,0] or [1,1] + for sol in &solutions { + assert_eq!(problem.count_switches(sol), 1); + } +} + +#[test] +fn test_brute_force_longer() { + // Sequence: a, b, a, c, c, b + let problem = PaintShop::new(vec!["a", "b", "a", "c", "c", "b"]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Find the minimum number of switches + let min_switches = problem.count_switches(&solutions[0]); + for sol in &solutions { + assert_eq!(problem.count_switches(sol), min_switches); + } +} + +#[test] +fn test_count_paint_switches_function() { + assert_eq!(count_paint_switches(&[0, 0, 0]), 0); + assert_eq!(count_paint_switches(&[0, 1, 0]), 2); + assert_eq!(count_paint_switches(&[0, 0, 1, 1]), 1); + assert_eq!(count_paint_switches(&[0, 1, 0, 1]), 3); +} + +#[test] +fn test_energy_mode() { + let problem = PaintShop::new(vec!["a", "a"]); + assert!(problem.energy_mode().is_minimization()); +} + +#[test] +fn test_problem_size() { + let problem = PaintShop::new(vec!["a", "b", "c", "a", "b", "c"]); + let size = problem.problem_size(); + assert_eq!(size.get("num_cars"), Some(3)); + assert_eq!(size.get("sequence_length"), Some(6)); +} + +#[test] +fn test_single_car() { + let problem = PaintShop::new(vec!["a", "a"]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // Both configs give 1 switch: a(0)->a(1) or a(1)->a(0) + assert_eq!(solutions.len(), 2); + for sol in &solutions { + assert_eq!(problem.count_switches(sol), 1); + } +} + +#[test] +fn test_adjacent_same_car() { + // Sequence: a, a, b, b + let problem = PaintShop::new(vec!["a", "a", "b", "b"]); + let solver = BruteForce::new(); + + let solutions = solver.find_best(&problem); + // 
Best case: [0,0] -> [0,1,0,1] = 3 switches, or [0,1] -> [0,1,1,0] = 2 switches + // Actually: [0,0] -> a=0,a=1,b=0,b=1 = [0,1,0,1] = 3 switches + // [0,1] -> a=0,a=1,b=1,b=0 = [0,1,1,0] = 2 switches + let min_switches = problem.count_switches(&solutions[0]); + assert!(min_switches <= 3); +} + +#[test] +#[should_panic] +fn test_invalid_sequence_single_occurrence() { + // This should panic because 'c' only appears once + let _ = PaintShop::new(vec!["a", "b", "a", "c"]); +} + +#[test] +fn test_car_labels() { + let problem = PaintShop::new(vec!["car1", "car2", "car1", "car2"]); + assert_eq!(problem.car_labels().len(), 2); +} diff --git a/src/tests_unit/polynomial.rs b/src/tests_unit/polynomial.rs new file mode 100644 index 0000000..486a2bb --- /dev/null +++ b/src/tests_unit/polynomial.rs @@ -0,0 +1,96 @@ +use super::*; + +#[test] +fn test_monomial_constant() { + let m = Monomial::constant(5.0); + let size = ProblemSize::new(vec![("n", 10)]); + assert_eq!(m.evaluate(&size), 5.0); +} + +#[test] +fn test_monomial_variable() { + let m = Monomial::var("n"); + let size = ProblemSize::new(vec![("n", 10)]); + assert_eq!(m.evaluate(&size), 10.0); +} + +#[test] +fn test_monomial_var_pow() { + let m = Monomial::var_pow("n", 2); + let size = ProblemSize::new(vec![("n", 5)]); + assert_eq!(m.evaluate(&size), 25.0); +} + +#[test] +fn test_polynomial_add() { + // 3n + 2m + let p = Polynomial::var("n").scale(3.0) + Polynomial::var("m").scale(2.0); + + let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); + assert_eq!(p.evaluate(&size), 40.0); // 3*10 + 2*5 +} + +#[test] +fn test_polynomial_complex() { + // n^2 + 3m + let p = Polynomial::var_pow("n", 2) + Polynomial::var("m").scale(3.0); + + let size = ProblemSize::new(vec![("n", 4), ("m", 2)]); + assert_eq!(p.evaluate(&size), 22.0); // 16 + 6 +} + +#[test] +fn test_poly_macro() { + let size = ProblemSize::new(vec![("n", 5), ("m", 3)]); + + assert_eq!(poly!(n).evaluate(&size), 5.0); + assert_eq!(poly!(n ^ 2).evaluate(&size), 25.0); + assert_eq!(poly!(3 * n).evaluate(&size), 15.0); + assert_eq!(poly!(2 * m ^ 2).evaluate(&size), 18.0); +} + +#[test] +fn test_missing_variable() { + let p = Polynomial::var("missing"); + let size = ProblemSize::new(vec![("n", 10)]); + assert_eq!(p.evaluate(&size), 0.0); // missing var = 0 +} + +#[test] +fn test_polynomial_zero() { + let p = Polynomial::zero(); + let size = ProblemSize::new(vec![("n", 100)]); + assert_eq!(p.evaluate(&size), 0.0); +} + +#[test] +fn test_polynomial_constant() { + let p = Polynomial::constant(42.0); + let size = ProblemSize::new(vec![("n", 100)]); + assert_eq!(p.evaluate(&size), 42.0); +} + +#[test] +fn test_monomial_scale() { + let m = Monomial::var("n").scale(3.0); + let size = ProblemSize::new(vec![("n", 10)]); + assert_eq!(m.evaluate(&size), 30.0); +} + +#[test] +fn test_polynomial_scale() { + let p = Polynomial::var("n").scale(5.0); + let size = ProblemSize::new(vec![("n", 10)]); + assert_eq!(p.evaluate(&size), 50.0); +} + +#[test] +fn test_monomial_multi_variable() { + // n * m^2 + let m = Monomial { + coefficient: 1.0, + variables: vec![("n", 1), ("m", 2)], + }; + let size = ProblemSize::new(vec![("n", 2), ("m", 3)]); + assert_eq!(m.evaluate(&size), 18.0); // 2 * 9 +} diff --git a/tests/property_tests.rs b/src/tests_unit/property.rs similarity index 97% rename from tests/property_tests.rs rename to src/tests_unit/property.rs index 9d562bf..bf70713 100644 --- a/tests/property_tests.rs +++ b/src/tests_unit/property.rs @@ -3,9 +3,9 @@ //! 
These tests verify mathematical invariants and properties //! that should hold for all valid inputs. -use problemreductions::models::graph::{IndependentSet, VertexCovering}; -use problemreductions::prelude::*; -use problemreductions::topology::SimpleGraph; +use crate::models::graph::{IndependentSet, VertexCovering}; +use crate::prelude::*; +use crate::topology::SimpleGraph; use proptest::prelude::*; use proptest::strategy::ValueTree; use std::collections::HashSet; diff --git a/tests/set_theoretic_tests.rs b/src/tests_unit/reduction_graph.rs similarity index 63% rename from tests/set_theoretic_tests.rs rename to src/tests_unit/reduction_graph.rs index 57105d4..8f2b54f 100644 --- a/tests/set_theoretic_tests.rs +++ b/src/tests_unit/reduction_graph.rs @@ -1,7 +1,11 @@ -//! Integration tests for set-theoretic reduction path finding. +//! Tests for ReductionGraph: discovery, path finding, graph hierarchy, and typed API. -use problemreductions::rules::{MinimizeSteps, ReductionGraph}; -use problemreductions::types::ProblemSize; +use crate::prelude::*; +use crate::rules::{MinimizeSteps, ReductionGraph}; +use crate::topology::SimpleGraph; +use crate::types::ProblemSize; + +// ---- Discovery and registration ---- #[test] fn test_reduction_graph_discovers_registered_reductions() { @@ -23,13 +27,26 @@ fn test_reduction_graph_discovers_registered_reductions() { assert!(graph.has_direct_reduction_by_name("Satisfiability", "IndependentSet")); } +#[test] +fn test_bidirectional_reductions() { + let graph = ReductionGraph::new(); + + // IS <-> VC should both be registered + assert!(graph.has_direct_reduction_by_name("IndependentSet", "VertexCovering")); + assert!(graph.has_direct_reduction_by_name("VertexCovering", "IndependentSet")); + + // MaxCut <-> SpinGlass should both be registered + assert!(graph.has_direct_reduction_by_name("MaxCut", "SpinGlass")); + assert!(graph.has_direct_reduction_by_name("SpinGlass", "MaxCut")); +} + +// ---- Path finding (by name) ---- + #[test] fn test_find_path_with_cost_function() { let graph = ReductionGraph::new(); let input_size = ProblemSize::new(vec![("n", 100), ("m", 200)]); - // Find path from IndependentSet to VertexCovering using SimpleGraph - // This is a direct path where both source and target use SimpleGraph let path = graph.find_cheapest_path( ("IndependentSet", "SimpleGraph"), ("VertexCovering", "SimpleGraph"), @@ -48,7 +65,6 @@ fn test_find_path_with_cost_function() { fn test_multi_step_path() { let graph = ReductionGraph::new(); - // Use find_shortest_path_by_name which doesn't validate graph types // Factoring -> CircuitSAT -> SpinGlass is a 2-step path let path = graph.find_shortest_path_by_name("Factoring", "SpinGlass"); @@ -64,11 +80,30 @@ fn test_multi_step_path() { ); } +#[test] +fn test_problem_size_propagation() { + let graph = ReductionGraph::new(); + let input_size = ProblemSize::new(vec![("num_vertices", 50), ("num_edges", 100)]); + + let path = graph.find_cheapest_path( + ("IndependentSet", "SimpleGraph"), + ("VertexCovering", "SimpleGraph"), + &input_size, + &MinimizeSteps, + ); + + assert!(path.is_some()); + + let path2 = graph.find_shortest_path_by_name("IndependentSet", "SetPacking"); + assert!(path2.is_some()); +} + +// ---- Graph hierarchy ---- + #[test] fn test_graph_hierarchy_built() { let graph = ReductionGraph::new(); - // Test the graph hierarchy was built from GraphSubtypeEntry assert!(graph.is_graph_subtype("UnitDiskGraph", "SimpleGraph")); assert!(graph.is_graph_subtype("PlanarGraph", "SimpleGraph")); 
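Conceptually, the name-level path queries tested here (`find_shortest_path_by_name`, multi-step Factoring -> CircuitSAT -> SpinGlass) amount to a breadth-first search over problem names; the following sketch is only an illustration under a stand-in adjacency map, not how ReductionGraph is implemented:

use std::collections::{HashMap, HashSet, VecDeque};

// Returns the minimum number of reduction steps between two problem names,
// or None if no chain of registered reductions connects them.
fn reduction_steps_sketch(
    edges: &HashMap<&str, Vec<&str>>,
    from: &str,
    to: &str,
) -> Option<usize> {
    let mut seen: HashSet<String> = HashSet::from([from.to_string()]);
    let mut queue: VecDeque<(String, usize)> = VecDeque::from([(from.to_string(), 0)]);
    while let Some((node, steps)) = queue.pop_front() {
        if node == to {
            return Some(steps);
        }
        for next in edges.get(node.as_str()).into_iter().flatten() {
            if seen.insert(next.to_string()) {
                queue.push_back((next.to_string(), steps + 1));
            }
        }
    }
    None
}

With edges {"Factoring": ["CircuitSAT"], "CircuitSAT": ["SpinGlass"]}, the call for "Factoring" -> "SpinGlass" returns Some(2), mirroring the 2-step path asserted in `test_multi_step_path`.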
assert!(graph.is_graph_subtype("BipartiteGraph", "SimpleGraph")); @@ -91,51 +126,75 @@ fn test_rule_applicability() { assert!(!graph.rule_applicable("SimpleGraph", "SimpleGraph", "UnitDiskGraph", "SimpleGraph")); } +// ---- JSON export ---- + #[test] -fn test_bidirectional_reductions() { +fn test_json_export() { let graph = ReductionGraph::new(); + let json = graph.to_json(); - // IS <-> VC should both be registered - assert!(graph.has_direct_reduction_by_name("IndependentSet", "VertexCovering")); - assert!(graph.has_direct_reduction_by_name("VertexCovering", "IndependentSet")); + assert!(!json.nodes.is_empty()); + assert!(!json.edges.is_empty()); - // MaxCut <-> SpinGlass should both be registered - assert!(graph.has_direct_reduction_by_name("MaxCut", "SpinGlass")); - assert!(graph.has_direct_reduction_by_name("SpinGlass", "MaxCut")); + let categories: std::collections::HashSet<&str> = + json.nodes.iter().map(|n| n.category.as_str()).collect(); + assert!(categories.len() >= 3, "Should have multiple categories"); } +// ---- Path finding (typed API) ---- + #[test] -fn test_problem_size_propagation() { +fn test_direct_reduction_exists() { let graph = ReductionGraph::new(); - let input_size = ProblemSize::new(vec![("num_vertices", 50), ("num_edges", 100)]); - // Path finding should work with size propagation using compatible graph types - // IndependentSet -> VertexCovering uses SimpleGraph -> SimpleGraph - let path = graph.find_cheapest_path( - ("IndependentSet", "SimpleGraph"), - ("VertexCovering", "SimpleGraph"), - &input_size, - &MinimizeSteps, - ); + assert!(graph.has_direct_reduction::, VertexCovering>()); + assert!(graph.has_direct_reduction::, IndependentSet>()); + assert!(graph.has_direct_reduction::, SetPacking>()); + assert!(graph.has_direct_reduction::, QUBO>()); + assert!(graph.has_direct_reduction::, MaxCut>()); +} - assert!(path.is_some()); +#[test] +fn test_find_direct_path() { + let graph = ReductionGraph::new(); - // Also test that find_shortest_path_by_name works for multi-step paths - let path2 = graph.find_shortest_path_by_name("IndependentSet", "SetPacking"); - assert!(path2.is_some()); + let paths = graph.find_paths::, VertexCovering>(); + assert!(!paths.is_empty()); + assert_eq!(paths[0].len(), 1); } #[test] -fn test_json_export() { +fn test_find_indirect_path() { let graph = ReductionGraph::new(); - let json = graph.to_json(); - // Should have nodes for registered problems - assert!(!json.nodes.is_empty()); - assert!(!json.edges.is_empty()); + // SetPacking -> IndependentSet -> VertexCovering + let paths = graph.find_paths::, VertexCovering>(); + assert!(!paths.is_empty()); - // Categories should be assigned - let categories: std::collections::HashSet<&str> = - json.nodes.iter().map(|n| n.category.as_str()).collect(); - assert!(categories.len() >= 3, "Should have multiple categories"); + let shortest = graph.find_shortest_path::, VertexCovering>(); + assert!(shortest.is_some()); + assert_eq!(shortest.unwrap().len(), 2); +} + +#[test] +fn test_no_path_exists() { + let graph = ReductionGraph::new(); + + let paths = graph.find_paths::, SetPacking>(); + assert!(paths.is_empty()); +} + +#[test] +fn test_bidirectional_paths() { + let graph = ReductionGraph::new(); + + assert!(!graph + .find_paths::, VertexCovering>() + .is_empty()); + assert!(!graph + .find_paths::, IndependentSet>() + .is_empty()); + + assert!(!graph.find_paths::, QUBO>().is_empty()); + assert!(!graph.find_paths::, SpinGlass>().is_empty()); } diff --git a/src/tests_unit/registry/category.rs 
b/src/tests_unit/registry/category.rs new file mode 100644 index 0000000..5e66f86 --- /dev/null +++ b/src/tests_unit/registry/category.rs @@ -0,0 +1,110 @@ +use super::*; + +#[test] +fn test_category_path() { + let cat = ProblemCategory::Graph(GraphSubcategory::Independent); + assert_eq!(cat.path(), "graph/independent"); + assert_eq!(cat.name(), "graph"); + assert_eq!(cat.subcategory_name(), "independent"); +} + +#[test] +fn test_category_display() { + let cat = ProblemCategory::Satisfiability(SatisfiabilitySubcategory::Sat); + assert_eq!(format!("{}", cat), "satisfiability/sat"); +} + +#[test] +fn test_all_subcategories() { + // Graph + assert_eq!(GraphSubcategory::Coloring.name(), "coloring"); + assert_eq!(GraphSubcategory::Covering.name(), "covering"); + assert_eq!(GraphSubcategory::Independent.name(), "independent"); + assert_eq!(GraphSubcategory::Paths.name(), "paths"); + assert_eq!(GraphSubcategory::Structure.name(), "structure"); + assert_eq!(GraphSubcategory::Trees.name(), "trees"); + assert_eq!(GraphSubcategory::Matching.name(), "matching"); + + // Satisfiability + assert_eq!(SatisfiabilitySubcategory::Sat.name(), "sat"); + assert_eq!(SatisfiabilitySubcategory::Circuit.name(), "circuit"); + assert_eq!(SatisfiabilitySubcategory::Qbf.name(), "qbf"); + + // Set + assert_eq!(SetSubcategory::Covering.name(), "covering"); + assert_eq!(SetSubcategory::Packing.name(), "packing"); + assert_eq!(SetSubcategory::Partition.name(), "partition"); + assert_eq!(SetSubcategory::Matching.name(), "matching"); + + // Optimization + assert_eq!(OptimizationSubcategory::Quadratic.name(), "quadratic"); + assert_eq!(OptimizationSubcategory::Linear.name(), "linear"); + assert_eq!(OptimizationSubcategory::Constraint.name(), "constraint"); + + // Scheduling + assert_eq!(SchedulingSubcategory::Machine.name(), "machine"); + assert_eq!(SchedulingSubcategory::Sequencing.name(), "sequencing"); + assert_eq!(SchedulingSubcategory::Resource.name(), "resource"); + + // Network + assert_eq!(NetworkSubcategory::Flow.name(), "flow"); + assert_eq!(NetworkSubcategory::Routing.name(), "routing"); + assert_eq!(NetworkSubcategory::Connectivity.name(), "connectivity"); + + // String + assert_eq!(StringSubcategory::Sequence.name(), "sequence"); + assert_eq!(StringSubcategory::Matching.name(), "matching"); + assert_eq!(StringSubcategory::Compression.name(), "compression"); + + // Specialized + assert_eq!(SpecializedSubcategory::Geometry.name(), "geometry"); + assert_eq!(SpecializedSubcategory::Number.name(), "number"); + assert_eq!(SpecializedSubcategory::Game.name(), "game"); + assert_eq!(SpecializedSubcategory::Other.name(), "other"); +} + +#[test] +fn test_all_category_paths() { + // Test ProblemCategory name() and subcategory_name() for all variants + let categories = [ + ProblemCategory::Graph(GraphSubcategory::Coloring), + ProblemCategory::Satisfiability(SatisfiabilitySubcategory::Sat), + ProblemCategory::Set(SetSubcategory::Covering), + ProblemCategory::Optimization(OptimizationSubcategory::Quadratic), + ProblemCategory::Scheduling(SchedulingSubcategory::Machine), + ProblemCategory::Network(NetworkSubcategory::Flow), + ProblemCategory::String(StringSubcategory::Sequence), + ProblemCategory::Specialized(SpecializedSubcategory::Geometry), + ]; + + let expected_names = [ + "graph", + "satisfiability", + "set", + "optimization", + "scheduling", + "network", + "string", + "specialized", + ]; + + let expected_subcategories = [ + "coloring", + "sat", + "covering", + "quadratic", + "machine", + "flow", + "sequence", + "geometry", 
+ ]; + + for (i, cat) in categories.iter().enumerate() { + assert_eq!(cat.name(), expected_names[i]); + assert_eq!(cat.subcategory_name(), expected_subcategories[i]); + assert!(!cat.path().is_empty()); + // Test Display + let display = format!("{}", cat); + assert!(display.contains('/')); + } +} diff --git a/src/tests_unit/registry/info.rs b/src/tests_unit/registry/info.rs new file mode 100644 index 0000000..44aea16 --- /dev/null +++ b/src/tests_unit/registry/info.rs @@ -0,0 +1,43 @@ +use super::*; + +#[test] +fn test_complexity_class() { + assert_eq!(ComplexityClass::NpComplete.name(), "NP-complete"); + assert!(ComplexityClass::NpComplete.is_hard()); + assert!(ComplexityClass::NpHard.is_hard()); + assert!(!ComplexityClass::P.is_hard()); +} + +#[test] +fn test_problem_info_builder() { + let info = ProblemInfo::new("Independent Set", "Find a maximum weight independent set") + .with_aliases(&["MIS", "MWIS"]) + .with_complexity(ComplexityClass::NpComplete) + .with_reduction_from("3-SAT") + .with_reference("https://en.wikipedia.org/wiki/Independent_set_(graph_theory)"); + + assert_eq!(info.name, "Independent Set"); + assert_eq!(info.aliases, &["MIS", "MWIS"]); + assert!(info.is_np_complete()); + assert_eq!(info.canonical_reduction_from, Some("3-SAT")); + assert_eq!(info.all_names(), vec!["Independent Set", "MIS", "MWIS"]); +} + +#[test] +fn test_problem_info_display() { + let info = ProblemInfo::new("Vertex Cover", "Find a minimum vertex cover"); + assert_eq!(format!("{}", info), "Vertex Cover (NP-complete)"); +} + +#[test] +fn test_problem_info_versions() { + let decision_only = + ProblemInfo::new("Decision Problem", "A yes/no problem").with_optimization(false); + assert!(decision_only.decision_version); + assert!(!decision_only.optimization_version); + + let opt_only = ProblemInfo::new("Optimization Problem", "An optimization problem") + .with_decision(false); + assert!(!opt_only.decision_version); + assert!(opt_only.optimization_version); +} diff --git a/src/tests_unit/rules/circuit_spinglass.rs b/src/tests_unit/rules/circuit_spinglass.rs new file mode 100644 index 0000000..1f0ce08 --- /dev/null +++ b/src/tests_unit/rules/circuit_spinglass.rs @@ -0,0 +1,522 @@ +use super::*; +use crate::models::specialized::Circuit; +use crate::solvers::{BruteForce, Solver}; + +/// Verify a gadget has the correct ground states. 
+fn verify_gadget_truth_table(gadget: &LogicGadget, expected: &[(Vec, Vec)]) +where + W: Clone + + Default + + PartialOrd + + Num + + Zero + + AddAssign + + From + + std::ops::Mul + + std::fmt::Debug + + 'static, +{ + let solver = BruteForce::new(); + let solutions = solver.find_best(&gadget.problem); + + // For each expected input/output pair, verify there's a matching ground state + for (inputs, outputs) in expected { + let found = solutions.iter().any(|sol| { + let input_match = gadget + .inputs + .iter() + .zip(inputs) + .all(|(&idx, &expected)| sol[idx] == expected); + let output_match = gadget + .outputs + .iter() + .zip(outputs) + .all(|(&idx, &expected)| sol[idx] == expected); + input_match && output_match + }); + assert!( + found, + "Expected ground state with inputs {:?} and outputs {:?} not found in {:?}", + inputs, outputs, solutions + ); + } +} + +#[test] +fn test_and_gadget() { + let gadget: LogicGadget = and_gadget(); + assert_eq!(gadget.num_spins(), 3); + assert_eq!(gadget.inputs, vec![0, 1]); + assert_eq!(gadget.outputs, vec![2]); + + // AND truth table: (a, b) -> a AND b + let truth_table = vec![ + (vec![0, 0], vec![0]), // 0 AND 0 = 0 + (vec![0, 1], vec![0]), // 0 AND 1 = 0 + (vec![1, 0], vec![0]), // 1 AND 0 = 0 + (vec![1, 1], vec![1]), // 1 AND 1 = 1 + ]; + verify_gadget_truth_table(&gadget, &truth_table); +} + +#[test] +fn test_or_gadget() { + let gadget: LogicGadget = or_gadget(); + assert_eq!(gadget.num_spins(), 3); + assert_eq!(gadget.inputs, vec![0, 1]); + assert_eq!(gadget.outputs, vec![2]); + + // OR truth table: (a, b) -> a OR b + let truth_table = vec![ + (vec![0, 0], vec![0]), // 0 OR 0 = 0 + (vec![0, 1], vec![1]), // 0 OR 1 = 1 + (vec![1, 0], vec![1]), // 1 OR 0 = 1 + (vec![1, 1], vec![1]), // 1 OR 1 = 1 + ]; + verify_gadget_truth_table(&gadget, &truth_table); +} + +#[test] +fn test_not_gadget() { + let gadget: LogicGadget = not_gadget(); + assert_eq!(gadget.num_spins(), 2); + assert_eq!(gadget.inputs, vec![0]); + assert_eq!(gadget.outputs, vec![1]); + + // NOT truth table: a -> NOT a + let truth_table = vec![ + (vec![0], vec![1]), // NOT 0 = 1 + (vec![1], vec![0]), // NOT 1 = 0 + ]; + verify_gadget_truth_table(&gadget, &truth_table); +} + +#[test] +fn test_xor_gadget() { + let gadget: LogicGadget = xor_gadget(); + assert_eq!(gadget.num_spins(), 4); + assert_eq!(gadget.inputs, vec![0, 1]); + assert_eq!(gadget.outputs, vec![2]); + + // XOR truth table: (a, b) -> a XOR b + let truth_table = vec![ + (vec![0, 0], vec![0]), // 0 XOR 0 = 0 + (vec![0, 1], vec![1]), // 0 XOR 1 = 1 + (vec![1, 0], vec![1]), // 1 XOR 0 = 1 + (vec![1, 1], vec![0]), // 1 XOR 1 = 0 + ]; + verify_gadget_truth_table(&gadget, &truth_table); +} + +#[test] +fn test_set0_gadget() { + let gadget: LogicGadget = set0_gadget(); + assert_eq!(gadget.num_spins(), 1); + assert_eq!(gadget.inputs, Vec::::new()); + assert_eq!(gadget.outputs, vec![0]); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&gadget.problem); + // Ground state should be spin down (0) + assert!(solutions.contains(&vec![0])); + assert!(!solutions.contains(&vec![1])); +} + +#[test] +fn test_set1_gadget() { + let gadget: LogicGadget = set1_gadget(); + assert_eq!(gadget.num_spins(), 1); + assert_eq!(gadget.inputs, Vec::::new()); + assert_eq!(gadget.outputs, vec![0]); + + let solver = BruteForce::new(); + let solutions = solver.find_best(&gadget.problem); + // Ground state should be spin up (1) + assert!(solutions.contains(&vec![1])); + assert!(!solutions.contains(&vec![0])); +} + +#[test] +fn 
test_simple_and_circuit() { + // c = x AND y + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + // Extract and verify solutions + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Should have valid AND configurations + // Variables are sorted: c, x, y + let valid_configs = vec![ + vec![0, 0, 0], // c=0, x=0, y=0: 0 AND 0 = 0 OK + vec![0, 0, 1], // c=0, x=0, y=1: 0 AND 1 = 0 OK + vec![0, 1, 0], // c=0, x=1, y=0: 1 AND 0 = 0 OK + vec![1, 1, 1], // c=1, x=1, y=1: 1 AND 1 = 1 OK + ]; + + for config in &valid_configs { + assert!( + extracted.contains(config), + "Expected valid config {:?} not found in {:?}", + config, + extracted + ); + } +} + +#[test] +fn test_simple_or_circuit() { + // c = x OR y + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::or(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Variables sorted: c, x, y + let valid_configs = vec![ + vec![0, 0, 0], // c=0, x=0, y=0: 0 OR 0 = 0 OK + vec![1, 0, 1], // c=1, x=0, y=1: 0 OR 1 = 1 OK + vec![1, 1, 0], // c=1, x=1, y=0: 1 OR 0 = 1 OK + vec![1, 1, 1], // c=1, x=1, y=1: 1 OR 1 = 1 OK + ]; + + for config in &valid_configs { + assert!( + extracted.contains(config), + "Expected valid config {:?} not found in {:?}", + config, + extracted + ); + } +} + +#[test] +fn test_not_circuit() { + // c = NOT x + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::not(BooleanExpr::var("x")), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Variables sorted: c, x + let valid_configs = vec![ + vec![1, 0], // c=1, x=0: NOT 0 = 1 OK + vec![0, 1], // c=0, x=1: NOT 1 = 0 OK + ]; + + for config in &valid_configs { + assert!( + extracted.contains(config), + "Expected valid config {:?} not found in {:?}", + config, + extracted + ); + } +} + +#[test] +fn test_xor_circuit() { + // c = x XOR y + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::xor(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Variables sorted: c, x, y + let valid_configs = vec![ + vec![0, 0, 0], // c=0, x=0, y=0: 0 XOR 0 = 0 OK + vec![1, 0, 1], // c=1, x=0, y=1: 0 XOR 1 = 1 OK + vec![1, 1, 0], // c=1, x=1, y=0: 1 XOR 0 = 1 OK + vec![0, 1, 1], // c=0, x=1, y=1: 1 XOR 1 = 0 OK + ]; + + for config in &valid_configs { + assert!( + 
extracted.contains(config), + "Expected valid config {:?} not found in {:?}", + config, + extracted + ); + } +} + +#[test] +fn test_constant_true() { + // c = true + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::constant(true), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // c should be 1 + assert!( + extracted.contains(&vec![1]), + "Expected c=1 in {:?}", + extracted + ); +} + +#[test] +fn test_constant_false() { + // c = false + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::constant(false), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // c should be 0 + assert!( + extracted.contains(&vec![0]), + "Expected c=0 in {:?}", + extracted + ); +} + +#[test] +fn test_multi_input_and() { + // c = x AND y AND z (3-input AND) + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![ + BooleanExpr::var("x"), + BooleanExpr::var("y"), + BooleanExpr::var("z"), + ]), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Variables sorted: c, x, y, z + // Only c=1 when all inputs are 1 + assert!( + extracted.contains(&vec![1, 1, 1, 1]), + "Expected (1,1,1,1) in {:?}", + extracted + ); + // c=0 for all other combinations + assert!( + extracted.contains(&vec![0, 0, 0, 0]), + "Expected (0,0,0,0) in {:?}", + extracted + ); +} + +#[test] +fn test_chained_circuit() { + // c = x AND y + // d = c OR z + let circuit = Circuit::new(vec![ + Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + ), + Assignment::new( + vec!["d".to_string()], + BooleanExpr::or(vec![BooleanExpr::var("c"), BooleanExpr::var("z")]), + ), + ]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Verify some valid configurations + // Variables sorted: c, d, x, y, z + // c = x AND y, d = c OR z + + // x=1, y=1 -> c=1, z=0 -> d=1 + assert!( + extracted.contains(&vec![1, 1, 1, 1, 0]), + "Expected (1,1,1,1,0) in {:?}", + extracted + ); + + // x=0, y=0 -> c=0, z=1 -> d=1 + assert!( + extracted.contains(&vec![0, 1, 0, 0, 1]), + "Expected (0,1,0,0,1) in {:?}", + extracted + ); + + // x=0, y=0 -> c=0, z=0 -> d=0 + assert!( + extracted.contains(&vec![0, 0, 0, 0, 0]), + "Expected (0,0,0,0,0) in {:?}", + extracted + ); +} + +#[test] +fn test_nested_expression() { + // c = (x AND y) OR z + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::or(vec![ + BooleanExpr::and(vec![BooleanExpr::var("x"), 
BooleanExpr::var("y")]), + BooleanExpr::var("z"), + ]), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + let extracted: Vec> = solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Variables sorted: c, x, y, z + // c = (x AND y) OR z + + // x=1, y=1, z=0 -> c=1 + assert!( + extracted.contains(&vec![1, 1, 1, 0]), + "Expected (1,1,1,0) in {:?}", + extracted + ); + + // x=0, y=0, z=1 -> c=1 + assert!( + extracted.contains(&vec![1, 0, 0, 1]), + "Expected (1,0,0,1) in {:?}", + extracted + ); + + // x=0, y=0, z=0 -> c=0 + assert!( + extracted.contains(&vec![0, 0, 0, 0]), + "Expected (0,0,0,0) in {:?}", + extracted + ); +} + +#[test] +fn test_reduction_result_methods() { + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::var("x"), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + + // Test source_size and target_size + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert!(source_size.get("num_variables").is_some()); + assert!(target_size.get("num_spins").is_some()); +} + +#[test] +fn test_empty_circuit() { + let circuit = Circuit::new(vec![]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + let sg = reduction.target_problem(); + + // Empty circuit should result in empty SpinGlass + assert_eq!(sg.num_spins(), 0); +} + +#[test] +fn test_solution_extraction() { + let circuit = Circuit::new(vec![Assignment::new( + vec!["c".to_string()], + BooleanExpr::and(vec![BooleanExpr::var("x"), BooleanExpr::var("y")]), + )]); + let problem = CircuitSAT::::new(circuit); + let reduction = problem.reduce_to(); + + // The source variables are c, x, y (sorted) + assert_eq!(reduction.source_variables, vec!["c", "x", "y"]); + + // Test extraction with a mock target solution + // Need to know the mapping to construct proper test + let sg = reduction.target_problem(); + assert!(sg.num_spins() >= 3); // At least c, x, y +} diff --git a/src/tests_unit/rules/clique_ilp.rs b/src/tests_unit/rules/clique_ilp.rs new file mode 100644 index 0000000..b79c28c --- /dev/null +++ b/src/tests_unit/rules/clique_ilp.rs @@ -0,0 +1,298 @@ +use super::*; +use crate::solvers::ILPSolver; + +/// Check if a configuration represents a valid clique in the graph. +/// A clique is valid if all selected vertices are pairwise adjacent. +fn is_valid_clique(problem: &Clique, config: &[usize]) -> bool { + let selected: Vec = config + .iter() + .enumerate() + .filter(|(_, &v)| v == 1) + .map(|(i, _)| i) + .collect(); + + // Check all pairs of selected vertices are adjacent + for i in 0..selected.len() { + for j in (i + 1)..selected.len() { + if !problem.has_edge(selected[i], selected[j]) { + return false; + } + } + } + true +} + +/// Compute the clique size (sum of weights of selected vertices). +fn clique_size(problem: &Clique, config: &[usize]) -> i32 { + let weights = problem.weights(); + config + .iter() + .enumerate() + .filter(|(_, &v)| v == 1) + .map(|(i, _)| weights[i]) + .sum() +} + +/// Find maximum clique size by brute force enumeration. 
+fn brute_force_max_clique(problem: &Clique) -> i32 { + let n = problem.num_vertices(); + let mut max_size = 0; + for mask in 0..(1 << n) { + let config: Vec = (0..n).map(|i| (mask >> i) & 1).collect(); + if is_valid_clique(problem, &config) { + let size = clique_size(problem, &config); + if size > max_size { + max_size = size; + } + } + } + max_size +} + +#[test] +fn test_reduction_creates_valid_ilp() { + // Triangle graph: 3 vertices, 3 edges (complete graph K3) + // All pairs are adjacent, so no constraints should be added + let problem: Clique = Clique::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); + assert_eq!( + ilp.constraints.len(), + 0, + "Complete graph has no non-edges, so no constraints" + ); + assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } +} + +#[test] +fn test_reduction_with_non_edges() { + // Path graph 0-1-2: edges (0,1) and (1,2), non-edge (0,2) + let problem: Clique = Clique::new(3, vec![(0, 1), (1, 2)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Should have 1 constraint for non-edge (0, 2) + assert_eq!(ilp.constraints.len(), 1); + + // The constraint should be x_0 + x_2 <= 1 + let constraint = &ilp.constraints[0]; + assert_eq!(constraint.terms.len(), 2); + assert!((constraint.rhs - 1.0).abs() < 1e-9); +} + +#[test] +fn test_reduction_weighted() { + let problem: Clique = + Clique::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check that weights are correctly transferred to objective + let mut coeffs: Vec = vec![0.0; 3]; + for &(var, coef) in &ilp.objective { + coeffs[var] = coef; + } + assert!((coeffs[0] - 5.0).abs() < 1e-9); + assert!((coeffs[1] - 10.0).abs() < 1e-9); + assert!((coeffs[2] - 15.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_equals_brute_force_triangle() { + // Triangle graph (K3): max clique = 3 vertices + let problem: Clique = Clique::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + + // Solve with brute force for clique + let bf_size = brute_force_max_clique(&problem); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Both should find optimal size = 3 (all vertices form a clique) + let ilp_size = clique_size(&problem, &extracted); + assert_eq!(bf_size, 3); + assert_eq!(ilp_size, 3); + + // Verify the ILP solution is a valid clique + assert!( + is_valid_clique(&problem, &extracted), + "Extracted solution should be a valid clique" + ); +} + +#[test] +fn test_ilp_solution_equals_brute_force_path() { + // Path graph 0-1-2-3: max clique = 2 (any adjacent pair) + let problem: Clique = Clique::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + + // Solve with brute force for clique + let bf_size = 
brute_force_max_clique(&problem); + + // Solve via ILP + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size = clique_size(&problem, &extracted); + + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify validity + assert!(is_valid_clique(&problem, &extracted)); +} + +#[test] +fn test_ilp_solution_equals_brute_force_weighted() { + // Triangle with one missing edge: 0-1, 1-2, but no 0-2 + // Weights: [1, 100, 1] + // Max clique by weight: {0, 1} (weight 101) or {1, 2} (weight 101), or just {1} (weight 100) + // Since 0-1 and 1-2 are edges, both {0,1} and {1,2} are valid cliques + let problem: Clique = + Clique::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + + let bf_obj = brute_force_max_clique(&problem); + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_obj = clique_size(&problem, &extracted); + + assert_eq!(bf_obj, 101); + assert_eq!(ilp_obj, 101); + + // Verify the solution is a valid clique + assert!(is_valid_clique(&problem, &extracted)); +} + +#[test] +fn test_solution_extraction() { + let problem: Clique = Clique::new(4, vec![(0, 1), (2, 3)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 1, 0, 0]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 1, 0, 0]); + + // Verify this is a valid clique (0 and 1 are adjacent) + assert!(is_valid_clique(&problem, &extracted)); +} + +#[test] +fn test_source_and_target_size() { + let problem: Clique = + Clique::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(4)); + + assert_eq!(target_size.get("num_vars"), Some(5)); + // Number of non-edges in a path of 5 vertices: C(5,2) - 4 = 10 - 4 = 6 + assert_eq!(target_size.get("num_constraints"), Some(6)); +} + +#[test] +fn test_empty_graph() { + // Graph with no edges: max clique = 1 (any single vertex) + let problem: Clique = Clique::new(3, vec![]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // All pairs are non-edges, so 3 constraints + assert_eq!(ilp.constraints.len(), 3); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Only one vertex should be selected + assert_eq!(extracted.iter().sum::(), 1); + + assert!(is_valid_clique(&problem, &extracted)); + assert_eq!(clique_size(&problem, &extracted), 1); +} + +#[test] +fn test_complete_graph() { + // Complete graph K4: max clique = 4 (all vertices) + let problem: Clique = + Clique::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // No non-edges, so no constraints + assert_eq!(ilp.constraints.len(), 0); + + let ilp_solver = ILPSolver::new(); 
+ let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // All vertices should be selected + assert_eq!(extracted, vec![1, 1, 1, 1]); + + assert!(is_valid_clique(&problem, &extracted)); + assert_eq!(clique_size(&problem, &extracted), 4); +} + +#[test] +fn test_bipartite_graph() { + // Bipartite graph: 0-2, 0-3, 1-2, 1-3 (two independent sets: {0,1} and {2,3}) + // Max clique = 2 (any edge, e.g., {0, 2}) + let problem: Clique = + Clique::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(is_valid_clique(&problem, &extracted)); + assert_eq!(clique_size(&problem, &extracted), 2); + + // Should select an adjacent pair + let sum: usize = extracted.iter().sum(); + assert_eq!(sum, 2); +} + +#[test] +fn test_star_graph() { + // Star graph: center 0 connected to 1, 2, 3 + // Max clique = 2 (center + any leaf) + let problem: Clique = Clique::new(4, vec![(0, 1), (0, 2), (0, 3)]); + let reduction: ReductionCliqueToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Non-edges: (1,2), (1,3), (2,3) = 3 constraints + assert_eq!(ilp.constraints.len(), 3); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(is_valid_clique(&problem, &extracted)); + assert_eq!(clique_size(&problem, &extracted), 2); +} diff --git a/src/tests_unit/rules/coloring_ilp.rs b/src/tests_unit/rules/coloring_ilp.rs new file mode 100644 index 0000000..f52098e --- /dev/null +++ b/src/tests_unit/rules/coloring_ilp.rs @@ -0,0 +1,281 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Triangle graph with 3 colors + let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + // num_vars = 3 vertices * 3 colors = 9 + assert_eq!( + ilp.num_vars, 9, + "Should have 9 variables (3 vertices * 3 colors)" + ); + + // num_constraints = 3 (one per vertex for "exactly one color") + // + 3 edges * 3 colors = 9 (edge constraints) + // = 12 total + assert_eq!( + ilp.constraints.len(), + 12, + "Should have 12 constraints (3 vertex + 9 edge)" + ); + + assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } +} + +#[test] +fn test_reduction_path_graph() { + // Path graph 0-1-2 with 2 colors (2-colorable) + let problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2)]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // num_vars = 3 * 2 = 6 + assert_eq!(ilp.num_vars, 6); + + // constraints = 3 (vertex) + 2 edges * 2 colors = 7 + assert_eq!(ilp.constraints.len(), 7); +} + +#[test] +fn test_ilp_solution_equals_brute_force_triangle() { + // Triangle needs 3 colors + let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = 
reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_best(&problem); + assert!( + !bf_solutions.is_empty(), + "Brute force should find solutions" + ); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Verify the extracted solution is valid for the original problem + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); + + // All three vertices should have different colors + assert_ne!(extracted[0], extracted[1]); + assert_ne!(extracted[1], extracted[2]); + assert_ne!(extracted[0], extracted[2]); +} + +#[test] +fn test_ilp_solution_equals_brute_force_path() { + // Path graph 0-1-2-3 with 2 colors + let problem = KColoring::<2, SimpleGraph, i32>::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + + // Solve via ILP + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Verify validity + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); + + // Check adjacent vertices have different colors + assert_ne!(extracted[0], extracted[1]); + assert_ne!(extracted[1], extracted[2]); + assert_ne!(extracted[2], extracted[3]); +} + +#[test] +fn test_ilp_infeasible_triangle_2_colors() { + // Triangle cannot be 2-colored + let problem = KColoring::<2, SimpleGraph, i32>::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + + // ILP should be infeasible + let result = ilp_solver.solve(ilp); + assert!( + result.is_none(), + "Triangle with 2 colors should be infeasible" + ); +} + +#[test] +fn test_solution_extraction() { + let problem = KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1)]); + let reduction = ReduceTo::::reduce_to(&problem); + + // ILP solution where: + // vertex 0 has color 1 (x_{0,1} = 1) + // vertex 1 has color 2 (x_{1,2} = 1) + // vertex 2 has color 0 (x_{2,0} = 1) + // Variables are indexed as: v0c0, v0c1, v0c2, v1c0, v1c1, v1c2, v2c0, v2c1, v2c2 + let ilp_solution = vec![0, 1, 0, 0, 0, 1, 1, 0, 0]; + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(extracted, vec![1, 2, 0]); + + // Verify this is a valid coloring (vertex 0 and 1 have different colors) + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_source_and_target_size() { + let problem = KColoring::<3, SimpleGraph, i32>::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(4)); + assert_eq!(source_size.get("num_colors"), Some(3)); + + assert_eq!(target_size.get("num_vars"), Some(15)); // 5 * 3 + // constraints = 5 (vertex) + 4 * 3 (edge) = 17 + assert_eq!(target_size.get("num_constraints"), Some(17)); +} + +#[test] +fn test_empty_graph() { + // Graph with no edges: any coloring is valid + let problem = 
KColoring::<1, SimpleGraph, i32>::new(3, vec![]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Should only have vertex constraints (each vertex = one color) + assert_eq!(ilp.constraints.len(), 3); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_complete_graph_k4() { + // K4 needs 4 colors + let problem = KColoring::<4, SimpleGraph, i32>::new( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + + // All vertices should have different colors + let mut colors: Vec = extracted.clone(); + colors.sort(); + colors.dedup(); + assert_eq!(colors.len(), 4); +} + +#[test] +fn test_complete_graph_k4_with_3_colors_infeasible() { + // K4 cannot be 3-colored + let problem = KColoring::<3, SimpleGraph, i32>::new( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(ilp); + assert!(result.is_none(), "K4 with 3 colors should be infeasible"); +} + +#[test] +fn test_bipartite_graph() { + // Complete bipartite K_{2,2}: 0-2, 0-3, 1-2, 1-3 + // This is 2-colorable + let problem = KColoring::<2, SimpleGraph, i32>::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + + // Vertices 0,1 should have same color, vertices 2,3 should have same color + // And different from 0,1 + assert_eq!(extracted[0], extracted[1]); + assert_eq!(extracted[2], extracted[3]); + assert_ne!(extracted[0], extracted[2]); +} + +#[test] +fn test_solve_reduced() { + // Test the ILPSolver::solve_reduced method + let problem = KColoring::<2, SimpleGraph, i32>::new(4, vec![(0, 1), (1, 2), (2, 3)]); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver + .solve_reduced(&problem) + .expect("solve_reduced should work"); + + let sol_result = problem.solution_size(&solution); + assert!(sol_result.is_valid); +} + +#[test] +fn test_single_vertex() { + // Single vertex graph: always 1-colorable + let problem = KColoring::<1, SimpleGraph, i32>::new(1, vec![]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 1); + assert_eq!(ilp.constraints.len(), 1); // Just the "exactly one color" constraint + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(extracted, vec![0]); +} + +#[test] +fn test_single_edge() { + // Single edge: needs 2 colors + let problem = KColoring::<2, 
SimpleGraph, i32>::new(2, vec![(0, 1)]); + let reduction = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_ne!(extracted[0], extracted[1]); +} diff --git a/src/tests_unit/rules/cost.rs b/src/tests_unit/rules/cost.rs new file mode 100644 index 0000000..571d5cb --- /dev/null +++ b/src/tests_unit/rules/cost.rs @@ -0,0 +1,93 @@ +use super::*; +use crate::polynomial::Polynomial; + +fn test_overhead() -> ReductionOverhead { + ReductionOverhead::new(vec![ + ("n", Polynomial::var("n").scale(2.0)), + ("m", Polynomial::var("m")), + ]) +} + +#[test] +fn test_minimize_single() { + let cost_fn = Minimize("n"); + let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); + let overhead = test_overhead(); + + assert_eq!(cost_fn.edge_cost(&overhead, &size), 20.0); // 2 * 10 +} + +#[test] +fn test_minimize_weighted() { + let cost_fn = MinimizeWeighted(vec![("n", 1.0), ("m", 2.0)]); + let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); + let overhead = test_overhead(); + + // output n = 20, output m = 5 + // cost = 1.0 * 20 + 2.0 * 5 = 30 + assert_eq!(cost_fn.edge_cost(&overhead, &size), 30.0); +} + +#[test] +fn test_minimize_steps() { + let cost_fn = MinimizeSteps; + let size = ProblemSize::new(vec![("n", 100)]); + let overhead = test_overhead(); + + assert_eq!(cost_fn.edge_cost(&overhead, &size), 1.0); +} + +#[test] +fn test_minimize_max() { + let cost_fn = MinimizeMax(vec!["n", "m"]); + let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); + let overhead = test_overhead(); + + // output n = 20, output m = 5 + // max(20, 5) = 20 + assert_eq!(cost_fn.edge_cost(&overhead, &size), 20.0); +} + +#[test] +fn test_minimize_lexicographic() { + let cost_fn = MinimizeLexicographic(vec!["n", "m"]); + let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); + let overhead = test_overhead(); + + // output n = 20, output m = 5 + // cost = 20 * 1.0 + 5 * 1e-10 = 20.0000000005 + let cost = cost_fn.edge_cost(&overhead, &size); + assert!(cost > 20.0 && cost < 20.001); +} + +#[test] +fn test_custom_cost() { + let cost_fn = CustomCost(|overhead: &ReductionOverhead, size: &ProblemSize| { + let output = overhead.evaluate_output_size(size); + (output.get("n").unwrap_or(0) + output.get("m").unwrap_or(0)) as f64 + }); + let size = ProblemSize::new(vec![("n", 10), ("m", 5)]); + let overhead = test_overhead(); + + // output n = 20, output m = 5 + // custom = 20 + 5 = 25 + assert_eq!(cost_fn.edge_cost(&overhead, &size), 25.0); +} + +#[test] +fn test_minimize_missing_field() { + let cost_fn = Minimize("nonexistent"); + let size = ProblemSize::new(vec![("n", 10)]); + let overhead = test_overhead(); + + assert_eq!(cost_fn.edge_cost(&overhead, &size), 0.0); +} + +#[test] +fn test_minimize_max_empty() { + let cost_fn = MinimizeMax(vec![]); + let size = ProblemSize::new(vec![("n", 10)]); + let overhead = test_overhead(); + + assert_eq!(cost_fn.edge_cost(&overhead, &size), 0.0); +} diff --git a/src/tests_unit/rules/dominatingset_ilp.rs b/src/tests_unit/rules/dominatingset_ilp.rs new file mode 100644 index 0000000..0b9aad1 --- /dev/null +++ b/src/tests_unit/rules/dominatingset_ilp.rs @@ -0,0 +1,235 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Triangle graph: 3 
vertices, 3 edges + let problem = DominatingSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); + assert_eq!( + ilp.constraints.len(), + 3, + "Should have one constraint per vertex" + ); + assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } + + // Each constraint should be x_v + sum_{u in N(v)} x_u >= 1 + for constraint in &ilp.constraints { + assert!(!constraint.terms.is_empty()); + assert!((constraint.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_reduction_weighted() { + let problem = DominatingSet::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check that weights are correctly transferred to objective + let mut coeffs: Vec = vec![0.0; 3]; + for &(var, coef) in &ilp.objective { + coeffs[var] = coef; + } + assert!((coeffs[0] - 5.0).abs() < 1e-9); + assert!((coeffs[1] - 10.0).abs() < 1e-9); + assert!((coeffs[2] - 15.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_equals_brute_force_star() { + // Star graph: center vertex 0 connected to all others + // Minimum dominating set is just the center (weight 1) + let problem = DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_best(&problem); + let bf_size = problem.solution_size(&bf_solutions[0]).size; + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size = problem.solution_size(&extracted).size; + + // Both should find optimal size = 1 (just the center) + assert_eq!(bf_size, 1); + assert_eq!(ilp_size, 1); + + // Verify the ILP solution is valid for the original problem + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); +} + +#[test] +fn test_ilp_solution_equals_brute_force_path() { + // Path graph 0-1-2-3-4: min DS = 2 (e.g., vertices 1 and 3) + let problem = DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force + let bf_solutions = bf.find_best(&problem); + let bf_size = problem.solution_size(&bf_solutions[0]).size; + + // Solve via ILP + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size = problem.solution_size(&extracted).size; + + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify validity + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_ilp_solution_equals_brute_force_weighted() { + // Star with heavy center: prefer selecting all leaves (total weight 3) + // over center (weight 100) + let problem = + DominatingSet::with_weights(4, vec![(0, 1), (0, 2), 
(0, 3)], vec![100, 1, 1, 1]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_obj = problem.solution_size(&bf_solutions[0]).size; + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_obj = problem.solution_size(&extracted).size; + + assert_eq!(bf_obj, 3); + assert_eq!(ilp_obj, 3); + + // Verify the solution selects all leaves + assert_eq!(extracted, vec![0, 1, 1, 1]); +} + +#[test] +fn test_solution_extraction() { + let problem = DominatingSet::::new(4, vec![(0, 1), (2, 3)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 0, 1, 0]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 0, 1, 0]); + + // Verify this is a valid DS (0 dominates 0,1 and 2 dominates 2,3) + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_source_and_target_size() { + let problem = DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(4)); + + assert_eq!(target_size.get("num_vars"), Some(5)); + assert_eq!(target_size.get("num_constraints"), Some(5)); // one per vertex +} + +#[test] +fn test_isolated_vertices() { + // Graph with isolated vertex 2: it must be in the dominating set + let problem = DominatingSet::::new(3, vec![(0, 1)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Vertex 2 must be selected (isolated) + assert_eq!(extracted[2], 1); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_complete_graph() { + // Complete graph K4: min DS = 1 (any vertex dominates all) + let problem = + DominatingSet::::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 1); +} + +#[test] +fn test_single_vertex() { + // Single vertex with no edges: must be in dominating set + let problem = DominatingSet::::new(1, vec![]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(extracted, vec![1]); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 1); +} + 
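+// Illustrative sketch (hypothetical helper, not part of the crate's API): the
+// ILP reduction exercised above encodes, for every vertex v, the domination
+// constraint x_v + sum_{u in N(v)} x_u >= 1. A candidate solution can
+// therefore be checked independently of any solver with nothing more than an
+// adjacency list, e.g.:
+//
+//     /// Returns true if every vertex is selected or has a selected neighbor.
+//     fn is_dominating(neighbors: &[Vec<usize>], config: &[usize]) -> bool {
+//         (0..config.len()).all(|v| {
+//             config[v] == 1 || neighbors[v].iter().any(|&u| config[u] == 1)
+//         })
+//     }
+//
+// Here `neighbors` is an assumed adjacency-list representation of the graph
+// (not a method of `DominatingSet`); any `extracted` vector accepted by the
+// surrounding tests must satisfy this predicate.
+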
+#[test] +fn test_cycle_graph() { + // Cycle C5: 0-1-2-3-4-0 + // Minimum dominating set size = 2 + let problem = DominatingSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]); + let reduction: ReductionDSToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_size = problem.solution_size(&bf_solutions[0]).size; + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size = problem.solution_size(&extracted).size; + + assert_eq!(bf_size, ilp_size); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} diff --git a/src/tests_unit/rules/factoring_circuit.rs b/src/tests_unit/rules/factoring_circuit.rs new file mode 100644 index 0000000..aaeb4c7 --- /dev/null +++ b/src/tests_unit/rules/factoring_circuit.rs @@ -0,0 +1,297 @@ +use super::*; +use std::collections::HashMap; + +#[test] +fn test_read_bit() { + // 6 = 110 in binary (little-endian: bit1=0, bit2=1, bit3=1) + assert!(!read_bit(6, 1)); // bit 1 (LSB) = 0 + assert!(read_bit(6, 2)); // bit 2 = 1 + assert!(read_bit(6, 3)); // bit 3 = 1 + assert!(!read_bit(6, 4)); // bit 4 = 0 + + // 15 = 1111 in binary + assert!(read_bit(15, 1)); + assert!(read_bit(15, 2)); + assert!(read_bit(15, 3)); + assert!(read_bit(15, 4)); + assert!(!read_bit(15, 5)); +} + +#[test] +fn test_reduction_structure() { + // Factor 6 = 2 * 3 with 2-bit factors + let factoring = Factoring::new(2, 2, 6); + let reduction = ReduceTo::>::reduce_to(&factoring); + + assert_eq!(reduction.p_vars().len(), 2); + assert_eq!(reduction.q_vars().len(), 2); + assert_eq!(reduction.m_vars().len(), 4); // 2 + 2 = 4 bits for product +} + +#[test] +fn test_reduction_structure_3x3() { + // Factor 15 = 3 * 5 with 3-bit factors + let factoring = Factoring::new(3, 3, 15); + let reduction = ReduceTo::>::reduce_to(&factoring); + + assert_eq!(reduction.p_vars().len(), 3); + assert_eq!(reduction.q_vars().len(), 3); + assert_eq!(reduction.m_vars().len(), 6); // 3 + 3 = 6 bits for product +} + +/// Helper function to evaluate a circuit with given inputs. +/// Returns a HashMap of all variable assignments after propagation. +fn evaluate_multiplier_circuit( + reduction: &ReductionFactoringToCircuit, + p_val: u64, + q_val: u64, +) -> HashMap { + let circuit = reduction.target_problem().circuit(); + let mut assignments: HashMap = HashMap::new(); + + // Set input variables for p + for (i, var_name) in reduction.p_vars().iter().enumerate() { + let bit = ((p_val >> i) & 1) == 1; + assignments.insert(var_name.clone(), bit); + } + + // Set input variables for q + for (i, var_name) in reduction.q_vars().iter().enumerate() { + let bit = ((q_val >> i) & 1) == 1; + assignments.insert(var_name.clone(), bit); + } + + // Evaluate the circuit assignments in order + for assign in &circuit.assignments { + let result = assign.expr.evaluate(&assignments); + for out in &assign.outputs { + assignments.insert(out.clone(), result); + } + } + + assignments +} + +/// Check if inputs satisfying the circuit give correct factorization. +/// This tests the core functionality: given p and q, does the circuit +/// correctly identify when p * q = target? 
+fn check_factorization_satisfies( + factoring: &Factoring, + reduction: &ReductionFactoringToCircuit, + p_val: u64, + q_val: u64, +) -> bool { + let assignments = evaluate_multiplier_circuit(reduction, p_val, q_val); + let circuit = reduction.target_problem().circuit(); + + // Check if all assignments are satisfied + for assign in &circuit.assignments { + if !assign.is_satisfied(&assignments) { + return false; + } + } + + // Also verify the product equals target (redundant but explicit) + p_val * q_val == factoring.target() +} + +#[test] +fn test_factorization_6_satisfies_circuit() { + let factoring = Factoring::new(2, 2, 6); + let reduction = ReduceTo::>::reduce_to(&factoring); + + // 2 * 3 = 6 should satisfy the circuit + assert!( + check_factorization_satisfies(&factoring, &reduction, 2, 3), + "2 * 3 = 6 should satisfy the circuit" + ); + + // 3 * 2 = 6 should also satisfy + assert!( + check_factorization_satisfies(&factoring, &reduction, 3, 2), + "3 * 2 = 6 should satisfy the circuit" + ); + + // 1 * 1 = 1 != 6 should NOT satisfy (product constraint fails) + assert!( + !check_factorization_satisfies(&factoring, &reduction, 1, 1), + "1 * 1 != 6 should not satisfy the circuit" + ); + + // 2 * 2 = 4 != 6 should NOT satisfy + assert!( + !check_factorization_satisfies(&factoring, &reduction, 2, 2), + "2 * 2 != 6 should not satisfy the circuit" + ); +} + +#[test] +fn test_factorization_15_satisfies_circuit() { + let factoring = Factoring::new(4, 4, 15); + let reduction = ReduceTo::>::reduce_to(&factoring); + + // Valid factorizations of 15 + assert!( + check_factorization_satisfies(&factoring, &reduction, 3, 5), + "3 * 5 = 15 should satisfy" + ); + assert!( + check_factorization_satisfies(&factoring, &reduction, 5, 3), + "5 * 3 = 15 should satisfy" + ); + assert!( + check_factorization_satisfies(&factoring, &reduction, 1, 15), + "1 * 15 = 15 should satisfy" + ); + assert!( + check_factorization_satisfies(&factoring, &reduction, 15, 1), + "15 * 1 = 15 should satisfy" + ); + + // Invalid: 2 * 7 = 14 != 15 + assert!( + !check_factorization_satisfies(&factoring, &reduction, 2, 7), + "2 * 7 != 15 should not satisfy" + ); +} + +#[test] +fn test_factorization_21_satisfies_circuit() { + let factoring = Factoring::new(3, 3, 21); + let reduction = ReduceTo::>::reduce_to(&factoring); + + // 3 * 7 = 21 + assert!( + check_factorization_satisfies(&factoring, &reduction, 3, 7), + "3 * 7 = 21 should satisfy" + ); + assert!( + check_factorization_satisfies(&factoring, &reduction, 7, 3), + "7 * 3 = 21 should satisfy" + ); + + // Invalid: 3 * 5 = 15 != 21 + assert!( + !check_factorization_satisfies(&factoring, &reduction, 3, 5), + "3 * 5 != 21 should not satisfy" + ); +} + +#[test] +fn test_source_and_target_size() { + let factoring = Factoring::new(3, 4, 15); + let reduction = ReduceTo::>::reduce_to(&factoring); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_bits_first"), Some(3)); + assert_eq!(source_size.get("num_bits_second"), Some(4)); + assert!(target_size.get("num_variables").unwrap() > 0); + assert!(target_size.get("num_assignments").unwrap() > 0); +} + +#[test] +fn test_extract_solution() { + let factoring = Factoring::new(2, 2, 6); + let reduction = ReduceTo::>::reduce_to(&factoring); + let circuit_sat = reduction.target_problem(); + + // Create a solution where p=2 (binary: 01) and q=3 (binary: 11) + // We need to find the indices of p1, p2, q1, q2 in the variable list + let var_names = circuit_sat.variable_names(); + 
let mut sol = vec![0usize; var_names.len()]; + + // Now evaluate the circuit to set all internal variables correctly + let assignments = evaluate_multiplier_circuit(&reduction, 2, 3); + for (i, name) in var_names.iter().enumerate() { + if let Some(&val) = assignments.get(name) { + sol[i] = if val { 1 } else { 0 }; + } + } + + let factoring_sol = reduction.extract_solution(&sol); + assert_eq!( + factoring_sol.len(), + 4, + "Should have 4 bits (2 for p, 2 for q)" + ); + + let (p, q) = factoring.read_factors(&factoring_sol); + assert_eq!(p, 2, "p should be 2"); + assert_eq!(q, 3, "q should be 3"); + assert_eq!(p * q, 6, "Product should equal target"); +} + +#[test] +fn test_prime_7_only_trivial_factorizations() { + let factoring = Factoring::new(3, 3, 7); + let reduction = ReduceTo::>::reduce_to(&factoring); + + // Check that only trivial factorizations satisfy + for p in 0..8u64 { + for q in 0..8u64 { + let satisfies = check_factorization_satisfies(&factoring, &reduction, p, q); + let is_valid_factorization = p * q == 7; + + if is_valid_factorization { + assert!(satisfies, "{}*{}=7 should satisfy the circuit", p, q); + // Check it's a trivial factorization (1*7 or 7*1) + assert!( + (p == 1 && q == 7) || (p == 7 && q == 1), + "7 is prime, so only 1*7 or 7*1 should work" + ); + } else if p > 0 && q > 0 { + // Non-zero products that don't equal 7 should not satisfy + assert!( + !satisfies, + "{}*{}={} != 7 should not satisfy the circuit", + p, + q, + p * q + ); + } + } + } +} + +#[test] +fn test_all_2bit_factorizations() { + // Test all possible 2-bit * 2-bit multiplications for target 6 + let factoring = Factoring::new(2, 2, 6); + let reduction = ReduceTo::>::reduce_to(&factoring); + + let mut valid_factorizations = Vec::new(); + for p in 0..4u64 { + for q in 0..4u64 { + if check_factorization_satisfies(&factoring, &reduction, p, q) { + valid_factorizations.push((p, q)); + } + } + } + + // Only 2*3 and 3*2 should satisfy (both give 6) + assert_eq!( + valid_factorizations.len(), + 2, + "Should find exactly 2 factorizations of 6" + ); + assert!(valid_factorizations.contains(&(2, 3)), "Should find 2*3"); + assert!(valid_factorizations.contains(&(3, 2)), "Should find 3*2"); +} + +#[test] +fn test_factorization_1_trivial() { + // Factor 1 = 1 * 1 + let factoring = Factoring::new(2, 2, 1); + let reduction = ReduceTo::>::reduce_to(&factoring); + + assert!( + check_factorization_satisfies(&factoring, &reduction, 1, 1), + "1 * 1 = 1 should satisfy" + ); + assert!( + !check_factorization_satisfies(&factoring, &reduction, 2, 1), + "2 * 1 = 2 != 1 should not satisfy" + ); +} diff --git a/src/tests_unit/rules/factoring_ilp.rs b/src/tests_unit/rules/factoring_ilp.rs new file mode 100644 index 0000000..6f6bfbd --- /dev/null +++ b/src/tests_unit/rules/factoring_ilp.rs @@ -0,0 +1,302 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Factor 6 with 2-bit factors + let problem = Factoring::new(2, 2, 6); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check variable count: m + n + m*n + (m+n) = 2 + 2 + 4 + 4 = 12 + assert_eq!(ilp.num_vars, 12); + + // Check constraint count: 3*m*n + (m+n) + 1 = 12 + 4 + 1 = 17 + assert_eq!(ilp.constraints.len(), 17); + + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_variable_layout() { + let problem = Factoring::new(3, 2, 6); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + + 
// p variables: [0, 1, 2] + assert_eq!(reduction.p_var(0), 0); + assert_eq!(reduction.p_var(2), 2); + + // q variables: [3, 4] + assert_eq!(reduction.q_var(0), 3); + assert_eq!(reduction.q_var(1), 4); + + // z variables: [5, 6, 7, 8, 9, 10] (3x2 = 6) + assert_eq!(reduction.z_var(0, 0), 5); + assert_eq!(reduction.z_var(0, 1), 6); + assert_eq!(reduction.z_var(1, 0), 7); + assert_eq!(reduction.z_var(2, 1), 10); + + // carry variables: [11, 12, 13, 14, 15] (m+n = 5) + assert_eq!(reduction.carry_var(0), 11); + assert_eq!(reduction.carry_var(4), 15); +} + +#[test] +fn test_factor_6() { + // 6 = 2 × 3 or 3 × 2 + let problem = Factoring::new(2, 2, 6); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Verify it's a valid factorization + assert!(problem.is_valid_factorization(&extracted)); + + let (a, b) = problem.read_factors(&extracted); + assert_eq!(a * b, 6); +} + +#[test] +fn test_factor_15() { + // Closed-loop test for factoring 15 = 3 × 5 (or 5 × 3, 1 × 15, 15 × 1) + + // 1. Create factoring instance: find p (4-bit) × q (4-bit) = 15 + let problem = Factoring::new(4, 4, 15); + + // 2. Reduce to ILP + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 3. Solve ILP + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + + // 4. Extract factoring solution + let extracted = reduction.extract_solution(&ilp_solution); + + // 5. Verify: solution is valid and p × q = 15 + assert!(problem.is_valid_factorization(&extracted)); + let (p, q) = problem.read_factors(&extracted); + assert_eq!(p * q, 15); // e.g., (3, 5) or (5, 3) +} + +#[test] +fn test_factor_35() { + // 35 = 5 × 7 or 7 × 5 + let problem = Factoring::new(3, 3, 35); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(problem.is_valid_factorization(&extracted)); + + let (a, b) = problem.read_factors(&extracted); + assert_eq!(a * b, 35); +} + +#[test] +fn test_factor_one() { + // 1 = 1 × 1 + let problem = Factoring::new(2, 2, 1); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(problem.is_valid_factorization(&extracted)); + + let (a, b) = problem.read_factors(&extracted); + assert_eq!(a * b, 1); +} + +#[test] +fn test_factor_prime() { + // 7 is prime: 7 = 1 × 7 or 7 × 1 + let problem = Factoring::new(3, 3, 7); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(problem.is_valid_factorization(&extracted)); + + let (a, b) = problem.read_factors(&extracted); + assert_eq!(a * b, 7); +} + +#[test] +fn test_factor_square() { + // 9 
= 3 × 3 + let problem = Factoring::new(3, 3, 9); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(problem.is_valid_factorization(&extracted)); + + let (a, b) = problem.read_factors(&extracted); + assert_eq!(a * b, 9); +} + +#[test] +fn test_infeasible_target_too_large() { + // Target 100 with 2-bit factors (max product is 3 × 3 = 9) + let problem = Factoring::new(2, 2, 100); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(ilp); + + assert!(result.is_none(), "Should be infeasible"); +} + +#[test] +fn test_ilp_matches_brute_force() { + let problem = Factoring::new(2, 2, 6); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Get ILP solution + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let ilp_factors = reduction.extract_solution(&ilp_solution); + + // Get brute force solutions + let bf = BruteForce::new(); + let bf_solutions = bf.find_best(&problem); + + // ILP solution should be among brute force solutions + let (a, b) = problem.read_factors(&ilp_factors); + let bf_pairs: Vec<(u64, u64)> = bf_solutions + .iter() + .map(|s| problem.read_factors(s)) + .collect(); + + assert!( + bf_pairs.contains(&(a, b)), + "ILP solution ({}, {}) should be in brute force solutions {:?}", + a, + b, + bf_pairs + ); +} + +#[test] +fn test_solution_extraction() { + let problem = Factoring::new(2, 2, 6); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + + // Manually construct ILP solution for 2 × 3 = 6 + // p = 2 = binary 10 -> p_0=0, p_1=1 + // q = 3 = binary 11 -> q_0=1, q_1=1 + // z_00 = p_0 * q_0 = 0, z_01 = p_0 * q_1 = 0 + // z_10 = p_1 * q_0 = 1, z_11 = p_1 * q_1 = 1 + // Variables: [p0, p1, q0, q1, z00, z01, z10, z11, c0, c1, c2, c3] + let ilp_solution = vec![0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0]; + let extracted = reduction.extract_solution(&ilp_solution); + + // Should extract [p0, p1, q0, q1] = [0, 1, 1, 1] + assert_eq!(extracted, vec![0, 1, 1, 1]); + + let (a, b) = problem.read_factors(&extracted); + assert_eq!(a, 2); + assert_eq!(b, 3); + assert_eq!(a * b, 6); +} + +#[test] +fn test_source_and_target_size() { + let problem = Factoring::new(3, 4, 12); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_bits_first"), Some(3)); + assert_eq!(source_size.get("num_bits_second"), Some(4)); + + // num_vars = 3 + 4 + 12 + 7 = 26 + assert_eq!(target_size.get("num_vars"), Some(26)); + + // num_constraints = 3*12 + 7 + 1 = 44 + assert_eq!(target_size.get("num_constraints"), Some(44)); +} + +#[test] +fn test_solve_reduced() { + let problem = Factoring::new(2, 2, 6); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver + .solve_reduced(&problem) + .expect("solve_reduced should work"); + + assert!(problem.is_valid_factorization(&solution)); +} + +#[test] +fn test_asymmetric_bit_widths() { + // 12 = 3 × 4 or 4 × 3 or 2 × 6 or 6 × 2 or 1 × 12 or 12 × 1 + let problem = Factoring::new(2, 4, 12); + let 
reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(problem.is_valid_factorization(&extracted)); + + let (a, b) = problem.read_factors(&extracted); + assert_eq!(a * b, 12); +} + +#[test] +fn test_constraint_count_formula() { + // Verify constraint count matches formula: 3*m*n + (m+n) + 1 + for (m, n) in [(2, 2), (3, 3), (2, 4), (4, 2)] { + let problem = Factoring::new(m, n, 1); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let expected = 3 * m * n + (m + n) + 1; + assert_eq!( + ilp.constraints.len(), + expected, + "Constraint count mismatch for m={}, n={}", + m, + n + ); + } +} + +#[test] +fn test_variable_count_formula() { + // Verify variable count matches formula: m + n + m*n + (m+n) + for (m, n) in [(2, 2), (3, 3), (2, 4), (4, 2)] { + let problem = Factoring::new(m, n, 1); + let reduction: ReductionFactoringToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let expected = m + n + m * n + (m + n); + assert_eq!( + ilp.num_vars, expected, + "Variable count mismatch for m={}, n={}", + m, n + ); + } +} diff --git a/src/tests_unit/rules/graph.rs b/src/tests_unit/rules/graph.rs new file mode 100644 index 0000000..938851f --- /dev/null +++ b/src/tests_unit/rules/graph.rs @@ -0,0 +1,730 @@ +use super::*; +use crate::models::graph::{IndependentSet, VertexCovering}; +use crate::models::set::SetPacking; +use crate::rules::cost::MinimizeSteps; +use crate::topology::SimpleGraph; + +#[test] +fn test_find_direct_path() { + let graph = ReductionGraph::new(); + let paths = graph.find_paths::, VertexCovering>(); + assert!(!paths.is_empty()); + assert_eq!(paths[0].type_names.len(), 2); + assert_eq!(paths[0].len(), 1); // One reduction step +} + +#[test] +fn test_find_indirect_path() { + let graph = ReductionGraph::new(); + // IS -> VC -> IS -> SP or IS -> SP directly + let paths = graph.find_paths::, SetPacking>(); + assert!(!paths.is_empty()); +} + +#[test] +fn test_find_shortest_path() { + let graph = ReductionGraph::new(); + let path = graph.find_shortest_path::, SetPacking>(); + assert!(path.is_some()); + let path = path.unwrap(); + assert_eq!(path.len(), 1); // Direct path exists +} + +#[test] +fn test_has_direct_reduction() { + let graph = ReductionGraph::new(); + assert!(graph.has_direct_reduction::, VertexCovering>()); + assert!(graph.has_direct_reduction::, IndependentSet>()); +} + +#[test] +fn test_no_path() { + let graph = ReductionGraph::new(); + // No path between IndependentSet and QUBO (disconnected in graph topology) + let paths = + graph.find_paths::, crate::models::optimization::QUBO>(); + assert!(paths.is_empty()); +} + +#[test] +fn test_type_erased_paths() { + let graph = ReductionGraph::new(); + + // Different weight types should find the same path (type-erased) + let paths_i32 = graph.find_paths::< + crate::models::graph::MaxCut, + crate::models::optimization::SpinGlass, + >(); + let paths_f64 = graph.find_paths::< + crate::models::graph::MaxCut, + crate::models::optimization::SpinGlass, + >(); + + // Both should find paths since we use type-erased names + assert!(!paths_i32.is_empty()); + assert!(!paths_f64.is_empty()); + assert_eq!(paths_i32[0].type_names, paths_f64[0].type_names); +} + +#[test] +fn test_find_paths_by_name() { + let 
graph = ReductionGraph::new(); + + let paths = graph.find_paths_by_name("MaxCut", "SpinGlass"); + assert!(!paths.is_empty()); + assert_eq!(paths[0].len(), 1); // Direct path + + let paths = graph.find_paths_by_name("Factoring", "SpinGlass"); + assert!(!paths.is_empty()); + assert_eq!(paths[0].len(), 2); // Factoring -> CircuitSAT -> SpinGlass +} + +#[test] +fn test_problem_types() { + let graph = ReductionGraph::new(); + let types = graph.problem_types(); + assert!(types.len() >= 5); + assert!(types.iter().any(|t| t.contains("IndependentSet"))); + assert!(types.iter().any(|t| t.contains("VertexCovering"))); +} + +#[test] +fn test_graph_statistics() { + let graph = ReductionGraph::new(); + assert!(graph.num_types() >= 5); + assert!(graph.num_reductions() >= 6); +} + +#[test] +fn test_reduction_path_methods() { + let graph = ReductionGraph::new(); + let path = graph + .find_shortest_path::, VertexCovering>() + .unwrap(); + + assert!(!path.is_empty()); + assert!(path.source().unwrap().contains("IndependentSet")); + assert!(path.target().unwrap().contains("VertexCovering")); +} + +#[test] +fn test_bidirectional_paths() { + let graph = ReductionGraph::new(); + + // Forward path + let forward = graph.find_paths::, VertexCovering>(); + assert!(!forward.is_empty()); + + // Backward path + let backward = graph.find_paths::, IndependentSet>(); + assert!(!backward.is_empty()); +} + +#[test] +fn test_to_json() { + let graph = ReductionGraph::new(); + let json = graph.to_json(); + + // Check nodes + assert!(json.nodes.len() >= 10); + assert!(json.nodes.iter().any(|n| n.name == "IndependentSet")); + assert!(json.nodes.iter().any(|n| n.category == "graph")); + assert!(json.nodes.iter().any(|n| n.category == "optimization")); + + // Check edges + assert!(json.edges.len() >= 10); + + // Check that IS <-> VC is marked bidirectional + let is_vc_edge = json.edges.iter().find(|e| { + (e.source.name.contains("IndependentSet") && e.target.name.contains("VertexCovering")) + || (e.source.name.contains("VertexCovering") + && e.target.name.contains("IndependentSet")) + }); + assert!(is_vc_edge.is_some()); + assert!(is_vc_edge.unwrap().bidirectional); +} + +#[test] +fn test_to_json_string() { + let graph = ReductionGraph::new(); + let json_string = graph.to_json_string().unwrap(); + + // Should be valid JSON + assert!(json_string.contains("\"nodes\"")); + assert!(json_string.contains("\"edges\"")); + assert!(json_string.contains("IndependentSet")); + assert!(json_string.contains("\"category\"")); + assert!(json_string.contains("\"bidirectional\"")); +} + +#[test] +fn test_categorize_type() { + // Graph problems + assert_eq!( + ReductionGraph::categorize_type("IndependentSet"), + "graph" + ); + assert_eq!( + ReductionGraph::categorize_type("VertexCovering"), + "graph" + ); + assert_eq!(ReductionGraph::categorize_type("MaxCut"), "graph"); + assert_eq!(ReductionGraph::categorize_type("KColoring"), "graph"); + assert_eq!( + ReductionGraph::categorize_type("DominatingSet"), + "graph" + ); + assert_eq!(ReductionGraph::categorize_type("Matching"), "graph"); + + // Set problems + assert_eq!(ReductionGraph::categorize_type("SetPacking"), "set"); + assert_eq!(ReductionGraph::categorize_type("SetCovering"), "set"); + + // Optimization + assert_eq!( + ReductionGraph::categorize_type("SpinGlass"), + "optimization" + ); + assert_eq!(ReductionGraph::categorize_type("QUBO"), "optimization"); + + // Satisfiability + assert_eq!( + ReductionGraph::categorize_type("Satisfiability"), + "satisfiability" + ); + assert_eq!( + 
ReductionGraph::categorize_type("KSatisfiability<3, i32>"), + "satisfiability" + ); + assert_eq!( + ReductionGraph::categorize_type("CircuitSAT"), + "satisfiability" + ); + + // Specialized + assert_eq!(ReductionGraph::categorize_type("Factoring"), "specialized"); + + // Unknown + assert_eq!(ReductionGraph::categorize_type("UnknownProblem"), "other"); +} + +#[test] +fn test_sat_based_reductions() { + use crate::models::graph::KColoring; + use crate::models::graph::DominatingSet; + use crate::models::satisfiability::Satisfiability; + + let graph = ReductionGraph::new(); + + // SAT -> IS + assert!(graph.has_direct_reduction::, IndependentSet>()); + + // SAT -> KColoring + assert!(graph.has_direct_reduction::, KColoring<3, SimpleGraph, i32>>()); + + // SAT -> DominatingSet + assert!(graph.has_direct_reduction::, DominatingSet>()); +} + +#[test] +fn test_circuit_reductions() { + use crate::models::optimization::SpinGlass; + use crate::models::specialized::{CircuitSAT, Factoring}; + + let graph = ReductionGraph::new(); + + // Factoring -> CircuitSAT + assert!(graph.has_direct_reduction::>()); + + // CircuitSAT -> SpinGlass + assert!(graph.has_direct_reduction::, SpinGlass>()); + + // Find path from Factoring to SpinGlass + let paths = graph.find_paths::>(); + assert!(!paths.is_empty()); + let shortest = graph + .find_shortest_path::>() + .unwrap(); + assert_eq!(shortest.len(), 2); // Factoring -> CircuitSAT -> SpinGlass +} + +#[test] +fn test_optimization_reductions() { + use crate::models::graph::MaxCut; + use crate::models::optimization::{SpinGlass, QUBO}; + + let graph = ReductionGraph::new(); + + // SpinGlass <-> QUBO (bidirectional) + assert!(graph.has_direct_reduction::, QUBO>()); + assert!(graph.has_direct_reduction::, SpinGlass>()); + + // MaxCut <-> SpinGlass (bidirectional) + assert!(graph.has_direct_reduction::, SpinGlass>()); + assert!(graph.has_direct_reduction::, MaxCut>()); +} + +#[test] +fn test_ksat_reductions() { + use crate::models::satisfiability::{KSatisfiability, Satisfiability}; + + let graph = ReductionGraph::new(); + + // SAT <-> 3-SAT (bidirectional) + assert!(graph.has_direct_reduction::, KSatisfiability<3, i32>>()); + assert!(graph.has_direct_reduction::, Satisfiability>()); +} + +#[test] +fn test_all_categories_present() { + let graph = ReductionGraph::new(); + let json = graph.to_json(); + + let categories: std::collections::HashSet<&str> = + json.nodes.iter().map(|n| n.category.as_str()).collect(); + + assert!(categories.contains("graph")); + assert!(categories.contains("set")); + assert!(categories.contains("optimization")); + assert!(categories.contains("satisfiability")); + assert!(categories.contains("specialized")); +} + +#[test] +fn test_empty_path_source_target() { + let path = ReductionPath { type_names: vec![] }; + assert!(path.is_empty()); + assert_eq!(path.len(), 0); + assert!(path.source().is_none()); + assert!(path.target().is_none()); +} + +#[test] +fn test_single_node_path() { + let path = ReductionPath { + type_names: vec!["IndependentSet"], + }; + assert!(!path.is_empty()); + assert_eq!(path.len(), 0); // No reductions, just one type + assert_eq!(path.source(), Some("IndependentSet")); + assert_eq!(path.target(), Some("IndependentSet")); +} + +#[test] +fn test_default_implementation() { + let graph1 = ReductionGraph::new(); + let graph2 = ReductionGraph::default(); + + assert_eq!(graph1.num_types(), graph2.num_types()); + assert_eq!(graph1.num_reductions(), graph2.num_reductions()); +} + +#[test] +fn test_to_json_file() { + use std::env; + use 
std::fs; + + let graph = ReductionGraph::new(); + let file_path = env::temp_dir().join("problemreductions_test_graph.json"); + + // Write to file + graph.to_json_file(&file_path).unwrap(); + + // Read back and verify + let content = fs::read_to_string(&file_path).unwrap(); + assert!(content.contains("\"nodes\"")); + assert!(content.contains("\"edges\"")); + assert!(content.contains("IndependentSet")); + + // Parse as generic JSON to verify validity + let parsed: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert!(!parsed["nodes"].as_array().unwrap().is_empty()); + assert!(!parsed["edges"].as_array().unwrap().is_empty()); + + // Clean up + let _ = fs::remove_file(&file_path); +} + +#[test] +fn test_has_direct_reduction_unregistered_types() { + // Test with a type that's not registered in the graph + struct UnregisteredType; + + let graph = ReductionGraph::new(); + + // Source type not registered + assert!(!graph.has_direct_reduction::>()); + + // Target type not registered + assert!(!graph.has_direct_reduction::, UnregisteredType>()); + + // Both types not registered + assert!(!graph.has_direct_reduction::()); +} + +#[test] +fn test_find_paths_unregistered_source() { + struct UnregisteredType; + + let graph = ReductionGraph::new(); + let paths = graph.find_paths::>(); + assert!(paths.is_empty()); +} + +#[test] +fn test_find_paths_unregistered_target() { + struct UnregisteredType; + + let graph = ReductionGraph::new(); + let paths = graph.find_paths::, UnregisteredType>(); + assert!(paths.is_empty()); +} + +#[test] +fn test_find_shortest_path_no_path() { + struct UnregisteredType; + + let graph = ReductionGraph::new(); + let path = graph.find_shortest_path::>(); + assert!(path.is_none()); +} + +#[test] +fn test_categorize_circuit_as_specialized() { + // CircuitSAT should be categorized as specialized (contains "Circuit") + assert_eq!( + ReductionGraph::categorize_type("CircuitSAT"), + "satisfiability" + ); + // But it contains "SAT" so it goes to satisfiability first + // Let's verify the actual behavior matches what the code does +} + +#[test] +fn test_edge_bidirectionality_detection() { + let graph = ReductionGraph::new(); + let json = graph.to_json(); + + // Count bidirectional and unidirectional edges + let bidirectional_count = json.edges.iter().filter(|e| e.bidirectional).count(); + let unidirectional_count = json.edges.iter().filter(|e| !e.bidirectional).count(); + + // We should have both types + assert!(bidirectional_count > 0, "Should have bidirectional edges"); + assert!(unidirectional_count > 0, "Should have unidirectional edges"); + + // Verify specific known bidirectional edges + let is_vc_bidir = json.edges.iter().any(|e| { + (e.source.name.contains("IndependentSet") && e.target.name.contains("VertexCovering") + || e.source.name.contains("VertexCovering") + && e.target.name.contains("IndependentSet")) + && e.bidirectional + }); + assert!(is_vc_bidir, "IS <-> VC should be bidirectional"); + + // Verify specific known unidirectional edge + let factoring_circuit_unidir = json.edges.iter().any(|e| { + e.source.name.contains("Factoring") + && e.target.name.contains("CircuitSAT") + && !e.bidirectional + }); + assert!( + factoring_circuit_unidir, + "Factoring -> CircuitSAT should be unidirectional" + ); +} + +// New tests for set-theoretic path finding + +#[test] +fn test_graph_hierarchy_built() { + let graph = ReductionGraph::new(); + let hierarchy = graph.graph_hierarchy(); + + // Should have relationships from GraphSubtypeEntry registrations + // UnitDiskGraph 
-> PlanarGraph -> SimpleGraph + // BipartiteGraph -> SimpleGraph + assert!( + hierarchy + .get("UnitDiskGraph") + .map(|s| s.contains("SimpleGraph")) + .unwrap_or(false), + "UnitDiskGraph should have SimpleGraph as supertype" + ); + assert!( + hierarchy + .get("PlanarGraph") + .map(|s| s.contains("SimpleGraph")) + .unwrap_or(false), + "PlanarGraph should have SimpleGraph as supertype" + ); +} + +#[test] +fn test_is_graph_subtype_reflexive() { + let graph = ReductionGraph::new(); + + // Every type is a subtype of itself + assert!(graph.is_graph_subtype("SimpleGraph", "SimpleGraph")); + assert!(graph.is_graph_subtype("PlanarGraph", "PlanarGraph")); + assert!(graph.is_graph_subtype("UnitDiskGraph", "UnitDiskGraph")); +} + +#[test] +fn test_is_graph_subtype_direct() { + let graph = ReductionGraph::new(); + + // Direct subtype relationships + assert!(graph.is_graph_subtype("PlanarGraph", "SimpleGraph")); + assert!(graph.is_graph_subtype("BipartiteGraph", "SimpleGraph")); + assert!(graph.is_graph_subtype("UnitDiskGraph", "PlanarGraph")); +} + +#[test] +fn test_is_graph_subtype_transitive() { + let graph = ReductionGraph::new(); + + // Transitive closure: UnitDiskGraph -> PlanarGraph -> SimpleGraph + assert!(graph.is_graph_subtype("UnitDiskGraph", "SimpleGraph")); +} + +#[test] +fn test_is_graph_subtype_not_supertype() { + let graph = ReductionGraph::new(); + + // SimpleGraph is NOT a subtype of PlanarGraph (only the reverse) + assert!(!graph.is_graph_subtype("SimpleGraph", "PlanarGraph")); + assert!(!graph.is_graph_subtype("SimpleGraph", "UnitDiskGraph")); +} + +#[test] +fn test_rule_applicable_same_graphs() { + let graph = ReductionGraph::new(); + + // Rule for SimpleGraph -> SimpleGraph applies to same + assert!(graph.rule_applicable("SimpleGraph", "SimpleGraph", "SimpleGraph", "SimpleGraph")); +} + +#[test] +fn test_rule_applicable_subtype_source() { + let graph = ReductionGraph::new(); + + // Rule for SimpleGraph -> SimpleGraph applies when source is PlanarGraph + // (because PlanarGraph <= SimpleGraph) + assert!(graph.rule_applicable("PlanarGraph", "SimpleGraph", "SimpleGraph", "SimpleGraph")); +} + +#[test] +fn test_rule_applicable_subtype_target() { + let graph = ReductionGraph::new(); + + // Rule producing PlanarGraph applies when we want SimpleGraph + // (because PlanarGraph <= SimpleGraph) + assert!(graph.rule_applicable("SimpleGraph", "SimpleGraph", "SimpleGraph", "PlanarGraph")); +} + +#[test] +fn test_rule_not_applicable_wrong_source() { + let graph = ReductionGraph::new(); + + // Rule requiring PlanarGraph does NOT apply to SimpleGraph source + // (because SimpleGraph is NOT <= PlanarGraph) + assert!(!graph.rule_applicable("SimpleGraph", "SimpleGraph", "PlanarGraph", "SimpleGraph")); +} + +#[test] +fn test_rule_not_applicable_wrong_target() { + let graph = ReductionGraph::new(); + + // Rule producing SimpleGraph does NOT apply when we need PlanarGraph + // (because SimpleGraph is NOT <= PlanarGraph) + assert!(!graph.rule_applicable("SimpleGraph", "PlanarGraph", "SimpleGraph", "SimpleGraph")); +} + +#[test] +fn test_find_cheapest_path_minimize_steps() { + let graph = ReductionGraph::new(); + let cost_fn = MinimizeSteps; + let input_size = ProblemSize::new(vec![("n", 10), ("m", 20)]); + + // Find path from IndependentSet to VertexCovering on SimpleGraph + let path = graph.find_cheapest_path( + ("IndependentSet", "SimpleGraph"), + ("VertexCovering", "SimpleGraph"), + &input_size, + &cost_fn, + ); + + assert!(path.is_some()); + let path = path.unwrap(); + assert_eq!(path.len(), 1); // 
Direct path +} + +#[test] +fn test_find_cheapest_path_multi_step() { + let graph = ReductionGraph::new(); + let cost_fn = MinimizeSteps; + let input_size = ProblemSize::new(vec![("num_vertices", 10), ("num_edges", 20)]); + + // Find multi-step path where all edges use compatible graph types + // IndependentSet (SimpleGraph) -> SetPacking (SimpleGraph) + // This tests the algorithm can find paths with consistent graph types + let path = graph.find_cheapest_path( + ("IndependentSet", "SimpleGraph"), + ("SetPacking", "SimpleGraph"), + &input_size, + &cost_fn, + ); + + assert!(path.is_some()); + let path = path.unwrap(); + assert_eq!(path.len(), 1); // Direct path: IndependentSet -> SetPacking +} + +#[test] +fn test_find_cheapest_path_no_path() { + let graph = ReductionGraph::new(); + let cost_fn = MinimizeSteps; + let input_size = ProblemSize::new(vec![("n", 10)]); + + // No path from IndependentSet to QUBO + let path = graph.find_cheapest_path( + ("IndependentSet", "SimpleGraph"), + ("QUBO", "SimpleGraph"), + &input_size, + &cost_fn, + ); + + assert!(path.is_none()); +} + +#[test] +fn test_find_cheapest_path_unknown_source() { + let graph = ReductionGraph::new(); + let cost_fn = MinimizeSteps; + let input_size = ProblemSize::new(vec![("n", 10)]); + + let path = graph.find_cheapest_path( + ("UnknownProblem", "SimpleGraph"), + ("VertexCovering", "SimpleGraph"), + &input_size, + &cost_fn, + ); + + assert!(path.is_none()); +} + +#[test] +fn test_find_cheapest_path_unknown_target() { + let graph = ReductionGraph::new(); + let cost_fn = MinimizeSteps; + let input_size = ProblemSize::new(vec![("n", 10)]); + + let path = graph.find_cheapest_path( + ("IndependentSet", "SimpleGraph"), + ("UnknownProblem", "SimpleGraph"), + &input_size, + &cost_fn, + ); + + assert!(path.is_none()); +} + +#[test] +fn test_reduction_edge_struct() { + let edge = ReductionEdge { + source_variant: &[("graph", "PlanarGraph"), ("weight", "Unweighted")], + target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + overhead: ReductionOverhead::default(), + }; + + assert_eq!(edge.source_graph(), "PlanarGraph"); + assert_eq!(edge.target_graph(), "SimpleGraph"); +} + +#[test] +fn test_reduction_edge_default_graph() { + // When no "graph" key is present, default to SimpleGraph + let edge = ReductionEdge { + source_variant: &[("weight", "Unweighted")], + target_variant: &[], + overhead: ReductionOverhead::default(), + }; + + assert_eq!(edge.source_graph(), "SimpleGraph"); + assert_eq!(edge.target_graph(), "SimpleGraph"); +} + +#[test] +fn test_variant_to_map() { + let variant: &[(&str, &str)] = &[("graph", "SimpleGraph"), ("weight", "i32")]; + let map = ReductionGraph::variant_to_map(variant); + assert_eq!(map.get("graph"), Some(&"SimpleGraph".to_string())); + assert_eq!(map.get("weight"), Some(&"i32".to_string())); + assert_eq!(map.len(), 2); +} + +#[test] +fn test_variant_to_map_empty() { + let variant: &[(&str, &str)] = &[]; + let map = ReductionGraph::variant_to_map(variant); + assert!(map.is_empty()); +} + +#[test] +fn test_make_variant_ref() { + let variant: &[(&str, &str)] = &[("graph", "PlanarGraph"), ("weight", "f64")]; + let variant_ref = ReductionGraph::make_variant_ref("IndependentSet", variant); + assert_eq!(variant_ref.name, "IndependentSet"); + assert_eq!( + variant_ref.variant.get("graph"), + Some(&"PlanarGraph".to_string()) + ); + assert_eq!(variant_ref.variant.get("weight"), Some(&"f64".to_string())); +} + +#[test] +fn test_to_json_nodes_have_variants() { + let graph = ReductionGraph::new(); + let 
json = graph.to_json(); + + // Check that nodes have variant information + for node in &json.nodes { + // Verify node has a name + assert!(!node.name.is_empty()); + // Verify node has a category + assert!(!node.category.is_empty()); + } +} + +#[test] +fn test_to_json_edges_have_variants() { + let graph = ReductionGraph::new(); + let json = graph.to_json(); + + // Check that edges have source and target variant refs + for edge in &json.edges { + assert!(!edge.source.name.is_empty()); + assert!(!edge.target.name.is_empty()); + } +} + +#[test] +fn test_json_variant_content() { + let graph = ReductionGraph::new(); + let json = graph.to_json(); + + // Find a node and verify its variant contains expected keys + let is_node = json.nodes.iter().find(|n| n.name == "IndependentSet"); + assert!(is_node.is_some(), "IndependentSet node should exist"); + + // Find an edge involving IndependentSet (could be source or target) + let is_edge = json + .edges + .iter() + .find(|e| e.source.name == "IndependentSet" || e.target.name == "IndependentSet"); + assert!( + is_edge.is_some(), + "Edge involving IndependentSet should exist" + ); +} diff --git a/src/tests_unit/rules/independentset_ilp.rs b/src/tests_unit/rules/independentset_ilp.rs new file mode 100644 index 0000000..f92aa67 --- /dev/null +++ b/src/tests_unit/rules/independentset_ilp.rs @@ -0,0 +1,234 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Triangle graph: 3 vertices, 3 edges + let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); + assert_eq!( + ilp.constraints.len(), + 3, + "Should have one constraint per edge" + ); + assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } + + // Each constraint should be x_i + x_j <= 1 + for constraint in &ilp.constraints { + assert_eq!(constraint.terms.len(), 2); + assert!((constraint.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_reduction_weighted() { + let problem = IndependentSet::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check that weights are correctly transferred to objective + let mut coeffs: Vec = vec![0.0; 3]; + for &(var, coef) in &ilp.objective { + coeffs[var] = coef; + } + assert!((coeffs[0] - 5.0).abs() < 1e-9); + assert!((coeffs[1] - 10.0).abs() < 1e-9); + assert!((coeffs[2] - 15.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_equals_brute_force_triangle() { + // Triangle graph: max IS = 1 vertex + let problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_best(&problem); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Both should find optimal size = 1 + let bf_size: usize = bf_solutions[0].iter().sum(); + let ilp_size: usize = extracted.iter().sum(); + 
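// Summing the 0/1 indicator vectors counts selected vertices, which equals the + // IS objective here because this instance is unweighted (the weighted test below + // uses solution_size instead) +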
assert_eq!(bf_size, 1); + assert_eq!(ilp_size, 1); + + // Verify the ILP solution is valid for the original problem + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); +} + +#[test] +fn test_ilp_solution_equals_brute_force_path() { + // Path graph 0-1-2-3: max IS = 2 (e.g., {0, 2} or {1, 3} or {0, 3}) + let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force + let bf_solutions = bf.find_best(&problem); + let bf_size: usize = bf_solutions[0].iter().sum(); + + // Solve via ILP + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size: usize = extracted.iter().sum(); + + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify validity + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_ilp_solution_equals_brute_force_weighted() { + // Weighted problem: vertex 1 has high weight but is connected to both 0 and 2 + // 0 -- 1 -- 2 + // Weights: [1, 100, 1] + // Max IS by weight: just vertex 1 (weight 100) beats 0+2 (weight 2) + let problem = IndependentSet::with_weights(3, vec![(0, 1), (1, 2)], vec![1, 100, 1]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_obj = problem.solution_size(&bf_solutions[0]).size; + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_obj = problem.solution_size(&extracted).size; + + assert_eq!(bf_obj, 100); + assert_eq!(ilp_obj, 100); + + // Verify the solution selects vertex 1 + assert_eq!(extracted, vec![0, 1, 0]); +} + +#[test] +fn test_solution_extraction() { + let problem = IndependentSet::::new(4, vec![(0, 1), (2, 3)]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 0, 0, 1]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 0, 0, 1]); + + // Verify this is a valid IS (0 and 3 are not adjacent) + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_source_and_target_size() { + let problem = IndependentSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(4)); + + assert_eq!(target_size.get("num_vars"), Some(5)); + assert_eq!(target_size.get("num_constraints"), Some(4)); +} + +#[test] +fn test_empty_graph() { + // Graph with no edges: all vertices can be selected + let problem = IndependentSet::::new(3, vec![]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.constraints.len(), 0); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + 
let extracted = reduction.extract_solution(&ilp_solution); + + // All vertices should be selected + assert_eq!(extracted, vec![1, 1, 1]); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 3); +} + +#[test] +fn test_complete_graph() { + // Complete graph K4: max IS = 1 + let problem = + IndependentSet::::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.constraints.len(), 6); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 1); +} + +#[test] +fn test_solve_reduced() { + // Test the ILPSolver::solve_reduced method + let problem = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver + .solve_reduced(&problem) + .expect("solve_reduced should work"); + + let sol_result = problem.solution_size(&solution); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); +} + +#[test] +fn test_bipartite_graph() { + // Bipartite graph: 0-2, 0-3, 1-2, 1-3 (two independent sets: {0,1} and {2,3}) + // With equal weights, max IS = 2 + let problem = IndependentSet::::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); + let reduction: ReductionISToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); + + // Should select either {0, 1} or {2, 3} + let sum: usize = extracted.iter().sum(); + assert_eq!(sum, 2); +} diff --git a/src/tests_unit/rules/independentset_setpacking.rs b/src/tests_unit/rules/independentset_setpacking.rs new file mode 100644 index 0000000..14f827f --- /dev/null +++ b/src/tests_unit/rules/independentset_setpacking.rs @@ -0,0 +1,134 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_is_to_setpacking() { + // Triangle graph + let is_problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::>::reduce_to(&is_problem); + let sp_problem = reduction.target_problem(); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp_problem); + + // Extract back + let is_solutions: Vec<_> = sp_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Max IS in triangle = 1 + for sol in &is_solutions { + let size: usize = sol.iter().sum(); + assert_eq!(size, 1); + } +} + +#[test] +fn test_setpacking_to_is() { + // Two disjoint sets and one overlapping + let sets = vec![ + vec![0, 1], + vec![2, 3], + vec![1, 2], // overlaps with both + ]; + let sp_problem = SetPacking::::new(sets); + let reduction: ReductionSPToIS = + ReduceTo::>::reduce_to(&sp_problem); + let is_problem = reduction.target_problem(); + + let solver = BruteForce::new(); + let is_solutions = solver.find_best(is_problem); + + // Max packing = 2 (sets 0 and 1) + for sol in &is_solutions { + let size: usize = sol.iter().sum(); + assert_eq!(size, 2); + } +} + +#[test] +fn test_roundtrip_is_sp_is() { + let 
original = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let solver = BruteForce::new(); + let original_solutions = solver.find_best(&original); + + // IS -> SP -> IS + let reduction1 = ReduceTo::>::reduce_to(&original); + let sp = reduction1.target_problem().clone(); + let reduction2: ReductionSPToIS = ReduceTo::>::reduce_to(&sp); + let roundtrip = reduction2.target_problem(); + + let roundtrip_solutions = solver.find_best(roundtrip); + + // Solutions should have same objective value + let orig_size: usize = original_solutions[0].iter().sum(); + let rt_size: usize = roundtrip_solutions[0].iter().sum(); + assert_eq!(orig_size, rt_size); +} + +#[test] +fn test_weighted_reduction() { + let is_problem = IndependentSet::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 20, 30]); + let reduction = ReduceTo::>::reduce_to(&is_problem); + let sp_problem = reduction.target_problem(); + + // Weights should be preserved + assert_eq!(sp_problem.weights_ref(), &vec![10, 20, 30]); +} + +#[test] +fn test_empty_graph() { + // No edges means all sets are empty (or we need to handle it) + let is_problem = IndependentSet::::new(3, vec![]); + let reduction = ReduceTo::>::reduce_to(&is_problem); + let sp_problem = reduction.target_problem(); + + // All sets should be empty (no edges to include) + assert_eq!(sp_problem.num_sets(), 3); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sp_problem); + + // With no overlaps, we can select all sets + assert_eq!(solutions[0].iter().sum::(), 3); +} + +#[test] +fn test_disjoint_sets() { + // Completely disjoint sets + let sets = vec![vec![0], vec![1], vec![2]]; + let sp_problem = SetPacking::::new(sets); + let reduction: ReductionSPToIS = + ReduceTo::>::reduce_to(&sp_problem); + let is_problem = reduction.target_problem(); + + // No edges in the intersection graph + assert_eq!(is_problem.num_edges(), 0); +} + +#[test] +fn test_reduction_sizes() { + // Test source_size and target_size methods + let is_problem = IndependentSet::::new(4, vec![(0, 1), (1, 2)]); + let reduction = ReduceTo::>::reduce_to(&is_problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + // Source and target sizes should have components + assert!(!source_size.components.is_empty()); + assert!(!target_size.components.is_empty()); + + // Test SP to IS sizes + let sets = vec![vec![0, 1], vec![2, 3]]; + let sp_problem = SetPacking::::new(sets); + let reduction2: ReductionSPToIS = + ReduceTo::>::reduce_to(&sp_problem); + + let source_size2 = reduction2.source_size(); + let target_size2 = reduction2.target_size(); + + assert!(!source_size2.components.is_empty()); + assert!(!target_size2.components.is_empty()); +} diff --git a/src/tests_unit/rules/matching_ilp.rs b/src/tests_unit/rules/matching_ilp.rs new file mode 100644 index 0000000..6a36176 --- /dev/null +++ b/src/tests_unit/rules/matching_ilp.rs @@ -0,0 +1,252 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Triangle graph: 3 vertices, 3 edges + let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 3, "Should have one variable per edge"); + // Each vertex has degree 2, so 3 constraints (one per vertex) + assert_eq!( + ilp.constraints.len(), + 3, + "Should have one constraint per vertex" + ); + 
assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } + + // Each constraint should be sum of incident edge vars <= 1 + for constraint in &ilp.constraints { + assert!((constraint.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_reduction_weighted() { + let problem = Matching::new(3, vec![(0, 1, 5), (1, 2, 10)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check that weights are correctly transferred to objective + let mut coeffs: Vec = vec![0.0; 2]; + for &(var, coef) in &ilp.objective { + coeffs[var] = coef; + } + assert!((coeffs[0] - 5.0).abs() < 1e-9); + assert!((coeffs[1] - 10.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_equals_brute_force_triangle() { + // Triangle graph: max matching = 1 edge + let problem = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_best(&problem); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Both should find optimal size = 1 (one edge) + let bf_size = problem.solution_size(&bf_solutions[0]).size; + let ilp_size = problem.solution_size(&extracted).size; + assert_eq!(bf_size, 1); + assert_eq!(ilp_size, 1); + + // Verify the ILP solution is valid for the original problem + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); +} + +#[test] +fn test_ilp_solution_equals_brute_force_path() { + // Path graph 0-1-2-3: max matching = 2 (edges {0-1, 2-3}) + let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force + let bf_solutions = bf.find_best(&problem); + let bf_size = problem.solution_size(&bf_solutions[0]).size; + + // Solve via ILP + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size = problem.solution_size(&extracted).size; + + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify validity + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_ilp_solution_equals_brute_force_weighted() { + // Weighted matching: edge 0-1 has high weight + // 0 -- 1 -- 2 + // Weights: [100, 1] + // Max matching by weight: just edge 0-1 (weight 100) beats edge 1-2 (weight 1) + let problem = Matching::new(3, vec![(0, 1, 100), (1, 2, 1)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_obj = problem.solution_size(&bf_solutions[0]).size; + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_obj = problem.solution_size(&extracted).size; + + 
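// Edges (0,1) and (1,2) share vertex 1, so at most one of them can be matched; + // the weight-100 edge wins, giving an objective of 100 for both solvers +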
assert_eq!(bf_obj, 100); + assert_eq!(ilp_obj, 100); + + // Verify the solution selects edge 0 (0-1) + assert_eq!(extracted, vec![1, 0]); +} + +#[test] +fn test_solution_extraction() { + let problem = Matching::::unweighted(4, vec![(0, 1), (2, 3)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 1]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 1]); + + // Verify this is a valid matching (edges 0-1 and 2-3 are disjoint) + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_source_and_target_size() { + let problem = + Matching::::unweighted(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(4)); + + assert_eq!(target_size.get("num_vars"), Some(4)); + // Constraints: one per vertex with degree >= 1 + // Vertices 0,1,2,3,4 have degrees 1,2,2,2,1 respectively + assert_eq!(target_size.get("num_constraints"), Some(5)); +} + +#[test] +fn test_empty_graph() { + // Graph with no edges: empty matching + let problem = Matching::::unweighted(3, vec![]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 0); + assert_eq!(ilp.constraints.len(), 0); + + let sol_result = problem.solution_size(&[]); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 0); +} + +#[test] +fn test_k4_perfect_matching() { + // Complete graph K4: can have perfect matching (2 edges covering all 4 vertices) + let problem = Matching::::unweighted( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 6 edges, 4 vertices with constraints + assert_eq!(ilp.num_vars, 6); + assert_eq!(ilp.constraints.len(), 4); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); // Perfect matching has 2 edges + + // Verify all vertices are matched + let sum: usize = extracted.iter().sum(); + assert_eq!(sum, 2); +} + +#[test] +fn test_star_graph() { + // Star graph with center vertex 0 connected to 1, 2, 3 + // Max matching = 1 (only one edge can be selected) + let problem = Matching::::unweighted(4, vec![(0, 1), (0, 2), (0, 3)]); + let reduction: ReductionMatchingToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 1); +} + +#[test] +fn test_bipartite_graph() { + // Bipartite graph: {0,1} and {2,3} with all cross edges + // Max matching = 2 (one perfect matching) + let problem = + Matching::::unweighted(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); + let reduction: ReductionMatchingToILP = 
ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); +} + +#[test] +fn test_solve_reduced() { + // Test the ILPSolver::solve_reduced method + let problem = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver + .solve_reduced(&problem) + .expect("solve_reduced should work"); + + let sol_result = problem.solution_size(&solution); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); +} diff --git a/src/tests_unit/rules/matching_setpacking.rs b/src/tests_unit/rules/matching_setpacking.rs new file mode 100644 index 0000000..dbb15cb --- /dev/null +++ b/src/tests_unit/rules/matching_setpacking.rs @@ -0,0 +1,193 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::topology::SimpleGraph; + +#[test] +fn test_matching_to_setpacking_structure() { + // Path graph 0-1-2 + let matching = Matching::::unweighted(3, vec![(0, 1), (1, 2)]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + // Should have 2 sets (one for each edge) + assert_eq!(sp.num_sets(), 2); + + // Sets should contain edge endpoints + let sets = sp.sets(); + assert_eq!(sets[0], vec![0, 1]); + assert_eq!(sets[1], vec![1, 2]); +} + +#[test] +fn test_matching_to_setpacking_path() { + // Path 0-1-2-3 with unit weights + let matching = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp); + + // Extract back to Matching solutions + let _matching_solutions: Vec<_> = sp_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Verify against direct Matching solution + let direct_solutions = solver.find_best(&matching); + + // Solutions should have same objective value + let sp_size: usize = sp_solutions[0].iter().sum(); + let direct_size: usize = direct_solutions[0].iter().sum(); + assert_eq!(sp_size, direct_size); + assert_eq!(sp_size, 2); // Max matching in path graph has 2 edges +} + +#[test] +fn test_matching_to_setpacking_triangle() { + // Triangle graph + let matching = Matching::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp); + + // Max matching in triangle = 1 (any single edge) + for sol in &sp_solutions { + assert_eq!(sol.iter().sum::(), 1); + } + + // Should have 3 optimal solutions (one for each edge) + assert_eq!(sp_solutions.len(), 3); +} + +#[test] +fn test_matching_to_setpacking_weighted() { + // Weighted edges: heavy edge should win over multiple light edges + let matching = + Matching::::new(4, vec![(0, 1, 100), (0, 2, 1), (1, 3, 1)]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + // Weights should be preserved + assert_eq!(sp.weights_ref(), &vec![100, 1, 1]); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp); + + // Edge 0-1 (weight 100) alone beats edges 0-2 + 1-3 (weight 2) + assert!(sp_solutions.contains(&vec![1, 0, 0])); + + 
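// The light edges (0,2) and (1,3) are vertex-disjoint, so selecting both is also a + // valid packing, but their combined weight (2) is still below the single heavy set (100) +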
// Verify through direct Matching solution + let direct_solutions = solver.find_best(&matching); + assert_eq!(matching.solution_size(&sp_solutions[0]).size, 100); + assert_eq!(matching.solution_size(&direct_solutions[0]).size, 100); +} + +#[test] +fn test_matching_to_setpacking_solution_extraction() { + let matching = Matching::::unweighted(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction = ReduceTo::>::reduce_to(&matching); + + // Test solution extraction is 1:1 + let sp_solution = vec![1, 0, 1]; + let matching_solution = reduction.extract_solution(&sp_solution); + assert_eq!(matching_solution, vec![1, 0, 1]); + + // Verify the extracted solution is valid for original Matching + assert!(matching.solution_size(&matching_solution).is_valid); +} + +#[test] +fn test_matching_to_setpacking_k4() { + // Complete graph K4: can have perfect matching (2 edges covering all 4 vertices) + let matching = Matching::::unweighted( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp); + let direct_solutions = solver.find_best(&matching); + + // Both should find matchings of size 2 + let sp_size: usize = sp_solutions[0].iter().sum(); + let direct_size: usize = direct_solutions[0].iter().sum(); + assert_eq!(sp_size, 2); + assert_eq!(direct_size, 2); +} + +#[test] +fn test_matching_to_setpacking_empty() { + // Graph with no edges + let matching = Matching::::unweighted(3, vec![]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + assert_eq!(sp.num_sets(), 0); +} + +#[test] +fn test_matching_to_setpacking_single_edge() { + let matching = Matching::::unweighted(2, vec![(0, 1)]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + assert_eq!(sp.num_sets(), 1); + assert_eq!(sp.sets()[0], vec![0, 1]); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp); + + // Should select the only set + assert_eq!(sp_solutions, vec![vec![1]]); +} + +#[test] +fn test_matching_to_setpacking_disjoint_edges() { + // Two disjoint edges: 0-1 and 2-3 + let matching = Matching::::unweighted(4, vec![(0, 1), (2, 3)]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp); + + // Both edges can be selected (they don't share vertices) + assert_eq!(sp_solutions, vec![vec![1, 1]]); +} + +#[test] +fn test_reduction_sizes() { + let matching = Matching::::unweighted(5, vec![(0, 1), (1, 2), (2, 3)]); + let reduction = ReduceTo::>::reduce_to(&matching); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(3)); + assert_eq!(target_size.get("num_sets"), Some(3)); +} + +#[test] +fn test_matching_to_setpacking_star() { + // Star graph: center vertex 0 connected to 1, 2, 3 + let matching = Matching::::unweighted(4, vec![(0, 1), (0, 2), (0, 3)]); + let reduction = ReduceTo::>::reduce_to(&matching); + let sp = reduction.target_problem(); + + let solver = BruteForce::new(); + let sp_solutions = solver.find_best(sp); + + // All edges share vertex 0, so max matching = 1 + for sol in &sp_solutions { + assert_eq!(sol.iter().sum::(), 1); + } + // Should have 3 optimal solutions + assert_eq!(sp_solutions.len(), 
3); +} diff --git a/src/tests_unit/rules/registry.rs b/src/tests_unit/rules/registry.rs new file mode 100644 index 0000000..5e7eb3d --- /dev/null +++ b/src/tests_unit/rules/registry.rs @@ -0,0 +1,124 @@ +use super::*; +use crate::poly; + +#[test] +fn test_reduction_overhead_evaluate() { + let overhead = ReductionOverhead::new(vec![("n", poly!(3 * m)), ("m", poly!(m ^ 2))]); + + let input = ProblemSize::new(vec![("m", 4)]); + let output = overhead.evaluate_output_size(&input); + + assert_eq!(output.get("n"), Some(12)); // 3 * 4 + assert_eq!(output.get("m"), Some(16)); // 4^2 +} + +#[test] +fn test_reduction_overhead_default() { + let overhead = ReductionOverhead::default(); + assert!(overhead.output_size.is_empty()); +} + +#[test] +fn test_reduction_entry_overhead() { + let entry = ReductionEntry { + source_name: "TestSource", + target_name: "TestTarget", + source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + overhead_fn: || ReductionOverhead::new(vec![("n", poly!(2 * n))]), + }; + + let overhead = entry.overhead(); + let input = ProblemSize::new(vec![("n", 5)]); + let output = overhead.evaluate_output_size(&input); + assert_eq!(output.get("n"), Some(10)); +} + +#[test] +fn test_reduction_entry_debug() { + let entry = ReductionEntry { + source_name: "A", + target_name: "B", + source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + overhead_fn: || ReductionOverhead::default(), + }; + + let debug_str = format!("{:?}", entry); + assert!(debug_str.contains("A")); + assert!(debug_str.contains("B")); +} + +#[test] +fn test_is_base_reduction_unweighted() { + let entry = ReductionEntry { + source_name: "A", + target_name: "B", + source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + overhead_fn: || ReductionOverhead::default(), + }; + assert!(entry.is_base_reduction()); +} + +#[test] +fn test_is_base_reduction_source_weighted() { + let entry = ReductionEntry { + source_name: "A", + target_name: "B", + source_variant: &[("graph", "SimpleGraph"), ("weight", "i32")], + target_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + overhead_fn: || ReductionOverhead::default(), + }; + assert!(!entry.is_base_reduction()); +} + +#[test] +fn test_is_base_reduction_target_weighted() { + let entry = ReductionEntry { + source_name: "A", + target_name: "B", + source_variant: &[("graph", "SimpleGraph"), ("weight", "Unweighted")], + target_variant: &[("graph", "SimpleGraph"), ("weight", "f64")], + overhead_fn: || ReductionOverhead::default(), + }; + assert!(!entry.is_base_reduction()); +} + +#[test] +fn test_is_base_reduction_both_weighted() { + let entry = ReductionEntry { + source_name: "A", + target_name: "B", + source_variant: &[("graph", "SimpleGraph"), ("weight", "i32")], + target_variant: &[("graph", "SimpleGraph"), ("weight", "f64")], + overhead_fn: || ReductionOverhead::default(), + }; + assert!(!entry.is_base_reduction()); +} + +#[test] +fn test_is_base_reduction_no_weight_key() { + // If no weight key is present, assume unweighted (base) + let entry = ReductionEntry { + source_name: "A", + target_name: "B", + source_variant: &[("graph", "SimpleGraph")], + target_variant: &[("graph", "SimpleGraph")], + overhead_fn: || ReductionOverhead::default(), + }; + assert!(entry.is_base_reduction()); +} + +#[test] +fn 
test_reduction_entries_registered() { + let entries: Vec<_> = inventory::iter::().collect(); + + // Should have at least some registered reductions + assert!(entries.len() >= 10); + + // Check specific reductions exist + assert!(entries + .iter() + .any(|e| e.source_name == "IndependentSet" && e.target_name == "VertexCovering")); +} diff --git a/src/tests_unit/rules/sat_coloring.rs b/src/tests_unit/rules/sat_coloring.rs new file mode 100644 index 0000000..017292d --- /dev/null +++ b/src/tests_unit/rules/sat_coloring.rs @@ -0,0 +1,304 @@ +use super::*; +use crate::models::satisfiability::CNFClause; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_constructor_basic_structure() { + let constructor = SATColoringConstructor::new(2); + + // Should have 2*2 + 3 = 7 vertices + assert_eq!(constructor.num_vertices, 7); + + // Check pos_vertices and neg_vertices + assert_eq!(constructor.pos_vertices, vec![3, 4]); + assert_eq!(constructor.neg_vertices, vec![5, 6]); + + // Check vmap + assert_eq!(constructor.vmap[&(0, false)], 3); + assert_eq!(constructor.vmap[&(0, true)], 5); + assert_eq!(constructor.vmap[&(1, false)], 4); + assert_eq!(constructor.vmap[&(1, true)], 6); +} + +#[test] +fn test_special_vertex_accessors() { + let constructor = SATColoringConstructor::new(1); + assert_eq!(constructor.true_vertex(), 0); + assert_eq!(constructor.false_vertex(), 1); + assert_eq!(constructor.aux_vertex(), 2); +} + +#[test] +fn test_simple_sat_to_coloring() { + // Simple SAT: (x1) - one clause with one literal + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let coloring = reduction.target_problem(); + + // Should have 2*1 + 3 = 5 base vertices + // Plus edges to set x1 to TRUE (attached to AUX and FALSE) + assert!(coloring.num_vertices() >= 5); +} + +#[test] +fn test_reduction_structure() { + // Satisfiable formula: (x1 OR x2) AND (NOT x1 OR x2) + // Just verify the reduction builds the correct structure + let sat = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 2])], + ); + + let reduction = ReduceTo::>::reduce_to(&sat); + let coloring = reduction.target_problem(); + + // Base vertices: 3 (TRUE, FALSE, AUX) + 2*2 (pos and neg for each var) = 7 + // Each 2-literal clause adds 5 vertices for OR gadget = 2 * 5 = 10 + // Total: 7 + 10 = 17 vertices + assert_eq!(coloring.num_vertices(), 17); + assert_eq!(coloring.num_colors(), 3); + assert_eq!(reduction.pos_vertices().len(), 2); + assert_eq!(reduction.neg_vertices().len(), 2); +} + +#[test] +fn test_unsatisfiable_formula() { + // Unsatisfiable: (x1) AND (NOT x1) + let sat = + Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let coloring = reduction.target_problem(); + + // Solve the coloring problem + let solver = BruteForce::new(); + let solutions = solver.find_best(coloring); + + // For an unsatisfiable formula, the coloring should have no valid solutions + // OR no valid coloring exists that extracts to a satisfying SAT assignment + let mut found_satisfying = false; + for sol in &solutions { + if coloring.solution_size(sol).is_valid { + let sat_sol = reduction.extract_solution(sol); + let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); + if sat.is_satisfying(&assignment) { + found_satisfying = true; + break; + } + } + } + + // The coloring should not yield a satisfying SAT assignment + // because the formula is unsatisfiable + // Note: The 
coloring graph itself may still be colorable, + // but the constraints make it impossible for x1 and NOT x1 + // to take the TRUE color simultaneously, so no valid coloring + // should extract to a satisfying assignment + assert!( + !found_satisfying, + "Unsatisfiable formula should not produce satisfying assignment" + ); +} + +#[test] +fn test_three_literal_clause_structure() { + // (x1 OR x2 OR x3) + let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let coloring = reduction.target_problem(); + + // Base vertices: 3 + 2*3 = 9 + // 3-literal clause needs 2 OR gadgets (x1 OR x2, then result OR x3) + // Each OR gadget adds 5 vertices, so 2*5 = 10 + // Total: 9 + 10 = 19 vertices + assert_eq!(coloring.num_vertices(), 19); + assert_eq!(coloring.num_colors(), 3); + assert_eq!(reduction.pos_vertices().len(), 3); + assert_eq!(reduction.neg_vertices().len(), 3); +} + +#[test] +fn test_source_and_target_size() { + let sat = Satisfiability::::new( + 3, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], + ); + let reduction = ReduceTo::>::reduce_to(&sat); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vars"), Some(3)); + assert_eq!(source_size.get("num_clauses"), Some(2)); + assert!(target_size.get("num_vertices").is_some()); + assert_eq!(target_size.get("num_colors"), Some(3)); +} + +#[test] +fn test_extract_solution_basic() { + // Simple case: one variable, one clause (x1) + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + // Manually construct a valid coloring where x1 has TRUE color + // Vertices: 0=TRUE, 1=FALSE, 2=AUX, 3=x1, 4=NOT_x1 + // Colors: TRUE=0, FALSE=1, AUX=2 + // For x1 to be true, pos_vertex[0]=3 should have color 0 (TRUE) + + // A valid coloring that satisfies x1=TRUE: + // - Vertex 0 (TRUE): color 0 + // - Vertex 1 (FALSE): color 1 + // - Vertex 2 (AUX): color 2 + // - Vertex 3 (x1): color 0 (TRUE) - connected to AUX(2), NOT_x1(4) + // - Vertex 4 (NOT_x1): color 1 (FALSE) - connected to AUX(2), x1(3) + + // However, the actual coloring depends on the full graph structure, + // so here we only verify that the reduction exposes one positive + // and one negative literal vertex + assert_eq!(reduction.pos_vertices().len(), 1); + assert_eq!(reduction.neg_vertices().len(), 1); +} + +#[test] +fn test_complex_formula_structure() { + // (x1 OR x2) AND (NOT x1 OR x3) AND (NOT x2 OR NOT x3) + let sat = Satisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2]), // x1 OR x2 + CNFClause::new(vec![-1, 3]), // NOT x1 OR x3 + CNFClause::new(vec![-2, -3]), // NOT x2 OR NOT x3 + ], + ); + + let reduction = ReduceTo::>::reduce_to(&sat); + let coloring = reduction.target_problem(); + + // Base vertices: 3 + 2*3 = 9 + // 3 clauses each with 2 literals, each needs 1 OR gadget = 3*5 = 15 + // Total: 9 + 15 = 24 vertices + assert_eq!(coloring.num_vertices(), 24); + assert_eq!(coloring.num_colors(), 3); + assert_eq!(reduction.num_clauses(), 3); +} + +#[test] +fn test_single_literal_clauses() { + // (x1) AND (x2) - both must be true + let sat = + Satisfiability::::new(2, vec![CNFClause::new(vec![1]), CNFClause::new(vec![2])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let coloring = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = 
solver.find_best(coloring); + + let mut found_correct = false; + for sol in &solutions { + if coloring.solution_size(sol).is_valid { + let sat_sol = reduction.extract_solution(sol); + if sat_sol == vec![1, 1] { + found_correct = true; + break; + } + } + } + + assert!( + found_correct, + "Should find solution where both x1 and x2 are true" + ); +} + +#[test] +fn test_empty_sat() { + // Empty SAT (trivially satisfiable) + let sat = Satisfiability::::new(0, vec![]); + let reduction = ReduceTo::>::reduce_to(&sat); + + assert_eq!(reduction.num_clauses(), 0); + assert!(reduction.pos_vertices().is_empty()); + assert!(reduction.neg_vertices().is_empty()); + + let coloring = reduction.target_problem(); + // Just the 3 special vertices + assert_eq!(coloring.num_vertices(), 3); +} + +#[test] +fn test_num_clauses_accessor() { + let sat = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1])], + ); + let reduction = ReduceTo::>::reduce_to(&sat); + assert_eq!(reduction.num_clauses(), 2); +} + +#[test] +fn test_or_gadget_construction() { + // Test that OR gadget is correctly added + let mut constructor = SATColoringConstructor::new(2); + let initial_vertices = constructor.num_vertices; + + // Add an OR gadget + let input1 = constructor.pos_vertices[0]; // x1 + let input2 = constructor.pos_vertices[1]; // x2 + let output = constructor.add_or_gadget(input1, input2); + + // Should add 5 vertices + assert_eq!(constructor.num_vertices, initial_vertices + 5); + + // Output should be the last added vertex + assert_eq!(output, constructor.num_vertices - 1); +} + +#[test] +fn test_manual_coloring_extraction() { + // Test solution extraction with a manually constructed coloring solution + // for a simple 1-variable SAT problem: (x1) + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let coloring = reduction.target_problem(); + + // The graph structure for (x1) with set_true: + // - Vertices 0, 1, 2: TRUE, FALSE, AUX (triangle) + // - Vertex 3: x1 (pos) + // - Vertex 4: NOT x1 (neg) + // After set_true(3): x1 is connected to AUX and FALSE + // So x1 must have TRUE color + + // A valid 3-coloring where x1 has TRUE color: + // TRUE=0, FALSE=1, AUX=2 + // x1 must have color 0 (connected to 1 and 2) + // NOT_x1 must have color 1 (connected to 2 and x1=0) + let valid_coloring = vec![0, 1, 2, 0, 1]; + + assert_eq!(coloring.num_vertices(), 5); + let extracted = reduction.extract_solution(&valid_coloring); + // x1 should be true (1) because vertex 3 has color 0 which equals TRUE vertex's color + assert_eq!(extracted, vec![1]); +} + +#[test] +fn test_extraction_with_different_color_assignment() { + // Test that extraction works with different color assignments + // (colors may be permuted but semantics preserved) + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + // Different valid coloring: TRUE=2, FALSE=0, AUX=1 + // x1 must have color 2 (TRUE), NOT_x1 must have color 0 (FALSE) + let coloring_permuted = vec![2, 0, 1, 2, 0]; + let extracted = reduction.extract_solution(&coloring_permuted); + // x1 should still be true because its color equals TRUE vertex's color + assert_eq!(extracted, vec![1]); + + // Another permutation: TRUE=1, FALSE=2, AUX=0 + // x1 has color 1 (TRUE), NOT_x1 has color 2 (FALSE) + let coloring_permuted2 = vec![1, 2, 0, 1, 2]; + let extracted2 = reduction.extract_solution(&coloring_permuted2); + assert_eq!(extracted2, vec![1]); +} 
diff --git a/src/tests_unit/rules/sat_dominatingset.rs b/src/tests_unit/rules/sat_dominatingset.rs new file mode 100644 index 0000000..1b18826 --- /dev/null +++ b/src/tests_unit/rules/sat_dominatingset.rs @@ -0,0 +1,320 @@ +use super::*; +use crate::models::satisfiability::CNFClause; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_simple_sat_to_ds() { + // Simple SAT: (x1) - one variable, one clause + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + // Should have 3 vertices (variable gadget) + 1 clause vertex = 4 vertices + assert_eq!(ds_problem.num_vertices(), 4); + + // Edges: 3 for triangle + 1 from positive literal to clause = 4 + // Triangle edges: (0,1), (0,2), (1,2) + // Clause edge: (0, 3) since x1 positive connects to clause vertex + assert_eq!(ds_problem.num_edges(), 4); +} + +#[test] +fn test_two_variable_sat_to_ds() { + // SAT: (x1 OR x2) + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + // 2 variables * 3 = 6 gadget vertices + 1 clause vertex = 7 + assert_eq!(ds_problem.num_vertices(), 7); + + // Edges: + // - 3 edges for first triangle: (0,1), (0,2), (1,2) + // - 3 edges for second triangle: (3,4), (3,5), (4,5) + // - 2 edges from literals to clause: (0,6), (3,6) + assert_eq!(ds_problem.num_edges(), 8); +} + +#[test] +fn test_satisfiable_formula() { + // SAT: (x1 OR x2) AND (NOT x1 OR x2) + // Satisfiable with x2 = true + let sat = Satisfiability::::new( + 2, + vec![ + CNFClause::new(vec![1, 2]), // x1 OR x2 + CNFClause::new(vec![-1, 2]), // NOT x1 OR x2 + ], + ); + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + // Solve the dominating set problem + let solver = BruteForce::new(); + let solutions = solver.find_best(ds_problem); + + // Minimum dominating set should be of size 2 (one per variable) + let min_size = solutions[0].iter().sum::(); + assert_eq!(min_size, 2, "Minimum dominating set should have 2 vertices"); + + // Extract and verify at least one solution satisfies SAT + let mut found_satisfying = false; + for sol in &solutions { + let sat_sol = reduction.extract_solution(sol); + let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); + if sat.is_satisfying(&assignment) { + found_satisfying = true; + break; + } + } + assert!(found_satisfying, "Should find a satisfying assignment"); +} + +#[test] +fn test_unsatisfiable_formula() { + // SAT: (x1) AND (NOT x1) - unsatisfiable + let sat = + Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + // Vertices: 3 (gadget) + 2 (clauses) = 5 + assert_eq!(ds_problem.num_vertices(), 5); + + let solver = BruteForce::new(); + let solutions = solver.find_best(ds_problem); + + // For unsatisfiable formula, the minimum dominating set will need + // more than num_variables vertices OR won't produce a valid assignment + // Actually, in this case we can still dominate with just selecting + // one literal vertex (it dominates its gadget AND one clause), + // but then the other clause isn't dominated. + // So we need at least 2 vertices: one for each clause's requirement. + + // The key insight is that both clauses share the same variable gadget + // but require opposite literals. 
To dominate both clause vertices, + // we need to select BOTH literal vertices (0 and 1) or the dummy + + // something else. + + // Verify no extracted solution satisfies the formula + for sol in &solutions { + let sat_sol = reduction.extract_solution(sol); + let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); + // This unsatisfiable formula should not have a satisfying assignment + assert!( + !sat.is_satisfying(&assignment), + "Unsatisfiable formula should not be satisfied" + ); + } +} + +#[test] +fn test_three_sat_example() { + // 3-SAT: (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR x3) AND (x1 OR NOT x2 OR NOT x3) + let sat = Satisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), // x1 OR x2 OR x3 + CNFClause::new(vec![-1, -2, 3]), // NOT x1 OR NOT x2 OR x3 + CNFClause::new(vec![1, -2, -3]), // x1 OR NOT x2 OR NOT x3 + ], + ); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + // 3 variables * 3 = 9 gadget vertices + 3 clauses = 12 + assert_eq!(ds_problem.num_vertices(), 12); + + let solver = BruteForce::new(); + let solutions = solver.find_best(ds_problem); + + // Minimum should be 3 (one per variable) + let min_size = solutions[0].iter().sum::(); + assert_eq!(min_size, 3, "Minimum dominating set should have 3 vertices"); + + // Verify extracted solutions + let mut found_satisfying = false; + for sol in &solutions { + let sat_sol = reduction.extract_solution(sol); + let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); + if sat.is_satisfying(&assignment) { + found_satisfying = true; + break; + } + } + assert!( + found_satisfying, + "Should find a satisfying assignment for 3-SAT" + ); +} + +#[test] +fn test_extract_solution_positive_literal() { + // (x1) - select positive literal + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + // Solution: select vertex 0 (positive literal x1) + // This dominates vertices 1, 2 (gadget) and vertex 3 (clause) + let ds_sol = vec![1, 0, 0, 0]; + let sat_sol = reduction.extract_solution(&ds_sol); + assert_eq!(sat_sol, vec![1]); // x1 = true +} + +#[test] +fn test_extract_solution_negative_literal() { + // (NOT x1) - select negative literal + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![-1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + // Solution: select vertex 1 (negative literal NOT x1) + // This dominates vertices 0, 2 (gadget) and vertex 3 (clause) + let ds_sol = vec![0, 1, 0, 0]; + let sat_sol = reduction.extract_solution(&ds_sol); + assert_eq!(sat_sol, vec![0]); // x1 = false +} + +#[test] +fn test_extract_solution_dummy() { + // (x1 OR x2) where only x1 matters + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + // Select: vertex 0 (x1 positive) and vertex 5 (x2 dummy) + // Vertex 0 dominates: itself, 1, 2, and clause 6 + // Vertex 5 dominates: 3, 4, and itself + let ds_sol = vec![1, 0, 0, 0, 0, 1, 0]; + let sat_sol = reduction.extract_solution(&ds_sol); + assert_eq!(sat_sol, vec![1, 0]); // x1 = true, x2 = false (from dummy) +} + +#[test] +fn test_source_and_target_size() { + let sat = Satisfiability::::new( + 3, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], + ); + let reduction = ReduceTo::>::reduce_to(&sat); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vars"), Some(3)); + 
assert_eq!(source_size.get("num_clauses"), Some(2)); + // 3 vars * 3 = 9 gadget vertices + 2 clause vertices = 11 + assert_eq!(target_size.get("num_vertices"), Some(11)); +} + +#[test] +fn test_empty_sat() { + // Empty SAT (trivially satisfiable) + let sat = Satisfiability::::new(0, vec![]); + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + assert_eq!(ds_problem.num_vertices(), 0); + assert_eq!(ds_problem.num_edges(), 0); + assert_eq!(reduction.num_clauses(), 0); + assert_eq!(reduction.num_literals(), 0); +} + +#[test] +fn test_multiple_literals_same_variable() { + // Clause with repeated variable: (x1 OR NOT x1) - tautology + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1, -1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + // 3 gadget vertices + 1 clause vertex = 4 + assert_eq!(ds_problem.num_vertices(), 4); + + // Edges: + // - 3 for triangle + // - 2 from literals to clause (both positive and negative literals connect) + assert_eq!(ds_problem.num_edges(), 5); +} + +#[test] +fn test_sat_ds_solution_correspondence() { + // Comprehensive test: verify that solutions extracted from DS satisfy SAT + let sat = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], + ); + + // Solve SAT directly + let sat_solver = BruteForce::new(); + let direct_sat_solutions = sat_solver.find_best(&sat); + + // Solve via reduction + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + let ds_solutions = sat_solver.find_best(ds_problem); + + // Direct SAT solutions should all be valid + for sol in &direct_sat_solutions { + let assignment: Vec = sol.iter().map(|&v| v == 1).collect(); + assert!(sat.is_satisfying(&assignment)); + } + + // DS solutions with minimum size should correspond to valid SAT solutions + let min_size = ds_solutions[0].iter().sum::(); + if min_size == 2 { + // Only if min dominating set = num_vars + let mut found_satisfying = false; + for sol in &ds_solutions { + if sol.iter().sum::() == 2 { + let sat_sol = reduction.extract_solution(sol); + let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); + if sat.is_satisfying(&assignment) { + found_satisfying = true; + break; + } + } + } + assert!( + found_satisfying, + "At least one DS solution should give a SAT solution" + ); + } +} + +#[test] +fn test_accessors() { + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, -2])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + assert_eq!(reduction.num_literals(), 2); + assert_eq!(reduction.num_clauses(), 1); +} + +#[test] +fn test_extract_solution_too_many_selected() { + // Test that extract_solution handles invalid (non-minimal) dominating sets + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + // Select all 4 vertices (more than num_literals=1) + let ds_sol = vec![1, 1, 1, 1]; + let sat_sol = reduction.extract_solution(&ds_sol); + // Should return default (all false) + assert_eq!(sat_sol, vec![0]); +} + +#[test] +fn test_negated_variable_connection() { + // (NOT x1 OR NOT x2) - both negated + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![-1, -2])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let ds_problem = reduction.target_problem(); + + // 2 * 3 = 6 gadget vertices + 1 clause = 7 + assert_eq!(ds_problem.num_vertices(), 7); + + // Edges: + // - 3 for first triangle: (0,1), (0,2), (1,2) 
+ // - 3 for second triangle: (3,4), (3,5), (4,5) + // - 2 from negated literals to clause: (1,6), (4,6) + assert_eq!(ds_problem.num_edges(), 8); +} diff --git a/src/tests_unit/rules/sat_independentset.rs b/src/tests_unit/rules/sat_independentset.rs new file mode 100644 index 0000000..235e2f6 --- /dev/null +++ b/src/tests_unit/rules/sat_independentset.rs @@ -0,0 +1,312 @@ +use super::*; +use crate::models::satisfiability::CNFClause; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_boolvar_creation() { + let var = BoolVar::new(0, false); + assert_eq!(var.name, 0); + assert!(!var.neg); + + let neg_var = BoolVar::new(1, true); + assert_eq!(neg_var.name, 1); + assert!(neg_var.neg); +} + +#[test] +fn test_boolvar_from_literal() { + // Positive literal: variable 1 (1-indexed) -> variable 0 (0-indexed), not negated + let var = BoolVar::from_literal(1); + assert_eq!(var.name, 0); + assert!(!var.neg); + + // Negative literal: variable 2 (1-indexed) -> variable 1 (0-indexed), negated + let neg_var = BoolVar::from_literal(-2); + assert_eq!(neg_var.name, 1); + assert!(neg_var.neg); +} + +#[test] +fn test_boolvar_complement() { + let x = BoolVar::new(0, false); + let not_x = BoolVar::new(0, true); + let y = BoolVar::new(1, false); + + assert!(x.is_complement(¬_x)); + assert!(not_x.is_complement(&x)); + assert!(!x.is_complement(&y)); + assert!(!x.is_complement(&x)); +} + +#[test] +fn test_simple_sat_to_is() { + // Simple SAT: (x1) - one clause with one literal + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + // Should have 1 vertex (one literal) + assert_eq!(is_problem.num_vertices(), 1); + // No edges (single vertex can't form a clique) + assert_eq!(is_problem.num_edges(), 0); +} + +#[test] +fn test_two_clause_sat_to_is() { + // SAT: (x1) AND (NOT x1) + // This is unsatisfiable + let sat = + Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + // Should have 2 vertices + assert_eq!(is_problem.num_vertices(), 2); + // Should have 1 edge (between x1 and NOT x1) + assert_eq!(is_problem.num_edges(), 1); + + // Maximum IS should have size 1 (can't select both) + let solver = BruteForce::new(); + let solutions = solver.find_best(is_problem); + for sol in &solutions { + assert_eq!(sol.iter().sum::(), 1); + } +} + +#[test] +fn test_satisfiable_formula() { + // SAT: (x1 OR x2) AND (NOT x1 OR x2) AND (x1 OR NOT x2) + // Satisfiable with x1=true, x2=true or x1=false, x2=true + let sat = Satisfiability::::new( + 2, + vec![ + CNFClause::new(vec![1, 2]), // x1 OR x2 + CNFClause::new(vec![-1, 2]), // NOT x1 OR x2 + CNFClause::new(vec![1, -2]), // x1 OR NOT x2 + ], + ); + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + // Should have 6 vertices (2 literals per clause, 3 clauses) + assert_eq!(is_problem.num_vertices(), 6); + + // Count edges: + // - 3 edges within clauses (one per clause, since each clause has 2 literals) + // - Edges between complementary literals across clauses: + // - x1 (clause 0, vertex 0) and NOT x1 (clause 1, vertex 2) + // - x2 (clause 0, vertex 1) and NOT x2 (clause 2, vertex 5) + // - x2 (clause 1, vertex 3) and NOT x2 (clause 2, vertex 5) + // - x1 (clause 2, vertex 4) and NOT x1 (clause 1, vertex 2) + // Total: 3 (clique) + 4 (complement) = 7 edges + + // Solve the IS problem + let 
solver = BruteForce::new(); + let is_solutions = solver.find_best(is_problem); + + // Max IS should be 3 (one literal per clause) + for sol in &is_solutions { + assert_eq!(sol.iter().sum::(), 3); + } + + // Extract SAT solutions and verify they satisfy the original formula + for sol in &is_solutions { + let sat_sol = reduction.extract_solution(sol); + let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); + assert!( + sat.is_satisfying(&assignment), + "Extracted solution {:?} should satisfy the SAT formula", + assignment + ); + } +} + +#[test] +fn test_unsatisfiable_formula() { + // SAT: (x1) AND (NOT x1) - unsatisfiable + let sat = + Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + let solver = BruteForce::new(); + let is_solutions = solver.find_best(is_problem); + + // Max IS can only be 1 (not 2 = num_clauses) + // This indicates the formula is unsatisfiable + for sol in &is_solutions { + assert!( + sol.iter().sum::() < reduction.num_clauses(), + "For unsatisfiable formula, IS size should be less than num_clauses" + ); + } +} + +#[test] +fn test_three_sat_example() { + // 3-SAT: (x1 OR x2 OR x3) AND (NOT x1 OR NOT x2 OR x3) AND (x1 OR NOT x2 OR NOT x3) + let sat = Satisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), // x1 OR x2 OR x3 + CNFClause::new(vec![-1, -2, 3]), // NOT x1 OR NOT x2 OR x3 + CNFClause::new(vec![1, -2, -3]), // x1 OR NOT x2 OR NOT x3 + ], + ); + + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + // Should have 9 vertices (3 literals per clause, 3 clauses) + assert_eq!(is_problem.num_vertices(), 9); + + let solver = BruteForce::new(); + let is_solutions = solver.find_best(is_problem); + + // Check that max IS has size 3 (satisfiable) + let max_size = is_solutions[0].iter().sum::(); + assert_eq!(max_size, 3, "3-SAT should be satisfiable with IS size = 3"); + + // Verify extracted solutions + for sol in &is_solutions { + let sat_sol = reduction.extract_solution(sol); + let assignment: Vec = sat_sol.iter().map(|&v| v == 1).collect(); + assert!(sat.is_satisfying(&assignment)); + } +} + +#[test] +fn test_extract_solution_basic() { + // Simple case: (x1 OR x2) + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + // Select vertex 0 (literal x1) + let is_sol = vec![1, 0]; + let sat_sol = reduction.extract_solution(&is_sol); + assert_eq!(sat_sol, vec![1, 0]); // x1=true, x2=false + + // Select vertex 1 (literal x2) + let is_sol = vec![0, 1]; + let sat_sol = reduction.extract_solution(&is_sol); + assert_eq!(sat_sol, vec![0, 1]); // x1=false, x2=true +} + +#[test] +fn test_extract_solution_with_negation() { + // (NOT x1) - selecting NOT x1 means x1 should be false + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![-1])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + let is_sol = vec![1]; + let sat_sol = reduction.extract_solution(&is_sol); + assert_eq!(sat_sol, vec![0]); // x1=false (so NOT x1 is true) +} + +#[test] +fn test_clique_edges_in_clause() { + // A clause with 3 literals should form a clique (3 edges) + let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + // 3 vertices, 3 edges (complete graph K3) + assert_eq!(is_problem.num_vertices(), 3); + 
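+ // All three literals belong to the same clause, so they are pairwise connected: C(3,2) = 3 edges.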
assert_eq!(is_problem.num_edges(), 3); +} + +#[test] +fn test_complement_edges_across_clauses() { + // (x1) AND (NOT x1) AND (x2) - three clauses + // Vertices: 0 (x1), 1 (NOT x1), 2 (x2) + // Edges: (0,1) for complement x1 and NOT x1 + let sat = Satisfiability::::new( + 2, + vec![ + CNFClause::new(vec![1]), + CNFClause::new(vec![-1]), + CNFClause::new(vec![2]), + ], + ); + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + assert_eq!(is_problem.num_vertices(), 3); + assert_eq!(is_problem.num_edges(), 1); // Only the complement edge +} + +#[test] +fn test_source_and_target_size() { + let sat = Satisfiability::::new( + 3, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, 3])], + ); + let reduction = ReduceTo::>::reduce_to(&sat); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vars"), Some(3)); + assert_eq!(source_size.get("num_clauses"), Some(2)); + assert_eq!(target_size.get("num_vertices"), Some(4)); // 2 + 2 literals +} + +#[test] +fn test_empty_sat() { + // Empty SAT (trivially satisfiable) + let sat = Satisfiability::::new(0, vec![]); + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + + assert_eq!(is_problem.num_vertices(), 0); + assert_eq!(is_problem.num_edges(), 0); + assert_eq!(reduction.num_clauses(), 0); +} + +#[test] +fn test_sat_is_solution_correspondence() { + // Comprehensive test: solve both SAT and IS, compare solutions + let sat = Satisfiability::::new( + 2, + vec![CNFClause::new(vec![1, 2]), CNFClause::new(vec![-1, -2])], + ); + + // Solve SAT directly + let sat_solver = BruteForce::new(); + let direct_sat_solutions = sat_solver.find_best(&sat); + + // Solve via reduction + let reduction = ReduceTo::>::reduce_to(&sat); + let is_problem = reduction.target_problem(); + let is_solutions = sat_solver.find_best(is_problem); + + // Extract SAT solutions from IS + let extracted_sat_solutions: Vec<_> = is_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // All extracted solutions should be valid SAT solutions + for sol in &extracted_sat_solutions { + let assignment: Vec = sol.iter().map(|&v| v == 1).collect(); + assert!(sat.is_satisfying(&assignment)); + } + + // Direct SAT solutions and extracted solutions should be compatible + // (same satisfying assignments, though representation might differ) + for sol in &direct_sat_solutions { + let assignment: Vec = sol.iter().map(|&v| v == 1).collect(); + assert!(sat.is_satisfying(&assignment)); + } +} + +#[test] +fn test_literals_accessor() { + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, -2])]); + let reduction = ReduceTo::>::reduce_to(&sat); + + let literals = reduction.literals(); + assert_eq!(literals.len(), 2); + assert_eq!(literals[0], BoolVar::new(0, false)); // x1 + assert_eq!(literals[1], BoolVar::new(1, true)); // NOT x2 +} diff --git a/src/tests_unit/rules/sat_ksat.rs b/src/tests_unit/rules/sat_ksat.rs new file mode 100644 index 0000000..308185c --- /dev/null +++ b/src/tests_unit/rules/sat_ksat.rs @@ -0,0 +1,330 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_sat_to_3sat_exact_size() { + // Clause already has 3 literals - should remain unchanged + let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + assert_eq!(ksat.num_vars(), 3); + 
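+ // With exactly k = 3 literals the clause is kept verbatim: no ancilla variables and no extra clauses.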
assert_eq!(ksat.num_clauses(), 1); + assert_eq!(ksat.clauses()[0].literals, vec![1, 2, 3]); +} + +#[test] +fn test_sat_to_3sat_padding() { + // Clause has 2 literals - should be padded to 3 + // (a v b) becomes (a v b v x) AND (a v b v -x) + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // Should have 2 clauses (positive and negative ancilla) + assert_eq!(ksat.num_clauses(), 2); + // All clauses should have exactly 3 literals + for clause in ksat.clauses() { + assert_eq!(clause.len(), 3); + } +} + +#[test] +fn test_sat_to_3sat_splitting() { + // Clause has 4 literals - should be split + // (a v b v c v d) becomes (a v b v x) AND (-x v c v d) + let sat = Satisfiability::::new(4, vec![CNFClause::new(vec![1, 2, 3, 4])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // Should have 2 clauses after splitting + assert_eq!(ksat.num_clauses(), 2); + // All clauses should have exactly 3 literals + for clause in ksat.clauses() { + assert_eq!(clause.len(), 3); + } + + // Verify structure: first clause has positive ancilla, second has negative + let c1 = &ksat.clauses()[0]; + let c2 = &ksat.clauses()[1]; + // First clause: [1, 2, 5] (ancilla is var 5) + assert_eq!(c1.literals[0], 1); + assert_eq!(c1.literals[1], 2); + let ancilla = c1.literals[2]; + assert!(ancilla > 0); + // Second clause: [-5, 3, 4] + assert_eq!(c2.literals[0], -ancilla); + assert_eq!(c2.literals[1], 3); + assert_eq!(c2.literals[2], 4); +} + +#[test] +fn test_sat_to_3sat_large_clause() { + // Clause has 5 literals - requires multiple splits + // (a v b v c v d v e) -> (a v b v x1) AND (-x1 v c v x2) AND (-x2 v d v e) + let sat = Satisfiability::::new(5, vec![CNFClause::new(vec![1, 2, 3, 4, 5])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // Should have 3 clauses after splitting + assert_eq!(ksat.num_clauses(), 3); + // All clauses should have exactly 3 literals + for clause in ksat.clauses() { + assert_eq!(clause.len(), 3); + } +} + +#[test] +fn test_sat_to_3sat_single_literal() { + // Single literal clause - needs padding twice + // (a) becomes (a v x v y) where we pad twice + let sat = Satisfiability::::new(1, vec![CNFClause::new(vec![1])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // With recursive padding: (a) -> (a v x) AND (a v -x) + // Then each of those gets padded again + // (a v x) -> (a v x v y) AND (a v x v -y) + // (a v -x) -> (a v -x v z) AND (a v -x v -z) + // Total: 4 clauses + assert_eq!(ksat.num_clauses(), 4); + for clause in ksat.clauses() { + assert_eq!(clause.len(), 3); + } +} + +#[test] +fn test_sat_to_3sat_preserves_satisfiability() { + // Create a SAT formula and verify the 3-SAT version is equisatisfiable + let sat = Satisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, 2]), // Needs padding + CNFClause::new(vec![-1, 2, 3]), // Already 3 literals + CNFClause::new(vec![1, -2, 3, -3]), // Needs splitting (tautology for testing) + ], + ); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // Solve both problems + let solver = BruteForce::new(); + + let sat_solutions = solver.find_best(&sat); + let ksat_solutions = solver.find_best(ksat); + + // If SAT is satisfiable, K-SAT should be too + let sat_satisfiable = sat_solutions.iter().any(|s| sat.solution_size(s).is_valid); + let 
ksat_satisfiable = ksat_solutions + .iter() + .any(|s| ksat.solution_size(s).is_valid); + + assert_eq!(sat_satisfiable, ksat_satisfiable); + + // Extract solutions should map back correctly + if ksat_satisfiable { + for ksat_sol in &ksat_solutions { + if ksat.solution_size(ksat_sol).is_valid { + let sat_sol = reduction.extract_solution(ksat_sol); + assert_eq!(sat_sol.len(), 3); // Original variable count + } + } + } +} + +#[test] +fn test_sat_to_3sat_solution_extraction() { + let sat = Satisfiability::::new(2, vec![CNFClause::new(vec![1, 2])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // Solve K-SAT + let solver = BruteForce::new(); + let ksat_solutions = solver.find_best(ksat); + + // Extract and verify solutions + for ksat_sol in &ksat_solutions { + if ksat.solution_size(ksat_sol).is_valid { + let sat_sol = reduction.extract_solution(ksat_sol); + // Should only have original 2 variables + assert_eq!(sat_sol.len(), 2); + // Should satisfy original problem + assert!(sat.solution_size(&sat_sol).is_valid); + } + } +} + +#[test] +fn test_3sat_to_sat() { + let ksat = KSatisfiability::<3, i32>::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + + let reduction = ReduceTo::>::reduce_to(&ksat); + let sat = reduction.target_problem(); + + assert_eq!(sat.num_vars(), 3); + assert_eq!(sat.num_clauses(), 2); + + // Verify clauses are preserved + assert_eq!(sat.clauses()[0].literals, vec![1, 2, 3]); + assert_eq!(sat.clauses()[1].literals, vec![-1, -2, 3]); +} + +#[test] +fn test_3sat_to_sat_solution_extraction() { + let ksat = KSatisfiability::<3, i32>::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + + let reduction = ReduceTo::>::reduce_to(&ksat); + + let sol = vec![1, 0, 1]; + let extracted = reduction.extract_solution(&sol); + assert_eq!(extracted, vec![1, 0, 1]); +} + +#[test] +fn test_roundtrip_sat_3sat_sat() { + // SAT -> 3-SAT -> SAT roundtrip + let original_sat = Satisfiability::::new( + 3, + vec![CNFClause::new(vec![1, -2]), CNFClause::new(vec![2, 3])], + ); + + // SAT -> 3-SAT + let to_ksat = ReduceTo::>::reduce_to(&original_sat); + let ksat = to_ksat.target_problem(); + + // 3-SAT -> SAT + let to_sat = ReduceTo::>::reduce_to(ksat); + let final_sat = to_sat.target_problem(); + + // Solve all three + let solver = BruteForce::new(); + + let orig_solutions = solver.find_best(&original_sat); + let ksat_solutions = solver.find_best(ksat); + let final_solutions = solver.find_best(final_sat); + + // All should be satisfiable + assert!(orig_solutions + .iter() + .any(|s| original_sat.solution_size(s).is_valid)); + assert!(ksat_solutions + .iter() + .any(|s| ksat.solution_size(s).is_valid)); + assert!(final_solutions + .iter() + .any(|s| final_sat.solution_size(s).is_valid)); +} + +#[test] +fn test_sat_to_4sat() { + let sat = Satisfiability::::new( + 4, + vec![ + CNFClause::new(vec![1, 2]), // Needs padding + CNFClause::new(vec![1, 2, 3, 4]), // Exact + CNFClause::new(vec![1, 2, 3, 4, -1]), // Needs splitting + ], + ); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // All clauses should have exactly 4 literals + for clause in ksat.clauses() { + assert_eq!(clause.len(), 4); + } +} + +#[test] +fn test_problem_sizes() { + let sat = Satisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3, 4])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + 
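+ // The source side reports the original num_vars; the target side reports the clause width k.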
assert_eq!(source_size.get("num_vars"), Some(3)); + assert_eq!(target_size.get("k"), Some(3)); +} + +#[test] +fn test_empty_sat_to_3sat() { + let sat = Satisfiability::::new(3, vec![]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + assert_eq!(ksat.num_clauses(), 0); + assert_eq!(ksat.num_vars(), 3); +} + +#[test] +fn test_mixed_clause_sizes() { + let sat = Satisfiability::::new( + 5, + vec![ + CNFClause::new(vec![1]), // 1 literal + CNFClause::new(vec![2, 3]), // 2 literals + CNFClause::new(vec![1, 2, 3]), // 3 literals + CNFClause::new(vec![1, 2, 3, 4]), // 4 literals + CNFClause::new(vec![1, 2, 3, 4, 5]), // 5 literals + ], + ); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + // All clauses should have exactly 3 literals + for clause in ksat.clauses() { + assert_eq!(clause.len(), 3); + } + + // Verify satisfiability is preserved + let solver = BruteForce::new(); + let sat_solutions = solver.find_best(&sat); + let ksat_solutions = solver.find_best(ksat); + + let sat_satisfiable = sat_solutions.iter().any(|s| sat.solution_size(s).is_valid); + let ksat_satisfiable = ksat_solutions + .iter() + .any(|s| ksat.solution_size(s).is_valid); + assert_eq!(sat_satisfiable, ksat_satisfiable); +} + +#[test] +fn test_unsatisfiable_formula() { + // (x) AND (-x) is unsatisfiable + let sat = + Satisfiability::::new(1, vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])]); + + let reduction = ReduceTo::>::reduce_to(&sat); + let ksat = reduction.target_problem(); + + let solver = BruteForce::new(); + + // Both should be unsatisfiable + let sat_solutions = solver.find_best(&sat); + let ksat_solutions = solver.find_best(ksat); + + let sat_satisfiable = sat_solutions.iter().any(|s| sat.solution_size(s).is_valid); + let ksat_satisfiable = ksat_solutions + .iter() + .any(|s| ksat.solution_size(s).is_valid); + + assert!(!sat_satisfiable); + assert!(!ksat_satisfiable); +} diff --git a/src/tests_unit/rules/setcovering_ilp.rs b/src/tests_unit/rules/setcovering_ilp.rs new file mode 100644 index 0000000..5d2ce38 --- /dev/null +++ b/src/tests_unit/rules/setcovering_ilp.rs @@ -0,0 +1,234 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Universe: {0, 1, 2}, Sets: S0={0,1}, S1={1,2} + let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 2, "Should have one variable per set"); + assert_eq!( + ilp.constraints.len(), + 3, + "Should have one constraint per element" + ); + assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } + + // Each constraint should be sum >= 1 + for constraint in &ilp.constraints { + assert!((constraint.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_reduction_weighted() { + let problem = SetCovering::with_weights(3, vec![vec![0, 1], vec![1, 2]], vec![5, 10]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check that weights are correctly transferred to objective + let mut coeffs: Vec = vec![0.0; 2]; + for &(var, coef) in &ilp.objective { + coeffs[var] = coef; + } + assert!((coeffs[0] - 5.0).abs() < 1e-9); + assert!((coeffs[1] - 10.0).abs() < 1e-9); +} + 
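+// The two tests above pin down the shape of the SetCovering -> ILP reduction: one binary
+// variable per set, one ">= 1" constraint per universe element, and the set weights as the
+// minimized objective. The test below is only an illustrative sketch of that bookkeeping
+// (its name and plain-Vec representation are assumptions for illustration, not this crate's
+// API), showing how each element maps to the set variables of its covering constraint.
+#[test]
+fn test_covering_constraint_rows_sketch() {
+    // Universe {0, 1, 2}, sets S0 = {0, 1} and S1 = {1, 2}.
+    let universe = 3;
+    let sets: Vec<Vec<usize>> = vec![vec![0, 1], vec![1, 2]];
+
+    // rows[e] collects the indices of the sets containing element e, i.e. the variables
+    // appearing in the constraint "sum_j x_j >= 1" for that element.
+    let mut rows: Vec<Vec<usize>> = vec![Vec::new(); universe];
+    for (j, set) in sets.iter().enumerate() {
+        for &e in set {
+            rows[e].push(j);
+        }
+    }
+
+    // Element 0 lies only in S0, element 1 in both sets, element 2 only in S1.
+    assert_eq!(rows, vec![vec![0], vec![0, 1], vec![1]]);
+}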
+#[test] +fn test_ilp_solution_equals_brute_force_simple() { + // Universe: {0, 1, 2}, Sets: S0={0,1}, S1={1,2}, S2={0,2} + // Minimum cover: any 2 sets work + let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2], vec![0, 2]]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_best(&problem); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Both should find optimal size = 2 + let bf_size: usize = bf_solutions[0].iter().sum(); + let ilp_size: usize = extracted.iter().sum(); + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify the ILP solution is valid for the original problem + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); +} + +#[test] +fn test_ilp_solution_equals_brute_force_weighted() { + // Weighted problem: prefer lighter sets + // Universe: {0,1,2}, Sets: S0={0,1,2}, S1={0,1}, S2={2} + // Weights: [10, 3, 3] + // Optimal: select S1 and S2 (weight 6) instead of S0 (weight 10) + let problem = + SetCovering::with_weights(3, vec![vec![0, 1, 2], vec![0, 1], vec![2]], vec![10, 3, 3]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_obj = problem.solution_size(&bf_solutions[0]).size; + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_obj = problem.solution_size(&extracted).size; + + assert_eq!(bf_obj, 6); + assert_eq!(ilp_obj, 6); + + // Verify the solution selects S1 and S2 + assert_eq!(extracted, vec![0, 1, 1]); +} + +#[test] +fn test_solution_extraction() { + let problem = SetCovering::::new(4, vec![vec![0, 1], vec![2, 3]]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 1]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 1]); + + // Verify this is a valid set cover + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_source_and_target_size() { + let problem = + SetCovering::::new(5, vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![3, 4]]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("universe_size"), Some(5)); + assert_eq!(source_size.get("num_sets"), Some(4)); + + assert_eq!(target_size.get("num_vars"), Some(4)); + assert_eq!(target_size.get("num_constraints"), Some(5)); +} + +#[test] +fn test_single_set_covers_all() { + // Single set covers entire universe + let problem = SetCovering::::new(3, vec![vec![0, 1, 2], vec![0], vec![1], vec![2]]); + + let ilp_solver = ILPSolver::new(); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // 
First set alone covers everything with weight 1 + assert_eq!(extracted, vec![1, 0, 0, 0]); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 1); +} + +#[test] +fn test_overlapping_sets() { + // All sets overlap on element 1 + let problem = SetCovering::::new(3, vec![vec![0, 1], vec![1, 2]]); + + let ilp_solver = ILPSolver::new(); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Need both sets to cover all elements + assert_eq!(extracted, vec![1, 1]); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); +} + +#[test] +fn test_empty_universe() { + // Empty universe is trivially covered + let problem = SetCovering::::new(0, vec![]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 0); + assert_eq!(ilp.constraints.len(), 0); +} + +#[test] +fn test_solve_reduced() { + // Test the ILPSolver::solve_reduced method + let problem = + SetCovering::::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![0, 3]]); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver + .solve_reduced(&problem) + .expect("solve_reduced should work"); + + let sol_result = problem.solution_size(&solution); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); +} + +#[test] +fn test_constraint_structure() { + // Universe: {0, 1, 2} + // Sets: S0={0}, S1={0,1}, S2={1,2} + // Element 0 is in S0, S1 -> constraint: x0 + x1 >= 1 + // Element 1 is in S1, S2 -> constraint: x1 + x2 >= 1 + // Element 2 is in S2 -> constraint: x2 >= 1 + let problem = SetCovering::::new(3, vec![vec![0], vec![0, 1], vec![1, 2]]); + let reduction: ReductionSCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.constraints.len(), 3); + + // Check constraint for element 0: should involve sets 0 and 1 + let c0 = &ilp.constraints[0]; + let vars0: Vec = c0.terms.iter().map(|&(v, _)| v).collect(); + assert!(vars0.contains(&0)); + assert!(vars0.contains(&1)); + assert!(!vars0.contains(&2)); + + // Check constraint for element 1: should involve sets 1 and 2 + let c1 = &ilp.constraints[1]; + let vars1: Vec = c1.terms.iter().map(|&(v, _)| v).collect(); + assert!(!vars1.contains(&0)); + assert!(vars1.contains(&1)); + assert!(vars1.contains(&2)); + + // Check constraint for element 2: should involve only set 2 + let c2 = &ilp.constraints[2]; + let vars2: Vec = c2.terms.iter().map(|&(v, _)| v).collect(); + assert!(!vars2.contains(&0)); + assert!(!vars2.contains(&1)); + assert!(vars2.contains(&2)); +} diff --git a/src/tests_unit/rules/setpacking_ilp.rs b/src/tests_unit/rules/setpacking_ilp.rs new file mode 100644 index 0000000..e15b263 --- /dev/null +++ b/src/tests_unit/rules/setpacking_ilp.rs @@ -0,0 +1,222 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Three sets with two overlapping pairs + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 3, "Should have one variable per set"); + assert_eq!( + 
ilp.constraints.len(), + 2, + "Should have one constraint per overlapping pair" + ); + assert_eq!(ilp.sense, ObjectiveSense::Maximize, "Should maximize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } + + // Each constraint should be x_i + x_j <= 1 + for constraint in &ilp.constraints { + assert_eq!(constraint.terms.len(), 2); + assert!((constraint.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_reduction_weighted() { + let problem = SetPacking::with_weights(vec![vec![0, 1], vec![2, 3]], vec![5, 10]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check that weights are correctly transferred to objective + let mut coeffs: Vec = vec![0.0; 2]; + for &(var, coef) in &ilp.objective { + coeffs[var] = coef; + } + assert!((coeffs[0] - 5.0).abs() < 1e-9); + assert!((coeffs[1] - 10.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_equals_brute_force_chain() { + // Chain: {0,1}, {1,2}, {2,3} - can select at most 2 non-adjacent sets + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_best(&problem); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Both should find optimal size = 2 + let bf_size: usize = bf_solutions[0].iter().sum(); + let ilp_size: usize = extracted.iter().sum(); + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify the ILP solution is valid for the original problem + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); +} + +#[test] +fn test_ilp_solution_equals_brute_force_all_overlap() { + // All sets share element 0: can only select one + let problem = SetPacking::::new(vec![vec![0, 1], vec![0, 2], vec![0, 3]]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_size: usize = bf_solutions[0].iter().sum(); + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size: usize = extracted.iter().sum(); + + assert_eq!(bf_size, 1); + assert_eq!(ilp_size, 1); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_ilp_solution_equals_brute_force_weighted() { + // Weighted problem: single heavy set vs multiple light sets + // Set 0 covers all elements but has weight 5 + // Sets 1 and 2 are disjoint and together have weight 6 + let problem = SetPacking::with_weights( + vec![vec![0, 1, 2, 3], vec![0, 1], vec![2, 3]], + vec![5, 3, 3], + ); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_obj = problem.solution_size(&bf_solutions[0]).size; + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = 
reduction.extract_solution(&ilp_solution); + let ilp_obj = problem.solution_size(&extracted).size; + + assert_eq!(bf_obj, 6); + assert_eq!(ilp_obj, 6); + + // Should select sets 1 and 2 + assert_eq!(extracted, vec![0, 1, 1]); +} + +#[test] +fn test_solution_extraction() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![2, 3], vec![4, 5], vec![6, 7]]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 0, 1, 0]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 0, 1, 0]); + + // Verify this is a valid packing (sets 0 and 2 are disjoint) + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_source_and_target_size() { + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3], vec![3, 4]]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_sets"), Some(4)); + + assert_eq!(target_size.get("num_vars"), Some(4)); + // 3 overlapping pairs: (0,1), (1,2), (2,3) + assert_eq!(target_size.get("num_constraints"), Some(3)); +} + +#[test] +fn test_disjoint_sets() { + // All sets are disjoint: no overlapping pairs + let problem = SetPacking::::new(vec![vec![0], vec![1], vec![2], vec![3]]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.constraints.len(), 0); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // All sets should be selected + assert_eq!(extracted, vec![1, 1, 1, 1]); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 4); +} + +#[test] +fn test_empty_sets() { + let problem = SetPacking::::new(vec![]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 0); + assert_eq!(ilp.constraints.len(), 0); +} + +#[test] +fn test_solve_reduced() { + // Test the ILPSolver::solve_reduced method + let problem = SetPacking::::new(vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver + .solve_reduced(&problem) + .expect("solve_reduced should work"); + + let sol_result = problem.solution_size(&solution); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); +} + +#[test] +fn test_all_sets_overlap_pairwise() { + // All pairs overlap: can only select one set + // Sets: {0,1}, {0,2}, {1,2} - each pair shares one element + let problem = SetPacking::::new(vec![vec![0, 1], vec![0, 2], vec![1, 2]]); + let reduction: ReductionSPToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 3 overlapping pairs + assert_eq!(ilp.constraints.len(), 3); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 1); +} diff --git a/src/tests_unit/rules/spinglass_maxcut.rs b/src/tests_unit/rules/spinglass_maxcut.rs new file mode 100644 index 0000000..6f8a5ce --- /dev/null +++ 
b/src/tests_unit/rules/spinglass_maxcut.rs @@ -0,0 +1,97 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_maxcut_to_spinglass() { + // Simple triangle MaxCut + let mc = MaxCut::::unweighted(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::>::reduce_to(&mc); + let sg = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(sg); + + assert!(!solutions.is_empty()); +} + +#[test] +fn test_spinglass_to_maxcut_no_onsite() { + // SpinGlass without onsite terms + let sg = SpinGlass::::new(3, vec![((0, 1), 1), ((1, 2), 1)], vec![0, 0, 0]); + let reduction = ReduceTo::>::reduce_to(&sg); + let mc = reduction.target_problem(); + + assert_eq!(mc.num_vertices(), 3); // No ancilla needed + assert!(reduction.ancilla.is_none()); +} + +#[test] +fn test_spinglass_to_maxcut_with_onsite() { + // SpinGlass with onsite terms + let sg = SpinGlass::::new(2, vec![((0, 1), 1)], vec![1, 0]); + let reduction = ReduceTo::>::reduce_to(&sg); + let mc = reduction.target_problem(); + + assert_eq!(mc.num_vertices(), 3); // Ancilla added + assert_eq!(reduction.ancilla, Some(2)); +} + +#[test] +fn test_solution_extraction_no_ancilla() { + let sg = SpinGlass::::new(2, vec![((0, 1), 1)], vec![0, 0]); + let reduction = ReduceTo::>::reduce_to(&sg); + + let mc_sol = vec![0, 1]; + let extracted = reduction.extract_solution(&mc_sol); + assert_eq!(extracted, vec![0, 1]); +} + +#[test] +fn test_solution_extraction_with_ancilla() { + let sg = SpinGlass::::new(2, vec![((0, 1), 1)], vec![1, 0]); + let reduction = ReduceTo::>::reduce_to(&sg); + + // If ancilla is 0, don't flip + let mc_sol = vec![0, 1, 0]; + let extracted = reduction.extract_solution(&mc_sol); + assert_eq!(extracted, vec![0, 1]); + + // If ancilla is 1, flip all + let mc_sol = vec![0, 1, 1]; + let extracted = reduction.extract_solution(&mc_sol); + assert_eq!(extracted, vec![1, 0]); // flipped and ancilla removed +} + +#[test] +fn test_weighted_maxcut() { + let mc = MaxCut::::new(3, vec![(0, 1, 10), (1, 2, 20)]); + let reduction = ReduceTo::>::reduce_to(&mc); + let sg = reduction.target_problem(); + + // Verify interactions have correct weights + let interactions = sg.interactions(); + assert_eq!(interactions.len(), 2); +} + +#[test] +fn test_reduction_sizes() { + // Test source_size and target_size methods + let mc = MaxCut::::unweighted(3, vec![(0, 1), (1, 2)]); + let reduction = ReduceTo::>::reduce_to(&mc); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert!(!source_size.components.is_empty()); + assert!(!target_size.components.is_empty()); + + // Test SG to MaxCut sizes + let sg = SpinGlass::::new(3, vec![((0, 1), 1)], vec![0, 0, 0]); + let reduction2 = ReduceTo::>::reduce_to(&sg); + + let source_size2 = reduction2.source_size(); + let target_size2 = reduction2.target_size(); + + assert!(!source_size2.components.is_empty()); + assert!(!target_size2.components.is_empty()); +} diff --git a/src/tests_unit/rules/spinglass_qubo.rs b/src/tests_unit/rules/spinglass_qubo.rs new file mode 100644 index 0000000..8955c6c --- /dev/null +++ b/src/tests_unit/rules/spinglass_qubo.rs @@ -0,0 +1,135 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_qubo_to_spinglass() { + // Simple 2-variable QUBO: minimize x0 + x1 - 2*x0*x1 + // Optimal at x = [0, 0] (value 0) or x = [1, 1] (value 0) + let qubo = QUBO::from_matrix(vec![vec![1.0, -2.0], vec![0.0, 1.0]]); + let reduction = ReduceTo::>::reduce_to(&qubo); + let sg = 
reduction.target_problem(); + + let solver = BruteForce::new(); + let sg_solutions = solver.find_best(sg); + let qubo_solutions: Vec<_> = sg_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Verify solutions are valid + assert!(!qubo_solutions.is_empty()); + + // Original QUBO at [0,0]: 0, at [1,1]: 1 + 1 - 2 = 0, at [0,1]: 1, at [1,0]: 1 + // So [0,0] and [1,1] are optimal with value 0 + for sol in &qubo_solutions { + let val = qubo.solution_size(sol).size; + assert!( + val <= 0.0 + 1e-6, + "Expected optimal value near 0, got {}", + val + ); + } +} + +#[test] +fn test_spinglass_to_qubo() { + // Simple SpinGlass: J_01 = -1 (ferromagnetic: prefers aligned spins) + // Energy: J_01 * s0 * s1 = -s0 * s1 + // Aligned spins give -1, anti-aligned give +1 + // Minimum is -1 at [0,0] or [1,1] (both give s=-1,-1 or s=+1,+1) + let sg = SpinGlass::::new(2, vec![((0, 1), -1.0)], vec![0.0, 0.0]); + let reduction = ReduceTo::>::reduce_to(&sg); + let qubo = reduction.target_problem(); + + let solver = BruteForce::new(); + let qubo_solutions = solver.find_best(qubo); + + // Ferromagnetic: aligned spins are optimal + for sol in &qubo_solutions { + assert_eq!(sol[0], sol[1], "Ferromagnetic should have aligned spins"); + } +} + +#[test] +fn test_roundtrip_qubo_sg_qubo() { + let original = QUBO::from_matrix(vec![vec![-1.0, 2.0], vec![0.0, -1.0]]); + let solver = BruteForce::new(); + let original_solutions = solver.find_best(&original); + let _original_val = original.solution_size(&original_solutions[0]).size; + + // QUBO -> SG -> QUBO + let reduction1 = ReduceTo::>::reduce_to(&original); + let sg = reduction1.target_problem().clone(); + let reduction2 = ReduceTo::>::reduce_to(&sg); + let roundtrip = reduction2.target_problem(); + + let roundtrip_solutions = solver.find_best(roundtrip); + let _roundtrip_val = roundtrip.solution_size(&roundtrip_solutions[0]).size; + + // The solutions should have the same configuration + // (optimal configs should match) + let orig_configs: std::collections::HashSet<_> = original_solutions.iter().collect(); + let rt_configs: std::collections::HashSet<_> = roundtrip_solutions.iter().collect(); + assert!( + orig_configs.intersection(&rt_configs).count() > 0, + "At least one optimal solution should match" + ); +} + +#[test] +fn test_antiferromagnetic() { + // Antiferromagnetic: J > 0, prefers anti-aligned spins + let sg = SpinGlass::::new(2, vec![((0, 1), 1.0)], vec![0.0, 0.0]); + let reduction = ReduceTo::>::reduce_to(&sg); + let qubo = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(qubo); + + // Anti-ferromagnetic: opposite spins are optimal + for sol in &solutions { + assert_ne!( + sol[0], sol[1], + "Antiferromagnetic should have opposite spins" + ); + } +} + +#[test] +fn test_with_onsite_fields() { + // SpinGlass with only on-site field h_0 = 1 + // Energy = h_0 * s_0 = s_0 + // Minimum at s_0 = -1, i.e., x_0 = 0 + let sg = SpinGlass::::new(1, vec![], vec![1.0]); + let reduction = ReduceTo::>::reduce_to(&sg); + let qubo = reduction.target_problem(); + + let solver = BruteForce::new(); + let solutions = solver.find_best(qubo); + + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0], "Should prefer x=0 (s=-1)"); +} + +#[test] +fn test_reduction_sizes() { + // Test source_size and target_size methods + let qubo = QUBO::from_matrix(vec![vec![1.0, -2.0], vec![0.0, 1.0]]); + let reduction = ReduceTo::>::reduce_to(&qubo); + + let source_size = reduction.source_size(); + let target_size 
= reduction.target_size(); + + assert!(!source_size.components.is_empty()); + assert!(!target_size.components.is_empty()); + + // Test SG to QUBO sizes + let sg = SpinGlass::::new(3, vec![((0, 1), -1.0)], vec![0.0, 0.0, 0.0]); + let reduction2 = ReduceTo::>::reduce_to(&sg); + + let source_size2 = reduction2.source_size(); + let target_size2 = reduction2.target_size(); + + assert!(!source_size2.components.is_empty()); + assert!(!target_size2.components.is_empty()); +} diff --git a/src/tests_unit/rules/traits.rs b/src/tests_unit/rules/traits.rs new file mode 100644 index 0000000..8effa4d --- /dev/null +++ b/src/tests_unit/rules/traits.rs @@ -0,0 +1,4 @@ +#[test] +fn test_traits_compile() { + // Traits should compile - actual tests in reduction implementations +} diff --git a/src/tests_unit/rules/unitdiskmapping/alpha_tensor.rs b/src/tests_unit/rules/unitdiskmapping/alpha_tensor.rs new file mode 100644 index 0000000..d48d99f --- /dev/null +++ b/src/tests_unit/rules/unitdiskmapping/alpha_tensor.rs @@ -0,0 +1,163 @@ +use super::*; + +#[test] +fn test_simple_path_alpha_tensor() { + // Path graph: 0-1-2, all weight 1, pins = [0, 2] + let edges = vec![(0, 1), (1, 2)]; + let weights = vec![1, 1, 1]; + let pins = vec![0, 2]; + + let tensor = compute_alpha_tensor(3, &edges, &weights, &pins); + + // Config 0b00: neither pin in IS -> MIS can include vertex 1 -> MIS = 1 + // Config 0b01: pin 0 (vertex 0) in -> vertex 1 blocked -> MIS = 1 + // Config 0b10: pin 1 (vertex 2) in -> vertex 1 blocked -> MIS = 1 + // Config 0b11: both pins in -> vertices 0,2 in IS, vertex 1 blocked -> MIS = 2 + assert_eq!(tensor, vec![1, 1, 1, 2]); +} + +#[test] +fn test_triangle_alpha_tensor() { + // Triangle: 0-1, 1-2, 0-2, all weight 1, pins = [0, 1, 2] + let edges = vec![(0, 1), (1, 2), (0, 2)]; + let weights = vec![1, 1, 1]; + let pins = vec![0, 1, 2]; + + let tensor = compute_alpha_tensor(3, &edges, &weights, &pins); + + // When all vertices are pins: + // 0b000: all pins forced OUT -> no vertices available -> MIS = 0 + // 0b001: vertex 0 in, others forced out -> MIS = 1 + // 0b010: vertex 1 in, others forced out -> MIS = 1 + // 0b011: vertices 0,1 in -> INVALID (adjacent) -> i32::MIN + // 0b100: vertex 2 in, others forced out -> MIS = 1 + // 0b101: vertices 0,2 in -> INVALID (adjacent) -> i32::MIN + // 0b110: vertices 1,2 in -> INVALID (adjacent) -> i32::MIN + // 0b111: all in -> INVALID (all adjacent) -> i32::MIN + assert_eq!( + tensor, + vec![0, 1, 1, i32::MIN, 1, i32::MIN, i32::MIN, i32::MIN] + ); +} + +#[test] +fn test_mis_compactify_simple() { + // From path graph test + let mut tensor = vec![1, 1, 1, 2]; + mis_compactify(&mut tensor); + + // Entry 0b00 (val=1): is it dominated? + // - By 0b01 (val=1)? (0b01 & 0b00) == 0b00 != 0b01, NO + // - By 0b10 (val=1)? (0b10 & 0b00) == 0b00 != 0b10, NO + // - By 0b11 (val=2)? (0b11 & 0b00) == 0b00 != 0b11, NO + // Entry 0b01 (val=1): + // - By 0b11 (val=2)? (0b11 & 0b01) == 0b01, but val=1 <= val=2, YES dominated + // Entry 0b10 (val=1): + // - By 0b11 (val=2)? 
(0b11 & 0b10) == 0b10, but val=1 <= val=2, YES dominated
+
+ // After compactify: entries 0b01 and 0b10 should be i32::MIN
+ assert_eq!(tensor[0], 1); // 0b00 not dominated
+ assert_eq!(tensor[1], i32::MIN); // 0b01 dominated by 0b11
+ assert_eq!(tensor[2], i32::MIN); // 0b10 dominated by 0b11
+ assert_eq!(tensor[3], 2); // 0b11 not dominated
+}
+
+#[test]
+fn test_is_diff_by_const() {
+ let t1 = vec![3, i32::MIN, i32::MIN, 5];
+ let t2 = vec![2, i32::MIN, i32::MIN, 4];
+
+ let (is_equiv, diff) = is_diff_by_const(&t1, &t2);
+ assert!(is_equiv);
+ assert_eq!(diff, 1); // 3-2 = 1, 5-4 = 1
+
+ let t3 = vec![3, i32::MIN, i32::MIN, 6];
+ let (is_equiv2, _) = is_diff_by_const(&t1, &t3);
+ assert!(!is_equiv2); // 3-3=0, 5-6=-1, not constant
+}
+
+#[test]
+fn test_weighted_mis_exhaustive() {
+ // Path: 0-1-2, weights [3, 1, 3]
+ let edges = vec![(0, 1), (1, 2)];
+ let weights = vec![3, 1, 3];
+
+ let mis = weighted_mis_exhaustive(3, &edges, &weights);
+ assert_eq!(mis, 6); // Select vertices 0 and 2
+}
+
+#[test]
+fn test_triangular_unit_disk_edges() {
+ // Simple case: two adjacent nodes on triangular lattice
+ // Nodes at (1, 1) and (1, 2) should be connected (distance ~0.866)
+ let locs = vec![(1, 1), (1, 2)];
+ let edges = build_triangular_unit_disk_edges(&locs);
+ assert_eq!(edges.len(), 1);
+ assert_eq!(edges[0], (0, 1));
+
+ // Nodes at (1, 1) and (3, 1) should NOT be connected (distance = 2)
+ let locs2 = vec![(1, 1), (3, 1)];
+ let edges2 = build_triangular_unit_disk_edges(&locs2);
+ assert_eq!(edges2.len(), 0);
+}
+
+#[test]
+fn test_verify_tri_turn() {
+ use super::super::triangular::TriTurn;
+
+ let gadget = TriTurn;
+ let result = verify_triangular_gadget(&gadget);
+ assert!(result.is_ok(), "TriTurn verification failed: {:?}", result);
+}
+
+#[test]
+fn test_verify_tri_cross_false() {
+ use super::super::triangular::TriCross;
+
+ let gadget = TriCross::<false>;
+ let result = verify_triangular_gadget(&gadget);
+ assert!(
+ result.is_ok(),
+ "TriCross verification failed: {:?}",
+ result
+ );
+}
+
+#[test]
+fn test_verify_tri_cross_true() {
+ use super::super::triangular::TriCross;
+
+ let gadget = TriCross::<true>;
+ let result = verify_triangular_gadget(&gadget);
+ assert!(
+ result.is_ok(),
+ "TriCross verification failed: {:?}",
+ result
+ );
+}
+
+#[test]
+fn test_verify_tri_branch() {
+ use super::super::triangular::TriBranch;
+
+ let gadget = TriBranch;
+ let result = verify_triangular_gadget(&gadget);
+ assert!(
+ result.is_ok(),
+ "TriBranch verification failed: {:?}",
+ result
+ );
+}
+
+#[test]
+fn test_verify_tri_tcon_left() {
+ use super::super::triangular::TriTConLeft;
+
+ let gadget = TriTConLeft;
+ let result = verify_triangular_gadget(&gadget);
+ assert!(
+ result.is_ok(),
+ "TriTConLeft verification failed: {:?}",
+ result
+ );
+}
diff --git a/src/tests_unit/rules/unitdiskmapping/copyline.rs b/src/tests_unit/rules/unitdiskmapping/copyline.rs
new file mode 100644
index 0000000..41e1572
--- /dev/null
+++ b/src/tests_unit/rules/unitdiskmapping/copyline.rs
@@ -0,0 +1,343 @@
+use super::*;
+
+#[test]
+fn test_create_copylines_path() {
+ // Path graph: 0-1-2
+ let edges = vec![(0, 1), (1, 2)];
+ let order = vec![0, 1, 2];
+ let lines = create_copylines(3, &edges, &order);
+
+ assert_eq!(lines.len(), 3);
+ // Each vertex gets a copy line
+ assert_eq!(lines[0].vertex, 0);
+ assert_eq!(lines[1].vertex, 1);
+ assert_eq!(lines[2].vertex, 2);
+}
+
+#[test]
+fn test_copyline_locations() {
+ let line = CopyLine {
+ vertex: 0,
+ vslot: 1,
+ hslot: 1,
+ vstart: 1,
+ vstop: 1,
+ hstop: 3,
+ };
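+ // locations() should enumerate the grid cells occupied by this vertex's L-shaped copy line for the given padding and spacing.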
let locs = line.locations(2, 4); // padding=2, spacing=4 + assert!(!locs.is_empty()); +} + +#[test] +fn test_create_copylines_empty() { + let edges: Vec<(usize, usize)> = vec![]; + let order: Vec = vec![]; + let lines = create_copylines(0, &edges, &order); + assert!(lines.is_empty()); +} + +#[test] +fn test_create_copylines_single_vertex() { + let edges: Vec<(usize, usize)> = vec![]; + let order = vec![0]; + let lines = create_copylines(1, &edges, &order); + + assert_eq!(lines.len(), 1); + assert_eq!(lines[0].vertex, 0); + assert_eq!(lines[0].vslot, 1); +} + +#[test] +fn test_create_copylines_triangle() { + // Triangle: 0-1, 1-2, 0-2 + let edges = vec![(0, 1), (1, 2), (0, 2)]; + let order = vec![0, 1, 2]; + let lines = create_copylines(3, &edges, &order); + + assert_eq!(lines.len(), 3); + // Vertex 0 should have hstop reaching to vertex 2's slot + assert!(lines[0].hstop >= 2); +} + +#[test] +fn test_copyline_center_location() { + let line = CopyLine::new(0, 2, 3, 1, 3, 4); + let (row, col) = line.center_location(1, 4); + // Julia 1-indexed: row = 4 * (3-1) + 1 + 2 = 11, col = 4 * (2-1) + 1 + 1 = 6 + // Rust 0-indexed: row = 11 - 1 = 10, col = 6 - 1 = 5 + assert_eq!(row, 10); + assert_eq!(col, 5); +} + +#[test] +fn test_remove_order_path() { + // Path: 0-1-2 + let edges = vec![(0, 1), (1, 2)]; + let order = vec![0, 1, 2]; + let removal = remove_order(3, &edges, &order); + + // Vertex 2 has no later neighbors, so it can be removed at step 2 + // Vertex 1's latest neighbor is 2, so can be removed at step 2 + // Vertex 0's latest neighbor is 1, so can be removed at step 1 + assert_eq!(removal.len(), 3); +} + +#[test] +fn test_mis_overhead_copyline() { + let line = CopyLine::new(0, 1, 2, 1, 2, 3); + let spacing = 4; + let padding = 2; + let locs = line.copyline_locations(padding, spacing); + let overhead = mis_overhead_copyline(&line, spacing, padding); + // Julia formula for UnWeighted mode: length(locs) / 2 + assert_eq!(overhead, locs.len() / 2); +} + +#[test] +fn test_copyline_serialization() { + let line = CopyLine::new(0, 1, 2, 1, 2, 3); + let json = serde_json::to_string(&line).unwrap(); + let deserialized: CopyLine = serde_json::from_str(&json).unwrap(); + assert_eq!(line, deserialized); +} + +#[test] +fn test_create_copylines_star() { + // Star graph: 0 connected to 1, 2, 3 + let edges = vec![(0, 1), (0, 2), (0, 3)]; + let order = vec![0, 1, 2, 3]; + let lines = create_copylines(4, &edges, &order); + + assert_eq!(lines.len(), 4); + // Vertex 0 (center) should have hstop reaching the last neighbor + assert_eq!(lines[0].hstop, 4); +} + +#[test] +fn test_copyline_locations_detailed() { + let line = CopyLine::new(0, 1, 2, 1, 2, 2); + let locs = line.locations(0, 2); + + // With padding=0, spacing=2 (0-indexed output): + // Julia 1-indexed: col = 2*(1-1) + 0 + 1 = 1 -> Rust 0-indexed: col = 0 + // Julia 1-indexed: row = 2*(2-1) + 0 + 2 = 4 -> Rust 0-indexed: row = 3 + // Vertical segment covers rows around the center + + assert!(!locs.is_empty()); + // Check that we have vertical positions (col = 0 in 0-indexed) + let has_vertical = locs.iter().any(|&(_r, c, _)| c == 0); + assert!(has_vertical); +} + +#[test] +fn test_copyline_locations_simple() { + // Simple L-shape: vslot=1, hslot=1, vstart=1, vstop=2, hstop=2 + let line = CopyLine::new(0, 1, 1, 1, 2, 2); + let locs = line.copyline_locations(2, 4); // padding=2, spacing=4 + + // Center: I = 4*(1-1) + 2 + 2 = 4, J = 4*(1-1) + 2 + 1 = 3 + // vstart=1, hslot=1: no "up" segment + // vstop=2, hslot=1: "down" segment from I to I + 4*(2-1) - 1 = 
4 to 7 + // hstop=2, vslot=1: "right" segment from J+2=5 to J + 4*(2-1) - 1 = 6 + + assert!(!locs.is_empty()); + // Should have nodes at every cell, not just at spacing intervals + // Check we have more than just the sparse waypoints + let node_count = locs.len(); + println!("Dense locations for simple L-shape: {:?}", locs); + println!("Node count: {}", node_count); + + // Dense should have many more nodes than sparse (which has ~3-4) + assert!( + node_count > 4, + "Dense locations should have more than sparse" + ); +} + +#[test] +fn test_copyline_locations_matches_julia() { + // Test case that can be verified against Julia's UnitDiskMapping + // Using vslot=1, hslot=2, vstart=1, vstop=2, hstop=3, padding=2, spacing=4 + let line = CopyLine::new(0, 1, 2, 1, 2, 3); + let locs = line.copyline_locations(2, 4); + + // Julia 1-indexed: I = 4*(2-1) + 2 + 2 = 8, J = 4*(1-1) + 2 + 1 = 3 + // Rust 0-indexed: row = 7, col = 2 + // Center node at (I, J+1) in Julia = (8, 4) -> Rust 0-indexed = (7, 3) + let has_center = locs.iter().any(|&(r, c, _)| r == 7 && c == 3); + assert!( + has_center, + "Center node at (7, 3) should be present. Locs: {:?}", + locs + ); + + // All positions should be valid (0-indexed, so >= 0) + for &(_row, _col, weight) in &locs { + assert!(weight >= 1, "Weight should be >= 1"); + } + + println!("Dense locations: {:?}", locs); +} + +// === Julia comparison tests === +// These test cases are derived from Julia's UnitDiskMapping tests + +#[test] +fn test_mis_overhead_julia_cases() { + // Test cases using UnWeighted formula: length(copyline_locations) / 2 + // Using vslot=5, hslot=5 as the base configuration + let spacing = 4; + let padding = 2; + + let test_cases = [ + // (vstart, vstop, hstop) + (3, 7, 8), + (3, 5, 8), + (5, 9, 8), + (5, 5, 8), + (1, 7, 5), + (5, 8, 5), + (1, 5, 5), + (5, 5, 5), + ]; + + for (vstart, vstop, hstop) in test_cases { + let line = CopyLine::new(1, 5, 5, vstart, vstop, hstop); + let locs = line.copyline_locations(padding, spacing); + let overhead = mis_overhead_copyline(&line, spacing, padding); + + // UnWeighted formula: length(locs) / 2 + let expected = locs.len() / 2; + + assert_eq!( + overhead, expected, + "MIS overhead mismatch for (vstart={}, vstop={}, hstop={}): got {}, expected {}", + vstart, vstop, hstop, overhead, expected + ); + } +} + +#[test] +fn test_create_copylines_petersen() { + // Petersen graph edges (0-indexed) + let edges = vec![ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 0), // outer pentagon + (5, 7), + (7, 9), + (9, 6), + (6, 8), + (8, 5), // inner star + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), // connections + ]; + let order: Vec = (0..10).collect(); + + let lines = create_copylines(10, &edges, &order); + + // Verify all lines are created + assert_eq!(lines.len(), 10); + + // Verify basic invariants + for (i, &v) in order.iter().enumerate() { + let line = &lines[v]; + assert_eq!(line.vertex, v, "Vertex mismatch"); + assert_eq!(line.vslot, i + 1, "vslot should be position + 1"); + assert!( + line.vstart <= line.hslot && line.hslot <= line.vstop, + "hslot should be between vstart and vstop for vertex {}", + v + ); + assert!( + line.hstop >= line.vslot, + "hstop should be >= vslot for vertex {}", + v + ); + } + + // Verify that neighboring vertices have overlapping L-shapes + for &(u, v) in &edges { + let line_u = &lines[u]; + let line_v = &lines[v]; + // Two lines cross if one's vslot is in the other's hslot range + // and one's hslot is in the other's vslot range + let u_pos = order.iter().position(|&x| x == 
u).unwrap() + 1; + let v_pos = order.iter().position(|&x| x == v).unwrap() + 1; + // For a valid embedding, connected vertices should have crossing copy lines + assert!( + line_u.hstop >= v_pos || line_v.hstop >= u_pos, + "Connected vertices {} and {} should have overlapping L-shapes", + u, + v + ); + } +} + +#[test] +fn test_remove_order_detailed() { + // Path graph: 0-1-2 + let edges = vec![(0, 1), (1, 2)]; + let order = vec![0, 1, 2]; + let removal = remove_order(3, &edges, &order); + + // Trace through Julia's algorithm: + // Step 0: add vertex 0, counts = [0, 1, 0], totalcounts = [1, 2, 1] + // vertex 0: counts[0]=0 != totalcounts[0]=1, not removed + // vertex 1: counts[1]=1 != totalcounts[1]=2, not removed + // vertex 2: counts[2]=0 != totalcounts[2]=1, not removed + // removal[0] = [] + // Step 1: add vertex 1, counts = [1, 2, 1], totalcounts = [1, 2, 1] + // vertex 0: counts[0]=1 == totalcounts[0]=1, remove at max(1, 0)=1 + // vertex 1: counts[1]=2 == totalcounts[1]=2, remove at max(1, 1)=1 + // vertex 2: counts[2]=1 == totalcounts[2]=1, remove at max(1, 2)=2 + // removal[1] = [0, 1] + // Step 2: add vertex 2, counts = [1, 3, 2] + // vertex 2 already marked removed at step 2 + // removal[2] = [2] + + assert_eq!(removal.len(), 3); + // At step 1, vertices 0 and 1 can be removed + assert!(removal[1].contains(&0) || removal[1].contains(&1)); + // At step 2, vertex 2 can be removed + assert!(removal[2].contains(&2)); +} + +#[test] +fn test_copyline_locations_node_count() { + // For a copy line, copyline_locations should produce nodes at every cell + // The number of nodes should be odd (ends + center) + let spacing = 4; + + let test_cases = [(1, 1, 1, 2), (1, 2, 1, 3), (1, 1, 2, 3), (3, 7, 5, 8)]; + + for (vslot, hslot, vstart, hstop) in test_cases { + let vstop = hslot; // Simplified: vstop = hslot + let line = CopyLine::new(0, vslot, hslot, vstart, vstop, hstop); + let locs = line.copyline_locations(2, spacing); + + // Node count should be odd (property of copy line construction) + // This is verified in Julia's test: @assert length(locs) % 2 == 1 + println!( + "vslot={}, hslot={}, vstart={}, vstop={}, hstop={}: {} nodes", + vslot, + hslot, + vstart, + vstop, + hstop, + locs.len() + ); + + // All weights should be 1 or 2 (for non-center nodes) + // except center node which has weight = nline (number of line segments) + for &(row, col, weight) in &locs { + assert!(row > 0 && col > 0, "Coordinates should be positive"); + assert!(weight >= 1, "Weight should be >= 1"); + } + } +} diff --git a/src/tests_unit/rules/unitdiskmapping/grid.rs b/src/tests_unit/rules/unitdiskmapping/grid.rs new file mode 100644 index 0000000..a0d5759 --- /dev/null +++ b/src/tests_unit/rules/unitdiskmapping/grid.rs @@ -0,0 +1,211 @@ +use super::*; + +#[test] +fn test_mapping_grid_create() { + let grid = MappingGrid::new(10, 10, 4); + assert_eq!(grid.size(), (10, 10)); + assert_eq!(grid.spacing(), 4); +} + +#[test] +fn test_mapping_grid_with_padding() { + let grid = MappingGrid::with_padding(8, 12, 3, 5); + assert_eq!(grid.size(), (8, 12)); + assert_eq!(grid.spacing(), 3); + assert_eq!(grid.padding(), 5); +} + +#[test] +fn test_mapping_grid_add_node() { + let mut grid = MappingGrid::new(10, 10, 4); + grid.add_node(2, 3, 1); + assert!(grid.is_occupied(2, 3)); + assert!(!grid.is_occupied(2, 4)); +} + +#[test] +fn test_mapping_grid_get_out_of_bounds() { + let grid = MappingGrid::new(5, 5, 2); + assert!(grid.get(0, 0).is_some()); + assert!(grid.get(4, 4).is_some()); + assert!(grid.get(5, 0).is_none()); + 
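+ // Coordinates are 0-based, so index 5 in either dimension is already outside the 5x5 grid.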
assert!(grid.get(0, 5).is_none()); + assert!(grid.get(10, 10).is_none()); +} + +#[test] +fn test_mapping_grid_add_node_doubled() { + let mut grid = MappingGrid::new(10, 10, 4); + grid.add_node(2, 3, 5); + assert_eq!(grid.get(2, 3), Some(&CellState::Occupied { weight: 5 })); + // Julia requires weights to match when doubling: + // @assert m[i,j].weight == node.weight + // Result keeps the same weight (not summed) + grid.add_node(2, 3, 5); + assert_eq!(grid.get(2, 3), Some(&CellState::Doubled { weight: 5 })); +} + +#[test] +fn test_mapping_grid_connect() { + let mut grid = MappingGrid::new(10, 10, 4); + grid.add_node(3, 4, 7); + assert_eq!(grid.get(3, 4), Some(&CellState::Occupied { weight: 7 })); + grid.connect(3, 4); + assert_eq!(grid.get(3, 4), Some(&CellState::Connected { weight: 7 })); +} + +#[test] +fn test_mapping_grid_connect_empty_cell() { + let mut grid = MappingGrid::new(10, 10, 4); + grid.connect(3, 4); + assert_eq!(grid.get(3, 4), Some(&CellState::Empty)); +} + +#[test] +fn test_mapping_grid_matches_pattern() { + let mut grid = MappingGrid::new(10, 10, 4); + grid.add_node(2, 2, 1); + grid.add_node(2, 3, 1); + grid.add_node(3, 2, 1); + + let pattern = vec![(0, 0), (0, 1), (1, 0)]; + assert!(grid.matches_pattern(&pattern, 2, 2)); + assert!(!grid.matches_pattern(&pattern, 0, 0)); +} + +#[test] +fn test_mapping_grid_matches_pattern_out_of_bounds() { + let grid = MappingGrid::new(5, 5, 2); + let pattern = vec![(0, 0), (1, 1)]; + assert!(!grid.matches_pattern(&pattern, 10, 10)); +} + +#[test] +fn test_mapping_grid_cross_at() { + let grid = MappingGrid::new(20, 20, 4); + // Julia's crossat uses larger position for col calculation (1-indexed) + // Julia: row = (hslot - 1) * spacing + 2 + padding = 4 + 2 + 2 = 8 + // Julia: col = (larger_vslot - 1) * spacing + 1 + padding = 8 + 1 + 2 = 11 + // Rust 0-indexed: row = 8 - 1 = 7, col = 11 - 1 = 10 + let (row, col) = grid.cross_at(1, 3, 2); + assert_eq!(row, 7); // 0-indexed + assert_eq!(col, 10); // 0-indexed + + let (row2, col2) = grid.cross_at(3, 1, 2); + assert_eq!((row, col), (row2, col2)); +} + +#[test] +fn test_cell_state_weight() { + assert_eq!(CellState::Empty.weight(), 0); + assert_eq!(CellState::Occupied { weight: 5 }.weight(), 5); + assert_eq!(CellState::Doubled { weight: 10 }.weight(), 10); + assert_eq!(CellState::Connected { weight: 3 }.weight(), 3); +} + +#[test] +fn test_cell_state_is_empty() { + assert!(CellState::Empty.is_empty()); + assert!(!CellState::Occupied { weight: 1 }.is_empty()); + assert!(!CellState::Doubled { weight: 2 }.is_empty()); + assert!(!CellState::Connected { weight: 1 }.is_empty()); +} + +#[test] +fn test_cell_state_is_occupied() { + assert!(!CellState::Empty.is_occupied()); + assert!(CellState::Occupied { weight: 1 }.is_occupied()); + assert!(CellState::Doubled { weight: 2 }.is_occupied()); + assert!(CellState::Connected { weight: 1 }.is_occupied()); +} + +#[test] +fn test_mapping_grid_set() { + let mut grid = MappingGrid::new(5, 5, 2); + grid.set(2, 3, CellState::Occupied { weight: 7 }); + assert_eq!(grid.get(2, 3), Some(&CellState::Occupied { weight: 7 })); + + // Out of bounds set should be ignored + grid.set(10, 10, CellState::Occupied { weight: 1 }); + assert!(grid.get(10, 10).is_none()); +} + +#[test] +fn test_mapping_grid_get_mut() { + let mut grid = MappingGrid::new(5, 5, 2); + grid.add_node(1, 1, 3); + + if let Some(cell) = grid.get_mut(1, 1) { + *cell = CellState::Connected { weight: 5 }; + } + assert_eq!(grid.get(1, 1), Some(&CellState::Connected { weight: 5 })); + + // Out of bounds get_mut 
should return None + assert!(grid.get_mut(10, 10).is_none()); +} + +#[test] +fn test_mapping_grid_occupied_coords() { + let mut grid = MappingGrid::new(5, 5, 2); + grid.add_node(1, 2, 1); + grid.add_node(3, 4, 2); + grid.add_node(0, 0, 1); + + let coords = grid.occupied_coords(); + assert_eq!(coords.len(), 3); + assert!(coords.contains(&(0, 0))); + assert!(coords.contains(&(1, 2))); + assert!(coords.contains(&(3, 4))); +} + +#[test] +fn test_mapping_grid_add_node_out_of_bounds() { + let mut grid = MappingGrid::new(5, 5, 2); + // Should silently ignore out of bounds + grid.add_node(10, 10, 1); + assert!(grid.get(10, 10).is_none()); +} + +#[test] +fn test_mapping_grid_connect_out_of_bounds() { + let mut grid = MappingGrid::new(5, 5, 2); + // Should silently ignore out of bounds + grid.connect(10, 10); +} + +#[test] +fn test_cell_state_display() { + assert_eq!(format!("{}", CellState::Empty), "⋅"); + assert_eq!(format!("{}", CellState::Occupied { weight: 1 }), "●"); + assert_eq!(format!("{}", CellState::Doubled { weight: 2 }), "◉"); + assert_eq!(format!("{}", CellState::Connected { weight: 1 }), "◇"); +} + +#[test] +fn test_mapping_grid_display() { + let mut grid = MappingGrid::new(3, 3, 2); + grid.add_node(0, 0, 1); + grid.add_node(1, 1, 1); + let display = format!("{}", grid); + assert!(display.contains("●")); // Has occupied nodes + assert!(display.contains("⋅")); // Has empty cells +} + +#[test] +fn test_mapping_grid_format_with_config_none() { + let mut grid = MappingGrid::new(3, 3, 2); + grid.add_node(1, 1, 1); + let output = grid.format_with_config(None); + assert!(output.contains("●")); // Occupied nodes +} + +#[test] +fn test_mapping_grid_format_with_config_some() { + let mut grid = MappingGrid::new(3, 3, 2); + grid.add_node(1, 1, 1); + // Config with node at (1,1) selected + let config = vec![0, 0, 0, 0, 1, 0, 0, 0, 0]; // 3x3 = 9 cells + let output = grid.format_with_config(Some(&config)); + // Should have some output + assert!(!output.is_empty()); +} diff --git a/src/tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs b/src/tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs new file mode 100644 index 0000000..a9261ec --- /dev/null +++ b/src/tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs @@ -0,0 +1,45 @@ +use super::*; + +#[test] +fn test_weighted_ksg_cross_false_mis_overhead() { + assert_eq!(WeightedKsgCross::.mis_overhead(), -2); +} + +#[test] +fn test_weighted_ksg_cross_true_mis_overhead() { + assert_eq!(WeightedKsgCross::.mis_overhead(), -2); +} + +#[test] +fn test_weighted_ksg_turn_mis_overhead() { + assert_eq!(WeightedKsgTurn.mis_overhead(), -2); +} + +#[test] +fn test_weighted_ksg_branch_weights() { + let branch = WeightedKsgBranch; + assert_eq!(branch.source_weights(), vec![2, 2, 2, 3, 2, 2, 2, 2]); + assert_eq!(branch.mapped_weights(), vec![2, 3, 2, 2, 2, 2]); +} + +#[test] +fn test_weighted_ksg_tcon_weights() { + let tcon = WeightedKsgTCon; + assert_eq!(tcon.source_weights(), vec![2, 1, 2, 2]); + assert_eq!(tcon.mapped_weights(), vec![2, 1, 2, 2]); +} + +#[test] +fn test_weighted_ksg_trivial_turn_weights() { + let turn = WeightedKsgTrivialTurn; + assert_eq!(turn.source_weights(), vec![1, 1]); + assert_eq!(turn.mapped_weights(), vec![1, 1]); +} + +#[test] +fn test_weighted_ksg_pattern_from_tape_idx() { + assert!(WeightedKsgPattern::from_tape_idx(0).is_some()); + assert!(WeightedKsgPattern::from_tape_idx(12).is_some()); + assert!(WeightedKsgPattern::from_tape_idx(100).is_some()); + assert!(WeightedKsgPattern::from_tape_idx(200).is_none()); +} diff --git 
a/src/tests_unit/rules/unitdiskmapping/ksg/mapping.rs b/src/tests_unit/rules/unitdiskmapping/ksg/mapping.rs new file mode 100644 index 0000000..e116887 --- /dev/null +++ b/src/tests_unit/rules/unitdiskmapping/ksg/mapping.rs @@ -0,0 +1,101 @@ +use super::*; +use crate::topology::Graph; + +#[test] +fn test_embed_graph_path() { + // Path graph: 0-1-2 + let edges = vec![(0, 1), (1, 2)]; + let result = embed_graph(3, &edges, &[0, 1, 2]); + + assert!(result.is_some()); + let grid = result.unwrap(); + assert!(!grid.occupied_coords().is_empty()); +} + +#[test] +fn test_map_unweighted_triangle() { + // Triangle graph + let edges = vec![(0, 1), (1, 2), (0, 2)]; + let result = map_unweighted(3, &edges); + + assert!(result.grid_graph.num_vertices() > 0); + // mis_overhead can be negative due to gadgets, so we just verify the function completes +} + +#[test] +fn test_map_weighted_triangle() { + // Triangle graph + let edges = vec![(0, 1), (1, 2), (0, 2)]; + let result = map_weighted(3, &edges); + + assert!(result.grid_graph.num_vertices() > 0); +} + +#[test] +fn test_mapping_result_config_back_unweighted() { + let edges = vec![(0, 1)]; + let result = map_unweighted(2, &edges); + + // Create a dummy config + let config: Vec = vec![0; result.grid_graph.num_vertices()]; + let original = result.map_config_back(&config); + + assert_eq!(original.len(), 2); +} + +#[test] +fn test_mapping_result_config_back_weighted() { + let edges = vec![(0, 1)]; + let result = map_weighted(2, &edges); + + // Create a dummy config + let config: Vec = vec![0; result.grid_graph.num_vertices()]; + let original = result.map_config_back(&config); + + assert_eq!(original.len(), 2); +} + +#[test] +fn test_map_config_copyback_simple() { + // Create a simple copyline + let line = CopyLine::new(0, 1, 1, 1, 1, 3); + let lines = vec![line]; + + // Create config with some nodes selected + let locs = lines[0].copyline_locations(PADDING, SPACING); + let (rows, cols) = (20, 20); + let mut config = vec![vec![0; cols]; rows]; + + // Select all nodes in copyline + for &(row, col, _) in &locs { + if row < rows && col < cols { + config[row][col] = 1; + } + } + + let doubled_cells = HashSet::new(); + let result = map_config_copyback(&lines, PADDING, SPACING, &config, &doubled_cells); + + // count = len(locs) (all selected with ci=1), overhead = len/2 + // result = count - overhead = n - n/2 = n/2 + let n = locs.len(); + let overhead = n / 2; + let expected = n - overhead; + assert_eq!(result[0], expected); +} + +#[test] +fn test_map_unweighted_with_method() { + let edges = vec![(0, 1), (1, 2)]; + let result = map_unweighted_with_method(3, &edges, PathDecompositionMethod::greedy()); + + assert!(result.grid_graph.num_vertices() > 0); +} + +#[test] +fn test_map_weighted_with_method() { + let edges = vec![(0, 1), (1, 2)]; + let result = map_weighted_with_method(3, &edges, PathDecompositionMethod::greedy()); + + assert!(result.grid_graph.num_vertices() > 0); +} diff --git a/src/tests_unit/rules/unitdiskmapping/pathdecomposition.rs b/src/tests_unit/rules/unitdiskmapping/pathdecomposition.rs new file mode 100644 index 0000000..7b2ec98 --- /dev/null +++ b/src/tests_unit/rules/unitdiskmapping/pathdecomposition.rs @@ -0,0 +1,177 @@ +use super::*; + +#[test] +fn test_layout_empty() { + let layout = Layout::empty(5); + assert_eq!(layout.vertices.len(), 0); + assert_eq!(layout.vsep(), 0); + assert_eq!(layout.disconnected.len(), 5); + assert_eq!(layout.neighbors.len(), 0); +} + +#[test] +fn test_layout_new() { + // Path graph: 0-1-2 + let edges = vec![(0, 1), 
(1, 2)]; + let layout = Layout::new(3, &edges, vec![0, 1, 2]); + assert_eq!(layout.vertices, vec![0, 1, 2]); + assert_eq!(layout.vsep(), 1); // Path has pathwidth 1 +} + +#[test] +fn test_vsep_and_neighbors_path() { + // Path: 0-1-2 + let edges = vec![(0, 1), (1, 2)]; + let (vsep, _) = vsep_and_neighbors(3, &edges, &[0, 1, 2]); + assert_eq!(vsep, 1); +} + +#[test] +fn test_vsep_and_neighbors_star() { + // Star: 0 connected to 1, 2, 3 + let edges = vec![(0, 1), (0, 2), (0, 3)]; + // Order: 0, 1, 2, 3 - after adding 0, all others become neighbors + let (vsep, _) = vsep_and_neighbors(4, &edges, &[0, 1, 2, 3]); + assert_eq!(vsep, 3); // After adding 0, neighbors = {1, 2, 3} +} + +#[test] +fn test_extend() { + // Path: 0-1-2 + let edges = vec![(0, 1), (1, 2)]; + let layout = Layout::empty(3); + let layout = extend(3, &edges, &layout, 0); + assert_eq!(layout.vertices, vec![0]); + assert!(layout.neighbors.contains(&1)); + assert!(layout.disconnected.contains(&2)); +} + +#[test] +fn test_greedy_decompose_path() { + // Path: 0-1-2 + let edges = vec![(0, 1), (1, 2)]; + let layout = greedy_decompose(3, &edges); + assert_eq!(layout.vertices.len(), 3); + assert_eq!(layout.vsep(), 1); +} + +#[test] +fn test_greedy_decompose_triangle() { + // Triangle: 0-1, 1-2, 0-2 + let edges = vec![(0, 1), (1, 2), (0, 2)]; + let layout = greedy_decompose(3, &edges); + assert_eq!(layout.vertices.len(), 3); + assert_eq!(layout.vsep(), 2); // Triangle has pathwidth 2 +} + +#[test] +fn test_greedy_decompose_k4() { + // Complete graph K4 + let edges = vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]; + let layout = greedy_decompose(4, &edges); + assert_eq!(layout.vertices.len(), 4); + assert_eq!(layout.vsep(), 3); // K4 has pathwidth 3 +} + +#[test] +fn test_branch_and_bound_path() { + // Path: 0-1-2 + let edges = vec![(0, 1), (1, 2)]; + let layout = branch_and_bound(3, &edges); + assert_eq!(layout.vertices.len(), 3); + assert_eq!(layout.vsep(), 1); +} + +#[test] +fn test_branch_and_bound_triangle() { + // Triangle + let edges = vec![(0, 1), (1, 2), (0, 2)]; + let layout = branch_and_bound(3, &edges); + assert_eq!(layout.vertices.len(), 3); + assert_eq!(layout.vsep(), 2); +} + +#[test] +fn test_pathwidth_greedy() { + let edges = vec![(0, 1), (1, 2)]; + let layout = pathwidth(3, &edges, PathDecompositionMethod::greedy()); + assert_eq!(layout.vertices.len(), 3); + assert_eq!(layout.vsep(), 1); +} + +#[test] +fn test_pathwidth_minhthi() { + let edges = vec![(0, 1), (1, 2)]; + let layout = pathwidth(3, &edges, PathDecompositionMethod::MinhThiTrick); + assert_eq!(layout.vertices.len(), 3); + assert_eq!(layout.vsep(), 1); +} + +#[test] +fn test_vertex_order_from_layout() { + let layout = Layout { + vertices: vec![0, 1, 2], + vsep: 1, + neighbors: vec![], + disconnected: vec![], + }; + let order = vertex_order_from_layout(&layout); + // Returns vertices in same order as layout (matching Julia's behavior) + assert_eq!(order, vec![0, 1, 2]); +} + +#[test] +fn test_petersen_graph_pathwidth() { + // Petersen graph edges + let edges = vec![ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 0), // outer pentagon + (5, 7), + (7, 9), + (9, 6), + (6, 8), + (8, 5), // inner star + (0, 5), + (1, 6), + (2, 7), + (3, 8), + (4, 9), // connections + ]; + + let layout = pathwidth(10, &edges, PathDecompositionMethod::MinhThiTrick); + assert_eq!(layout.vertices.len(), 10); + // Petersen graph has pathwidth 5 + assert_eq!(layout.vsep(), 5); +} + +#[test] +fn test_cycle_graph_pathwidth() { + // Cycle C5: 0-1-2-3-4-0 + let edges = vec![(0, 1), (1, 
2), (2, 3), (3, 4), (4, 0)]; + let layout = pathwidth(5, &edges, PathDecompositionMethod::MinhThiTrick); + assert_eq!(layout.vertices.len(), 5); + // Cycle has pathwidth 2 + assert_eq!(layout.vsep(), 2); +} + +#[test] +fn test_disconnected_graph() { + // Two disconnected edges: 0-1, 2-3 + let edges = vec![(0, 1), (2, 3)]; + let layout = pathwidth(4, &edges, PathDecompositionMethod::MinhThiTrick); + assert_eq!(layout.vertices.len(), 4); + // Pathwidth is 1 (each component has pathwidth 1) + assert_eq!(layout.vsep(), 1); +} + +#[test] +fn test_empty_graph() { + // No edges + let edges: Vec<(usize, usize)> = vec![]; + let layout = pathwidth(5, &edges, PathDecompositionMethod::MinhThiTrick); + assert_eq!(layout.vertices.len(), 5); + assert_eq!(layout.vsep(), 0); // No edges means pathwidth 0 +} diff --git a/src/tests_unit/rules/unitdiskmapping/triangular/mapping.rs b/src/tests_unit/rules/unitdiskmapping/triangular/mapping.rs new file mode 100644 index 0000000..0374425 --- /dev/null +++ b/src/tests_unit/rules/unitdiskmapping/triangular/mapping.rs @@ -0,0 +1,74 @@ +use super::*; +use crate::topology::Graph; + +#[test] +fn test_map_weighted_basic() { + let edges = vec![(0, 1), (1, 2)]; + let result = map_weighted(3, &edges); + + assert!(result.grid_graph.num_vertices() > 0); + assert!(matches!( + result.grid_graph.grid_type(), + GridType::Triangular { .. } + )); +} + +#[test] +fn test_map_weighted_with_method() { + let edges = vec![(0, 1), (1, 2)]; + let result = map_weighted_with_method(3, &edges, PathDecompositionMethod::MinhThiTrick); + + assert!(result.grid_graph.num_vertices() > 0); +} + +#[test] +fn test_map_weighted_with_order() { + let edges = vec![(0, 1), (1, 2)]; + let vertex_order = vec![0, 1, 2]; + let result = map_weighted_with_order(3, &edges, &vertex_order); + + assert!(result.grid_graph.num_vertices() > 0); +} + +#[test] +fn test_trace_centers() { + let edges = vec![(0, 1), (1, 2)]; + let result = map_weighted(3, &edges); + + let centers = trace_centers(&result); + assert_eq!(centers.len(), 3); + + // Centers should be valid grid positions + for (row, col) in ¢ers { + assert!(*row > 0); + assert!(*col > 0); + } +} + +#[test] +fn test_map_weights() { + let edges = vec![(0, 1), (1, 2)]; + let result = map_weighted(3, &edges); + + let source_weights = vec![0.5, 0.3, 0.7]; + let grid_weights = map_weights(&result, &source_weights); + + // Should have same length as grid nodes + assert_eq!(grid_weights.len(), result.grid_graph.num_vertices()); + + // All weights should be positive + assert!(grid_weights.iter().all(|&w| w > 0.0)); +} + +#[test] +fn test_weighted_ruleset() { + let ruleset = weighted_ruleset(); + assert_eq!(ruleset.len(), 13); +} + +#[test] +#[should_panic(expected = "num_vertices must be > 0")] +fn test_map_weighted_panics_on_zero_vertices() { + let edges: Vec<(usize, usize)> = vec![]; + map_weighted(0, &edges); +} diff --git a/src/tests_unit/rules/unitdiskmapping/triangular/mod.rs b/src/tests_unit/rules/unitdiskmapping/triangular/mod.rs new file mode 100644 index 0000000..4937b7d --- /dev/null +++ b/src/tests_unit/rules/unitdiskmapping/triangular/mod.rs @@ -0,0 +1,121 @@ +use super::*; +use crate::topology::Graph; + +#[test] +fn test_triangular_cross_gadget() { + // Julia: Base.size(::TriCross{true}) = (6, 4) + let cross = TriCross::; + assert_eq!(cross.size(), (6, 4)); +} + +#[test] +fn test_map_graph_triangular() { + let edges = vec![(0, 1), (1, 2)]; + let result = map_graph_triangular(3, &edges); + + assert!(result.grid_graph.num_vertices() > 0); + assert!(matches!( 
+        result.grid_graph.grid_type(),
+        GridType::Triangular { .. }
+    ));
+}
+
+#[test]
+fn test_triangular_cross_connected_gadget() {
+    // Julia: TriCross{true} - size (6,4), cross (2,2), overhead 1
+    let cross = TriCross::<true>;
+    assert_eq!(TriangularGadget::size(&cross), (6, 4));
+    assert_eq!(TriangularGadget::cross_location(&cross), (2, 2));
+    assert!(TriangularGadget::is_connected(&cross));
+    assert_eq!(TriangularGadget::mis_overhead(&cross), 1);
+}
+
+#[test]
+fn test_triangular_cross_disconnected_gadget() {
+    // Julia: TriCross{false} - size (6,6), cross (2,4), overhead 3
+    let cross = TriCross::<false>;
+    assert_eq!(TriangularGadget::size(&cross), (6, 6));
+    assert_eq!(TriangularGadget::cross_location(&cross), (2, 4));
+    assert!(!TriangularGadget::is_connected(&cross));
+    assert_eq!(TriangularGadget::mis_overhead(&cross), 3);
+}
+
+#[test]
+fn test_triangular_turn_gadget() {
+    // Julia: TriTurn - size (3,4), cross (2,2), overhead 0
+    let turn = TriTurn;
+    assert_eq!(TriangularGadget::size(&turn), (3, 4));
+    assert_eq!(TriangularGadget::mis_overhead(&turn), 0);
+    let (_, _, pins) = TriangularGadget::source_graph(&turn);
+    assert_eq!(pins.len(), 2);
+}
+
+#[test]
+fn test_triangular_branch_gadget() {
+    // Julia: TriBranch - size (6,4), cross (2,2), overhead 0
+    let branch = TriBranch;
+    assert_eq!(TriangularGadget::size(&branch), (6, 4));
+    assert_eq!(TriangularGadget::mis_overhead(&branch), 0);
+    let (_, _, pins) = TriangularGadget::source_graph(&branch);
+    assert_eq!(pins.len(), 3);
+}
+
+#[test]
+fn test_map_graph_triangular_with_order() {
+    let edges = vec![(0, 1), (1, 2)];
+    let order = vec![2, 1, 0];
+    let result = map_graph_triangular_with_order(3, &edges, &order);
+
+    assert!(result.grid_graph.num_vertices() > 0);
+    assert_eq!(result.spacing, TRIANGULAR_SPACING);
+    assert_eq!(result.padding, TRIANGULAR_PADDING);
+}
+
+#[test]
+fn test_map_graph_triangular_single_vertex() {
+    let edges: Vec<(usize, usize)> = vec![];
+    let result = map_graph_triangular(1, &edges);
+
+    assert!(result.grid_graph.num_vertices() > 0);
+}
+
+#[test]
+#[should_panic(expected = "num_vertices must be > 0")]
+fn test_map_graph_triangular_zero_vertices_panics() {
+    let edges: Vec<(usize, usize)> = vec![];
+    map_graph_triangular(0, &edges);
+}
+
+#[test]
+fn test_triangular_gadgets_have_valid_pins() {
+    // Verify pin indices are within bounds for each gadget
+    fn check_gadget<G: TriangularGadget>(gadget: &G, name: &str) {
+        let (source_locs, _, source_pins) = gadget.source_graph();
+        let (mapped_locs, mapped_pins) = gadget.mapped_graph();
+
+        for &pin in &source_pins {
+            assert!(
+                pin < source_locs.len(),
+                "{}: Source pin {} out of bounds (len={})",
+                name,
+                pin,
+                source_locs.len()
+            );
+        }
+
+        for &pin in &mapped_pins {
+            assert!(
+                pin < mapped_locs.len(),
+                "{}: Mapped pin {} out of bounds (len={})",
+                name,
+                pin,
+                mapped_locs.len()
+            );
+        }
+    }
+
+    check_gadget(&TriCross::<true>, "TriCross");
+    check_gadget(&TriCross::<false>, "TriCross");
+    check_gadget(&TriTurn, "TriTurn");
+    check_gadget(&TriBranch, "TriBranch");
+}
diff --git a/src/tests_unit/rules/unitdiskmapping/weighted.rs b/src/tests_unit/rules/unitdiskmapping/weighted.rs
new file mode 100644
index 0000000..e8523f0
--- /dev/null
+++ b/src/tests_unit/rules/unitdiskmapping/weighted.rs
@@ -0,0 +1,131 @@
+use super::*;
+
+#[test]
+fn test_triturn_weighted() {
+    let weighted = TriTurn.weighted();
+    assert_eq!(weighted.source_weights, vec![2, 2, 2, 2]);
+    assert_eq!(weighted.mapped_weights, vec![2, 2, 2, 2]);
+}
+
+#[test]
+fn test_tribranch_weighted() {
+    let weighted = TriBranch.weighted();
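+    // The weighted() conversion assigns one weight per gadget node; the
+    // expected vectors below mirror the Julia reference values quoted in
+    // the comments.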
+    // Julia: sw = [2,2,3,2,2,2,2,2,2], mw = [2,2,2,3,2,2,2,2,2]
+    assert_eq!(weighted.source_weights, vec![2, 2, 3, 2, 2, 2, 2, 2, 2]);
+    assert_eq!(weighted.mapped_weights, vec![2, 2, 2, 3, 2, 2, 2, 2, 2]);
+}
+
+#[test]
+fn test_tricross_true_weighted() {
+    let weighted = TriCross::<true>.weighted();
+    // Julia: sw = [2,2,2,2,2,2,2,2,2,2], mw = [3,2,3,3,2,2,2,2,2,2,2]
+    assert_eq!(weighted.source_weights, vec![2, 2, 2, 2, 2, 2, 2, 2, 2, 2]);
+    assert_eq!(
+        weighted.mapped_weights,
+        vec![3, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2]
+    );
+}
+
+#[test]
+fn test_tricross_false_weighted() {
+    let weighted = TriCross::<false>.weighted();
+    // Julia: sw = [2,2,2,2,2,2,2,2,2,2,2,2], mw = [3,3,2,4,2,2,2,4,3,2,2,2,2,2,2,2]
+    assert_eq!(
+        weighted.source_weights,
+        vec![2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+    );
+    assert_eq!(
+        weighted.mapped_weights,
+        vec![3, 3, 2, 4, 2, 2, 2, 4, 3, 2, 2, 2, 2, 2, 2, 2]
+    );
+}
+
+#[test]
+fn test_all_weighted_gadgets_have_correct_lengths() {
+    use super::super::triangular::TriangularGadget;
+
+    fn check(g: G, name: &str) {
+        let weighted = g.clone().weighted();
+        let (src_locs, _, _) = g.source_graph();
+        let (map_locs, _) = g.mapped_graph();
+        assert_eq!(
+            weighted.source_weights.len(),
+            src_locs.len(),
+            "{}: source weights length mismatch",
+            name
+        );
+        assert_eq!(
+            weighted.mapped_weights.len(),
+            map_locs.len(),
+            "{}: mapped weights length mismatch",
+            name
+        );
+    }
+
+    check(TriTurn, "TriTurn");
+    check(TriBranch, "TriBranch");
+    check(TriCross::<true>, "TriCross");
+    check(TriCross::<false>, "TriCross");
+    check(TriTConLeft, "TriTConLeft");
+    check(TriTConDown, "TriTConDown");
+    check(TriTConUp, "TriTConUp");
+    check(TriTrivialTurnLeft, "TriTrivialTurnLeft");
+    check(TriTrivialTurnRight, "TriTrivialTurnRight");
+    check(TriEndTurn, "TriEndTurn");
+    check(TriWTurn, "TriWTurn");
+    check(TriBranchFix, "TriBranchFix");
+    check(TriBranchFixB, "TriBranchFixB");
+}
+
+#[test]
+fn test_triangular_weighted_ruleset_has_13_gadgets() {
+    let ruleset = super::triangular_weighted_ruleset();
+    assert_eq!(ruleset.len(), 13);
+}
+
+#[test]
+fn test_trace_centers_basic() {
+    use crate::rules::unitdiskmapping::map_graph_triangular;
+
+    let edges = vec![(0, 1), (1, 2)];
+    let result = map_graph_triangular(3, &edges);
+
+    let centers = super::trace_centers(&result);
+    assert_eq!(centers.len(), 3);
+
+    // Centers should be valid grid positions
+    for (row, col) in &centers {
+        assert!(*row > 0);
+        assert!(*col > 0);
+    }
+}
+
+#[test]
+fn test_map_weights_basic() {
+    use crate::rules::unitdiskmapping::map_graph_triangular;
+    use crate::topology::Graph;
+
+    let edges = vec![(0, 1), (1, 2)];
+    let result = map_graph_triangular(3, &edges);
+
+    let source_weights = vec![0.5, 0.3, 0.7];
+    let grid_weights = super::map_weights(&result, &source_weights);
+
+    // Should have same length as grid nodes
+    assert_eq!(grid_weights.len(), result.grid_graph.num_vertices());
+
+    // All weights should be positive
+    assert!(grid_weights.iter().all(|&w| w > 0.0));
+}
+
+#[test]
+#[should_panic(expected = "all weights must be in range")]
+fn test_map_weights_rejects_invalid() {
+    use crate::rules::unitdiskmapping::map_graph_triangular;
+
+    let edges = vec![(0, 1)];
+    let result = map_graph_triangular(2, &edges);
+
+    let source_weights = vec![1.5, 0.3]; // Invalid: > 1
+    super::map_weights(&result, &source_weights);
+}
diff --git a/src/tests_unit/rules/vertexcovering_ilp.rs b/src/tests_unit/rules/vertexcovering_ilp.rs
new file mode 100644
index 0000000..108cc04
--- /dev/null
+++ b/src/tests_unit/rules/vertexcovering_ilp.rs
@@ -0,0
+1,280 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver, Solver}; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Triangle graph: 3 vertices, 3 edges + let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check ILP structure + assert_eq!(ilp.num_vars, 3, "Should have one variable per vertex"); + assert_eq!( + ilp.constraints.len(), + 3, + "Should have one constraint per edge" + ); + assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } + + // Each constraint should be x_i + x_j >= 1 + for constraint in &ilp.constraints { + assert_eq!(constraint.terms.len(), 2); + assert!((constraint.rhs - 1.0).abs() < 1e-9); + } +} + +#[test] +fn test_reduction_weighted() { + let problem = VertexCovering::with_weights(3, vec![(0, 1)], vec![5, 10, 15]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // Check that weights are correctly transferred to objective + let mut coeffs: Vec = vec![0.0; 3]; + for &(var, coef) in &ilp.objective { + coeffs[var] = coef; + } + assert!((coeffs[0] - 5.0).abs() < 1e-9); + assert!((coeffs[1] - 10.0).abs() < 1e-9); + assert!((coeffs[2] - 15.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solution_equals_brute_force_triangle() { + // Triangle graph: min VC = 2 vertices + let problem = VertexCovering::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force on original problem + let bf_solutions = bf.find_best(&problem); + + // Solve via ILP reduction + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Both should find optimal size = 2 + let bf_size: usize = bf_solutions[0].iter().sum(); + let ilp_size: usize = extracted.iter().sum(); + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify the ILP solution is valid for the original problem + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid, "Extracted solution should be valid"); +} + +#[test] +fn test_ilp_solution_equals_brute_force_path() { + // Path graph 0-1-2-3: min VC = 2 (e.g., {1, 2} or {0, 2} or {1, 3}) + let problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + // Solve with brute force + let bf_solutions = bf.find_best(&problem); + let bf_size: usize = bf_solutions[0].iter().sum(); + + // Solve via ILP + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size: usize = extracted.iter().sum(); + + assert_eq!(bf_size, 2); + assert_eq!(ilp_size, 2); + + // Verify validity + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_ilp_solution_equals_brute_force_weighted() { + // Weighted problem: vertex 1 has low weight and covers both edges + // 0 -- 1 -- 2 + // Weights: [100, 1, 100] + // Min VC by weight: just vertex 1 (weight 
1) beats 0+2 (weight 200) + let problem = VertexCovering::with_weights(3, vec![(0, 1), (1, 2)], vec![100, 1, 100]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_obj = problem.solution_size(&bf_solutions[0]).size; + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_obj = problem.solution_size(&extracted).size; + + assert_eq!(bf_obj, 1); + assert_eq!(ilp_obj, 1); + + // Verify the solution selects vertex 1 + assert_eq!(extracted, vec![0, 1, 0]); +} + +#[test] +fn test_solution_extraction() { + let problem = VertexCovering::::new(4, vec![(0, 1), (2, 3)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + + // Test that extraction works correctly (1:1 mapping) + let ilp_solution = vec![1, 0, 0, 1]; + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 0, 0, 1]); + + // Verify this is a valid VC (covers edges 0-1 and 2-3) + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); +} + +#[test] +fn test_source_and_target_size() { + let problem = VertexCovering::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(4)); + + assert_eq!(target_size.get("num_vars"), Some(5)); + assert_eq!(target_size.get("num_constraints"), Some(4)); +} + +#[test] +fn test_empty_graph() { + // Graph with no edges: empty cover is valid + let problem = VertexCovering::::new(3, vec![]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.constraints.len(), 0); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // No vertices should be selected + assert_eq!(extracted, vec![0, 0, 0]); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 0); +} + +#[test] +fn test_complete_graph() { + // Complete graph K4: min VC = 3 (all but one vertex) + let problem = + VertexCovering::::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.constraints.len(), 6); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 3); +} + +#[test] +fn test_solve_reduced() { + // Test the ILPSolver::solve_reduced method + let problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver + .solve_reduced(&problem) + .expect("solve_reduced should work"); + + let sol_result = problem.solution_size(&solution); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); +} + +#[test] +fn test_bipartite_graph() { + // 
Bipartite graph: 0-2, 0-3, 1-2, 1-3 (complete bipartite K_{2,2}) + // Min VC = 2 (either side of the bipartition) + let problem = VertexCovering::::new(4, vec![(0, 2), (0, 3), (1, 2), (1, 3)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let sol_result = problem.solution_size(&extracted); + assert!(sol_result.is_valid); + assert_eq!(sol_result.size, 2); + + // Should select either {0, 1} or {2, 3} + let sum: usize = extracted.iter().sum(); + assert_eq!(sum, 2); +} + +#[test] +fn test_single_edge() { + // Single edge: min VC = 1 + let problem = VertexCovering::::new(2, vec![(0, 1)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_size: usize = bf_solutions[0].iter().sum(); + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size: usize = extracted.iter().sum(); + + assert_eq!(bf_size, 1); + assert_eq!(ilp_size, 1); +} + +#[test] +fn test_star_graph() { + // Star graph: center vertex 0 connected to all others + // Min VC = 1 (just the center) + let problem = VertexCovering::::new(5, vec![(0, 1), (0, 2), (0, 3), (0, 4)]); + let reduction: ReductionVCToILP = ReduceTo::::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&problem); + let bf_size: usize = bf_solutions[0].iter().sum(); + + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_size: usize = extracted.iter().sum(); + + assert_eq!(bf_size, 1); + assert_eq!(ilp_size, 1); + + // The optimal solution should select vertex 0 + assert_eq!(extracted[0], 1); +} diff --git a/src/tests_unit/rules/vertexcovering_independentset.rs b/src/tests_unit/rules/vertexcovering_independentset.rs new file mode 100644 index 0000000..8fafe8e --- /dev/null +++ b/src/tests_unit/rules/vertexcovering_independentset.rs @@ -0,0 +1,91 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_is_to_vc_reduction() { + // Triangle graph: max IS = 1, min VC = 2 + let is_problem = IndependentSet::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::>::reduce_to(&is_problem); + let vc_problem = reduction.target_problem(); + + // Solve the VC problem + let solver = BruteForce::new(); + let vc_solutions = solver.find_best(vc_problem); + + // Extract back to IS solutions + let is_solutions: Vec<_> = vc_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Verify IS solutions are valid and optimal + for sol in &is_solutions { + let size: usize = sol.iter().sum(); + assert_eq!(size, 1, "Max IS in triangle should be 1"); + } +} + +#[test] +fn test_vc_to_is_reduction() { + // Path graph 0-1-2: min VC = 1 (just vertex 1), max IS = 2 (vertices 0 and 2) + let vc_problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let is_problem = reduction.target_problem(); + + let solver = BruteForce::new(); + let is_solutions = 
solver.find_best(is_problem); + + let vc_solutions: Vec<_> = is_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Verify VC solutions + for sol in &vc_solutions { + let size: usize = sol.iter().sum(); + assert_eq!(size, 1, "Min VC in path should be 1"); + } +} + +#[test] +fn test_roundtrip_is_vc_is() { + let original = IndependentSet::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let solver = BruteForce::new(); + let original_solutions = solver.find_best(&original); + + // IS -> VC -> IS + let reduction1 = ReduceTo::>::reduce_to(&original); + let vc = reduction1.target_problem().clone(); + let reduction2 = ReduceTo::>::reduce_to(&vc); + let roundtrip = reduction2.target_problem(); + + let roundtrip_solutions = solver.find_best(roundtrip); + + // Solutions should have same objective value + let orig_size: usize = original_solutions[0].iter().sum(); + let rt_size: usize = roundtrip_solutions[0].iter().sum(); + assert_eq!(orig_size, rt_size); +} + +#[test] +fn test_weighted_reduction() { + // Test with weighted problems + let is_problem = IndependentSet::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 20, 30]); + let reduction = ReduceTo::>::reduce_to(&is_problem); + let vc_problem = reduction.target_problem(); + + // Weights should be preserved + assert_eq!(vc_problem.weights_ref(), &vec![10, 20, 30]); +} + +#[test] +fn test_source_and_target_size() { + let is_problem = IndependentSet::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction = ReduceTo::>::reduce_to(&is_problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(target_size.get("num_vertices"), Some(5)); +} diff --git a/src/tests_unit/rules/vertexcovering_setcovering.rs b/src/tests_unit/rules/vertexcovering_setcovering.rs new file mode 100644 index 0000000..c69a4c9 --- /dev/null +++ b/src/tests_unit/rules/vertexcovering_setcovering.rs @@ -0,0 +1,184 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::ConstraintSatisfactionProblem; + +#[test] +fn test_vc_to_sc_basic() { + // Path graph 0-1-2 with edges (0,1) and (1,2) + // Vertex 0 covers edge 0 + // Vertex 1 covers edges 0 and 1 + // Vertex 2 covers edge 1 + let vc_problem = VertexCovering::::new(3, vec![(0, 1), (1, 2)]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_problem = reduction.target_problem(); + + // Check the sets are constructed correctly + assert_eq!(sc_problem.universe_size(), 2); // 2 edges + assert_eq!(sc_problem.num_sets(), 3); // 3 vertices + + // Set 0 (vertex 0): should contain edge 0 + assert_eq!(sc_problem.get_set(0), Some(&vec![0])); + // Set 1 (vertex 1): should contain edges 0 and 1 + assert_eq!(sc_problem.get_set(1), Some(&vec![0, 1])); + // Set 2 (vertex 2): should contain edge 1 + assert_eq!(sc_problem.get_set(2), Some(&vec![1])); +} + +#[test] +fn test_vc_to_sc_triangle() { + // Triangle graph: 3 vertices, 3 edges + // Edge indices: (0,1)->0, (1,2)->1, (0,2)->2 + let vc_problem = VertexCovering::::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_problem = reduction.target_problem(); + + assert_eq!(sc_problem.universe_size(), 3); + assert_eq!(sc_problem.num_sets(), 3); + + // Verify each vertex covers exactly 2 edges + for i in 0..3 { + let set = sc_problem.get_set(i).unwrap(); + assert_eq!(set.len(), 2); + } +} + +#[test] +fn test_vc_to_sc_solution_extraction() { + let vc_problem = 
VertexCovering::::new(3, vec![(0, 1), (1, 2)]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_problem = reduction.target_problem(); + + // Solve the SetCovering problem + let solver = BruteForce::new(); + let sc_solutions = solver.find_best(sc_problem); + + // Extract solutions back to VertexCovering + let vc_solutions: Vec<_> = sc_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + + // Verify extracted solutions are valid vertex covers + for sol in &vc_solutions { + assert!(vc_problem.solution_size(sol).is_valid); + } + + // The minimum should be selecting just vertex 1 (covers both edges) + let min_size: usize = vc_solutions[0].iter().sum(); + assert_eq!(min_size, 1); +} + +#[test] +fn test_vc_to_sc_optimality_preservation() { + // Test that optimal solutions are preserved through reduction + let vc_problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let solver = BruteForce::new(); + + // Solve VC directly + let direct_solutions = solver.find_best(&vc_problem); + let direct_size = direct_solutions[0].iter().sum::(); + + // Solve via reduction + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_solutions = solver.find_best(reduction.target_problem()); + let reduced_solutions: Vec<_> = sc_solutions + .iter() + .map(|s| reduction.extract_solution(s)) + .collect(); + let reduced_size = reduced_solutions[0].iter().sum::(); + + // Optimal sizes should match + assert_eq!(direct_size, reduced_size); +} + +#[test] +fn test_vc_to_sc_weighted() { + // Weighted problem: weights should be preserved + let vc_problem = VertexCovering::with_weights(3, vec![(0, 1), (1, 2)], vec![10, 1, 10]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_problem = reduction.target_problem(); + + // Weights should be preserved + assert_eq!(sc_problem.weights(), vec![10, 1, 10]); + + // Solve both ways + let solver = BruteForce::new(); + let vc_solutions = solver.find_best(&vc_problem); + let sc_solutions = solver.find_best(sc_problem); + + // Both should select vertex 1 (weight 1) + assert_eq!(vc_solutions[0], vec![0, 1, 0]); + assert_eq!(sc_solutions[0], vec![0, 1, 0]); +} + +#[test] +fn test_vc_to_sc_empty_graph() { + // Graph with no edges + let vc_problem = VertexCovering::::new(3, vec![]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_problem = reduction.target_problem(); + + assert_eq!(sc_problem.universe_size(), 0); + assert_eq!(sc_problem.num_sets(), 3); + + // All sets should be empty + for i in 0..3 { + assert!(sc_problem.get_set(i).unwrap().is_empty()); + } +} + +#[test] +fn test_vc_to_sc_source_target_size() { + let vc_problem = VertexCovering::::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + + let source_size = reduction.source_size(); + let target_size = reduction.target_size(); + + assert_eq!(source_size.get("num_vertices"), Some(5)); + assert_eq!(source_size.get("num_edges"), Some(4)); + assert_eq!(target_size.get("universe_size"), Some(4)); // edges become universe + assert_eq!(target_size.get("num_sets"), Some(5)); // vertices become sets +} + +#[test] +fn test_vc_to_sc_star_graph() { + // Star graph: center vertex 0 connected to all others + // Edges: (0,1), (0,2), (0,3) + let vc_problem = VertexCovering::::new(4, vec![(0, 1), (0, 2), (0, 3)]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_problem = reduction.target_problem(); + + // Vertex 0 should cover all 3 edges + assert_eq!(sc_problem.get_set(0), Some(&vec![0, 1, 2])); 
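+    // Illustrative extra check (an assumption: universe elements are
+    // numbered in the order the edges were given): the star's three edges
+    // make up the whole universe, so vertex 0's set alone covers it.
+    assert_eq!(sc_problem.universe_size(), 3);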
+ // Other vertices cover only 1 edge each + assert_eq!(sc_problem.get_set(1), Some(&vec![0])); + assert_eq!(sc_problem.get_set(2), Some(&vec![1])); + assert_eq!(sc_problem.get_set(3), Some(&vec![2])); + + // Minimum cover should be just vertex 0 + let solver = BruteForce::new(); + let solutions = solver.find_best(&vc_problem); + assert_eq!(solutions[0], vec![1, 0, 0, 0]); +} + +#[test] +fn test_vc_to_sc_all_solutions_valid() { + // Ensure all solutions extracted from SC are valid VC solutions + let vc_problem = VertexCovering::::new(4, vec![(0, 1), (1, 2), (0, 2), (2, 3)]); + let reduction = ReduceTo::>::reduce_to(&vc_problem); + let sc_problem = reduction.target_problem(); + + let solver = BruteForce::new(); + let sc_solutions = solver.find_best(sc_problem); + + for sc_sol in &sc_solutions { + let vc_sol = reduction.extract_solution(sc_sol); + let sol_size = vc_problem.solution_size(&vc_sol); + assert!( + sol_size.is_valid, + "Extracted solution {:?} should be valid", + vc_sol + ); + } +} diff --git a/src/tests_unit/solvers/brute_force.rs b/src/tests_unit/solvers/brute_force.rs new file mode 100644 index 0000000..ab5b5a1 --- /dev/null +++ b/src/tests_unit/solvers/brute_force.rs @@ -0,0 +1,367 @@ +use super::*; +use crate::types::{EnergyMode, ProblemSize}; + +// Simple maximization problem: maximize sum of selected weights +#[derive(Clone)] +struct MaxSumProblem { + weights: Vec, +} + +impl Problem for MaxSumProblem { + const NAME: &'static str = "MaxSumProblem"; + + fn variant() -> Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "i32")] + } + + type Size = i32; + + fn num_variables(&self) -> usize { + self.weights.len() + } + + fn num_flavors(&self) -> usize { + 2 + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![("variables", self.weights.len())]) + } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::LargerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize { + let sum: i32 = config + .iter() + .zip(&self.weights) + .map(|(&c, &w)| if c == 1 { w } else { 0 }) + .sum(); + SolutionSize::valid(sum) + } +} + +// Simple minimization problem: minimize sum of selected weights +#[derive(Clone)] +struct MinSumProblem { + weights: Vec, +} + +impl Problem for MinSumProblem { + const NAME: &'static str = "MinSumProblem"; + + fn variant() -> Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "i32")] + } + + type Size = i32; + + fn num_variables(&self) -> usize { + self.weights.len() + } + + fn num_flavors(&self) -> usize { + 2 + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![("variables", self.weights.len())]) + } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::SmallerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize { + let sum: i32 = config + .iter() + .zip(&self.weights) + .map(|(&c, &w)| if c == 1 { w } else { 0 }) + .sum(); + SolutionSize::valid(sum) + } +} + +// Problem with validity constraint: select at most one +#[derive(Clone)] +struct SelectAtMostOneProblem { + weights: Vec, +} + +impl Problem for SelectAtMostOneProblem { + const NAME: &'static str = "SelectAtMostOneProblem"; + + fn variant() -> Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "i32")] + } + + type Size = i32; + + fn num_variables(&self) -> usize { + self.weights.len() + } + + fn num_flavors(&self) -> usize { + 2 + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![("variables", self.weights.len())]) 
+ } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::LargerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize { + let selected: usize = config.iter().sum(); + let sum: i32 = config + .iter() + .zip(&self.weights) + .map(|(&c, &w)| if c == 1 { w } else { 0 }) + .sum(); + SolutionSize::new(sum, selected <= 1) + } +} + +#[test] +fn test_variant_for_test_problems() { + // Test that variant() works for all test problems + let v = MaxSumProblem::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0], ("graph", "SimpleGraph")); + assert_eq!(v[1], ("weight", "i32")); + + let v = MinSumProblem::variant(); + assert_eq!(v.len(), 2); + + let v = SelectAtMostOneProblem::variant(); + assert_eq!(v.len(), 2); + + let v = FloatProblem::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[1], ("weight", "f64")); +} + +#[test] +fn test_brute_force_maximization() { + let problem = MaxSumProblem { + weights: vec![1, 2, 3], + }; + let solver = BruteForce::new(); + + let best = solver.find_best(&problem); + assert_eq!(best.len(), 1); + assert_eq!(best[0], vec![1, 1, 1]); // Select all for max sum = 6 +} + +#[test] +fn test_brute_force_minimization() { + let problem = MinSumProblem { + weights: vec![1, 2, 3], + }; + let solver = BruteForce::new(); + + let best = solver.find_best(&problem); + assert_eq!(best.len(), 1); + assert_eq!(best[0], vec![0, 0, 0]); // Select none for min sum = 0 +} + +#[test] +fn test_brute_force_with_validity() { + let problem = SelectAtMostOneProblem { + weights: vec![1, 5, 3], + }; + let solver = BruteForce::new(); + + let best = solver.find_best(&problem); + assert_eq!(best.len(), 1); + assert_eq!(best[0], vec![0, 1, 0]); // Select weight 5 (max single) +} + +#[test] +fn test_brute_force_multiple_optimal() { + let problem = MaxSumProblem { + weights: vec![1, 1, 1], + }; + let solver = BruteForce::new(); + + let best = solver.find_best(&problem); + assert_eq!(best.len(), 1); + assert_eq!(best[0], vec![1, 1, 1]); // All equal, so only one optimal + + // Problem with multiple optimal solutions + let problem2 = SelectAtMostOneProblem { + weights: vec![5, 5, 3], + }; + let best2 = solver.find_best(&problem2); + assert_eq!(best2.len(), 2); // Both [1,0,0] and [0,1,0] give weight 5 +} + +#[test] +fn test_brute_force_with_size() { + let problem = MaxSumProblem { + weights: vec![1, 2, 3], + }; + let solver = BruteForce::new(); + + let best = solver.find_best_with_size(&problem); + assert_eq!(best.len(), 1); + assert_eq!(best[0].0, vec![1, 1, 1]); + assert_eq!(best[0].1.size, 6); + assert!(best[0].1.is_valid); +} + +#[test] +fn test_brute_force_empty_problem() { + let problem = MaxSumProblem { weights: vec![] }; + let solver = BruteForce::new(); + + let best = solver.find_best(&problem); + assert!(best.is_empty()); +} + +#[test] +fn test_brute_force_valid_only_false() { + let problem = SelectAtMostOneProblem { + weights: vec![1, 2, 3], + }; + let solver = BruteForce::new().valid_only(false); + + let best = solver.find_best(&problem); + // With valid_only=false, the best is selecting all (sum=6) even though invalid + assert_eq!(best.len(), 1); + assert_eq!(best[0], vec![1, 1, 1]); +} + +#[test] +fn test_brute_force_with_tolerance() { + let solver = BruteForce::with_tolerance(0.01, 0.01); + assert_eq!(solver.atol, 0.01); + assert_eq!(solver.rtol, 0.01); +} + +// Float problem for testing BruteForceFloat +#[derive(Clone)] +struct FloatProblem { + weights: Vec, +} + +impl Problem for FloatProblem { + const NAME: &'static str = "FloatProblem"; + + fn variant() -> 
Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "f64")] + } + + type Size = f64; + + fn num_variables(&self) -> usize { + self.weights.len() + } + + fn num_flavors(&self) -> usize { + 2 + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![("variables", self.weights.len())]) + } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::LargerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize { + let sum: f64 = config + .iter() + .zip(&self.weights) + .map(|(&c, &w)| if c == 1 { w } else { 0.0 }) + .sum(); + SolutionSize::valid(sum) + } +} + +#[test] +fn test_brute_force_float() { + use super::BruteForceFloat; + + let problem = FloatProblem { + weights: vec![1.0, 2.0, 3.0], + }; + let solver = BruteForce::new(); + + let best = solver.find_best_float(&problem); + assert_eq!(best.len(), 1); + assert_eq!(best[0].0, vec![1, 1, 1]); + assert!((best[0].1.size - 6.0).abs() < 1e-10); +} + +#[test] +fn test_brute_force_float_tolerance() { + use super::BruteForceFloat; + + // Problem where multiple solutions have nearly equal values + #[derive(Clone)] + struct NearlyEqualProblem; + + impl Problem for NearlyEqualProblem { + const NAME: &'static str = "NearlyEqualProblem"; + + fn variant() -> Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "f64")] + } + + type Size = f64; + + fn num_variables(&self) -> usize { + 2 + } + + fn num_flavors(&self) -> usize { + 2 + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![("variables", 2)]) + } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::LargerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize { + let size = match (config.first(), config.get(1)) { + (Some(1), Some(0)) => 10.0, + (Some(0), Some(1)) => 10.0 + 1e-12, // Nearly equal + _ => 0.0, + }; + SolutionSize::valid(size) + } + } + + let problem = NearlyEqualProblem; + let solver = BruteForce::with_tolerance(1e-10, 1e-10); + + let best = solver.find_best_float(&problem); + // Both should be considered optimal due to tolerance + assert_eq!(best.len(), 2); + + // Test variant for NearlyEqualProblem + let v = NearlyEqualProblem::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0], ("graph", "SimpleGraph")); + assert_eq!(v[1], ("weight", "f64")); +} + +#[test] +fn test_brute_force_float_empty() { + use super::BruteForceFloat; + + let problem = FloatProblem { weights: vec![] }; + let solver = BruteForce::new(); + + let best = solver.find_best_float(&problem); + assert!(best.is_empty()); +} diff --git a/src/tests_unit/solvers/ilp/solver.rs b/src/tests_unit/solvers/ilp/solver.rs new file mode 100644 index 0000000..ea705b7 --- /dev/null +++ b/src/tests_unit/solvers/ilp/solver.rs @@ -0,0 +1,246 @@ +use super::*; +use crate::models::optimization::{LinearConstraint, VarBounds}; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; + +#[test] +fn test_ilp_solver_basic_maximize() { + // Maximize x0 + 2*x1 subject to x0 + x1 <= 1, binary vars + let ilp = ILP::binary( + 2, + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 2.0)], + ObjectiveSense::Maximize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp); + + assert!(solution.is_some()); + let sol = solution.unwrap(); + + // Solution should be valid + let result = ilp.solution_size(&sol); + assert!(result.is_valid, "ILP solution should be valid"); + + // Optimal: x1=1, x0=0 => objective = 2 + assert!((result.size - 2.0).abs() < 1e-9); 
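+
+    // Illustrative extra check (assumes the solver returns 0/1 values in
+    // variable order, as the other tests in this file do): the optimum of
+    // this instance is unique, x0 = 0 and x1 = 1.
+    assert_eq!(sol, vec![0, 1]);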
+} + +#[test] +fn test_ilp_solver_basic_minimize() { + // Minimize x0 + x1 subject to x0 + x1 >= 1, binary vars + let ilp = ILP::binary( + 2, + vec![LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Minimize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp); + + assert!(solution.is_some()); + let sol = solution.unwrap(); + + // Solution should be valid + let result = ilp.solution_size(&sol); + assert!(result.is_valid, "ILP solution should be valid"); + + // Optimal: one variable = 1, other = 0 => objective = 1 + assert!((result.size - 1.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_solver_matches_brute_force() { + // Maximize x0 + x1 + x2 subject to: + // x0 + x1 <= 1 + // x1 + x2 <= 1 + let ilp = ILP::binary( + 3, + vec![ + LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0), + LinearConstraint::le(vec![(1, 1.0), (2, 1.0)], 1.0), + ], + vec![(0, 1.0), (1, 1.0), (2, 1.0)], + ObjectiveSense::Maximize, + ); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solutions = bf.find_best(&ilp); + let ilp_solution = ilp_solver.solve(&ilp).unwrap(); + + // Both should find optimal value (2) + let bf_size = ilp.solution_size(&bf_solutions[0]).size; + let ilp_size = ilp.solution_size(&ilp_solution).size; + assert!( + (bf_size - ilp_size).abs() < 1e-9, + "ILP should find optimal solution" + ); +} + +#[test] +fn test_ilp_empty_problem() { + let ilp = ILP::empty(); + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp); + assert_eq!(solution, Some(vec![])); +} + +#[test] +fn test_ilp_equality_constraint() { + // Minimize x0 subject to x0 + x1 == 1, binary vars + let ilp = ILP::binary( + 2, + vec![LinearConstraint::eq(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0)], + ObjectiveSense::Minimize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp).unwrap(); + + let result = ilp.solution_size(&solution); + assert!(result.is_valid); + // Optimal: x0=0, x1=1 => objective = 0 + assert!((result.size - 0.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_non_binary_bounds() { + // Variables with larger ranges + // x0 in [0, 3], x1 in [0, 2] + // Maximize x0 + x1 subject to x0 + x1 <= 4 + let ilp = ILP::new( + 2, + vec![VarBounds::bounded(0, 3), VarBounds::bounded(0, 2)], + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 4.0)], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp).unwrap(); + + let result = ilp.solution_size(&solution); + assert!(result.is_valid); + // Optimal: x0=3, x1=2 => objective = 5 (3 + 2 = 5 <= 4 is false!) + // Wait, 3+2=5 > 4, so constraint is violated. 
Let's check actual optimal: + // x0=2, x1=2 => 4 <= 4 valid, obj=4 + // x0=3, x1=1 => 4 <= 4 valid, obj=4 + assert!((result.size - 4.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_negative_lower_bounds() { + // Variables with negative lower bounds + // x0 in [-2, 2], x1 in [-1, 1] + // Maximize x0 + x1 (no constraints) + let ilp = ILP::new( + 2, + vec![VarBounds::bounded(-2, 2), VarBounds::bounded(-1, 1)], + vec![], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp).unwrap(); + + let result = ilp.solution_size(&solution); + assert!(result.is_valid); + // Optimal: x0=2, x1=1 => objective = 3 + assert!((result.size - 3.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_config_to_values_roundtrip() { + // Ensure the config encoding/decoding works correctly + let ilp = ILP::new( + 2, + vec![VarBounds::bounded(-2, 2), VarBounds::bounded(1, 3)], + vec![], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp).unwrap(); + + // The solution should be valid + let result = ilp.solution_size(&solution); + assert!(result.is_valid); + // Optimal: x0=2, x1=3 => objective = 5 + assert!((result.size - 5.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_multiple_constraints() { + // Maximize 2*x0 + 3*x1 + x2 subject to: + // x0 + x1 + x2 <= 2 + // x0 + x1 >= 1 + // Binary vars + let ilp = ILP::binary( + 3, + vec![ + LinearConstraint::le(vec![(0, 1.0), (1, 1.0), (2, 1.0)], 2.0), + LinearConstraint::ge(vec![(0, 1.0), (1, 1.0)], 1.0), + ], + vec![(0, 2.0), (1, 3.0), (2, 1.0)], + ObjectiveSense::Maximize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp).unwrap(); + + let result = ilp.solution_size(&solution); + assert!(result.is_valid); + + // Check against brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_best(&ilp); + let bf_size = ilp.solution_size(&bf_solutions[0]).size; + + assert!( + (bf_size - result.size).abs() < 1e-9, + "ILP should match brute force" + ); +} + +#[test] +fn test_ilp_unconstrained() { + // Maximize x0 + x1, no constraints, binary vars + let ilp = ILP::binary( + 2, + vec![], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp).unwrap(); + + let result = ilp.solution_size(&solution); + assert!(result.is_valid); + // Optimal: both = 1 + assert!((result.size - 2.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_with_time_limit() { + let solver = ILPSolver::with_time_limit(10.0); + assert_eq!(solver.time_limit, Some(10.0)); + + // Should still work for simple problems + let ilp = ILP::binary( + 2, + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 1.0)], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + let solution = solver.solve(&ilp); + assert!(solution.is_some()); +} diff --git a/src/tests_unit/testing/macros.rs b/src/tests_unit/testing/macros.rs new file mode 100644 index 0000000..d4cd089 --- /dev/null +++ b/src/tests_unit/testing/macros.rs @@ -0,0 +1,33 @@ +use crate::prelude::*; +use crate::topology::SimpleGraph; + +// Test the quick_problem_test macro +#[test] +fn test_quick_problem_test_macro() { + quick_problem_test!( + IndependentSet, + new(3, vec![(0, 1), (1, 2)]), + solution: [1, 0, 1], + expected_size: 2, + is_valid: true + ); + + quick_problem_test!( + IndependentSet, + new(3, vec![(0, 1), (1, 2)]), + solution: [1, 1, 0], + expected_size: 2, + is_valid: false + ); +} + +// Test the complement_test 
macro +complement_test! { + name: test_is_vc_complement, + problem_a: IndependentSet, + problem_b: VertexCovering, + test_graphs: [ + (3, [(0, 1), (1, 2)]), + (4, [(0, 1), (1, 2), (2, 3), (0, 3)]), + ] +} diff --git a/src/tests_unit/testing/mod.rs b/src/tests_unit/testing/mod.rs new file mode 100644 index 0000000..336311b --- /dev/null +++ b/src/tests_unit/testing/mod.rs @@ -0,0 +1,37 @@ +use super::*; + +#[test] +fn test_graph_test_case() { + let case = GraphTestCase::new(3, vec![(0, 1), (1, 2)], vec![1, 0, 1], 2); + assert_eq!(case.num_vertices, 3); + assert_eq!(case.edges.len(), 2); + assert!(case.weights.is_none()); + assert!(case.optimal_size.is_none()); +} + +#[test] +fn test_graph_test_case_with_weights() { + let case = GraphTestCase::with_weights(3, vec![(0, 1)], vec![1, 2, 3], vec![0, 0, 1], 3); + assert!(case.weights.is_some()); + assert_eq!(case.weights.as_ref().unwrap(), &vec![1, 2, 3]); +} + +#[test] +fn test_graph_test_case_with_optimal() { + let case = GraphTestCase::new(3, vec![(0, 1)], vec![0, 0, 1], 1).with_optimal(2); + assert_eq!(case.optimal_size, Some(2)); +} + +#[test] +fn test_sat_test_case_satisfiable() { + let case = SatTestCase::satisfiable(2, vec![vec![1, 2], vec![-1]], vec![0, 1]); + assert!(case.is_satisfiable); + assert!(case.satisfying_assignment.is_some()); +} + +#[test] +fn test_sat_test_case_unsatisfiable() { + let case = SatTestCase::unsatisfiable(1, vec![vec![1], vec![-1]]); + assert!(!case.is_satisfiable); + assert!(case.satisfying_assignment.is_none()); +} diff --git a/src/tests_unit/topology/graph.rs b/src/tests_unit/topology/graph.rs new file mode 100644 index 0000000..a4e01d9 --- /dev/null +++ b/src/tests_unit/topology/graph.rs @@ -0,0 +1,135 @@ +use super::*; + +#[test] +fn test_simple_graph_new() { + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + assert_eq!(graph.num_vertices(), 4); + assert_eq!(graph.num_edges(), 3); +} + +#[test] +fn test_simple_graph_empty() { + let graph = SimpleGraph::empty(5); + assert_eq!(graph.num_vertices(), 5); + assert_eq!(graph.num_edges(), 0); +} + +#[test] +fn test_simple_graph_complete() { + let graph = SimpleGraph::complete(4); + assert_eq!(graph.num_vertices(), 4); + assert_eq!(graph.num_edges(), 6); // C(4,2) = 6 +} + +#[test] +fn test_simple_graph_path() { + let graph = SimpleGraph::path(5); + assert_eq!(graph.num_vertices(), 5); + assert_eq!(graph.num_edges(), 4); + assert!(graph.has_edge(0, 1)); + assert!(graph.has_edge(3, 4)); + assert!(!graph.has_edge(0, 4)); +} + +#[test] +fn test_simple_graph_cycle() { + let graph = SimpleGraph::cycle(4); + assert_eq!(graph.num_vertices(), 4); + assert_eq!(graph.num_edges(), 4); + assert!(graph.has_edge(0, 1)); + assert!(graph.has_edge(3, 0)); // Cycle edge +} + +#[test] +fn test_simple_graph_star() { + let graph = SimpleGraph::star(5); + assert_eq!(graph.num_vertices(), 5); + assert_eq!(graph.num_edges(), 4); + assert!(graph.has_edge(0, 1)); + assert!(graph.has_edge(0, 4)); + assert!(!graph.has_edge(1, 2)); +} + +#[test] +fn test_simple_graph_grid() { + let graph = SimpleGraph::grid(2, 3); + assert_eq!(graph.num_vertices(), 6); + // 2 rows: 2 horizontal edges per row = 4 + // 3 cols: 1 vertical edge per col = 3 + assert_eq!(graph.num_edges(), 7); +} + +#[test] +fn test_simple_graph_has_edge() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + assert!(graph.has_edge(0, 1)); + assert!(graph.has_edge(1, 0)); // Undirected + assert!(graph.has_edge(1, 2)); + assert!(!graph.has_edge(0, 2)); +} + +#[test] +fn test_simple_graph_neighbors() { + let 
graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); + let mut neighbors = graph.neighbors(0); + neighbors.sort(); + assert_eq!(neighbors, vec![1, 2, 3]); + assert_eq!(graph.neighbors(1), vec![0]); +} + +#[test] +fn test_simple_graph_degree() { + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); + assert_eq!(graph.degree(0), 3); + assert_eq!(graph.degree(1), 1); +} + +#[test] +fn test_simple_graph_is_empty() { + let empty = SimpleGraph::empty(0); + assert!(empty.is_empty()); + + let non_empty = SimpleGraph::empty(1); + assert!(!non_empty.is_empty()); +} + +#[test] +fn test_simple_graph_for_each_edge() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let mut count = 0; + graph.for_each_edge(|_, _| count += 1); + assert_eq!(count, 2); +} + +#[test] +fn test_simple_graph_eq() { + let g1 = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let g2 = SimpleGraph::new(3, vec![(1, 2), (0, 1)]); // Different order + let g3 = SimpleGraph::new(3, vec![(0, 1)]); + + assert_eq!(g1, g2); + assert_ne!(g1, g3); +} + +#[test] +#[should_panic(expected = "edge (0, 5) references vertex >= num_vertices")] +fn test_simple_graph_invalid_edge() { + SimpleGraph::new(3, vec![(0, 5)]); +} + +#[test] +fn test_simple_graph_cycle_small() { + // Test cycle with fewer than 3 vertices (should fall back to path) + let graph = SimpleGraph::cycle(2); + assert_eq!(graph.num_vertices(), 2); + assert_eq!(graph.num_edges(), 1); // Path: 0-1 + assert!(graph.has_edge(0, 1)); +} + +#[test] +fn test_simple_graph_eq_different_sizes() { + // Test PartialEq when graphs have different sizes + let g1 = SimpleGraph::new(3, vec![(0, 1)]); + let g2 = SimpleGraph::new(4, vec![(0, 1)]); // Different vertex count + assert_ne!(g1, g2); +} diff --git a/src/tests_unit/topology/grid_graph.rs b/src/tests_unit/topology/grid_graph.rs new file mode 100644 index 0000000..b6383d4 --- /dev/null +++ b/src/tests_unit/topology/grid_graph.rs @@ -0,0 +1,224 @@ +use super::*; + +#[test] +fn test_grid_graph_square_basic() { + let nodes = vec![ + GridNode::new(0, 0, 1), + GridNode::new(1, 0, 1), + GridNode::new(0, 1, 1), + ]; + // With radius 1.1: (0,0)-(1,0) dist=1.0 < 1.1, (0,0)-(0,1) dist=1.0 < 1.1, (1,0)-(0,1) dist=sqrt(2)>1.1 + // Using dist < radius (strict), so edges at exactly 1.0 are included with radius 1.1 + let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 1.1); + assert_eq!(grid.num_vertices(), 3); + // Only nodes at (0,0)-(1,0) and (0,0)-(0,1) are within radius 1.1 + assert_eq!(grid.edges().len(), 2); +} + +#[test] +fn test_grid_graph_triangular_basic() { + let nodes = vec![ + GridNode::new(0, 0, 1), + GridNode::new(1, 0, 1), + GridNode::new(0, 1, 1), + ]; + let grid = GridGraph::new( + GridType::Triangular { + offset_even_cols: false, + }, + (2, 2), + nodes, + 1.1, + ); + assert_eq!(grid.num_vertices(), 3); +} + +#[test] +fn test_grid_node_new() { + let node: GridNode = GridNode::new(5, 10, 42); + assert_eq!(node.row, 5); + assert_eq!(node.col, 10); + assert_eq!(node.weight, 42); +} + +#[test] +fn test_grid_graph_square_physical_position() { + let nodes = vec![GridNode::new(3, 4, 1)]; + let grid = GridGraph::new(GridType::Square, (10, 10), nodes, 1.0); + let pos = grid.physical_position(3, 4); + assert_eq!(pos, (3.0, 4.0)); +} + +#[test] +fn test_grid_graph_triangular_physical_position() { + let nodes = vec![GridNode::new(0, 0, 1)]; + let grid = GridGraph::new( + GridType::Triangular { + offset_even_cols: false, + }, + (10, 10), + nodes, + 1.0, + ); + + // Col 0 (even), offset_even_cols = false -> no offset + let 
pos0 = grid.physical_position(0, 0); + assert!((pos0.0 - 0.0).abs() < 1e-10); + assert!((pos0.1 - 0.0).abs() < 1e-10); + + // Col 1 (odd), offset_even_cols = false -> offset 0.5 + let pos1 = grid.physical_position(0, 1); + assert!((pos1.0 - 0.5).abs() < 1e-10); + assert!((pos1.1 - (3.0_f64.sqrt() / 2.0)).abs() < 1e-10); +} + +#[test] +fn test_grid_graph_triangular_offset_even() { + let nodes = vec![GridNode::new(0, 0, 1)]; + let grid = GridGraph::new( + GridType::Triangular { + offset_even_cols: true, + }, + (10, 10), + nodes, + 1.0, + ); + + // Col 0 (even), offset_even_cols = true -> offset 0.5 + let pos0 = grid.physical_position(0, 0); + assert!((pos0.0 - 0.5).abs() < 1e-10); + + // Col 1 (odd), offset_even_cols = true -> no offset + let pos1 = grid.physical_position(0, 1); + assert!((pos1.0 - 0.0).abs() < 1e-10); +} + +#[test] +fn test_grid_graph_edges_within_radius() { + // Square grid: place nodes at (0,0), (1,0), (2,0) + // Distance (0,0)-(1,0) = 1.0 + // Distance (0,0)-(2,0) = 2.0 + // Distance (1,0)-(2,0) = 1.0 + let nodes = vec![ + GridNode::new(0, 0, 1), + GridNode::new(1, 0, 1), + GridNode::new(2, 0, 1), + ]; + // Use radius 1.1 since edges are created for dist < radius (strict) + // With radius 1.0, no edges at exact distance 1.0 + // With radius 1.1, edges at distance 1.0 are included + let grid = GridGraph::new(GridType::Square, (3, 1), nodes, 1.1); + + // Only edges within radius 1.1: (0,1) and (1,2) with dist=1.0 + assert_eq!(grid.num_edges(), 2); + assert!(grid.has_edge(0, 1)); + assert!(grid.has_edge(1, 2)); + assert!(!grid.has_edge(0, 2)); // dist=2.0 >= 1.1 +} + +#[test] +fn test_grid_graph_neighbors() { + let nodes = vec![ + GridNode::new(0, 0, 1), + GridNode::new(1, 0, 1), + GridNode::new(0, 1, 1), + ]; + let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 1.5); + + let neighbors_0 = grid.neighbors(0); + assert_eq!(neighbors_0.len(), 2); + assert!(neighbors_0.contains(&1)); + assert!(neighbors_0.contains(&2)); +} + +#[test] +fn test_grid_graph_accessors() { + let nodes = vec![GridNode::new(0, 0, 10), GridNode::new(1, 0, 20)]; + let grid = GridGraph::new(GridType::Square, (5, 5), nodes, 2.0); + + assert_eq!(grid.grid_type(), GridType::Square); + assert_eq!(grid.size(), (5, 5)); + assert_eq!(grid.radius(), 2.0); + assert_eq!(grid.nodes().len(), 2); + assert_eq!(grid.node(0).map(|n| n.weight), Some(10)); + assert_eq!(grid.weight(1), Some(&20)); + assert_eq!(grid.weight(5), None); +} + +#[test] +fn test_grid_graph_node_position() { + let nodes = vec![GridNode::new(2, 3, 1)]; + let grid = GridGraph::new(GridType::Square, (10, 10), nodes, 1.0); + + let pos = grid.node_position(0); + assert_eq!(pos, Some((2.0, 3.0))); + assert_eq!(grid.node_position(1), None); +} + +#[test] +fn test_grid_graph_has_edge_symmetric() { + let nodes = vec![GridNode::new(0, 0, 1), GridNode::new(1, 0, 1)]; + let grid = GridGraph::new(GridType::Square, (2, 1), nodes, 1.5); + + assert!(grid.has_edge(0, 1)); + assert!(grid.has_edge(1, 0)); // Symmetric +} + +#[test] +fn test_grid_graph_empty() { + let nodes: Vec> = vec![]; + let grid = GridGraph::new(GridType::Square, (0, 0), nodes, 1.0); + + assert_eq!(grid.num_vertices(), 0); + assert_eq!(grid.num_edges(), 0); + assert!(grid.is_empty()); +} + +#[test] +fn test_grid_graph_graph_trait() { + let nodes = vec![ + GridNode::new(0, 0, 1), + GridNode::new(1, 0, 1), + GridNode::new(0, 1, 1), + ]; + // With radius 1.1: 2 edges at dist=1.0 (not including diagonal at sqrt(2)>1.1) + // Using dist < radius (strict), so edges at exactly 1.0 are included 
with radius 1.1 + let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 1.1); + + // Test Graph trait methods + assert_eq!(Graph::num_vertices(&grid), 3); + assert_eq!(Graph::num_edges(&grid), 2); + assert_eq!(grid.degree(0), 2); + assert_eq!(grid.degree(1), 1); + assert_eq!(grid.degree(2), 1); +} + +#[test] +fn test_grid_graph_display() { + let nodes = vec![GridNode::new(0, 0, 1), GridNode::new(1, 0, 2)]; + let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 2.0); + + // Test Display trait + let display_str = format!("{}", grid); + assert!(!display_str.is_empty()); +} + +#[test] +fn test_grid_graph_format_empty() { + let nodes: Vec> = vec![]; + let grid = GridGraph::new(GridType::Square, (0, 0), nodes, 1.0); + + // Empty grid should return "(empty grid graph)" + let formatted = grid.format_with_config(None, false); + assert_eq!(formatted, "(empty grid graph)"); +} + +#[test] +fn test_grid_graph_format_with_config() { + let nodes = vec![GridNode::new(0, 0, 1), GridNode::new(1, 0, 1)]; + let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 2.0); + + // Test format with config + let formatted = grid.format_with_config(Some(&[1, 0]), false); + assert!(!formatted.is_empty()); +} diff --git a/src/tests_unit/topology/hypergraph.rs b/src/tests_unit/topology/hypergraph.rs new file mode 100644 index 0000000..69e70d5 --- /dev/null +++ b/src/tests_unit/topology/hypergraph.rs @@ -0,0 +1,109 @@ +use super::*; + +#[test] +fn test_hypergraph_basic() { + let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); + assert_eq!(hg.num_vertices(), 4); + assert_eq!(hg.num_edges(), 2); +} + +#[test] +fn test_hypergraph_empty() { + let hg = HyperGraph::empty(5); + assert_eq!(hg.num_vertices(), 5); + assert_eq!(hg.num_edges(), 0); +} + +#[test] +fn test_hypergraph_neighbors() { + let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); + let neighbors = hg.neighbors(2); + assert!(neighbors.contains(&0)); + assert!(neighbors.contains(&1)); + assert!(neighbors.contains(&3)); + assert!(!neighbors.contains(&2)); // Not its own neighbor +} + +#[test] +fn test_hypergraph_has_edge() { + let hg = HyperGraph::new(4, vec![vec![0, 1, 2]]); + assert!(hg.has_edge(&[0, 1, 2])); + assert!(hg.has_edge(&[2, 1, 0])); // Order doesn't matter + assert!(!hg.has_edge(&[0, 1])); + assert!(!hg.has_edge(&[0, 1, 3])); +} + +#[test] +fn test_hypergraph_degree() { + let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); + assert_eq!(hg.degree(0), 1); + assert_eq!(hg.degree(2), 2); + assert_eq!(hg.degree(3), 1); +} + +#[test] +fn test_hypergraph_edges_containing() { + let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); + let edges = hg.edges_containing(2); + assert_eq!(edges.len(), 2); +} + +#[test] +fn test_hypergraph_add_edge() { + let mut hg = HyperGraph::empty(4); + hg.add_edge(vec![0, 1]); + hg.add_edge(vec![1, 2, 3]); + assert_eq!(hg.num_edges(), 2); +} + +#[test] +fn test_hypergraph_max_edge_size() { + let hg = HyperGraph::new(4, vec![vec![0, 1], vec![0, 1, 2, 3]]); + assert_eq!(hg.max_edge_size(), 4); +} + +#[test] +fn test_hypergraph_is_regular_graph() { + let regular = HyperGraph::new(3, vec![vec![0, 1], vec![1, 2]]); + assert!(regular.is_regular_graph()); + + let not_regular = HyperGraph::new(4, vec![vec![0, 1, 2]]); + assert!(!not_regular.is_regular_graph()); +} + +#[test] +fn test_hypergraph_to_graph_edges() { + let hg = HyperGraph::new(3, vec![vec![0, 1], vec![1, 2]]); + let edges = hg.to_graph_edges(); + assert!(edges.is_some()); + let edges = edges.unwrap(); + assert_eq!(edges.len(), 
2); +} + +#[test] +fn test_hypergraph_to_graph_edges_not_regular() { + // Hypergraph with a hyperedge of size 3 (not a regular graph) + let hg = HyperGraph::new(4, vec![vec![0, 1, 2]]); + assert!(hg.to_graph_edges().is_none()); +} + +#[test] +fn test_hypergraph_get_edge() { + let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); + assert_eq!(hg.edge(0), Some(&vec![0, 1, 2])); + assert_eq!(hg.edge(1), Some(&vec![2, 3])); + assert_eq!(hg.edge(2), None); +} + +#[test] +#[should_panic(expected = "vertex index 5 out of bounds")] +fn test_hypergraph_invalid_vertex() { + HyperGraph::new(4, vec![vec![0, 5]]); +} + +#[test] +#[should_panic(expected = "vertex index 4 out of bounds")] +fn test_hypergraph_add_invalid_edge() { + let mut hg = HyperGraph::empty(4); + hg.add_edge(vec![0, 4]); +} diff --git a/src/tests_unit/topology/small_graphs.rs b/src/tests_unit/topology/small_graphs.rs new file mode 100644 index 0000000..ae7043e --- /dev/null +++ b/src/tests_unit/topology/small_graphs.rs @@ -0,0 +1,181 @@ +use super::*; + +#[test] +fn test_bull() { + let (n, edges) = bull(); + assert_eq!(n, 5); + assert_eq!(edges.len(), 5); +} + +#[test] +fn test_chvatal() { + let (n, edges) = chvatal(); + assert_eq!(n, 12); + assert_eq!(edges.len(), 24); +} + +#[test] +fn test_cubical() { + let (n, edges) = cubical(); + assert_eq!(n, 8); + assert_eq!(edges.len(), 12); +} + +#[test] +fn test_desargues() { + let (n, edges) = desargues(); + assert_eq!(n, 20); + assert_eq!(edges.len(), 30); +} + +#[test] +fn test_diamond() { + let (n, edges) = diamond(); + assert_eq!(n, 4); + assert_eq!(edges.len(), 5); +} + +#[test] +fn test_dodecahedral() { + let (n, edges) = dodecahedral(); + assert_eq!(n, 20); + assert_eq!(edges.len(), 30); +} + +#[test] +fn test_frucht() { + let (n, edges) = frucht(); + assert_eq!(n, 12); + assert_eq!(edges.len(), 18); +} + +#[test] +fn test_heawood() { + let (n, edges) = heawood(); + assert_eq!(n, 14); + assert_eq!(edges.len(), 21); +} + +#[test] +fn test_house() { + let (n, edges) = house(); + assert_eq!(n, 5); + assert_eq!(edges.len(), 6); +} + +#[test] +fn test_housex() { + let (n, edges) = housex(); + assert_eq!(n, 5); + assert_eq!(edges.len(), 8); +} + +#[test] +fn test_icosahedral() { + let (n, edges) = icosahedral(); + assert_eq!(n, 12); + assert_eq!(edges.len(), 30); +} + +#[test] +fn test_karate() { + let (n, edges) = karate(); + assert_eq!(n, 34); + assert_eq!(edges.len(), 78); +} + +#[test] +fn test_krackhardtkite() { + let (n, edges) = krackhardtkite(); + assert_eq!(n, 10); + assert_eq!(edges.len(), 18); +} + +#[test] +fn test_moebiuskantor() { + let (n, edges) = moebiuskantor(); + assert_eq!(n, 16); + assert_eq!(edges.len(), 24); +} + +#[test] +fn test_octahedral() { + let (n, edges) = octahedral(); + assert_eq!(n, 6); + assert_eq!(edges.len(), 12); +} + +#[test] +fn test_pappus() { + let (n, edges) = pappus(); + assert_eq!(n, 18); + assert_eq!(edges.len(), 27); +} + +#[test] +fn test_petersen() { + let (n, edges) = petersen(); + assert_eq!(n, 10); + assert_eq!(edges.len(), 15); +} + +#[test] +fn test_sedgewickmaze() { + let (n, edges) = sedgewickmaze(); + assert_eq!(n, 8); + assert_eq!(edges.len(), 10); +} + +#[test] +fn test_tetrahedral() { + let (n, edges) = tetrahedral(); + assert_eq!(n, 4); + assert_eq!(edges.len(), 6); +} + +#[test] +fn test_truncatedcube() { + let (n, edges) = truncatedcube(); + assert_eq!(n, 24); + assert_eq!(edges.len(), 36); +} + +#[test] +fn test_truncatedtetrahedron() { + let (n, edges) = truncatedtetrahedron(); + assert_eq!(n, 12); + 
assert_eq!(edges.len(), 18); +} + +#[test] +fn test_tutte() { + let (n, edges) = tutte(); + assert_eq!(n, 46); + assert_eq!(edges.len(), 69); +} + +#[test] +fn test_smallgraph() { + assert!(smallgraph("petersen").is_some()); + assert!(smallgraph("bull").is_some()); + assert!(smallgraph("nonexistent").is_none()); +} + +#[test] +fn test_available_graphs() { + let graphs = available_graphs(); + assert_eq!(graphs.len(), 22); + assert!(graphs.contains(&"petersen")); +} + +#[test] +fn test_all_graphs_have_valid_edges() { + for name in available_graphs() { + let (n, edges) = smallgraph(name).unwrap(); + for (u, v) in edges { + assert!(u < n, "{} has invalid edge: {} >= {}", name, u, n); + assert!(v < n, "{} has invalid edge: {} >= {}", name, v, n); + assert!(u != v, "{} has self-loop", name); + } + } +} diff --git a/src/tests_unit/topology/unit_disk_graph.rs b/src/tests_unit/topology/unit_disk_graph.rs new file mode 100644 index 0000000..877e28c --- /dev/null +++ b/src/tests_unit/topology/unit_disk_graph.rs @@ -0,0 +1,136 @@ +use super::*; + +#[test] +fn test_udg_basic() { + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (3.0, 0.0)], 1.0); + assert_eq!(udg.num_vertices(), 3); + assert_eq!(udg.num_edges(), 1); // Only 0-1 are within distance 1 +} + +#[test] +fn test_udg_unit() { + let udg = UnitDiskGraph::unit(vec![(0.0, 0.0), (0.5, 0.5)]); + assert_eq!(udg.radius(), 1.0); + // Distance is sqrt(0.5^2 + 0.5^2) ≈ 0.707 < 1, so connected + assert_eq!(udg.num_edges(), 1); +} + +#[test] +fn test_udg_has_edge() { + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (3.0, 0.0)], 1.0); + assert!(udg.has_edge(0, 1)); + assert!(udg.has_edge(1, 0)); // Symmetric + assert!(!udg.has_edge(0, 2)); + assert!(!udg.has_edge(1, 2)); +} + +#[test] +fn test_udg_neighbors() { + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (0.5, 0.5)], 1.0); + let neighbors = udg.neighbors(0); + // 0 is within 1.0 of both 1 and 2 + assert!(neighbors.contains(&1)); + assert!(neighbors.contains(&2)); +} + +#[test] +fn test_udg_degree() { + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (5.0, 5.0)], 1.5); + // Vertex 0 is connected to 1 and 2 + assert_eq!(udg.degree(0), 2); + // Vertex 3 is isolated + assert_eq!(udg.degree(3), 0); +} + +#[test] +fn test_udg_vertex_distance() { + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (3.0, 4.0)], 10.0); + let dist = udg.vertex_distance(0, 1); + assert_eq!(dist, Some(5.0)); // 3-4-5 triangle +} + +#[test] +fn test_udg_position() { + let udg = UnitDiskGraph::new(vec![(1.0, 2.0), (3.0, 4.0)], 1.0); + assert_eq!(udg.position(0), Some((1.0, 2.0))); + assert_eq!(udg.position(1), Some((3.0, 4.0))); + assert_eq!(udg.position(2), None); +} + +#[test] +fn test_udg_bounding_box() { + let udg = UnitDiskGraph::new(vec![(1.0, 2.0), (3.0, 4.0), (-1.0, 0.0)], 1.0); + let bbox = udg.bounding_box(); + assert!(bbox.is_some()); + let ((min_x, min_y), (max_x, max_y)) = bbox.unwrap(); + assert_eq!(min_x, -1.0); + assert_eq!(max_x, 3.0); + assert_eq!(min_y, 0.0); + assert_eq!(max_y, 4.0); +} + +#[test] +fn test_udg_empty_bounding_box() { + let udg = UnitDiskGraph::new(vec![], 1.0); + assert!(udg.bounding_box().is_none()); +} + +#[test] +fn test_udg_grid() { + let udg = UnitDiskGraph::grid(2, 3, 1.0, 1.0); + assert_eq!(udg.num_vertices(), 6); + // Grid with spacing 1.0 and radius 1.0: only horizontal/vertical neighbors connected + // Row 0: 0-1, 1-2 + // Row 1: 3-4, 4-5 + // Vertical: 0-3, 1-4, 2-5 + assert_eq!(udg.num_edges(), 7); +} + +#[test] +fn 
test_udg_grid_diagonal() { + // With radius > sqrt(2), diagonals are also connected + let udg = UnitDiskGraph::grid(2, 2, 1.0, 1.5); + assert_eq!(udg.num_vertices(), 4); + // All 6 pairs are connected: 0-1, 0-2, 0-3, 1-2, 1-3, 2-3 + // Distances: 0-1 (1.0), 0-2 (1.0), 1-3 (1.0), 2-3 (1.0), 0-3 (sqrt(2)≈1.41), 1-2 (sqrt(2)≈1.41) + assert_eq!(udg.num_edges(), 6); +} + +#[test] +fn test_udg_edges_list() { + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0)], 1.0); + let edges = udg.edges(); + assert_eq!(edges.len(), 1); + assert_eq!(edges[0], (0, 1)); +} + +#[test] +fn test_udg_positions() { + let udg = UnitDiskGraph::new(vec![(1.0, 2.0), (3.0, 4.0)], 1.0); + let positions = udg.positions(); + assert_eq!(positions.len(), 2); + assert_eq!(positions[0], (1.0, 2.0)); + assert_eq!(positions[1], (3.0, 4.0)); +} + +#[test] +fn test_udg_vertex_distance_invalid() { + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0)], 1.0); + assert_eq!(udg.vertex_distance(0, 5), None); + assert_eq!(udg.vertex_distance(5, 0), None); + assert_eq!(udg.vertex_distance(5, 6), None); +} + +#[test] +fn test_udg_graph_trait() { + // Test the Graph trait implementation + let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (0.5, 0.5)], 1.0); + // Use Graph trait methods + assert_eq!(Graph::num_vertices(&udg), 3); + assert!(Graph::num_edges(&udg) > 0); + assert!(Graph::has_edge(&udg, 0, 1)); + let edges = Graph::edges(&udg); + assert!(!edges.is_empty()); + let neighbors = Graph::neighbors(&udg, 0); + assert!(neighbors.contains(&1)); +} diff --git a/src/tests_unit/trait_consistency.rs b/src/tests_unit/trait_consistency.rs new file mode 100644 index 0000000..038964d --- /dev/null +++ b/src/tests_unit/trait_consistency.rs @@ -0,0 +1,137 @@ +use crate::models::graph::*; +use crate::models::optimization::*; +use crate::models::satisfiability::*; +use crate::models::set::*; +use crate::models::specialized::*; +use crate::prelude::*; +use crate::topology::SimpleGraph; + +fn check_problem_trait<P: Problem>(problem: &P, name: &str) +where + P::Size: std::fmt::Debug, +{ + assert!( + problem.num_variables() > 0 || name.contains("empty"), + "{} should have variables", + name + ); + assert!( + problem.num_flavors() >= 2, + "{} should have at least 2 flavors", + name + ); + + let size = problem.problem_size(); + // Check that problem_size returns some meaningful data + assert!( + size.get("num_vertices").is_some() + || size.get("num_vars").is_some() + || size.get("num_sets").is_some() + || size.get("num_cars").is_some() + || size.get("rows").is_some() + || size.get("left_size").is_some() + || size.get("target").is_some() + || size.get("num_variables").is_some() + || size.get("num_colors").is_some() + || size.get("num_spins").is_some() + || size.get("num_edges").is_some(), + "{} problem_size should have meaningful data", + name + ); +} + +#[test] +fn test_all_problems_implement_trait_correctly() { + check_problem_trait( + &IndependentSet::<SimpleGraph, i32>::new(3, vec![(0, 1)]), + "IndependentSet", + ); + check_problem_trait( + &VertexCovering::<SimpleGraph, i32>::new(3, vec![(0, 1)]), + "VertexCovering", + ); + check_problem_trait(&MaxCut::<SimpleGraph, i32>::new(3, vec![(0, 1, 1)]), "MaxCut"); + check_problem_trait(&KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1)]), "KColoring"); + check_problem_trait(&DominatingSet::<SimpleGraph, i32>::new(3, vec![(0, 1)]), "DominatingSet"); + check_problem_trait(&MaximalIS::<SimpleGraph, i32>::new(3, vec![(0, 1)]), "MaximalIS"); + check_problem_trait(&Matching::<SimpleGraph, i32>::new(3, vec![(0, 1, 1)]), "Matching"); + check_problem_trait( + &Satisfiability::<i32>::new(3,
vec![CNFClause::new(vec![1])]), + "SAT", + ); + check_problem_trait( + &SpinGlass::new(3, vec![((0, 1), 1.0)], vec![0.0; 3]), + "SpinGlass", + ); + check_problem_trait(&QUBO::from_matrix(vec![vec![1.0; 3]; 3]), "QUBO"); + check_problem_trait(&SetCovering::<i32>::new(3, vec![vec![0, 1]]), "SetCovering"); + check_problem_trait(&SetPacking::<i32>::new(vec![vec![0, 1]]), "SetPacking"); + check_problem_trait(&PaintShop::new(vec!["a", "a"]), "PaintShop"); + check_problem_trait(&BMF::new(vec![vec![true]], 1), "BMF"); + check_problem_trait(&BicliqueCover::new(2, 2, vec![(0, 2)], 1), "BicliqueCover"); + check_problem_trait(&Factoring::new(6, 2, 2), "Factoring"); + + let circuit = Circuit::new(vec![Assignment::new( + vec!["x".to_string()], + BooleanExpr::constant(true), + )]); + check_problem_trait(&CircuitSAT::<i32>::new(circuit), "CircuitSAT"); +} + +#[test] +fn test_energy_modes() { + // Minimization problems + assert!(VertexCovering::<SimpleGraph, i32>::new(2, vec![(0, 1)]) + .energy_mode() + .is_minimization()); + assert!(DominatingSet::<SimpleGraph, i32>::new(2, vec![(0, 1)]) + .energy_mode() + .is_minimization()); + assert!(SetCovering::<i32>::new(2, vec![vec![0, 1]]) + .energy_mode() + .is_minimization()); + assert!(PaintShop::new(vec!["a", "a"]) + .energy_mode() + .is_minimization()); + assert!(QUBO::from_matrix(vec![vec![1.0]]) + .energy_mode() + .is_minimization()); + assert!(SpinGlass::new(1, vec![], vec![0.0]) + .energy_mode() + .is_minimization()); + assert!(BMF::new(vec![vec![true]], 1) + .energy_mode() + .is_minimization()); + assert!(Factoring::new(6, 2, 2).energy_mode().is_minimization()); + assert!(KColoring::<2, SimpleGraph, i32>::new(2, vec![(0, 1)]) + .energy_mode() + .is_minimization()); + assert!(BicliqueCover::new(2, 2, vec![(0, 2)], 1) + .energy_mode() + .is_minimization()); + + // Maximization problems + assert!(IndependentSet::<SimpleGraph, i32>::new(2, vec![(0, 1)]) + .energy_mode() + .is_maximization()); + assert!(MaximalIS::<SimpleGraph, i32>::new(2, vec![(0, 1)]) + .energy_mode() + .is_maximization()); + assert!(MaxCut::<SimpleGraph, i32>::new(2, vec![(0, 1, 1)]) + .energy_mode() + .is_maximization()); + assert!(Matching::<SimpleGraph, i32>::new(2, vec![(0, 1, 1)]) + .energy_mode() + .is_maximization()); + assert!(SetPacking::<i32>::new(vec![vec![0]]) + .energy_mode() + .is_maximization()); + assert!(Satisfiability::<i32>::new(1, vec![CNFClause::new(vec![1])]) + .energy_mode() + .is_maximization()); + + let circuit = Circuit::new(vec![]); + assert!(CircuitSAT::<i32>::new(circuit) + .energy_mode() + .is_maximization()); +} diff --git a/src/tests_unit/traits.rs b/src/tests_unit/traits.rs new file mode 100644 index 0000000..372987d --- /dev/null +++ b/src/tests_unit/traits.rs @@ -0,0 +1,429 @@ +use super::*; + +// A simple test problem: select binary variables to maximize sum of weights +#[derive(Clone)] +struct SimpleWeightedProblem { + weights: Vec<i32>, +} + +impl Problem for SimpleWeightedProblem { + const NAME: &'static str = "SimpleWeightedProblem"; + + fn variant() -> Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "i32")] + } + + type Size = i32; + + fn num_variables(&self) -> usize { + self.weights.len() + } + + fn num_flavors(&self) -> usize { + 2 + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![("variables", self.weights.len())]) + } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::LargerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize<i32> { + let sum: i32 = config + .iter() + .zip(&self.weights) + .map(|(&c, &w)| if c == 1 { w } else { 0 }) + .sum(); + SolutionSize::valid(sum) + } +} + +// A simple CSP for testing
+#[derive(Clone)] +struct SimpleCsp { + num_vars: usize, +} + +impl Problem for SimpleCsp { + const NAME: &'static str = "SimpleCsp"; + + fn variant() -> Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "i32")] + } + + type Size = i32; + + fn num_variables(&self) -> usize { + self.num_vars + } + + fn num_flavors(&self) -> usize { + 2 + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![("variables", self.num_vars)]) + } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::LargerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize<i32> { + csp_solution_size(self, config) + } +} + +impl ConstraintSatisfactionProblem for SimpleCsp { + fn constraints(&self) -> Vec<LocalConstraint> { + // Constraint: at most one variable can be 1 + if self.num_vars >= 2 { + vec![LocalConstraint::new( + 2, + vec![0, 1], + vec![true, true, true, false], // (0,0), (0,1), (1,0) OK; (1,1) invalid + )] + } else { + vec![] + } + } + + fn objectives(&self) -> Vec<LocalSolutionSize<i32>> { + // Each variable contributes 1 if selected + (0..self.num_vars) + .map(|i| LocalSolutionSize::new(2, vec![i], vec![0, 1])) + .collect() + } + + fn weights(&self) -> Vec<i32> { + vec![1; self.num_vars] + } + + fn set_weights(&mut self, _weights: Vec<i32>) {} + + fn is_weighted(&self) -> bool { + false + } +} + +#[test] +fn test_variant_for_test_problems() { + // Test that variant() works for test problems + let v = SimpleWeightedProblem::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0], ("graph", "SimpleGraph")); + assert_eq!(v[1], ("weight", "i32")); + + let v = SimpleCsp::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0], ("graph", "SimpleGraph")); + assert_eq!(v[1], ("weight", "i32")); + + let v = MultiFlavorProblem::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0], ("graph", "SimpleGraph")); + assert_eq!(v[1], ("weight", "i32")); +} + +#[test] +fn test_simple_problem() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2, 3], + }; + + assert_eq!(problem.num_variables(), 3); + assert_eq!(problem.num_flavors(), 2); + assert_eq!(problem.variables(), 0..3); + assert_eq!(problem.flavors(), vec![0, 1]); + + let sol = problem.solution_size(&[0, 0, 0]); + assert_eq!(sol.size, 0); + assert!(sol.is_valid); + + let sol = problem.solution_size(&[1, 1, 1]); + assert_eq!(sol.size, 6); + assert!(sol.is_valid); + + let sol = problem.solution_size(&[1, 0, 1]); + assert_eq!(sol.size, 4); + assert!(sol.is_valid); +} + +#[test] +fn test_valid_config() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2, 3], + }; + + assert!(problem.is_valid_config(&[0, 1, 0])); + assert!(problem.is_valid_config(&[1, 1, 1])); + assert!(!problem.is_valid_config(&[0, 2, 0])); // invalid flavor + assert!(!problem.is_valid_config(&[0, 1])); // wrong length + assert!(!problem.is_valid_config(&[0, 1, 0, 1])); // wrong length +} + +#[test] +fn test_batch_evaluation() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2, 3], + }; + + let configs = vec![vec![0, 0, 0], vec![1, 1, 1], vec![1, 0, 1]]; + + let results = problem.solution_size_multiple(&configs); + assert_eq!(results.len(), 3); + assert_eq!(results[0].size, 0); + assert_eq!(results[1].size, 6); + assert_eq!(results[2].size, 4); +} + +#[test] +fn test_csp_solution_size() { + let problem = SimpleCsp { num_vars: 3 }; + + // Test valid configurations + let sol = problem.solution_size(&[0, 0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); + + let sol = problem.solution_size(&[1, 0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); +
+ let sol = problem.solution_size(&[0, 1, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); + + // Test invalid configuration (both 0 and 1 are 1) + let sol = problem.solution_size(&[1, 1, 0]); + assert!(!sol.is_valid); + assert_eq!(sol.size, 2); +} + +#[test] +fn test_csp_is_satisfied() { + let problem = SimpleCsp { num_vars: 3 }; + + assert!(problem.is_satisfied(&[0, 0, 0])); + assert!(problem.is_satisfied(&[1, 0, 0])); + assert!(problem.is_satisfied(&[0, 1, 0])); + assert!(!problem.is_satisfied(&[1, 1, 0])); +} + +#[test] +fn test_csp_compute_objective() { + let problem = SimpleCsp { num_vars: 3 }; + + assert_eq!(problem.compute_objective(&[0, 0, 0]), 0); + assert_eq!(problem.compute_objective(&[1, 0, 0]), 1); + assert_eq!(problem.compute_objective(&[1, 1, 0]), 2); + assert_eq!(problem.compute_objective(&[1, 1, 1]), 3); +} + +#[test] +fn test_csp_single_variable() { + // Test CSP with num_vars = 1 (no constraints, empty constraint list) + let problem = SimpleCsp { num_vars: 1 }; + + assert!(problem.constraints().is_empty()); + assert!(problem.is_satisfied(&[0])); // Always satisfied with no constraints + assert!(problem.is_satisfied(&[1])); + + let sol = problem.solution_size(&[0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); + + let sol = problem.solution_size(&[1]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); +} + +#[test] +fn test_csp_weights_and_weighted() { + let problem = SimpleCsp { num_vars: 3 }; + assert_eq!(problem.weights(), vec![1, 1, 1]); + assert!(!problem.is_weighted()); +} + +#[test] +fn test_csp_set_weights() { + let mut problem = SimpleCsp { num_vars: 3 }; + problem.set_weights(vec![10, 20, 30]); + // For SimpleCsp, set_weights is a no-op, so this just tests the call works + assert!(!problem.is_weighted()); +} + +#[test] +fn test_problem_size_metadata() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2, 3, 4, 5], + }; + + let size = problem.problem_size(); + assert_eq!(size.get("variables"), Some(5)); +} + +#[test] +fn test_energy_mode() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2, 3], + }; + assert!(problem.energy_mode().is_maximization()); +} + +#[test] +fn test_batch_evaluation_empty() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2, 3], + }; + + let configs: Vec> = vec![]; + let results = problem.solution_size_multiple(&configs); + assert!(results.is_empty()); +} + +#[test] +fn test_is_valid_config_empty_problem() { + let problem = SimpleWeightedProblem { weights: vec![] }; + + assert_eq!(problem.num_variables(), 0); + assert!(problem.is_valid_config(&[])); // Empty config for empty problem + assert!(!problem.is_valid_config(&[0])); // Non-empty config is invalid +} + +#[test] +fn test_variables_range() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2, 3, 4, 5], + }; + + let vars: Vec = problem.variables().collect(); + assert_eq!(vars, vec![0, 1, 2, 3, 4]); +} + +#[test] +fn test_flavors_list() { + let problem = SimpleWeightedProblem { + weights: vec![1, 2], + }; + + assert_eq!(problem.flavors(), vec![0, 1]); +} + +#[test] +fn test_csp_objectives() { + let problem = SimpleCsp { num_vars: 3 }; + let objectives = problem.objectives(); + + assert_eq!(objectives.len(), 3); + // Test that each objective evaluates correctly + assert_eq!(objectives[0].evaluate(&[0, 0, 0]), 0); + assert_eq!(objectives[0].evaluate(&[1, 0, 0]), 1); + assert_eq!(objectives[1].evaluate(&[0, 1, 0]), 1); + assert_eq!(objectives[2].evaluate(&[0, 0, 1]), 1); +} + +#[test] +fn 
test_csp_solution_size_helper_function() { + let problem = SimpleCsp { num_vars: 2 }; + + // Test via the helper function directly + let sol = csp_solution_size(&problem, &[0, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 0); + + let sol = csp_solution_size(&problem, &[1, 0]); + assert!(sol.is_valid); + assert_eq!(sol.size, 1); + + let sol = csp_solution_size(&problem, &[1, 1]); + assert!(!sol.is_valid); + assert_eq!(sol.size, 2); +} + +// Test problem with more than 2 flavors +#[derive(Clone)] +struct MultiFlavorProblem { + num_vars: usize, + num_flavors: usize, +} + +impl Problem for MultiFlavorProblem { + const NAME: &'static str = "MultiFlavorProblem"; + + fn variant() -> Vec<(&'static str, &'static str)> { + vec![("graph", "SimpleGraph"), ("weight", "i32")] + } + + type Size = i32; + + fn num_variables(&self) -> usize { + self.num_vars + } + + fn num_flavors(&self) -> usize { + self.num_flavors + } + + fn problem_size(&self) -> ProblemSize { + ProblemSize::new(vec![ + ("variables", self.num_vars), + ("flavors", self.num_flavors), + ]) + } + + fn energy_mode(&self) -> EnergyMode { + EnergyMode::SmallerSizeIsBetter + } + + fn solution_size(&self, config: &[usize]) -> SolutionSize { + let sum: i32 = config.iter().map(|&c| c as i32).sum(); + SolutionSize::valid(sum) + } +} + +#[test] +fn test_multi_flavor_problem() { + let problem = MultiFlavorProblem { + num_vars: 3, + num_flavors: 4, + }; + + assert_eq!(problem.num_flavors(), 4); + assert_eq!(problem.flavors(), vec![0, 1, 2, 3]); + assert!(problem.energy_mode().is_minimization()); + + // Valid configs + assert!(problem.is_valid_config(&[0, 1, 2])); + assert!(problem.is_valid_config(&[3, 3, 3])); + + // Invalid: flavor out of range + assert!(!problem.is_valid_config(&[0, 4, 0])); + assert!(!problem.is_valid_config(&[5, 0, 0])); + + let sol = problem.solution_size(&[0, 1, 2]); + assert_eq!(sol.size, 3); + + let sol = problem.solution_size(&[3, 3, 3]); + assert_eq!(sol.size, 9); +} + +#[test] +fn test_batch_evaluation_with_multi_flavor() { + let problem = MultiFlavorProblem { + num_vars: 2, + num_flavors: 3, + }; + + let configs = vec![vec![0, 0], vec![1, 1], vec![2, 2], vec![0, 2]]; + let results = problem.solution_size_multiple(&configs); + + assert_eq!(results.len(), 4); + assert_eq!(results[0].size, 0); + assert_eq!(results[1].size, 2); + assert_eq!(results[2].size, 4); + assert_eq!(results[3].size, 2); +} diff --git a/src/tests_unit/truth_table.rs b/src/tests_unit/truth_table.rs new file mode 100644 index 0000000..296bc56 --- /dev/null +++ b/src/tests_unit/truth_table.rs @@ -0,0 +1,195 @@ +use super::*; + +#[test] +fn test_and_gate() { + let and = TruthTable::and(2); + assert!(!and.evaluate(&[false, false])); + assert!(!and.evaluate(&[true, false])); + assert!(!and.evaluate(&[false, true])); + assert!(and.evaluate(&[true, true])); +} + +#[test] +fn test_or_gate() { + let or = TruthTable::or(2); + assert!(!or.evaluate(&[false, false])); + assert!(or.evaluate(&[true, false])); + assert!(or.evaluate(&[false, true])); + assert!(or.evaluate(&[true, true])); +} + +#[test] +fn test_not_gate() { + let not = TruthTable::not(); + assert!(not.evaluate(&[false])); + assert!(!not.evaluate(&[true])); +} + +#[test] +fn test_xor_gate() { + let xor = TruthTable::xor(2); + assert!(!xor.evaluate(&[false, false])); + assert!(xor.evaluate(&[true, false])); + assert!(xor.evaluate(&[false, true])); + assert!(!xor.evaluate(&[true, true])); +} + +#[test] +fn test_nand_gate() { + let nand = TruthTable::nand(2); + assert!(nand.evaluate(&[false, false])); 
+ assert!(nand.evaluate(&[true, false])); + assert!(nand.evaluate(&[false, true])); + assert!(!nand.evaluate(&[true, true])); +} + +#[test] +fn test_implies() { + let imp = TruthTable::implies(); + assert!(imp.evaluate(&[false, false])); // F -> F = T + assert!(imp.evaluate(&[false, true])); // F -> T = T + assert!(!imp.evaluate(&[true, false])); // T -> F = F + assert!(imp.evaluate(&[true, true])); // T -> T = T +} + +#[test] +fn test_from_function() { + let majority = + TruthTable::from_function(3, |input| input.iter().filter(|&&b| b).count() >= 2); + assert!(!majority.evaluate(&[false, false, false])); + assert!(!majority.evaluate(&[true, false, false])); + assert!(majority.evaluate(&[true, true, false])); + assert!(majority.evaluate(&[true, true, true])); +} + +#[test] +fn test_evaluate_config() { + let and = TruthTable::and(2); + assert!(!and.evaluate_config(&[0, 0])); + assert!(!and.evaluate_config(&[1, 0])); + assert!(and.evaluate_config(&[1, 1])); +} + +#[test] +fn test_satisfiable() { + let or = TruthTable::or(2); + assert!(or.is_satisfiable()); + + let contradiction = TruthTable::from_outputs(2, vec![false, false, false, false]); + assert!(!contradiction.is_satisfiable()); + assert!(contradiction.is_contradiction()); +} + +#[test] +fn test_tautology() { + let tautology = TruthTable::from_outputs(2, vec![true, true, true, true]); + assert!(tautology.is_tautology()); + + let or = TruthTable::or(2); + assert!(!or.is_tautology()); +} + +#[test] +fn test_satisfying_assignments() { + let xor = TruthTable::xor(2); + let sat = xor.satisfying_assignments(); + assert_eq!(sat.len(), 2); + assert!(sat.contains(&vec![true, false])); + assert!(sat.contains(&vec![false, true])); +} + +#[test] +fn test_count() { + let and = TruthTable::and(2); + assert_eq!(and.count_ones(), 1); + assert_eq!(and.count_zeros(), 3); +} + +#[test] +fn test_index_to_input() { + let tt = TruthTable::and(3); + assert_eq!(tt.index_to_input(0), vec![false, false, false]); + assert_eq!(tt.index_to_input(1), vec![true, false, false]); + assert_eq!(tt.index_to_input(7), vec![true, true, true]); +} + +#[test] +fn test_outputs_vec() { + let and = TruthTable::and(2); + assert_eq!(and.outputs_vec(), vec![false, false, false, true]); +} + +#[test] +fn test_and_with() { + let a = TruthTable::from_outputs(1, vec![false, true]); + let b = TruthTable::from_outputs(1, vec![true, false]); + let result = a.and_with(&b); + assert_eq!(result.outputs_vec(), vec![false, false]); +} + +#[test] +fn test_or_with() { + let a = TruthTable::from_outputs(1, vec![false, true]); + let b = TruthTable::from_outputs(1, vec![true, false]); + let result = a.or_with(&b); + assert_eq!(result.outputs_vec(), vec![true, true]); +} + +#[test] +fn test_negate() { + let and = TruthTable::and(2); + let nand = and.negate(); + assert_eq!(nand.outputs_vec(), vec![true, true, true, false]); +} + +#[test] +fn test_num_rows() { + let tt = TruthTable::and(3); + assert_eq!(tt.num_rows(), 8); +} + +#[test] +fn test_3_input_and() { + let and3 = TruthTable::and(3); + assert!(!and3.evaluate(&[true, true, false])); + assert!(and3.evaluate(&[true, true, true])); +} + +#[test] +fn test_xnor() { + let xnor = TruthTable::xnor(2); + assert!(xnor.evaluate(&[false, false])); + assert!(!xnor.evaluate(&[true, false])); + assert!(!xnor.evaluate(&[false, true])); + assert!(xnor.evaluate(&[true, true])); +} + +#[test] +fn test_nor() { + let nor = TruthTable::nor(2); + assert!(nor.evaluate(&[false, false])); + assert!(!nor.evaluate(&[true, false])); + assert!(!nor.evaluate(&[false, 
true])); + assert!(!nor.evaluate(&[true, true])); +} + +#[test] +fn test_serialization() { + let and = TruthTable::and(2); + let json = serde_json::to_string(&and).unwrap(); + let deserialized: TruthTable = serde_json::from_str(&json).unwrap(); + assert_eq!(and, deserialized); +} + +#[test] +fn test_outputs() { + let and = TruthTable::and(2); + let outputs = and.outputs(); + assert_eq!(outputs.len(), 4); +} + +#[test] +fn test_num_inputs() { + let and = TruthTable::and(3); + assert_eq!(and.num_inputs(), 3); +} diff --git a/src/tests_unit/types.rs b/src/tests_unit/types.rs new file mode 100644 index 0000000..63ba29d --- /dev/null +++ b/src/tests_unit/types.rs @@ -0,0 +1,132 @@ +use super::*; + +#[test] +fn test_unweighted() { + let uw = Unweighted; + // Test get() method + assert_eq!(uw.get(0), 1); + assert_eq!(uw.get(100), 1); + assert_eq!(uw.get(usize::MAX), 1); + + // Test Display + assert_eq!(format!("{}", uw), "Unweighted"); + + // Test Clone, Copy, Default + let uw2 = uw; + let _uw3 = uw2; // Copy works (no clone needed) + let _uw4: Unweighted = Default::default(); + + // Test PartialEq + assert_eq!(Unweighted, Unweighted); +} + +#[test] +fn test_energy_mode() { + let max_mode = EnergyMode::LargerSizeIsBetter; + let min_mode = EnergyMode::SmallerSizeIsBetter; + + assert!(max_mode.is_maximization()); + assert!(!max_mode.is_minimization()); + assert!(!min_mode.is_maximization()); + assert!(min_mode.is_minimization()); + + assert!(max_mode.is_better(&10, &5)); + assert!(!max_mode.is_better(&5, &10)); + assert!(min_mode.is_better(&5, &10)); + assert!(!min_mode.is_better(&10, &5)); + + assert!(max_mode.is_better_or_equal(&10, &10)); + assert!(min_mode.is_better_or_equal(&10, &10)); +} + +#[test] +fn test_solution_size() { + let valid = SolutionSize::valid(42); + assert_eq!(valid.size, 42); + assert!(valid.is_valid); + + let invalid = SolutionSize::invalid(0); + assert!(!invalid.is_valid); + + let custom = SolutionSize::new(100, false); + assert_eq!(custom.size, 100); + assert!(!custom.is_valid); +} + +#[test] +fn test_solution_size_display() { + let valid = SolutionSize::valid(42); + assert_eq!(format!("{}", valid), "SolutionSize(42, valid)"); + + let invalid = SolutionSize::invalid(0); + assert_eq!(format!("{}", invalid), "SolutionSize(0, invalid)"); +} + +#[test] +fn test_problem_size() { + let ps = ProblemSize::new(vec![("vertices", 10), ("edges", 20)]); + assert_eq!(ps.get("vertices"), Some(10)); + assert_eq!(ps.get("edges"), Some(20)); + assert_eq!(ps.get("unknown"), None); +} + +#[test] +fn test_problem_size_display() { + let ps = ProblemSize::new(vec![("vertices", 10), ("edges", 20)]); + assert_eq!(format!("{}", ps), "ProblemSize{vertices: 10, edges: 20}"); + + let empty = ProblemSize::new(vec![]); + assert_eq!(format!("{}", empty), "ProblemSize{}"); + + let single = ProblemSize::new(vec![("n", 5)]); + assert_eq!(format!("{}", single), "ProblemSize{n: 5}"); +} + +#[test] +fn test_local_constraint() { + // Binary constraint on 2 variables: only (0,0) and (1,1) are valid + let constraint = LocalConstraint::new(2, vec![0, 1], vec![true, false, false, true]); + + assert!(constraint.is_satisfied(&[0, 0])); + assert!(!constraint.is_satisfied(&[0, 1])); + assert!(!constraint.is_satisfied(&[1, 0])); + assert!(constraint.is_satisfied(&[1, 1])); + assert_eq!(constraint.num_variables(), 2); +} + +#[test] +fn test_local_constraint_out_of_bounds() { + let constraint = LocalConstraint::new(2, vec![5, 6], vec![true, false, false, true]); + // Test with config that doesn't have indices 5 and 6 - 
defaults to 0 + assert!(constraint.is_satisfied(&[0, 0, 0])); +} + +#[test] +fn test_local_solution_size() { + // Binary objective on 1 variable: weight 0 for 0, weight 5 for 1 + let objective = LocalSolutionSize::new(2, vec![0], vec![0, 5]); + + assert_eq!(objective.evaluate(&[0]), 0); + assert_eq!(objective.evaluate(&[1]), 5); + assert_eq!(objective.num_variables(), 1); +} + +#[test] +fn test_local_solution_size_multi_variable() { + // Binary objective on 2 variables + let objective = LocalSolutionSize::new(2, vec![0, 1], vec![0, 1, 2, 3]); + assert_eq!(objective.evaluate(&[0, 0]), 0); + assert_eq!(objective.evaluate(&[0, 1]), 1); + assert_eq!(objective.evaluate(&[1, 0]), 2); + assert_eq!(objective.evaluate(&[1, 1]), 3); +} + +#[test] +fn test_numeric_weight_impls() { + fn assert_numeric_weight() {} + + assert_numeric_weight::(); + assert_numeric_weight::(); + assert_numeric_weight::(); + assert_numeric_weight::(); +} diff --git a/tests/rules/unitdiskmapping/common.rs b/src/tests_unit/unitdiskmapping_algorithms/common.rs similarity index 93% rename from tests/rules/unitdiskmapping/common.rs rename to src/tests_unit/unitdiskmapping_algorithms/common.rs index b9e2de8..6ca0cb0 100644 --- a/tests/rules/unitdiskmapping/common.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/common.rs @@ -1,11 +1,11 @@ //! Common test utilities for mapping tests. -use problemreductions::models::optimization::{LinearConstraint, ObjectiveSense, ILP}; -use problemreductions::models::IndependentSet; -use problemreductions::rules::unitdiskmapping::MappingResult; -use problemreductions::rules::{ReduceTo, ReductionResult}; -use problemreductions::solvers::ILPSolver; -use problemreductions::topology::{Graph, SimpleGraph}; +use crate::models::optimization::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::IndependentSet; +use crate::rules::unitdiskmapping::MappingResult; +use crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::ILPSolver; +use crate::topology::{Graph, SimpleGraph}; /// Check if a configuration is a valid independent set. pub fn is_independent_set(edges: &[(usize, usize)], config: &[usize]) -> bool { diff --git a/tests/rules/unitdiskmapping/copyline.rs b/src/tests_unit/unitdiskmapping_algorithms/copyline.rs similarity index 99% rename from tests/rules/unitdiskmapping/copyline.rs rename to src/tests_unit/unitdiskmapping_algorithms/copyline.rs index 6b48172..a8581bd 100644 --- a/tests/rules/unitdiskmapping/copyline.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/copyline.rs @@ -1,7 +1,7 @@ //! Tests for copyline functionality (src/rules/mapping/copyline.rs). use super::common::solve_weighted_mis; -use problemreductions::rules::unitdiskmapping::{ +use crate::rules::unitdiskmapping::{ create_copylines, map_graph, map_graph_triangular, mis_overhead_copyline, CopyLine, }; diff --git a/tests/rules/unitdiskmapping/gadgets.rs b/src/tests_unit/unitdiskmapping_algorithms/gadgets.rs similarity index 98% rename from tests/rules/unitdiskmapping/gadgets.rs rename to src/tests_unit/unitdiskmapping_algorithms/gadgets.rs index 9c92668..e048c89 100644 --- a/tests/rules/unitdiskmapping/gadgets.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/gadgets.rs @@ -1,7 +1,7 @@ //! Tests for gadget properties (src/rules/mapping/gadgets.rs and triangular gadgets). 
use super::common::{solve_weighted_mis, triangular_edges}; -use problemreductions::rules::unitdiskmapping::{ +use crate::rules::unitdiskmapping::{ Branch, BranchFix, Cross, EndTurn, Mirror, Pattern, ReflectedGadget, RotatedGadget, TCon, TriBranch, TriBranchFix, TriBranchFixB, TriCross, TriEndTurn, TriTConDown, TriTConUp, TriTrivialTurnLeft, TriTrivialTurnRight, TriTurn, TriWTurn, TriangularGadget, TrivialTurn, @@ -855,7 +855,7 @@ fn test_weighted_ksg_pattern_source_matrix() { #[test] fn test_pattern_mapped_matrix() { - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::Pattern; let cross_mapped = Cross::.mapped_matrix(); assert!(!cross_mapped.is_empty()); @@ -944,7 +944,7 @@ fn test_gadget_connected_nodes() { #[test] fn test_build_standard_unit_disk_edges() { - use problemreductions::rules::unitdiskmapping::alpha_tensor::build_standard_unit_disk_edges; + use crate::rules::unitdiskmapping::alpha_tensor::build_standard_unit_disk_edges; // Simple test: two adjacent points let locs = vec![(0, 0), (1, 0)]; @@ -966,7 +966,7 @@ fn test_build_standard_unit_disk_edges() { #[test] fn test_build_triangular_unit_disk_edges() { - use problemreductions::rules::unitdiskmapping::alpha_tensor::build_triangular_unit_disk_edges; + use crate::rules::unitdiskmapping::alpha_tensor::build_triangular_unit_disk_edges; let locs = vec![(0, 0), (1, 0), (0, 1)]; let edges = build_triangular_unit_disk_edges(&locs); @@ -990,7 +990,7 @@ fn test_triangular_gadget_source_matrix() { #[test] fn test_triangular_gadget_mapped_matrix() { - use problemreductions::rules::unitdiskmapping::TriangularGadget; + use crate::rules::unitdiskmapping::TriangularGadget; let matrix = TriTurn.mapped_matrix(); assert!(!matrix.is_empty()); @@ -1025,7 +1025,7 @@ fn test_triangular_gadget_connected_nodes() { #[test] fn test_all_triangular_gadgets_source_matrix() { - use problemreductions::rules::unitdiskmapping::TriangularGadget; + use crate::rules::unitdiskmapping::TriangularGadget; fn check_matrix(gadget: G, name: &str) { let matrix = gadget.source_matrix(); @@ -1062,7 +1062,7 @@ fn test_all_triangular_gadgets_source_matrix() { #[test] fn test_all_triangular_gadgets_mapped_matrix() { - use problemreductions::rules::unitdiskmapping::TriangularGadget; + use crate::rules::unitdiskmapping::TriangularGadget; fn check_matrix(gadget: G, name: &str) { let matrix = gadget.mapped_matrix(); @@ -1393,7 +1393,7 @@ fn test_all_mirrors_valid_graphs() { // === Julia Tests: rotated_and_reflected counts === // From Julia's test/gadgets.jl -use problemreductions::rules::unitdiskmapping::{BranchFixB, DanglingLeg}; +use crate::rules::unitdiskmapping::{BranchFixB, DanglingLeg}; /// Count unique gadgets from all rotations (0, 1, 2, 3) and reflections (X, Y, Diag, OffDiag). /// Julia: length(rotated_and_reflected(gadget)) diff --git a/tests/rules/unitdiskmapping/gadgets_ground_truth.rs b/src/tests_unit/unitdiskmapping_algorithms/gadgets_ground_truth.rs similarity index 99% rename from tests/rules/unitdiskmapping/gadgets_ground_truth.rs rename to src/tests_unit/unitdiskmapping_algorithms/gadgets_ground_truth.rs index 10a7a99..1c0e7d7 100644 --- a/tests/rules/unitdiskmapping/gadgets_ground_truth.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/gadgets_ground_truth.rs @@ -3,7 +3,7 @@ //! The ground truth is generated by scripts/dump_gadgets.jl and stored in //! 
tests/data/gadgets_ground_truth.json -use problemreductions::rules::unitdiskmapping::{ +use crate::rules::unitdiskmapping::{ // Unweighted square gadgets Branch, BranchFix, diff --git a/tests/rules/unitdiskmapping/julia_comparison.rs b/src/tests_unit/unitdiskmapping_algorithms/julia_comparison.rs similarity index 98% rename from tests/rules/unitdiskmapping/julia_comparison.rs rename to src/tests_unit/unitdiskmapping_algorithms/julia_comparison.rs index 0167fad..bd9c954 100644 --- a/tests/rules/unitdiskmapping/julia_comparison.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/julia_comparison.rs @@ -5,7 +5,7 @@ //! - Weighted (square lattice with weights) //! - Triangular (triangular lattice with weights) -use problemreductions::rules::unitdiskmapping::{ +use crate::rules::unitdiskmapping::{ map_graph_triangular_with_order, map_graph_with_order, }; use serde::Deserialize; @@ -250,7 +250,7 @@ fn rust_triangular_gadget_overhead(idx: usize) -> i32 { /// Calculate copyline MIS overhead for triangular mode (matches Julia formula) fn copyline_overhead_triangular( - line: &problemreductions::rules::unitdiskmapping::CopyLine, + line: &crate::rules::unitdiskmapping::CopyLine, spacing: usize, ) -> i32 { let s = spacing as i32; @@ -479,7 +479,7 @@ fn print_comparison( fn compare_copy_lines( julia_lines: &[CopyLineInfo], - rust_lines: &[problemreductions::rules::unitdiskmapping::CopyLine], + rust_lines: &[crate::rules::unitdiskmapping::CopyLine], ) { println!("Copy lines:"); for jl in julia_lines { @@ -546,7 +546,7 @@ fn test_square_unweighted_petersen() { /// This tests the fix for the bug where connect() was incorrectly implemented. /// Julia's connect_cell! converts plain Occupied cells to Connected at crossing points. fn compare_connected_cells(name: &str) { - use problemreductions::rules::unitdiskmapping::CellState; + use crate::rules::unitdiskmapping::CellState; let julia = load_julia_trace(name, "unweighted"); let edges = get_graph_edges(&julia); @@ -565,7 +565,7 @@ fn compare_connected_cells(name: &str) { let rust_result = map_graph_with_order(num_vertices, &edges, &vertex_order); // Re-create the grid with connections to check Connected cell positions - let mut grid = problemreductions::rules::unitdiskmapping::MappingGrid::with_padding( + let mut grid = crate::rules::unitdiskmapping::MappingGrid::with_padding( rust_result.grid_graph.size().0, rust_result.grid_graph.size().1, rust_result.spacing, diff --git a/tests/rules/unitdiskmapping/map_graph.rs b/src/tests_unit/unitdiskmapping_algorithms/map_graph.rs similarity index 98% rename from tests/rules/unitdiskmapping/map_graph.rs rename to src/tests_unit/unitdiskmapping_algorithms/map_graph.rs index df22f80..90df6bd 100644 --- a/tests/rules/unitdiskmapping/map_graph.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/map_graph.rs @@ -3,8 +3,8 @@ //! Tests square lattice mapping, MappingResult, and config_back. 
use super::common::{is_independent_set, solve_mis, solve_mis_config}; -use problemreductions::rules::unitdiskmapping::{map_graph, map_graph_with_order, MappingResult}; -use problemreductions::topology::{smallgraph, Graph, GridType}; +use crate::rules::unitdiskmapping::{map_graph, map_graph_with_order, MappingResult}; +use crate::topology::{smallgraph, Graph, GridType}; // === Square Lattice Basic Tests === @@ -489,7 +489,7 @@ fn test_grid_graph_nodes_have_weights() { // === Tape Entry and Ruleset Tests === -use problemreductions::rules::unitdiskmapping::ksg::{ +use crate::rules::unitdiskmapping::ksg::{ crossing_ruleset_indices, tape_entry_mis_overhead, KsgTapeEntry, }; diff --git a/tests/rules/unitdiskmapping/mapping_result.rs b/src/tests_unit/unitdiskmapping_algorithms/mapping_result.rs similarity index 97% rename from tests/rules/unitdiskmapping/mapping_result.rs rename to src/tests_unit/unitdiskmapping_algorithms/mapping_result.rs index 7fe22c5..e5b0ffd 100644 --- a/tests/rules/unitdiskmapping/mapping_result.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/mapping_result.rs @@ -1,7 +1,7 @@ //! Tests for MappingResult utility methods and unapply functionality. -use problemreductions::rules::unitdiskmapping::{ksg, map_graph}; -use problemreductions::topology::{smallgraph, Graph}; +use crate::rules::unitdiskmapping::{ksg, map_graph}; +use crate::topology::{smallgraph, Graph}; // === MappingResult Utility Methods === @@ -126,7 +126,7 @@ fn test_weighted_mapping_result_format_config() { #[test] fn test_unapply_gadgets_empty_tape() { - use problemreductions::rules::unitdiskmapping::ksg::unapply_gadgets; + use crate::rules::unitdiskmapping::ksg::unapply_gadgets; let tape = vec![]; let mut config: Vec> = vec![vec![0; 5]; 5]; @@ -137,7 +137,7 @@ fn test_unapply_gadgets_empty_tape() { #[test] fn test_unapply_weighted_gadgets_empty_tape() { - use problemreductions::rules::unitdiskmapping::ksg::unapply_weighted_gadgets; + use crate::rules::unitdiskmapping::ksg::unapply_weighted_gadgets; let tape = vec![]; let mut config: Vec> = vec![vec![0; 5]; 5]; @@ -361,7 +361,7 @@ fn test_mis_size_preserved_house() { #[test] fn test_full_pipeline_triangular_diamond() { use super::common::{is_independent_set, solve_weighted_mis_config}; - use problemreductions::rules::unitdiskmapping::map_graph_triangular; + use crate::rules::unitdiskmapping::map_graph_triangular; let (n, edges) = smallgraph("diamond").unwrap(); let result = map_graph_triangular(n, &edges); @@ -385,7 +385,7 @@ fn test_full_pipeline_triangular_diamond() { #[test] fn test_full_pipeline_triangular_bull() { use super::common::{is_independent_set, solve_weighted_mis_config}; - use problemreductions::rules::unitdiskmapping::map_graph_triangular; + use crate::rules::unitdiskmapping::map_graph_triangular; let (n, edges) = smallgraph("bull").unwrap(); let result = map_graph_triangular(n, &edges); @@ -409,7 +409,7 @@ fn test_full_pipeline_triangular_bull() { #[test] fn test_full_pipeline_triangular_house() { use super::common::{is_independent_set, solve_weighted_mis_config}; - use problemreductions::rules::unitdiskmapping::map_graph_triangular; + use crate::rules::unitdiskmapping::map_graph_triangular; let (n, edges) = smallgraph("house").unwrap(); let result = map_graph_triangular(n, &edges); @@ -434,7 +434,7 @@ fn test_full_pipeline_triangular_house() { #[test] fn test_apply_and_unapply_gadget() { - use problemreductions::rules::unitdiskmapping::{ + use crate::rules::unitdiskmapping::{ apply_gadget, unapply_gadget, CellState, MappingGrid, Pattern, 
Turn, }; @@ -466,7 +466,7 @@ fn test_apply_and_unapply_gadget() { #[test] fn test_apply_gadget_at_various_positions() { - use problemreductions::rules::unitdiskmapping::{ + use crate::rules::unitdiskmapping::{ apply_gadget, CellState, MappingGrid, Pattern, Turn, }; diff --git a/tests/rules/unitdiskmapping/mod.rs b/src/tests_unit/unitdiskmapping_algorithms/mod.rs similarity index 100% rename from tests/rules/unitdiskmapping/mod.rs rename to src/tests_unit/unitdiskmapping_algorithms/mod.rs diff --git a/tests/rules/unitdiskmapping/triangular.rs b/src/tests_unit/unitdiskmapping_algorithms/triangular.rs similarity index 98% rename from tests/rules/unitdiskmapping/triangular.rs rename to src/tests_unit/unitdiskmapping_algorithms/triangular.rs index 568c947..8e797fb 100644 --- a/tests/rules/unitdiskmapping/triangular.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/triangular.rs @@ -1,10 +1,10 @@ //! Tests for triangular lattice mapping (src/rules/mapping/triangular.rs). use super::common::solve_weighted_grid_mis; -use problemreductions::rules::unitdiskmapping::{ +use crate::rules::unitdiskmapping::{ map_graph_triangular, map_graph_triangular_with_order, trace_centers, MappingResult, }; -use problemreductions::topology::{smallgraph, Graph}; +use crate::topology::{smallgraph, Graph}; use std::collections::HashMap; // === Basic Triangular Mapping Tests === @@ -345,8 +345,8 @@ fn test_trace_centers_triangle() { #[test] fn test_triangular_map_config_back_standard_graphs() { use super::common::{is_independent_set, solve_mis, solve_weighted_mis_config}; - use problemreductions::rules::unitdiskmapping::map_weights; - use problemreductions::topology::Graph; + use crate::rules::unitdiskmapping::map_weights; + use crate::topology::Graph; // All standard graphs (excluding tutte/karate which are slow) let graph_names = [ diff --git a/tests/rules/unitdiskmapping/weighted.rs b/src/tests_unit/unitdiskmapping_algorithms/weighted.rs similarity index 91% rename from tests/rules/unitdiskmapping/weighted.rs rename to src/tests_unit/unitdiskmapping_algorithms/weighted.rs index 3def581..ed5b474 100644 --- a/tests/rules/unitdiskmapping/weighted.rs +++ b/src/tests_unit/unitdiskmapping_algorithms/weighted.rs @@ -1,10 +1,10 @@ //! Tests for weighted mode functionality (src/rules/mapping/weighted.rs). 
-use problemreductions::rules::unitdiskmapping::{ +use crate::rules::unitdiskmapping::{ copyline_weighted_locations_triangular, map_graph_triangular, map_weights, trace_centers, CopyLine, }; -use problemreductions::topology::Graph; +use crate::topology::Graph; // === Trace Centers Tests === @@ -139,7 +139,7 @@ fn test_map_weights_wrong_length() { #[test] fn test_triangular_weighted_interface() { - use problemreductions::topology::smallgraph; + use crate::topology::smallgraph; let (n, edges) = smallgraph("bull").unwrap(); let result = map_graph_triangular(n, &edges); @@ -155,7 +155,7 @@ fn test_triangular_weighted_interface() { #[test] fn test_triangular_interface_full() { - use problemreductions::topology::smallgraph; + use crate::topology::smallgraph; let (n, edges) = smallgraph("diamond").unwrap(); let result = map_graph_triangular(n, &edges); @@ -223,7 +223,7 @@ fn test_triangular_copyline_weight_invariant() { #[test] fn test_weighted_gadgets_weight_conservation() { // For each weighted gadget, verify weight sums are consistent with MIS properties - use problemreductions::rules::unitdiskmapping::triangular_weighted_ruleset; + use crate::rules::unitdiskmapping::triangular_weighted_ruleset; let ruleset = triangular_weighted_ruleset(); for gadget in &ruleset { @@ -252,7 +252,7 @@ fn test_weighted_gadgets_weight_conservation() { #[test] fn test_weighted_gadgets_positive_weights() { // All individual weights should be positive - use problemreductions::rules::unitdiskmapping::triangular_weighted_ruleset; + use crate::rules::unitdiskmapping::triangular_weighted_ruleset; let ruleset = triangular_weighted_ruleset(); for gadget in &ruleset { @@ -269,8 +269,8 @@ fn test_weighted_gadgets_positive_weights() { #[test] fn test_map_config_back_extracts_valid_is_triangular() { - use problemreductions::rules::unitdiskmapping::map_graph_triangular; - use problemreductions::topology::{smallgraph, Graph}; + use crate::rules::unitdiskmapping::map_graph_triangular; + use crate::topology::{smallgraph, Graph}; let (n, edges) = smallgraph("bull").unwrap(); let result = map_graph_triangular(n, &edges); @@ -316,7 +316,7 @@ fn test_map_weights_preserves_total_weight() { #[test] fn test_trace_centers_consistency_with_config_back() { - use problemreductions::topology::smallgraph; + use crate::topology::smallgraph; let (n, edges) = smallgraph("diamond").unwrap(); let result = map_graph_triangular(n, &edges); @@ -369,8 +369,8 @@ fn test_trace_centers_consistency_with_config_back() { /// - Branch: source node 4 → weight 3; mapped node 2 → weight 3 #[test] fn test_square_gadget_trivial_turn_weights() { - use problemreductions::rules::unitdiskmapping::Pattern; - use problemreductions::rules::unitdiskmapping::TrivialTurn; + use crate::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::TrivialTurn; let trivial_turn = TrivialTurn; let source_weights = trivial_turn.source_weights(); @@ -411,8 +411,8 @@ fn test_square_gadget_trivial_turn_weights() { #[test] fn test_square_gadget_endturn_weights() { - use problemreductions::rules::unitdiskmapping::EndTurn; - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::EndTurn; + use crate::rules::unitdiskmapping::Pattern; let endturn = EndTurn; let source_weights = endturn.source_weights(); @@ -449,8 +449,8 @@ fn test_square_gadget_endturn_weights() { #[test] fn test_square_gadget_tcon_weights() { - use problemreductions::rules::unitdiskmapping::Pattern; - use problemreductions::rules::unitdiskmapping::TCon; + use 
crate::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::TCon; let tcon = TCon; let source_weights = tcon.source_weights(); @@ -499,8 +499,8 @@ fn test_square_gadget_tcon_weights() { #[test] fn test_square_gadget_branchfixb_weights() { - use problemreductions::rules::unitdiskmapping::BranchFixB; - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::BranchFixB; + use crate::rules::unitdiskmapping::Pattern; let branchfixb = BranchFixB; let source_weights = branchfixb.source_weights(); @@ -542,8 +542,8 @@ fn test_square_gadget_branchfixb_weights() { #[test] fn test_square_gadget_branch_weights() { - use problemreductions::rules::unitdiskmapping::Branch; - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::Branch; + use crate::rules::unitdiskmapping::Pattern; let branch = Branch; let source_weights = branch.source_weights(); @@ -576,8 +576,8 @@ fn test_square_gadget_branch_weights() { #[test] fn test_square_gadget_default_weights_cross_false() { - use problemreductions::rules::unitdiskmapping::Cross; - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::Cross; + use crate::rules::unitdiskmapping::Pattern; let cross = Cross::<false>; for &w in &cross.source_weights() { @@ -590,8 +590,8 @@ fn test_square_gadget_default_weights_cross_false() { #[test] fn test_square_gadget_default_weights_cross_true() { - use problemreductions::rules::unitdiskmapping::Cross; - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::Cross; + use crate::rules::unitdiskmapping::Pattern; let cross = Cross::<true>; for &w in &cross.source_weights() { @@ -604,8 +604,8 @@ fn test_square_gadget_default_weights_cross_true() { #[test] fn test_square_gadget_default_weights_turn() { - use problemreductions::rules::unitdiskmapping::Pattern; - use problemreductions::rules::unitdiskmapping::Turn; + use crate::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::Turn; let turn = Turn; for &w in &turn.source_weights() { @@ -618,8 +618,8 @@ fn test_square_gadget_default_weights_turn() { #[test] fn test_square_gadget_default_weights_wturn() { - use problemreductions::rules::unitdiskmapping::Pattern; - use problemreductions::rules::unitdiskmapping::WTurn; + use crate::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::WTurn; let wturn = WTurn; for &w in &wturn.source_weights() { @@ -632,8 +632,8 @@ fn test_square_gadget_default_weights_wturn() { #[test] fn test_square_gadget_default_weights_branchfix() { - use problemreductions::rules::unitdiskmapping::BranchFix; - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::BranchFix; + use crate::rules::unitdiskmapping::Pattern; let branchfix = BranchFix; for &w in &branchfix.source_weights() { @@ -646,8 +646,8 @@ fn test_square_gadget_default_weights_branchfix() { #[test] fn test_square_danglinleg_weights() { - use problemreductions::rules::unitdiskmapping::DanglingLeg; - use problemreductions::rules::unitdiskmapping::Pattern; + use crate::rules::unitdiskmapping::DanglingLeg; + use crate::rules::unitdiskmapping::Pattern; let danglinleg = DanglingLeg; let source_weights = danglinleg.source_weights(); @@ -698,10 +698,10 @@ fn test_square_danglinleg_weights() { #[test] fn test_weighted_map_config_back_standard_graphs() { use super::common::{is_independent_set, solve_mis}; - use problemreductions::models::optimization::{LinearConstraint,
ObjectiveSense, ILP}; - use problemreductions::rules::unitdiskmapping::{map_graph_triangular, map_weights}; - use problemreductions::solvers::ILPSolver; - use problemreductions::topology::{smallgraph, Graph}; + use crate::models::optimization::{LinearConstraint, ObjectiveSense, ILP}; + use crate::rules::unitdiskmapping::{map_graph_triangular, map_weights}; + use crate::solvers::ILPSolver; + use crate::topology::{smallgraph, Graph}; // All standard graphs (excluding tutte/karate which are slow) let graph_names = [ diff --git a/src/tests_unit/variant.rs b/src/tests_unit/variant.rs new file mode 100644 index 0000000..fcaba91 --- /dev/null +++ b/src/tests_unit/variant.rs @@ -0,0 +1,143 @@ +use super::*; + +#[test] +fn test_short_type_name_primitive() { + assert_eq!(short_type_name::(), "i32"); + assert_eq!(short_type_name::(), "f64"); +} + +#[test] +fn test_short_type_name_struct() { + struct MyStruct; + assert_eq!(short_type_name::(), "MyStruct"); +} + +#[test] +fn test_const_usize_str() { + assert_eq!(const_usize_str::<1>(), "1"); + assert_eq!(const_usize_str::<2>(), "2"); + assert_eq!(const_usize_str::<3>(), "3"); + assert_eq!(const_usize_str::<4>(), "4"); + assert_eq!(const_usize_str::<5>(), "5"); + assert_eq!(const_usize_str::<6>(), "6"); + assert_eq!(const_usize_str::<7>(), "7"); + assert_eq!(const_usize_str::<8>(), "8"); + assert_eq!(const_usize_str::<9>(), "9"); + assert_eq!(const_usize_str::<10>(), "10"); + assert_eq!(const_usize_str::<11>(), "N"); + assert_eq!(const_usize_str::<100>(), "N"); +} + +#[test] +fn test_variant_for_problems() { + use crate::models::graph::{ + DominatingSet, IndependentSet, KColoring, Matching, MaxCut, MaximalIS, VertexCovering, + }; + use crate::models::optimization::{SpinGlass, QUBO}; + use crate::models::satisfiability::{KSatisfiability, Satisfiability}; + use crate::models::set::{SetCovering, SetPacking}; + use crate::models::specialized::{BicliqueCover, CircuitSAT, Factoring, PaintShop, BMF}; + use crate::topology::SimpleGraph; + use crate::traits::Problem; + + // Test IndependentSet variants + let v = IndependentSet::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].0, "graph"); + assert_eq!(v[0].1, "SimpleGraph"); + assert_eq!(v[1].0, "weight"); + assert_eq!(v[1].1, "i32"); + + let v = IndependentSet::::variant(); + assert_eq!(v[1].1, "f64"); + + // Test VertexCovering + let v = VertexCovering::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + assert_eq!(v[1].1, "i32"); + + // Test DominatingSet + let v = DominatingSet::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + + // Test Matching + let v = Matching::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + + // Test MaxCut + let v = MaxCut::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + + let v = MaxCut::::variant(); + assert_eq!(v[1].1, "f64"); + + // Test KColoring (has K, graph, and weight parameters) + let v = KColoring::<3, SimpleGraph, i32>::variant(); + assert_eq!(v.len(), 3); + assert_eq!(v[0], ("k", "3")); + assert_eq!(v[1], ("graph", "SimpleGraph")); + assert_eq!(v[2], ("weight", "i32")); + + // Test MaximalIS (no weight parameter) + let v = MaximalIS::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + + // Test Satisfiability + let v = Satisfiability::::variant(); + assert_eq!(v.len(), 2); + + // Test KSatisfiability + let v = KSatisfiability::<3, i32>::variant(); + assert_eq!(v.len(), 2); + + // Test SetPacking + let v = 
SetPacking::::variant(); + assert_eq!(v.len(), 2); + + // Test SetCovering + let v = SetCovering::::variant(); + assert_eq!(v.len(), 2); + + // Test SpinGlass + let v = SpinGlass::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[1].1, "f64"); + + let v = SpinGlass::::variant(); + assert_eq!(v[1].1, "i32"); + + // Test QUBO + let v = QUBO::::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[1].1, "f64"); + + // Test CircuitSAT + let v = CircuitSAT::::variant(); + assert_eq!(v.len(), 2); + + // Test Factoring (no type parameters) + let v = Factoring::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + assert_eq!(v[1].1, "i32"); + + // Test BicliqueCover (no type parameters) + let v = BicliqueCover::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + + // Test BMF (no type parameters) + let v = BMF::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); + + // Test PaintShop (no type parameters) + let v = PaintShop::variant(); + assert_eq!(v.len(), 2); + assert_eq!(v[0].1, "SimpleGraph"); +} diff --git a/src/topology/graph.rs b/src/topology/graph.rs index a151c55..f548790 100644 --- a/src/topology/graph.rs +++ b/src/topology/graph.rs @@ -261,140 +261,5 @@ impl PartialEq for SimpleGraph { impl Eq for SimpleGraph {} #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_simple_graph_new() { - let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); - assert_eq!(graph.num_vertices(), 4); - assert_eq!(graph.num_edges(), 3); - } - - #[test] - fn test_simple_graph_empty() { - let graph = SimpleGraph::empty(5); - assert_eq!(graph.num_vertices(), 5); - assert_eq!(graph.num_edges(), 0); - } - - #[test] - fn test_simple_graph_complete() { - let graph = SimpleGraph::complete(4); - assert_eq!(graph.num_vertices(), 4); - assert_eq!(graph.num_edges(), 6); // C(4,2) = 6 - } - - #[test] - fn test_simple_graph_path() { - let graph = SimpleGraph::path(5); - assert_eq!(graph.num_vertices(), 5); - assert_eq!(graph.num_edges(), 4); - assert!(graph.has_edge(0, 1)); - assert!(graph.has_edge(3, 4)); - assert!(!graph.has_edge(0, 4)); - } - - #[test] - fn test_simple_graph_cycle() { - let graph = SimpleGraph::cycle(4); - assert_eq!(graph.num_vertices(), 4); - assert_eq!(graph.num_edges(), 4); - assert!(graph.has_edge(0, 1)); - assert!(graph.has_edge(3, 0)); // Cycle edge - } - - #[test] - fn test_simple_graph_star() { - let graph = SimpleGraph::star(5); - assert_eq!(graph.num_vertices(), 5); - assert_eq!(graph.num_edges(), 4); - assert!(graph.has_edge(0, 1)); - assert!(graph.has_edge(0, 4)); - assert!(!graph.has_edge(1, 2)); - } - - #[test] - fn test_simple_graph_grid() { - let graph = SimpleGraph::grid(2, 3); - assert_eq!(graph.num_vertices(), 6); - // 2 rows: 2 horizontal edges per row = 4 - // 3 cols: 1 vertical edge per col = 3 - assert_eq!(graph.num_edges(), 7); - } - - #[test] - fn test_simple_graph_has_edge() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - assert!(graph.has_edge(0, 1)); - assert!(graph.has_edge(1, 0)); // Undirected - assert!(graph.has_edge(1, 2)); - assert!(!graph.has_edge(0, 2)); - } - - #[test] - fn test_simple_graph_neighbors() { - let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); - let mut neighbors = graph.neighbors(0); - neighbors.sort(); - assert_eq!(neighbors, vec![1, 2, 3]); - assert_eq!(graph.neighbors(1), vec![0]); - } - - #[test] - fn test_simple_graph_degree() { - let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); - assert_eq!(graph.degree(0), 3); - 
assert_eq!(graph.degree(1), 1); - } - - #[test] - fn test_simple_graph_is_empty() { - let empty = SimpleGraph::empty(0); - assert!(empty.is_empty()); - - let non_empty = SimpleGraph::empty(1); - assert!(!non_empty.is_empty()); - } - - #[test] - fn test_simple_graph_for_each_edge() { - let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let mut count = 0; - graph.for_each_edge(|_, _| count += 1); - assert_eq!(count, 2); - } - - #[test] - fn test_simple_graph_eq() { - let g1 = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); - let g2 = SimpleGraph::new(3, vec![(1, 2), (0, 1)]); // Different order - let g3 = SimpleGraph::new(3, vec![(0, 1)]); - - assert_eq!(g1, g2); - assert_ne!(g1, g3); - } - - #[test] - #[should_panic(expected = "edge (0, 5) references vertex >= num_vertices")] - fn test_simple_graph_invalid_edge() { - SimpleGraph::new(3, vec![(0, 5)]); - } - - #[test] - fn test_simple_graph_cycle_small() { - // Test cycle with fewer than 3 vertices (should fall back to path) - let graph = SimpleGraph::cycle(2); - assert_eq!(graph.num_vertices(), 2); - assert_eq!(graph.num_edges(), 1); // Path: 0-1 - assert!(graph.has_edge(0, 1)); - } - - #[test] - fn test_simple_graph_eq_different_sizes() { - // Test PartialEq when graphs have different sizes - let g1 = SimpleGraph::new(3, vec![(0, 1)]); - let g2 = SimpleGraph::new(4, vec![(0, 1)]); // Different vertex count - assert_ne!(g1, g2); - } -} +#[path = "../tests_unit/topology/graph.rs"] +mod tests; diff --git a/src/topology/grid_graph.rs b/src/topology/grid_graph.rs index cc6260e..a30d4cc 100644 --- a/src/topology/grid_graph.rs +++ b/src/topology/grid_graph.rs @@ -314,229 +314,5 @@ impl fmt::Display for GridGraph { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_grid_graph_square_basic() { - let nodes = vec![ - GridNode::new(0, 0, 1), - GridNode::new(1, 0, 1), - GridNode::new(0, 1, 1), - ]; - // With radius 1.1: (0,0)-(1,0) dist=1.0 < 1.1, (0,0)-(0,1) dist=1.0 < 1.1, (1,0)-(0,1) dist=sqrt(2)>1.1 - // Using dist < radius (strict), so edges at exactly 1.0 are included with radius 1.1 - let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 1.1); - assert_eq!(grid.num_vertices(), 3); - // Only nodes at (0,0)-(1,0) and (0,0)-(0,1) are within radius 1.1 - assert_eq!(grid.edges().len(), 2); - } - - #[test] - fn test_grid_graph_triangular_basic() { - let nodes = vec![ - GridNode::new(0, 0, 1), - GridNode::new(1, 0, 1), - GridNode::new(0, 1, 1), - ]; - let grid = GridGraph::new( - GridType::Triangular { - offset_even_cols: false, - }, - (2, 2), - nodes, - 1.1, - ); - assert_eq!(grid.num_vertices(), 3); - } - - #[test] - fn test_grid_node_new() { - let node: GridNode = GridNode::new(5, 10, 42); - assert_eq!(node.row, 5); - assert_eq!(node.col, 10); - assert_eq!(node.weight, 42); - } - - #[test] - fn test_grid_graph_square_physical_position() { - let nodes = vec![GridNode::new(3, 4, 1)]; - let grid = GridGraph::new(GridType::Square, (10, 10), nodes, 1.0); - let pos = grid.physical_position(3, 4); - assert_eq!(pos, (3.0, 4.0)); - } - - #[test] - fn test_grid_graph_triangular_physical_position() { - let nodes = vec![GridNode::new(0, 0, 1)]; - let grid = GridGraph::new( - GridType::Triangular { - offset_even_cols: false, - }, - (10, 10), - nodes, - 1.0, - ); - - // Col 0 (even), offset_even_cols = false -> no offset - let pos0 = grid.physical_position(0, 0); - assert!((pos0.0 - 0.0).abs() < 1e-10); - assert!((pos0.1 - 0.0).abs() < 1e-10); - - // Col 1 (odd), offset_even_cols = false -> offset 0.5 - let pos1 = grid.physical_position(0, 
1); - assert!((pos1.0 - 0.5).abs() < 1e-10); - assert!((pos1.1 - (3.0_f64.sqrt() / 2.0)).abs() < 1e-10); - } - - #[test] - fn test_grid_graph_triangular_offset_even() { - let nodes = vec![GridNode::new(0, 0, 1)]; - let grid = GridGraph::new( - GridType::Triangular { - offset_even_cols: true, - }, - (10, 10), - nodes, - 1.0, - ); - - // Col 0 (even), offset_even_cols = true -> offset 0.5 - let pos0 = grid.physical_position(0, 0); - assert!((pos0.0 - 0.5).abs() < 1e-10); - - // Col 1 (odd), offset_even_cols = true -> no offset - let pos1 = grid.physical_position(0, 1); - assert!((pos1.0 - 0.0).abs() < 1e-10); - } - - #[test] - fn test_grid_graph_edges_within_radius() { - // Square grid: place nodes at (0,0), (1,0), (2,0) - // Distance (0,0)-(1,0) = 1.0 - // Distance (0,0)-(2,0) = 2.0 - // Distance (1,0)-(2,0) = 1.0 - let nodes = vec![ - GridNode::new(0, 0, 1), - GridNode::new(1, 0, 1), - GridNode::new(2, 0, 1), - ]; - // Use radius 1.1 since edges are created for dist < radius (strict) - // With radius 1.0, no edges at exact distance 1.0 - // With radius 1.1, edges at distance 1.0 are included - let grid = GridGraph::new(GridType::Square, (3, 1), nodes, 1.1); - - // Only edges within radius 1.1: (0,1) and (1,2) with dist=1.0 - assert_eq!(grid.num_edges(), 2); - assert!(grid.has_edge(0, 1)); - assert!(grid.has_edge(1, 2)); - assert!(!grid.has_edge(0, 2)); // dist=2.0 >= 1.1 - } - - #[test] - fn test_grid_graph_neighbors() { - let nodes = vec![ - GridNode::new(0, 0, 1), - GridNode::new(1, 0, 1), - GridNode::new(0, 1, 1), - ]; - let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 1.5); - - let neighbors_0 = grid.neighbors(0); - assert_eq!(neighbors_0.len(), 2); - assert!(neighbors_0.contains(&1)); - assert!(neighbors_0.contains(&2)); - } - - #[test] - fn test_grid_graph_accessors() { - let nodes = vec![GridNode::new(0, 0, 10), GridNode::new(1, 0, 20)]; - let grid = GridGraph::new(GridType::Square, (5, 5), nodes, 2.0); - - assert_eq!(grid.grid_type(), GridType::Square); - assert_eq!(grid.size(), (5, 5)); - assert_eq!(grid.radius(), 2.0); - assert_eq!(grid.nodes().len(), 2); - assert_eq!(grid.node(0).map(|n| n.weight), Some(10)); - assert_eq!(grid.weight(1), Some(&20)); - assert_eq!(grid.weight(5), None); - } - - #[test] - fn test_grid_graph_node_position() { - let nodes = vec![GridNode::new(2, 3, 1)]; - let grid = GridGraph::new(GridType::Square, (10, 10), nodes, 1.0); - - let pos = grid.node_position(0); - assert_eq!(pos, Some((2.0, 3.0))); - assert_eq!(grid.node_position(1), None); - } - - #[test] - fn test_grid_graph_has_edge_symmetric() { - let nodes = vec![GridNode::new(0, 0, 1), GridNode::new(1, 0, 1)]; - let grid = GridGraph::new(GridType::Square, (2, 1), nodes, 1.5); - - assert!(grid.has_edge(0, 1)); - assert!(grid.has_edge(1, 0)); // Symmetric - } - - #[test] - fn test_grid_graph_empty() { - let nodes: Vec> = vec![]; - let grid = GridGraph::new(GridType::Square, (0, 0), nodes, 1.0); - - assert_eq!(grid.num_vertices(), 0); - assert_eq!(grid.num_edges(), 0); - assert!(grid.is_empty()); - } - - #[test] - fn test_grid_graph_graph_trait() { - let nodes = vec![ - GridNode::new(0, 0, 1), - GridNode::new(1, 0, 1), - GridNode::new(0, 1, 1), - ]; - // With radius 1.1: 2 edges at dist=1.0 (not including diagonal at sqrt(2)>1.1) - // Using dist < radius (strict), so edges at exactly 1.0 are included with radius 1.1 - let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 1.1); - - // Test Graph trait methods - assert_eq!(Graph::num_vertices(&grid), 3); - 
assert_eq!(Graph::num_edges(&grid), 2); - assert_eq!(grid.degree(0), 2); - assert_eq!(grid.degree(1), 1); - assert_eq!(grid.degree(2), 1); - } - - #[test] - fn test_grid_graph_display() { - let nodes = vec![GridNode::new(0, 0, 1), GridNode::new(1, 0, 2)]; - let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 2.0); - - // Test Display trait - let display_str = format!("{}", grid); - assert!(!display_str.is_empty()); - } - - #[test] - fn test_grid_graph_format_empty() { - let nodes: Vec> = vec![]; - let grid = GridGraph::new(GridType::Square, (0, 0), nodes, 1.0); - - // Empty grid should return "(empty grid graph)" - let formatted = grid.format_with_config(None, false); - assert_eq!(formatted, "(empty grid graph)"); - } - - #[test] - fn test_grid_graph_format_with_config() { - let nodes = vec![GridNode::new(0, 0, 1), GridNode::new(1, 0, 1)]; - let grid = GridGraph::new(GridType::Square, (2, 2), nodes, 2.0); - - // Test format with config - let formatted = grid.format_with_config(Some(&[1, 0]), false); - assert!(!formatted.is_empty()); - } -} +#[path = "../tests_unit/topology/grid_graph.rs"] +mod tests; diff --git a/src/topology/hypergraph.rs b/src/topology/hypergraph.rs index ffc9e46..399afa2 100644 --- a/src/topology/hypergraph.rs +++ b/src/topology/hypergraph.rs @@ -147,114 +147,5 @@ impl HyperGraph { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_hypergraph_basic() { - let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); - assert_eq!(hg.num_vertices(), 4); - assert_eq!(hg.num_edges(), 2); - } - - #[test] - fn test_hypergraph_empty() { - let hg = HyperGraph::empty(5); - assert_eq!(hg.num_vertices(), 5); - assert_eq!(hg.num_edges(), 0); - } - - #[test] - fn test_hypergraph_neighbors() { - let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); - let neighbors = hg.neighbors(2); - assert!(neighbors.contains(&0)); - assert!(neighbors.contains(&1)); - assert!(neighbors.contains(&3)); - assert!(!neighbors.contains(&2)); // Not its own neighbor - } - - #[test] - fn test_hypergraph_has_edge() { - let hg = HyperGraph::new(4, vec![vec![0, 1, 2]]); - assert!(hg.has_edge(&[0, 1, 2])); - assert!(hg.has_edge(&[2, 1, 0])); // Order doesn't matter - assert!(!hg.has_edge(&[0, 1])); - assert!(!hg.has_edge(&[0, 1, 3])); - } - - #[test] - fn test_hypergraph_degree() { - let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); - assert_eq!(hg.degree(0), 1); - assert_eq!(hg.degree(2), 2); - assert_eq!(hg.degree(3), 1); - } - - #[test] - fn test_hypergraph_edges_containing() { - let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); - let edges = hg.edges_containing(2); - assert_eq!(edges.len(), 2); - } - - #[test] - fn test_hypergraph_add_edge() { - let mut hg = HyperGraph::empty(4); - hg.add_edge(vec![0, 1]); - hg.add_edge(vec![1, 2, 3]); - assert_eq!(hg.num_edges(), 2); - } - - #[test] - fn test_hypergraph_max_edge_size() { - let hg = HyperGraph::new(4, vec![vec![0, 1], vec![0, 1, 2, 3]]); - assert_eq!(hg.max_edge_size(), 4); - } - - #[test] - fn test_hypergraph_is_regular_graph() { - let regular = HyperGraph::new(3, vec![vec![0, 1], vec![1, 2]]); - assert!(regular.is_regular_graph()); - - let not_regular = HyperGraph::new(4, vec![vec![0, 1, 2]]); - assert!(!not_regular.is_regular_graph()); - } - - #[test] - fn test_hypergraph_to_graph_edges() { - let hg = HyperGraph::new(3, vec![vec![0, 1], vec![1, 2]]); - let edges = hg.to_graph_edges(); - assert!(edges.is_some()); - let edges = edges.unwrap(); - assert_eq!(edges.len(), 2); - } - - #[test] - fn 
test_hypergraph_to_graph_edges_not_regular() { - // Hypergraph with a hyperedge of size 3 (not a regular graph) - let hg = HyperGraph::new(4, vec![vec![0, 1, 2]]); - assert!(hg.to_graph_edges().is_none()); - } - - #[test] - fn test_hypergraph_get_edge() { - let hg = HyperGraph::new(4, vec![vec![0, 1, 2], vec![2, 3]]); - assert_eq!(hg.edge(0), Some(&vec![0, 1, 2])); - assert_eq!(hg.edge(1), Some(&vec![2, 3])); - assert_eq!(hg.edge(2), None); - } - - #[test] - #[should_panic(expected = "vertex index 5 out of bounds")] - fn test_hypergraph_invalid_vertex() { - HyperGraph::new(4, vec![vec![0, 5]]); - } - - #[test] - #[should_panic(expected = "vertex index 4 out of bounds")] - fn test_hypergraph_add_invalid_edge() { - let mut hg = HyperGraph::empty(4); - hg.add_edge(vec![0, 4]); - } -} +#[path = "../tests_unit/topology/hypergraph.rs"] +mod tests; diff --git a/src/topology/small_graphs.rs b/src/topology/small_graphs.rs index 5178851..c0179d4 100644 --- a/src/topology/small_graphs.rs +++ b/src/topology/small_graphs.rs @@ -766,186 +766,5 @@ pub fn available_graphs() -> Vec<&'static str> { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_bull() { - let (n, edges) = bull(); - assert_eq!(n, 5); - assert_eq!(edges.len(), 5); - } - - #[test] - fn test_chvatal() { - let (n, edges) = chvatal(); - assert_eq!(n, 12); - assert_eq!(edges.len(), 24); - } - - #[test] - fn test_cubical() { - let (n, edges) = cubical(); - assert_eq!(n, 8); - assert_eq!(edges.len(), 12); - } - - #[test] - fn test_desargues() { - let (n, edges) = desargues(); - assert_eq!(n, 20); - assert_eq!(edges.len(), 30); - } - - #[test] - fn test_diamond() { - let (n, edges) = diamond(); - assert_eq!(n, 4); - assert_eq!(edges.len(), 5); - } - - #[test] - fn test_dodecahedral() { - let (n, edges) = dodecahedral(); - assert_eq!(n, 20); - assert_eq!(edges.len(), 30); - } - - #[test] - fn test_frucht() { - let (n, edges) = frucht(); - assert_eq!(n, 12); - assert_eq!(edges.len(), 18); - } - - #[test] - fn test_heawood() { - let (n, edges) = heawood(); - assert_eq!(n, 14); - assert_eq!(edges.len(), 21); - } - - #[test] - fn test_house() { - let (n, edges) = house(); - assert_eq!(n, 5); - assert_eq!(edges.len(), 6); - } - - #[test] - fn test_housex() { - let (n, edges) = housex(); - assert_eq!(n, 5); - assert_eq!(edges.len(), 8); - } - - #[test] - fn test_icosahedral() { - let (n, edges) = icosahedral(); - assert_eq!(n, 12); - assert_eq!(edges.len(), 30); - } - - #[test] - fn test_karate() { - let (n, edges) = karate(); - assert_eq!(n, 34); - assert_eq!(edges.len(), 78); - } - - #[test] - fn test_krackhardtkite() { - let (n, edges) = krackhardtkite(); - assert_eq!(n, 10); - assert_eq!(edges.len(), 18); - } - - #[test] - fn test_moebiuskantor() { - let (n, edges) = moebiuskantor(); - assert_eq!(n, 16); - assert_eq!(edges.len(), 24); - } - - #[test] - fn test_octahedral() { - let (n, edges) = octahedral(); - assert_eq!(n, 6); - assert_eq!(edges.len(), 12); - } - - #[test] - fn test_pappus() { - let (n, edges) = pappus(); - assert_eq!(n, 18); - assert_eq!(edges.len(), 27); - } - - #[test] - fn test_petersen() { - let (n, edges) = petersen(); - assert_eq!(n, 10); - assert_eq!(edges.len(), 15); - } - - #[test] - fn test_sedgewickmaze() { - let (n, edges) = sedgewickmaze(); - assert_eq!(n, 8); - assert_eq!(edges.len(), 10); - } - - #[test] - fn test_tetrahedral() { - let (n, edges) = tetrahedral(); - assert_eq!(n, 4); - assert_eq!(edges.len(), 6); - } - - #[test] - fn test_truncatedcube() { - let (n, edges) = truncatedcube(); - 
assert_eq!(n, 24); - assert_eq!(edges.len(), 36); - } - - #[test] - fn test_truncatedtetrahedron() { - let (n, edges) = truncatedtetrahedron(); - assert_eq!(n, 12); - assert_eq!(edges.len(), 18); - } - - #[test] - fn test_tutte() { - let (n, edges) = tutte(); - assert_eq!(n, 46); - assert_eq!(edges.len(), 69); - } - - #[test] - fn test_smallgraph() { - assert!(smallgraph("petersen").is_some()); - assert!(smallgraph("bull").is_some()); - assert!(smallgraph("nonexistent").is_none()); - } - - #[test] - fn test_available_graphs() { - let graphs = available_graphs(); - assert_eq!(graphs.len(), 22); - assert!(graphs.contains(&"petersen")); - } - - #[test] - fn test_all_graphs_have_valid_edges() { - for name in available_graphs() { - let (n, edges) = smallgraph(name).unwrap(); - for (u, v) in edges { - assert!(u < n, "{} has invalid edge: {} >= {}", name, u, n); - assert!(v < n, "{} has invalid edge: {} >= {}", name, v, n); - assert!(u != v, "{} has self-loop", name); - } - } - } -} +#[path = "../tests_unit/topology/small_graphs.rs"] +mod tests; diff --git a/src/topology/unit_disk_graph.rs b/src/topology/unit_disk_graph.rs index 750743a..921ce0a 100644 --- a/src/topology/unit_disk_graph.rs +++ b/src/topology/unit_disk_graph.rs @@ -229,141 +229,5 @@ impl Graph for UnitDiskGraph { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_udg_basic() { - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (3.0, 0.0)], 1.0); - assert_eq!(udg.num_vertices(), 3); - assert_eq!(udg.num_edges(), 1); // Only 0-1 are within distance 1 - } - - #[test] - fn test_udg_unit() { - let udg = UnitDiskGraph::unit(vec![(0.0, 0.0), (0.5, 0.5)]); - assert_eq!(udg.radius(), 1.0); - // Distance is sqrt(0.5^2 + 0.5^2) ≈ 0.707 < 1, so connected - assert_eq!(udg.num_edges(), 1); - } - - #[test] - fn test_udg_has_edge() { - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (3.0, 0.0)], 1.0); - assert!(udg.has_edge(0, 1)); - assert!(udg.has_edge(1, 0)); // Symmetric - assert!(!udg.has_edge(0, 2)); - assert!(!udg.has_edge(1, 2)); - } - - #[test] - fn test_udg_neighbors() { - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (0.5, 0.5)], 1.0); - let neighbors = udg.neighbors(0); - // 0 is within 1.0 of both 1 and 2 - assert!(neighbors.contains(&1)); - assert!(neighbors.contains(&2)); - } - - #[test] - fn test_udg_degree() { - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (5.0, 5.0)], 1.5); - // Vertex 0 is connected to 1 and 2 - assert_eq!(udg.degree(0), 2); - // Vertex 3 is isolated - assert_eq!(udg.degree(3), 0); - } - - #[test] - fn test_udg_vertex_distance() { - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (3.0, 4.0)], 10.0); - let dist = udg.vertex_distance(0, 1); - assert_eq!(dist, Some(5.0)); // 3-4-5 triangle - } - - #[test] - fn test_udg_position() { - let udg = UnitDiskGraph::new(vec![(1.0, 2.0), (3.0, 4.0)], 1.0); - assert_eq!(udg.position(0), Some((1.0, 2.0))); - assert_eq!(udg.position(1), Some((3.0, 4.0))); - assert_eq!(udg.position(2), None); - } - - #[test] - fn test_udg_bounding_box() { - let udg = UnitDiskGraph::new(vec![(1.0, 2.0), (3.0, 4.0), (-1.0, 0.0)], 1.0); - let bbox = udg.bounding_box(); - assert!(bbox.is_some()); - let ((min_x, min_y), (max_x, max_y)) = bbox.unwrap(); - assert_eq!(min_x, -1.0); - assert_eq!(max_x, 3.0); - assert_eq!(min_y, 0.0); - assert_eq!(max_y, 4.0); - } - - #[test] - fn test_udg_empty_bounding_box() { - let udg = UnitDiskGraph::new(vec![], 1.0); - assert!(udg.bounding_box().is_none()); - } - - #[test] - fn test_udg_grid() { 
- let udg = UnitDiskGraph::grid(2, 3, 1.0, 1.0); - assert_eq!(udg.num_vertices(), 6); - // Grid with spacing 1.0 and radius 1.0: only horizontal/vertical neighbors connected - // Row 0: 0-1, 1-2 - // Row 1: 3-4, 4-5 - // Vertical: 0-3, 1-4, 2-5 - assert_eq!(udg.num_edges(), 7); - } - - #[test] - fn test_udg_grid_diagonal() { - // With radius > sqrt(2), diagonals are also connected - let udg = UnitDiskGraph::grid(2, 2, 1.0, 1.5); - assert_eq!(udg.num_vertices(), 4); - // All pairs are connected (4 edges: 0-1, 0-2, 0-3, 1-2, 1-3, 2-3) - // Actually: 0-1 (1.0), 0-2 (1.0), 1-3 (1.0), 2-3 (1.0), 0-3 (sqrt(2)≈1.41), 1-2 (sqrt(2)≈1.41) - assert_eq!(udg.num_edges(), 6); - } - - #[test] - fn test_udg_edges_list() { - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0)], 1.0); - let edges = udg.edges(); - assert_eq!(edges.len(), 1); - assert_eq!(edges[0], (0, 1)); - } - - #[test] - fn test_udg_positions() { - let udg = UnitDiskGraph::new(vec![(1.0, 2.0), (3.0, 4.0)], 1.0); - let positions = udg.positions(); - assert_eq!(positions.len(), 2); - assert_eq!(positions[0], (1.0, 2.0)); - assert_eq!(positions[1], (3.0, 4.0)); - } - - #[test] - fn test_udg_vertex_distance_invalid() { - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0)], 1.0); - assert_eq!(udg.vertex_distance(0, 5), None); - assert_eq!(udg.vertex_distance(5, 0), None); - assert_eq!(udg.vertex_distance(5, 6), None); - } - - #[test] - fn test_udg_graph_trait() { - // Test the Graph trait implementation - let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (1.0, 0.0), (0.5, 0.5)], 1.0); - // Use Graph trait methods - assert_eq!(Graph::num_vertices(&udg), 3); - assert!(Graph::num_edges(&udg) > 0); - assert!(Graph::has_edge(&udg, 0, 1)); - let edges = Graph::edges(&udg); - assert!(!edges.is_empty()); - let neighbors = Graph::neighbors(&udg, 0); - assert!(neighbors.contains(&1)); - } -} +#[path = "../tests_unit/topology/unit_disk_graph.rs"] +mod tests; diff --git a/src/traits.rs b/src/traits.rs index 1fbb2a4..8716bb2 100644 --- a/src/traits.rs +++ b/src/traits.rs @@ -114,434 +114,5 @@ pub fn csp_solution_size( } #[cfg(test)] -mod tests { - use super::*; - - // A simple test problem: select binary variables to maximize sum of weights - #[derive(Clone)] - struct SimpleWeightedProblem { - weights: Vec, - } - - impl Problem for SimpleWeightedProblem { - const NAME: &'static str = "SimpleWeightedProblem"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "i32")] - } - - type Size = i32; - - fn num_variables(&self) -> usize { - self.weights.len() - } - - fn num_flavors(&self) -> usize { - 2 - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![("variables", self.weights.len())]) - } - - fn energy_mode(&self) -> EnergyMode { - EnergyMode::LargerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - let sum: i32 = config - .iter() - .zip(&self.weights) - .map(|(&c, &w)| if c == 1 { w } else { 0 }) - .sum(); - SolutionSize::valid(sum) - } - } - - // A simple CSP for testing - #[derive(Clone)] - struct SimpleCsp { - num_vars: usize, - } - - impl Problem for SimpleCsp { - const NAME: &'static str = "SimpleCsp"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "i32")] - } - - type Size = i32; - - fn num_variables(&self) -> usize { - self.num_vars - } - - fn num_flavors(&self) -> usize { - 2 - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![("variables", self.num_vars)]) - } - - fn 
energy_mode(&self) -> EnergyMode { - EnergyMode::LargerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - csp_solution_size(self, config) - } - } - - impl ConstraintSatisfactionProblem for SimpleCsp { - fn constraints(&self) -> Vec { - // Constraint: at most one variable can be 1 - if self.num_vars >= 2 { - vec![LocalConstraint::new( - 2, - vec![0, 1], - vec![true, true, true, false], // (0,0), (0,1), (1,0) OK; (1,1) invalid - )] - } else { - vec![] - } - } - - fn objectives(&self) -> Vec> { - // Each variable contributes 1 if selected - (0..self.num_vars) - .map(|i| LocalSolutionSize::new(2, vec![i], vec![0, 1])) - .collect() - } - - fn weights(&self) -> Vec { - vec![1; self.num_vars] - } - - fn set_weights(&mut self, _weights: Vec) {} - - fn is_weighted(&self) -> bool { - false - } - } - - #[test] - fn test_variant_for_test_problems() { - // Test that variant() works for test problems - let v = SimpleWeightedProblem::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0], ("graph", "SimpleGraph")); - assert_eq!(v[1], ("weight", "i32")); - - let v = SimpleCsp::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0], ("graph", "SimpleGraph")); - assert_eq!(v[1], ("weight", "i32")); - - let v = MultiFlavorProblem::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0], ("graph", "SimpleGraph")); - assert_eq!(v[1], ("weight", "i32")); - } - - #[test] - fn test_simple_problem() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2, 3], - }; - - assert_eq!(problem.num_variables(), 3); - assert_eq!(problem.num_flavors(), 2); - assert_eq!(problem.variables(), 0..3); - assert_eq!(problem.flavors(), vec![0, 1]); - - let sol = problem.solution_size(&[0, 0, 0]); - assert_eq!(sol.size, 0); - assert!(sol.is_valid); - - let sol = problem.solution_size(&[1, 1, 1]); - assert_eq!(sol.size, 6); - assert!(sol.is_valid); - - let sol = problem.solution_size(&[1, 0, 1]); - assert_eq!(sol.size, 4); - assert!(sol.is_valid); - } - - #[test] - fn test_valid_config() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2, 3], - }; - - assert!(problem.is_valid_config(&[0, 1, 0])); - assert!(problem.is_valid_config(&[1, 1, 1])); - assert!(!problem.is_valid_config(&[0, 2, 0])); // invalid flavor - assert!(!problem.is_valid_config(&[0, 1])); // wrong length - assert!(!problem.is_valid_config(&[0, 1, 0, 1])); // wrong length - } - - #[test] - fn test_batch_evaluation() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2, 3], - }; - - let configs = vec![vec![0, 0, 0], vec![1, 1, 1], vec![1, 0, 1]]; - - let results = problem.solution_size_multiple(&configs); - assert_eq!(results.len(), 3); - assert_eq!(results[0].size, 0); - assert_eq!(results[1].size, 6); - assert_eq!(results[2].size, 4); - } - - #[test] - fn test_csp_solution_size() { - let problem = SimpleCsp { num_vars: 3 }; - - // Test valid configurations - let sol = problem.solution_size(&[0, 0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - - let sol = problem.solution_size(&[1, 0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - let sol = problem.solution_size(&[0, 1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - // Test invalid configuration (both 0 and 1 are 1) - let sol = problem.solution_size(&[1, 1, 0]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 2); - } - - #[test] - fn test_csp_is_satisfied() { - let problem = SimpleCsp { num_vars: 3 }; - - assert!(problem.is_satisfied(&[0, 0, 0])); - assert!(problem.is_satisfied(&[1, 0, 0])); - 
assert!(problem.is_satisfied(&[0, 1, 0])); - assert!(!problem.is_satisfied(&[1, 1, 0])); - } - - #[test] - fn test_csp_compute_objective() { - let problem = SimpleCsp { num_vars: 3 }; - - assert_eq!(problem.compute_objective(&[0, 0, 0]), 0); - assert_eq!(problem.compute_objective(&[1, 0, 0]), 1); - assert_eq!(problem.compute_objective(&[1, 1, 0]), 2); - assert_eq!(problem.compute_objective(&[1, 1, 1]), 3); - } - - #[test] - fn test_csp_single_variable() { - // Test CSP with num_vars = 1 (no constraints, empty constraint list) - let problem = SimpleCsp { num_vars: 1 }; - - assert!(problem.constraints().is_empty()); - assert!(problem.is_satisfied(&[0])); // Always satisfied with no constraints - assert!(problem.is_satisfied(&[1])); - - let sol = problem.solution_size(&[0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - - let sol = problem.solution_size(&[1]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - } - - #[test] - fn test_csp_weights_and_weighted() { - let problem = SimpleCsp { num_vars: 3 }; - assert_eq!(problem.weights(), vec![1, 1, 1]); - assert!(!problem.is_weighted()); - } - - #[test] - fn test_csp_set_weights() { - let mut problem = SimpleCsp { num_vars: 3 }; - problem.set_weights(vec![10, 20, 30]); - // For SimpleCsp, set_weights is a no-op, so this just tests the call works - assert!(!problem.is_weighted()); - } - - #[test] - fn test_problem_size_metadata() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2, 3, 4, 5], - }; - - let size = problem.problem_size(); - assert_eq!(size.get("variables"), Some(5)); - } - - #[test] - fn test_energy_mode() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2, 3], - }; - assert!(problem.energy_mode().is_maximization()); - } - - #[test] - fn test_batch_evaluation_empty() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2, 3], - }; - - let configs: Vec> = vec![]; - let results = problem.solution_size_multiple(&configs); - assert!(results.is_empty()); - } - - #[test] - fn test_is_valid_config_empty_problem() { - let problem = SimpleWeightedProblem { weights: vec![] }; - - assert_eq!(problem.num_variables(), 0); - assert!(problem.is_valid_config(&[])); // Empty config for empty problem - assert!(!problem.is_valid_config(&[0])); // Non-empty config is invalid - } - - #[test] - fn test_variables_range() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2, 3, 4, 5], - }; - - let vars: Vec = problem.variables().collect(); - assert_eq!(vars, vec![0, 1, 2, 3, 4]); - } - - #[test] - fn test_flavors_list() { - let problem = SimpleWeightedProblem { - weights: vec![1, 2], - }; - - assert_eq!(problem.flavors(), vec![0, 1]); - } - - #[test] - fn test_csp_objectives() { - let problem = SimpleCsp { num_vars: 3 }; - let objectives = problem.objectives(); - - assert_eq!(objectives.len(), 3); - // Test that each objective evaluates correctly - assert_eq!(objectives[0].evaluate(&[0, 0, 0]), 0); - assert_eq!(objectives[0].evaluate(&[1, 0, 0]), 1); - assert_eq!(objectives[1].evaluate(&[0, 1, 0]), 1); - assert_eq!(objectives[2].evaluate(&[0, 0, 1]), 1); - } - - #[test] - fn test_csp_solution_size_helper_function() { - let problem = SimpleCsp { num_vars: 2 }; - - // Test via the helper function directly - let sol = csp_solution_size(&problem, &[0, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 0); - - let sol = csp_solution_size(&problem, &[1, 0]); - assert!(sol.is_valid); - assert_eq!(sol.size, 1); - - let sol = csp_solution_size(&problem, &[1, 1]); - assert!(!sol.is_valid); - assert_eq!(sol.size, 
2); - } - - // Test problem with more than 2 flavors - #[derive(Clone)] - struct MultiFlavorProblem { - num_vars: usize, - num_flavors: usize, - } - - impl Problem for MultiFlavorProblem { - const NAME: &'static str = "MultiFlavorProblem"; - - fn variant() -> Vec<(&'static str, &'static str)> { - vec![("graph", "SimpleGraph"), ("weight", "i32")] - } - - type Size = i32; - - fn num_variables(&self) -> usize { - self.num_vars - } - - fn num_flavors(&self) -> usize { - self.num_flavors - } - - fn problem_size(&self) -> ProblemSize { - ProblemSize::new(vec![ - ("variables", self.num_vars), - ("flavors", self.num_flavors), - ]) - } - - fn energy_mode(&self) -> EnergyMode { - EnergyMode::SmallerSizeIsBetter - } - - fn solution_size(&self, config: &[usize]) -> SolutionSize { - let sum: i32 = config.iter().map(|&c| c as i32).sum(); - SolutionSize::valid(sum) - } - } - - #[test] - fn test_multi_flavor_problem() { - let problem = MultiFlavorProblem { - num_vars: 3, - num_flavors: 4, - }; - - assert_eq!(problem.num_flavors(), 4); - assert_eq!(problem.flavors(), vec![0, 1, 2, 3]); - assert!(problem.energy_mode().is_minimization()); - - // Valid configs - assert!(problem.is_valid_config(&[0, 1, 2])); - assert!(problem.is_valid_config(&[3, 3, 3])); - - // Invalid: flavor out of range - assert!(!problem.is_valid_config(&[0, 4, 0])); - assert!(!problem.is_valid_config(&[5, 0, 0])); - - let sol = problem.solution_size(&[0, 1, 2]); - assert_eq!(sol.size, 3); - - let sol = problem.solution_size(&[3, 3, 3]); - assert_eq!(sol.size, 9); - } - - #[test] - fn test_batch_evaluation_with_multi_flavor() { - let problem = MultiFlavorProblem { - num_vars: 2, - num_flavors: 3, - }; - - let configs = vec![vec![0, 0], vec![1, 1], vec![2, 2], vec![0, 2]]; - let results = problem.solution_size_multiple(&configs); - - assert_eq!(results.len(), 4); - assert_eq!(results[0].size, 0); - assert_eq!(results[1].size, 2); - assert_eq!(results[2].size, 4); - assert_eq!(results[3].size, 2); - } -} +#[path = "tests_unit/traits.rs"] +mod tests; diff --git a/src/truth_table.rs b/src/truth_table.rs index 0d51a6b..cf71b75 100644 --- a/src/truth_table.rs +++ b/src/truth_table.rs @@ -281,200 +281,5 @@ impl TruthTable { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_and_gate() { - let and = TruthTable::and(2); - assert!(!and.evaluate(&[false, false])); - assert!(!and.evaluate(&[true, false])); - assert!(!and.evaluate(&[false, true])); - assert!(and.evaluate(&[true, true])); - } - - #[test] - fn test_or_gate() { - let or = TruthTable::or(2); - assert!(!or.evaluate(&[false, false])); - assert!(or.evaluate(&[true, false])); - assert!(or.evaluate(&[false, true])); - assert!(or.evaluate(&[true, true])); - } - - #[test] - fn test_not_gate() { - let not = TruthTable::not(); - assert!(not.evaluate(&[false])); - assert!(!not.evaluate(&[true])); - } - - #[test] - fn test_xor_gate() { - let xor = TruthTable::xor(2); - assert!(!xor.evaluate(&[false, false])); - assert!(xor.evaluate(&[true, false])); - assert!(xor.evaluate(&[false, true])); - assert!(!xor.evaluate(&[true, true])); - } - - #[test] - fn test_nand_gate() { - let nand = TruthTable::nand(2); - assert!(nand.evaluate(&[false, false])); - assert!(nand.evaluate(&[true, false])); - assert!(nand.evaluate(&[false, true])); - assert!(!nand.evaluate(&[true, true])); - } - - #[test] - fn test_implies() { - let imp = TruthTable::implies(); - assert!(imp.evaluate(&[false, false])); // F -> F = T - assert!(imp.evaluate(&[false, true])); // F -> T = T - assert!(!imp.evaluate(&[true, 
false])); // T -> F = F - assert!(imp.evaluate(&[true, true])); // T -> T = T - } - - #[test] - fn test_from_function() { - let majority = - TruthTable::from_function(3, |input| input.iter().filter(|&&b| b).count() >= 2); - assert!(!majority.evaluate(&[false, false, false])); - assert!(!majority.evaluate(&[true, false, false])); - assert!(majority.evaluate(&[true, true, false])); - assert!(majority.evaluate(&[true, true, true])); - } - - #[test] - fn test_evaluate_config() { - let and = TruthTable::and(2); - assert!(!and.evaluate_config(&[0, 0])); - assert!(!and.evaluate_config(&[1, 0])); - assert!(and.evaluate_config(&[1, 1])); - } - - #[test] - fn test_satisfiable() { - let or = TruthTable::or(2); - assert!(or.is_satisfiable()); - - let contradiction = TruthTable::from_outputs(2, vec![false, false, false, false]); - assert!(!contradiction.is_satisfiable()); - assert!(contradiction.is_contradiction()); - } - - #[test] - fn test_tautology() { - let tautology = TruthTable::from_outputs(2, vec![true, true, true, true]); - assert!(tautology.is_tautology()); - - let or = TruthTable::or(2); - assert!(!or.is_tautology()); - } - - #[test] - fn test_satisfying_assignments() { - let xor = TruthTable::xor(2); - let sat = xor.satisfying_assignments(); - assert_eq!(sat.len(), 2); - assert!(sat.contains(&vec![true, false])); - assert!(sat.contains(&vec![false, true])); - } - - #[test] - fn test_count() { - let and = TruthTable::and(2); - assert_eq!(and.count_ones(), 1); - assert_eq!(and.count_zeros(), 3); - } - - #[test] - fn test_index_to_input() { - let tt = TruthTable::and(3); - assert_eq!(tt.index_to_input(0), vec![false, false, false]); - assert_eq!(tt.index_to_input(1), vec![true, false, false]); - assert_eq!(tt.index_to_input(7), vec![true, true, true]); - } - - #[test] - fn test_outputs_vec() { - let and = TruthTable::and(2); - assert_eq!(and.outputs_vec(), vec![false, false, false, true]); - } - - #[test] - fn test_and_with() { - let a = TruthTable::from_outputs(1, vec![false, true]); - let b = TruthTable::from_outputs(1, vec![true, false]); - let result = a.and_with(&b); - assert_eq!(result.outputs_vec(), vec![false, false]); - } - - #[test] - fn test_or_with() { - let a = TruthTable::from_outputs(1, vec![false, true]); - let b = TruthTable::from_outputs(1, vec![true, false]); - let result = a.or_with(&b); - assert_eq!(result.outputs_vec(), vec![true, true]); - } - - #[test] - fn test_negate() { - let and = TruthTable::and(2); - let nand = and.negate(); - assert_eq!(nand.outputs_vec(), vec![true, true, true, false]); - } - - #[test] - fn test_num_rows() { - let tt = TruthTable::and(3); - assert_eq!(tt.num_rows(), 8); - } - - #[test] - fn test_3_input_and() { - let and3 = TruthTable::and(3); - assert!(!and3.evaluate(&[true, true, false])); - assert!(and3.evaluate(&[true, true, true])); - } - - #[test] - fn test_xnor() { - let xnor = TruthTable::xnor(2); - assert!(xnor.evaluate(&[false, false])); - assert!(!xnor.evaluate(&[true, false])); - assert!(!xnor.evaluate(&[false, true])); - assert!(xnor.evaluate(&[true, true])); - } - - #[test] - fn test_nor() { - let nor = TruthTable::nor(2); - assert!(nor.evaluate(&[false, false])); - assert!(!nor.evaluate(&[true, false])); - assert!(!nor.evaluate(&[false, true])); - assert!(!nor.evaluate(&[true, true])); - } - - #[test] - fn test_serialization() { - let and = TruthTable::and(2); - let json = serde_json::to_string(&and).unwrap(); - let deserialized: TruthTable = serde_json::from_str(&json).unwrap(); - assert_eq!(and, deserialized); - } - - #[test] - 
fn test_outputs() { - let and = TruthTable::and(2); - let outputs = and.outputs(); - assert_eq!(outputs.len(), 4); - } - - #[test] - fn test_num_inputs() { - let and = TruthTable::and(3); - assert_eq!(and.num_inputs(), 3); - } -} +#[path = "tests_unit/truth_table.rs"] +mod tests; diff --git a/src/types.rs b/src/types.rs index 7265ee9..1cb8f29 100644 --- a/src/types.rs +++ b/src/types.rs @@ -294,137 +294,5 @@ impl LocalSolutionSize { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_unweighted() { - let uw = Unweighted; - // Test get() method - assert_eq!(uw.get(0), 1); - assert_eq!(uw.get(100), 1); - assert_eq!(uw.get(usize::MAX), 1); - - // Test Display - assert_eq!(format!("{}", uw), "Unweighted"); - - // Test Clone, Copy, Default - let uw2 = uw; - let _uw3 = uw2; // Copy works (no clone needed) - let _uw4: Unweighted = Default::default(); - - // Test PartialEq - assert_eq!(Unweighted, Unweighted); - } - - #[test] - fn test_energy_mode() { - let max_mode = EnergyMode::LargerSizeIsBetter; - let min_mode = EnergyMode::SmallerSizeIsBetter; - - assert!(max_mode.is_maximization()); - assert!(!max_mode.is_minimization()); - assert!(!min_mode.is_maximization()); - assert!(min_mode.is_minimization()); - - assert!(max_mode.is_better(&10, &5)); - assert!(!max_mode.is_better(&5, &10)); - assert!(min_mode.is_better(&5, &10)); - assert!(!min_mode.is_better(&10, &5)); - - assert!(max_mode.is_better_or_equal(&10, &10)); - assert!(min_mode.is_better_or_equal(&10, &10)); - } - - #[test] - fn test_solution_size() { - let valid = SolutionSize::valid(42); - assert_eq!(valid.size, 42); - assert!(valid.is_valid); - - let invalid = SolutionSize::invalid(0); - assert!(!invalid.is_valid); - - let custom = SolutionSize::new(100, false); - assert_eq!(custom.size, 100); - assert!(!custom.is_valid); - } - - #[test] - fn test_solution_size_display() { - let valid = SolutionSize::valid(42); - assert_eq!(format!("{}", valid), "SolutionSize(42, valid)"); - - let invalid = SolutionSize::invalid(0); - assert_eq!(format!("{}", invalid), "SolutionSize(0, invalid)"); - } - - #[test] - fn test_problem_size() { - let ps = ProblemSize::new(vec![("vertices", 10), ("edges", 20)]); - assert_eq!(ps.get("vertices"), Some(10)); - assert_eq!(ps.get("edges"), Some(20)); - assert_eq!(ps.get("unknown"), None); - } - - #[test] - fn test_problem_size_display() { - let ps = ProblemSize::new(vec![("vertices", 10), ("edges", 20)]); - assert_eq!(format!("{}", ps), "ProblemSize{vertices: 10, edges: 20}"); - - let empty = ProblemSize::new(vec![]); - assert_eq!(format!("{}", empty), "ProblemSize{}"); - - let single = ProblemSize::new(vec![("n", 5)]); - assert_eq!(format!("{}", single), "ProblemSize{n: 5}"); - } - - #[test] - fn test_local_constraint() { - // Binary constraint on 2 variables: only (0,0) and (1,1) are valid - let constraint = LocalConstraint::new(2, vec![0, 1], vec![true, false, false, true]); - - assert!(constraint.is_satisfied(&[0, 0])); - assert!(!constraint.is_satisfied(&[0, 1])); - assert!(!constraint.is_satisfied(&[1, 0])); - assert!(constraint.is_satisfied(&[1, 1])); - assert_eq!(constraint.num_variables(), 2); - } - - #[test] - fn test_local_constraint_out_of_bounds() { - let constraint = LocalConstraint::new(2, vec![5, 6], vec![true, false, false, true]); - // Test with config that doesn't have indices 5 and 6 - defaults to 0 - assert!(constraint.is_satisfied(&[0, 0, 0])); - } - - #[test] - fn test_local_solution_size() { - // Binary objective on 1 variable: weight 0 for 0, weight 5 for 1 - let objective = 
LocalSolutionSize::new(2, vec![0], vec![0, 5]); - - assert_eq!(objective.evaluate(&[0]), 0); - assert_eq!(objective.evaluate(&[1]), 5); - assert_eq!(objective.num_variables(), 1); - } - - #[test] - fn test_local_solution_size_multi_variable() { - // Binary objective on 2 variables - let objective = LocalSolutionSize::new(2, vec![0, 1], vec![0, 1, 2, 3]); - assert_eq!(objective.evaluate(&[0, 0]), 0); - assert_eq!(objective.evaluate(&[0, 1]), 1); - assert_eq!(objective.evaluate(&[1, 0]), 2); - assert_eq!(objective.evaluate(&[1, 1]), 3); - } - - #[test] - fn test_numeric_weight_impls() { - fn assert_numeric_weight() {} - - assert_numeric_weight::(); - assert_numeric_weight::(); - assert_numeric_weight::(); - assert_numeric_weight::(); - } -} +#[path = "tests_unit/types.rs"] +mod tests; diff --git a/src/variant.rs b/src/variant.rs index 6050de5..a6ed3de 100644 --- a/src/variant.rs +++ b/src/variant.rs @@ -40,148 +40,5 @@ pub fn short_type_name() -> &'static str { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_short_type_name_primitive() { - assert_eq!(short_type_name::(), "i32"); - assert_eq!(short_type_name::(), "f64"); - } - - #[test] - fn test_short_type_name_struct() { - struct MyStruct; - assert_eq!(short_type_name::(), "MyStruct"); - } - - #[test] - fn test_const_usize_str() { - assert_eq!(const_usize_str::<1>(), "1"); - assert_eq!(const_usize_str::<2>(), "2"); - assert_eq!(const_usize_str::<3>(), "3"); - assert_eq!(const_usize_str::<4>(), "4"); - assert_eq!(const_usize_str::<5>(), "5"); - assert_eq!(const_usize_str::<6>(), "6"); - assert_eq!(const_usize_str::<7>(), "7"); - assert_eq!(const_usize_str::<8>(), "8"); - assert_eq!(const_usize_str::<9>(), "9"); - assert_eq!(const_usize_str::<10>(), "10"); - assert_eq!(const_usize_str::<11>(), "N"); - assert_eq!(const_usize_str::<100>(), "N"); - } - - #[test] - fn test_variant_for_problems() { - use crate::models::graph::{ - DominatingSet, IndependentSet, KColoring, Matching, MaxCut, MaximalIS, VertexCovering, - }; - use crate::models::optimization::{SpinGlass, QUBO}; - use crate::models::satisfiability::{KSatisfiability, Satisfiability}; - use crate::models::set::{SetCovering, SetPacking}; - use crate::models::specialized::{BicliqueCover, CircuitSAT, Factoring, PaintShop, BMF}; - use crate::topology::SimpleGraph; - use crate::traits::Problem; - - // Test IndependentSet variants - let v = IndependentSet::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].0, "graph"); - assert_eq!(v[0].1, "SimpleGraph"); - assert_eq!(v[1].0, "weight"); - assert_eq!(v[1].1, "i32"); - - let v = IndependentSet::::variant(); - assert_eq!(v[1].1, "f64"); - - // Test VertexCovering - let v = VertexCovering::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - assert_eq!(v[1].1, "i32"); - - // Test DominatingSet - let v = DominatingSet::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - - // Test Matching - let v = Matching::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - - // Test MaxCut - let v = MaxCut::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - - let v = MaxCut::::variant(); - assert_eq!(v[1].1, "f64"); - - // Test KColoring (has K, graph, and weight parameters) - let v = KColoring::<3, SimpleGraph, i32>::variant(); - assert_eq!(v.len(), 3); - assert_eq!(v[0], ("k", "3")); - assert_eq!(v[1], ("graph", "SimpleGraph")); - assert_eq!(v[2], ("weight", "i32")); - - // Test MaximalIS (no weight parameter) - let v = 
MaximalIS::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - - // Test Satisfiability - let v = Satisfiability::::variant(); - assert_eq!(v.len(), 2); - - // Test KSatisfiability - let v = KSatisfiability::<3, i32>::variant(); - assert_eq!(v.len(), 2); - - // Test SetPacking - let v = SetPacking::::variant(); - assert_eq!(v.len(), 2); - - // Test SetCovering - let v = SetCovering::::variant(); - assert_eq!(v.len(), 2); - - // Test SpinGlass - let v = SpinGlass::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[1].1, "f64"); - - let v = SpinGlass::::variant(); - assert_eq!(v[1].1, "i32"); - - // Test QUBO - let v = QUBO::::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[1].1, "f64"); - - // Test CircuitSAT - let v = CircuitSAT::::variant(); - assert_eq!(v.len(), 2); - - // Test Factoring (no type parameters) - let v = Factoring::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - assert_eq!(v[1].1, "i32"); - - // Test BicliqueCover (no type parameters) - let v = BicliqueCover::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - - // Test BMF (no type parameters) - let v = BMF::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - - // Test PaintShop (no type parameters) - let v = PaintShop::variant(); - assert_eq!(v.len(), 2); - assert_eq!(v[0].1, "SimpleGraph"); - } -} +#[path = "tests_unit/variant.rs"] +mod tests; diff --git a/tests/main.rs b/tests/main.rs new file mode 100644 index 0000000..aa16c63 --- /dev/null +++ b/tests/main.rs @@ -0,0 +1,4 @@ +#[path = "suites/integration.rs"] +mod integration; +#[path = "suites/reductions.rs"] +mod reductions; diff --git a/tests/property/graph_properties.rs b/tests/property/graph_properties.rs deleted file mode 100644 index 2ce3c7b..0000000 --- a/tests/property/graph_properties.rs +++ /dev/null @@ -1,151 +0,0 @@ -//! Property-based tests for graph problems. -//! -//! These tests verify mathematical invariants like: -//! - Independent Set complement is Vertex Cover -//! - Solution validity preservation under subsets - -use problemreductions::models::graph::{IndependentSet, VertexCovering}; -use problemreductions::prelude::*; -use problemreductions::topology::SimpleGraph; -use proptest::prelude::*; -use std::collections::HashSet; - -/// Strategy for generating random graphs with up to `max_vertices` vertices. -fn graph_strategy(max_vertices: usize) -> impl Strategy)> { - (1..=max_vertices).prop_flat_map(|n| { - let edge_strategy = (0..n, 0..n) - .prop_filter("no self loops", |(u, v)| u != v) - .prop_map(|(u, v)| if u < v { (u, v) } else { (v, u) }); - - prop::collection::vec(edge_strategy, 0..n * 2).prop_map(move |edges| { - let unique: HashSet<_> = edges.into_iter().collect(); - (n, unique.into_iter().collect()) - }) - }) -} - -proptest! { - /// Property: For any graph, the complement of a maximum independent set - /// is a minimum vertex cover, and their sizes sum to n. 
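(Context for the tests/main.rs hunk above: every top-level file under tests/ is built and linked as its own integration-test binary, so collapsing the previous six entry points into a single main.rs that includes the remaining suites via #[path] leaves one binary to link and run. A further user-facing suite would be registered the same way; the extra suite name below is illustrative only, not part of this patch:

    // tests/main.rs — single integration-test binary
    #[path = "suites/integration.rs"]
    mod integration;
    #[path = "suites/reductions.rs"]
    mod reductions;
    // a hypothetical additional suite would be wired in identically:
    // #[path = "suites/new_suite.rs"]
    // mod new_suite;

With this layout, cargo test --test main runs both suites in one process.)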
- #[test] - fn independent_set_complement_is_vertex_cover((n, edges) in graph_strategy(8)) { - let is_problem = IndependentSet::::new(n, edges.clone()); - let vc_problem = VertexCovering::::new(n, edges); - - let solver = BruteForce::new(); - let is_solutions = solver.find_best(&is_problem); - let vc_solutions = solver.find_best(&vc_problem); - - let is_size: usize = is_solutions[0].iter().sum(); - let vc_size: usize = vc_solutions[0].iter().sum(); - - // IS size + VC size = n (for optimal solutions) - prop_assert_eq!(is_size + vc_size, n); - } - - /// Property: Any subset of a valid independent set is also a valid independent set. - #[test] - fn valid_solution_stays_valid_under_subset((n, edges) in graph_strategy(6)) { - let problem = IndependentSet::::new(n, edges); - let solver = BruteForce::new(); - - for sol in solver.find_best(&problem) { - // Any subset of an IS is also an IS - for i in 0..n { - let mut subset = sol.clone(); - subset[i] = 0; - prop_assert!(problem.solution_size(&subset).is_valid); - } - } - } - - /// Property: A vertex cover with additional vertices is still a valid cover. - #[test] - fn vertex_cover_superset_is_valid((n, edges) in graph_strategy(6)) { - let problem = VertexCovering::::new(n, edges); - let solver = BruteForce::new(); - - for sol in solver.find_best(&problem) { - // Adding any vertex to a VC still gives a valid VC - for i in 0..n { - let mut superset = sol.clone(); - superset[i] = 1; - prop_assert!(problem.solution_size(&superset).is_valid); - } - } - } - - /// Property: The complement of any valid independent set is a valid vertex cover. - #[test] - fn is_complement_is_vc((n, edges) in graph_strategy(7)) { - let is_problem = IndependentSet::::new(n, edges.clone()); - let vc_problem = VertexCovering::::new(n, edges); - let solver = BruteForce::new(); - - // Get all valid independent sets (not just optimal) - for sol in solver.find_best(&is_problem) { - // The complement should be a valid vertex cover - let complement: Vec = sol.iter().map(|&x| 1 - x).collect(); - prop_assert!(vc_problem.solution_size(&complement).is_valid, - "Complement of IS {:?} should be valid VC", sol); - } - } - - /// Property: Empty selection is always a valid (but possibly non-optimal) independent set. - #[test] - fn empty_is_always_valid_is((n, edges) in graph_strategy(10)) { - let problem = IndependentSet::::new(n, edges); - let empty = vec![0; n]; - prop_assert!(problem.solution_size(&empty).is_valid); - } - - /// Property: Full selection is always a valid (but possibly non-optimal) vertex cover - /// (when there is at least one vertex). - #[test] - fn full_is_always_valid_vc((n, edges) in graph_strategy(10)) { - let problem = VertexCovering::::new(n, edges); - let full = vec![1; n]; - prop_assert!(problem.solution_size(&full).is_valid); - } - - /// Property: Solution size is non-negative for independent sets. - #[test] - fn is_size_non_negative((n, edges) in graph_strategy(8)) { - let problem = IndependentSet::::new(n, edges); - let solver = BruteForce::new(); - - for sol in solver.find_best(&problem) { - let size = problem.solution_size(&sol); - prop_assert!(size.size >= 0); - } - } -} - -#[cfg(test)] -mod additional_tests { - use super::*; - - /// Test that the graph strategy generates valid graphs. 
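The invariant driving these property tests is easy to see on a small concrete instance: for a path graph on three vertices, the maximum independent set {0, 2} and the minimum vertex cover {1} partition the vertex set. A minimal sketch using the same constructors and solver as the test above; the `<SimpleGraph, i32>` type parameters are an assumption inferred from the variant assertions elsewhere in this patch:

```rust
use problemreductions::models::graph::{IndependentSet, VertexCovering};
use problemreductions::prelude::*;
use problemreductions::topology::SimpleGraph;

fn complement_sketch() {
    let n = 3;
    let edges = vec![(0, 1), (1, 2)]; // path graph 0 - 1 - 2

    let is_problem = IndependentSet::<SimpleGraph, i32>::new(n, edges.clone());
    let vc_problem = VertexCovering::<SimpleGraph, i32>::new(n, edges);

    let solver = BruteForce::new();
    let is_size: usize = solver.find_best(&is_problem)[0].iter().sum();
    let vc_size: usize = solver.find_best(&vc_problem)[0].iter().sum();

    // Maximum IS has size 2 ({0, 2}), minimum VC has size 1 ({1});
    // their sizes sum to the number of vertices, as the property asserts.
    assert_eq!(is_size + vc_size, n);
}
```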
- #[test] - fn test_graph_strategy_sanity() { - use proptest::test_runner::TestRunner; - - let mut runner = TestRunner::default(); - let strategy = graph_strategy(5); - - for _ in 0..10 { - let (n, edges) = strategy.new_tree(&mut runner).unwrap().current(); - - // Check all edges are valid - for (u, v) in &edges { - assert!(*u < n, "Edge source out of bounds"); - assert!(*v < n, "Edge target out of bounds"); - assert!(u != v, "Self-loop detected"); - } - - // Check no duplicate edges - let unique: HashSet<_> = edges.iter().collect(); - assert_eq!(unique.len(), edges.len(), "Duplicate edges detected"); - } - } -} diff --git a/tests/rules/mod.rs b/tests/rules/mod.rs deleted file mode 100644 index 94fb762..0000000 --- a/tests/rules/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! Tests for the rules module (src/rules/). - -pub mod unitdiskmapping; diff --git a/tests/rules_unitdiskmapping.rs b/tests/rules_unitdiskmapping.rs deleted file mode 100644 index 1b936d9..0000000 --- a/tests/rules_unitdiskmapping.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Tests for the mapping module. -//! -//! This is the entry point for tests in tests/rules/mapping/. - -mod rules; diff --git a/tests/integration_tests.rs b/tests/suites/integration.rs similarity index 73% rename from tests/integration_tests.rs rename to tests/suites/integration.rs index baf7c42..5bdbec3 100644 --- a/tests/integration_tests.rs +++ b/tests/suites/integration.rs @@ -440,138 +440,3 @@ mod weighted_problems { assert_eq!(best_weight, 10); } } - -/// Tests for Problem trait consistency. -mod trait_consistency { - use super::*; - - fn check_problem_trait(problem: &P, name: &str) - where - P::Size: std::fmt::Debug, - { - assert!( - problem.num_variables() > 0 || name.contains("empty"), - "{} should have variables", - name - ); - assert!( - problem.num_flavors() >= 2, - "{} should have at least 2 flavors", - name - ); - - let size = problem.problem_size(); - // Check that problem_size returns some meaningful data - assert!( - size.get("num_vertices").is_some() - || size.get("num_vars").is_some() - || size.get("num_sets").is_some() - || size.get("num_cars").is_some() - || size.get("rows").is_some() - || size.get("left_size").is_some() - || size.get("target").is_some() - || size.get("num_variables").is_some() - || size.get("num_colors").is_some() - || size.get("num_spins").is_some() - || size.get("num_edges").is_some(), - "{} problem_size should have meaningful data", - name - ); - } - - #[test] - fn test_all_problems_implement_trait_correctly() { - check_problem_trait( - &IndependentSet::::new(3, vec![(0, 1)]), - "IndependentSet", - ); - check_problem_trait( - &VertexCovering::::new(3, vec![(0, 1)]), - "VertexCovering", - ); - check_problem_trait(&MaxCut::::new(3, vec![(0, 1, 1)]), "MaxCut"); - check_problem_trait(&KColoring::<3, SimpleGraph, i32>::new(3, vec![(0, 1)]), "KColoring"); - check_problem_trait(&DominatingSet::::new(3, vec![(0, 1)]), "DominatingSet"); - check_problem_trait(&MaximalIS::::new(3, vec![(0, 1)]), "MaximalIS"); - check_problem_trait(&Matching::::new(3, vec![(0, 1, 1)]), "Matching"); - check_problem_trait( - &Satisfiability::::new(3, vec![CNFClause::new(vec![1])]), - "SAT", - ); - check_problem_trait( - &SpinGlass::new(3, vec![((0, 1), 1.0)], vec![0.0; 3]), - "SpinGlass", - ); - check_problem_trait(&QUBO::from_matrix(vec![vec![1.0; 3]; 3]), "QUBO"); - check_problem_trait(&SetCovering::::new(3, vec![vec![0, 1]]), "SetCovering"); - check_problem_trait(&SetPacking::::new(vec![vec![0, 1]]), "SetPacking"); - 
check_problem_trait(&PaintShop::new(vec!["a", "a"]), "PaintShop"); - check_problem_trait(&BMF::new(vec![vec![true]], 1), "BMF"); - check_problem_trait(&BicliqueCover::new(2, 2, vec![(0, 2)], 1), "BicliqueCover"); - check_problem_trait(&Factoring::new(6, 2, 2), "Factoring"); - - let circuit = Circuit::new(vec![Assignment::new( - vec!["x".to_string()], - BooleanExpr::constant(true), - )]); - check_problem_trait(&CircuitSAT::::new(circuit), "CircuitSAT"); - } - - #[test] - fn test_energy_modes() { - // Minimization problems - assert!(VertexCovering::::new(2, vec![(0, 1)]) - .energy_mode() - .is_minimization()); - assert!(DominatingSet::::new(2, vec![(0, 1)]) - .energy_mode() - .is_minimization()); - assert!(SetCovering::::new(2, vec![vec![0, 1]]) - .energy_mode() - .is_minimization()); - assert!(PaintShop::new(vec!["a", "a"]) - .energy_mode() - .is_minimization()); - assert!(QUBO::from_matrix(vec![vec![1.0]]) - .energy_mode() - .is_minimization()); - assert!(SpinGlass::new(1, vec![], vec![0.0]) - .energy_mode() - .is_minimization()); - assert!(BMF::new(vec![vec![true]], 1) - .energy_mode() - .is_minimization()); - assert!(Factoring::new(6, 2, 2).energy_mode().is_minimization()); - assert!(KColoring::<2, SimpleGraph, i32>::new(2, vec![(0, 1)]) - .energy_mode() - .is_minimization()); - assert!(BicliqueCover::new(2, 2, vec![(0, 2)], 1) - .energy_mode() - .is_minimization()); - - // Maximization problems - assert!(IndependentSet::::new(2, vec![(0, 1)]) - .energy_mode() - .is_maximization()); - assert!(MaximalIS::::new(2, vec![(0, 1)]) - .energy_mode() - .is_maximization()); - assert!(MaxCut::::new(2, vec![(0, 1, 1)]) - .energy_mode() - .is_maximization()); - assert!(Matching::::new(2, vec![(0, 1, 1)]) - .energy_mode() - .is_maximization()); - assert!(SetPacking::::new(vec![vec![0]]) - .energy_mode() - .is_maximization()); - assert!(Satisfiability::::new(1, vec![CNFClause::new(vec![1])]) - .energy_mode() - .is_maximization()); - - let circuit = Circuit::new(vec![]); - assert!(CircuitSAT::::new(circuit) - .energy_mode() - .is_maximization()); - } -} diff --git a/tests/reduction_tests.rs b/tests/suites/reductions.rs similarity index 88% rename from tests/reduction_tests.rs rename to tests/suites/reductions.rs index 2c22e16..f02ebf2 100644 --- a/tests/reduction_tests.rs +++ b/tests/suites/reductions.rs @@ -4,7 +4,6 @@ //! solutions can be properly extracted through the reduction pipeline. use problemreductions::prelude::*; -use problemreductions::rules::ReductionGraph; use problemreductions::topology::SimpleGraph; /// Tests for IndependentSet <-> VertexCovering reductions. @@ -350,71 +349,6 @@ mod sg_maxcut_reductions { } } -/// Tests for ReductionGraph path finding. 
-mod reduction_graph_tests { - use super::*; - - #[test] - fn test_direct_reduction_exists() { - let graph = ReductionGraph::new(); - - assert!(graph.has_direct_reduction::, VertexCovering>()); - assert!(graph.has_direct_reduction::, IndependentSet>()); - assert!(graph.has_direct_reduction::, SetPacking>()); - assert!(graph.has_direct_reduction::, QUBO>()); - assert!(graph.has_direct_reduction::, MaxCut>()); - } - - #[test] - fn test_find_direct_path() { - let graph = ReductionGraph::new(); - - let paths = graph.find_paths::, VertexCovering>(); - assert!(!paths.is_empty()); - assert_eq!(paths[0].len(), 1); // One reduction step (direct) - } - - #[test] - fn test_find_indirect_path() { - let graph = ReductionGraph::new(); - - // SetPacking -> IndependentSet -> VertexCovering - let paths = graph.find_paths::, VertexCovering>(); - assert!(!paths.is_empty()); - - // Should have a path of length 2 (indirect) - let shortest = graph.find_shortest_path::, VertexCovering>(); - assert!(shortest.is_some()); - assert_eq!(shortest.unwrap().len(), 2); - } - - #[test] - fn test_no_path_exists() { - let graph = ReductionGraph::new(); - - // No reduction from QUBO to SetPacking (different type families) - let paths = graph.find_paths::, SetPacking>(); - assert!(paths.is_empty()); - } - - #[test] - fn test_bidirectional_paths() { - let graph = ReductionGraph::new(); - - // IS <-> VC is bidirectional - assert!(!graph - .find_paths::, VertexCovering>() - .is_empty()); - assert!(!graph - .find_paths::, IndependentSet>() - .is_empty()); - - // SG <-> QUBO is bidirectional - assert!(!graph.find_paths::, QUBO>().is_empty()); - assert!(!graph.find_paths::, SpinGlass>().is_empty()); - } -} - /// Tests for topology types integration. mod topology_tests { use super::*; From 9ed09d1896a5121c4ef601b71463152ca1282d07 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Sun, 8 Feb 2026 00:39:59 +0800 Subject: [PATCH 2/3] Rename src/tests_unit/ to src/unit_tests/ and fix coverage gap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename tests_unit → unit_tests for clearer distinction from src/testing/ - Fix test_brute_force_satisfiable: use valid_only(false) so the loop body actually executes on the UNSAT instance, covering the 2 missing lines Co-Authored-By: Claude Opus 4.6 --- .claude/CLAUDE.md | 2 +- .claude/rules/testing.md | 6 +++--- src/config.rs | 2 +- src/graph_types.rs | 2 +- src/io.rs | 2 +- src/lib.rs | 10 +++++----- src/models/graph/clique.rs | 2 +- src/models/graph/dominating_set.rs | 2 +- src/models/graph/independent_set.rs | 2 +- src/models/graph/kcoloring.rs | 2 +- src/models/graph/matching.rs | 2 +- src/models/graph/max_cut.rs | 2 +- src/models/graph/maximal_is.rs | 2 +- src/models/graph/vertex_covering.rs | 2 +- src/models/optimization/ilp.rs | 2 +- src/models/optimization/qubo.rs | 2 +- src/models/optimization/spin_glass.rs | 2 +- src/models/satisfiability/ksat.rs | 2 +- src/models/satisfiability/sat.rs | 2 +- src/models/set/set_covering.rs | 2 +- src/models/set/set_packing.rs | 2 +- src/models/specialized/biclique_cover.rs | 2 +- src/models/specialized/bmf.rs | 2 +- src/models/specialized/circuit.rs | 2 +- src/models/specialized/factoring.rs | 2 +- src/models/specialized/paintshop.rs | 2 +- src/polynomial.rs | 2 +- src/registry/category.rs | 2 +- src/registry/info.rs | 2 +- src/rules/circuit_spinglass.rs | 2 +- src/rules/clique_ilp.rs | 2 +- src/rules/coloring_ilp.rs | 2 +- src/rules/cost.rs | 2 +- src/rules/dominatingset_ilp.rs | 2 +- 
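The `valid_only(false)` fix described in the commit message above is easiest to see in isolation: with the default settings, `find_best` on an unsatisfiable formula returns nothing, so the test's loop body never runs. A minimal sketch, assuming the `Satisfiability`, `CNFClause`, and `BruteForce` APIs used by this test suite (the `<i32>` weight parameter and the prelude exports are assumptions, not shown in full in this patch):

```rust
use problemreductions::models::satisfiability::Satisfiability;
use problemreductions::prelude::*; // assumed to export CNFClause and BruteForce

fn valid_only_sketch() {
    // x1 AND NOT x1 -- no satisfying assignment exists.
    let problem = Satisfiability::<i32>::new(
        1,
        vec![CNFClause::new(vec![1]), CNFClause::new(vec![-1])],
    );

    // Per the commit message, the default solver keeps only valid
    // configurations, so on an UNSAT instance it has nothing to return
    // and the assertions inside the loop were never exercised.
    let strict = BruteForce::new();
    assert!(strict.find_best(&problem).is_empty());

    // With valid_only(false), the solver returns the configurations that
    // satisfy the most clauses, even though none of them are valid.
    let relaxed = BruteForce::new().valid_only(false);
    let best = relaxed.find_best(&problem);
    assert!(!best.is_empty());
    for sol in &best {
        assert!(!problem.solution_size(sol).is_valid);
    }
}
```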
src/rules/factoring_circuit.rs | 2 +- src/rules/factoring_ilp.rs | 2 +- src/rules/graph.rs | 2 +- src/rules/independentset_ilp.rs | 2 +- src/rules/independentset_setpacking.rs | 2 +- src/rules/matching_ilp.rs | 2 +- src/rules/matching_setpacking.rs | 2 +- src/rules/registry.rs | 2 +- src/rules/sat_coloring.rs | 2 +- src/rules/sat_dominatingset.rs | 2 +- src/rules/sat_independentset.rs | 2 +- src/rules/sat_ksat.rs | 2 +- src/rules/setcovering_ilp.rs | 2 +- src/rules/setpacking_ilp.rs | 2 +- src/rules/spinglass_maxcut.rs | 2 +- src/rules/spinglass_qubo.rs | 2 +- src/rules/traits.rs | 2 +- src/rules/unitdiskmapping/alpha_tensor.rs | 2 +- src/rules/unitdiskmapping/copyline.rs | 2 +- src/rules/unitdiskmapping/grid.rs | 2 +- src/rules/unitdiskmapping/ksg/gadgets_weighted.rs | 2 +- src/rules/unitdiskmapping/ksg/mapping.rs | 2 +- src/rules/unitdiskmapping/pathdecomposition.rs | 2 +- src/rules/unitdiskmapping/triangular/mapping.rs | 2 +- src/rules/unitdiskmapping/triangular/mod.rs | 2 +- src/rules/unitdiskmapping/weighted.rs | 2 +- src/rules/vertexcovering_ilp.rs | 2 +- src/rules/vertexcovering_independentset.rs | 2 +- src/rules/vertexcovering_setcovering.rs | 2 +- src/solvers/brute_force.rs | 2 +- src/solvers/ilp/solver.rs | 2 +- src/testing/macros.rs | 2 +- src/testing/mod.rs | 2 +- src/topology/graph.rs | 2 +- src/topology/grid_graph.rs | 2 +- src/topology/hypergraph.rs | 2 +- src/topology/small_graphs.rs | 2 +- src/topology/unit_disk_graph.rs | 2 +- src/traits.rs | 2 +- src/truth_table.rs | 2 +- src/types.rs | 2 +- src/{tests_unit => unit_tests}/config.rs | 0 src/{tests_unit => unit_tests}/graph_models.rs | 0 src/{tests_unit => unit_tests}/graph_types.rs | 0 src/{tests_unit => unit_tests}/io.rs | 0 src/{tests_unit => unit_tests}/models/graph/clique.rs | 0 .../models/graph/dominating_set.rs | 0 .../models/graph/independent_set.rs | 0 .../models/graph/kcoloring.rs | 0 .../models/graph/matching.rs | 0 src/{tests_unit => unit_tests}/models/graph/max_cut.rs | 0 .../models/graph/maximal_is.rs | 0 .../models/graph/vertex_covering.rs | 0 .../models/optimization/ilp.rs | 0 .../models/optimization/qubo.rs | 0 .../models/optimization/spin_glass.rs | 0 .../models/satisfiability/ksat.rs | 0 .../models/satisfiability/sat.rs | 7 ++++--- .../models/set/set_covering.rs | 0 .../models/set/set_packing.rs | 0 .../models/specialized/biclique_cover.rs | 0 .../models/specialized/bmf.rs | 0 .../models/specialized/circuit.rs | 0 .../models/specialized/factoring.rs | 0 .../models/specialized/paintshop.rs | 0 src/{tests_unit => unit_tests}/polynomial.rs | 0 src/{tests_unit => unit_tests}/property.rs | 0 src/{tests_unit => unit_tests}/reduction_graph.rs | 0 src/{tests_unit => unit_tests}/registry/category.rs | 0 src/{tests_unit => unit_tests}/registry/info.rs | 0 .../rules/circuit_spinglass.rs | 0 src/{tests_unit => unit_tests}/rules/clique_ilp.rs | 0 src/{tests_unit => unit_tests}/rules/coloring_ilp.rs | 0 src/{tests_unit => unit_tests}/rules/cost.rs | 0 .../rules/dominatingset_ilp.rs | 0 .../rules/factoring_circuit.rs | 0 src/{tests_unit => unit_tests}/rules/factoring_ilp.rs | 0 src/{tests_unit => unit_tests}/rules/graph.rs | 0 .../rules/independentset_ilp.rs | 0 .../rules/independentset_setpacking.rs | 0 src/{tests_unit => unit_tests}/rules/matching_ilp.rs | 0 .../rules/matching_setpacking.rs | 0 src/{tests_unit => unit_tests}/rules/registry.rs | 0 src/{tests_unit => unit_tests}/rules/sat_coloring.rs | 0 .../rules/sat_dominatingset.rs | 0 .../rules/sat_independentset.rs | 0 src/{tests_unit => 
unit_tests}/rules/sat_ksat.rs | 0 .../rules/setcovering_ilp.rs | 0 src/{tests_unit => unit_tests}/rules/setpacking_ilp.rs | 0 .../rules/spinglass_maxcut.rs | 0 src/{tests_unit => unit_tests}/rules/spinglass_qubo.rs | 0 src/{tests_unit => unit_tests}/rules/traits.rs | 0 .../rules/unitdiskmapping/alpha_tensor.rs | 0 .../rules/unitdiskmapping/copyline.rs | 0 .../rules/unitdiskmapping/grid.rs | 0 .../rules/unitdiskmapping/ksg/gadgets_weighted.rs | 0 .../rules/unitdiskmapping/ksg/mapping.rs | 0 .../rules/unitdiskmapping/pathdecomposition.rs | 0 .../rules/unitdiskmapping/triangular/mapping.rs | 0 .../rules/unitdiskmapping/triangular/mod.rs | 0 .../rules/unitdiskmapping/weighted.rs | 0 .../rules/vertexcovering_ilp.rs | 0 .../rules/vertexcovering_independentset.rs | 0 .../rules/vertexcovering_setcovering.rs | 0 src/{tests_unit => unit_tests}/solvers/brute_force.rs | 0 src/{tests_unit => unit_tests}/solvers/ilp/solver.rs | 0 src/{tests_unit => unit_tests}/testing/macros.rs | 0 src/{tests_unit => unit_tests}/testing/mod.rs | 0 src/{tests_unit => unit_tests}/topology/graph.rs | 0 src/{tests_unit => unit_tests}/topology/grid_graph.rs | 0 src/{tests_unit => unit_tests}/topology/hypergraph.rs | 0 .../topology/small_graphs.rs | 0 .../topology/unit_disk_graph.rs | 0 src/{tests_unit => unit_tests}/trait_consistency.rs | 0 src/{tests_unit => unit_tests}/traits.rs | 0 src/{tests_unit => unit_tests}/truth_table.rs | 0 src/{tests_unit => unit_tests}/types.rs | 0 .../unitdiskmapping_algorithms/common.rs | 0 .../unitdiskmapping_algorithms/copyline.rs | 0 .../unitdiskmapping_algorithms/gadgets.rs | 0 .../unitdiskmapping_algorithms/gadgets_ground_truth.rs | 0 .../unitdiskmapping_algorithms/julia_comparison.rs | 0 .../unitdiskmapping_algorithms/map_graph.rs | 0 .../unitdiskmapping_algorithms/mapping_result.rs | 0 .../unitdiskmapping_algorithms/mod.rs | 0 .../unitdiskmapping_algorithms/triangular.rs | 0 .../unitdiskmapping_algorithms/weighted.rs | 0 src/{tests_unit => unit_tests}/variant.rs | 0 src/variant.rs | 2 +- 163 files changed, 86 insertions(+), 85 deletions(-) rename src/{tests_unit => unit_tests}/config.rs (100%) rename src/{tests_unit => unit_tests}/graph_models.rs (100%) rename src/{tests_unit => unit_tests}/graph_types.rs (100%) rename src/{tests_unit => unit_tests}/io.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/clique.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/dominating_set.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/independent_set.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/kcoloring.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/matching.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/max_cut.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/maximal_is.rs (100%) rename src/{tests_unit => unit_tests}/models/graph/vertex_covering.rs (100%) rename src/{tests_unit => unit_tests}/models/optimization/ilp.rs (100%) rename src/{tests_unit => unit_tests}/models/optimization/qubo.rs (100%) rename src/{tests_unit => unit_tests}/models/optimization/spin_glass.rs (100%) rename src/{tests_unit => unit_tests}/models/satisfiability/ksat.rs (100%) rename src/{tests_unit => unit_tests}/models/satisfiability/sat.rs (97%) rename src/{tests_unit => unit_tests}/models/set/set_covering.rs (100%) rename src/{tests_unit => unit_tests}/models/set/set_packing.rs (100%) rename src/{tests_unit => unit_tests}/models/specialized/biclique_cover.rs (100%) rename src/{tests_unit => unit_tests}/models/specialized/bmf.rs (100%) 
rename src/{tests_unit => unit_tests}/models/specialized/circuit.rs (100%) rename src/{tests_unit => unit_tests}/models/specialized/factoring.rs (100%) rename src/{tests_unit => unit_tests}/models/specialized/paintshop.rs (100%) rename src/{tests_unit => unit_tests}/polynomial.rs (100%) rename src/{tests_unit => unit_tests}/property.rs (100%) rename src/{tests_unit => unit_tests}/reduction_graph.rs (100%) rename src/{tests_unit => unit_tests}/registry/category.rs (100%) rename src/{tests_unit => unit_tests}/registry/info.rs (100%) rename src/{tests_unit => unit_tests}/rules/circuit_spinglass.rs (100%) rename src/{tests_unit => unit_tests}/rules/clique_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/coloring_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/cost.rs (100%) rename src/{tests_unit => unit_tests}/rules/dominatingset_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/factoring_circuit.rs (100%) rename src/{tests_unit => unit_tests}/rules/factoring_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/graph.rs (100%) rename src/{tests_unit => unit_tests}/rules/independentset_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/independentset_setpacking.rs (100%) rename src/{tests_unit => unit_tests}/rules/matching_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/matching_setpacking.rs (100%) rename src/{tests_unit => unit_tests}/rules/registry.rs (100%) rename src/{tests_unit => unit_tests}/rules/sat_coloring.rs (100%) rename src/{tests_unit => unit_tests}/rules/sat_dominatingset.rs (100%) rename src/{tests_unit => unit_tests}/rules/sat_independentset.rs (100%) rename src/{tests_unit => unit_tests}/rules/sat_ksat.rs (100%) rename src/{tests_unit => unit_tests}/rules/setcovering_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/setpacking_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/spinglass_maxcut.rs (100%) rename src/{tests_unit => unit_tests}/rules/spinglass_qubo.rs (100%) rename src/{tests_unit => unit_tests}/rules/traits.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/alpha_tensor.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/copyline.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/grid.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/ksg/gadgets_weighted.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/ksg/mapping.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/pathdecomposition.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/triangular/mapping.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/triangular/mod.rs (100%) rename src/{tests_unit => unit_tests}/rules/unitdiskmapping/weighted.rs (100%) rename src/{tests_unit => unit_tests}/rules/vertexcovering_ilp.rs (100%) rename src/{tests_unit => unit_tests}/rules/vertexcovering_independentset.rs (100%) rename src/{tests_unit => unit_tests}/rules/vertexcovering_setcovering.rs (100%) rename src/{tests_unit => unit_tests}/solvers/brute_force.rs (100%) rename src/{tests_unit => unit_tests}/solvers/ilp/solver.rs (100%) rename src/{tests_unit => unit_tests}/testing/macros.rs (100%) rename src/{tests_unit => unit_tests}/testing/mod.rs (100%) rename src/{tests_unit => unit_tests}/topology/graph.rs (100%) rename src/{tests_unit => unit_tests}/topology/grid_graph.rs (100%) rename src/{tests_unit => unit_tests}/topology/hypergraph.rs (100%) rename src/{tests_unit => unit_tests}/topology/small_graphs.rs 
(100%) rename src/{tests_unit => unit_tests}/topology/unit_disk_graph.rs (100%) rename src/{tests_unit => unit_tests}/trait_consistency.rs (100%) rename src/{tests_unit => unit_tests}/traits.rs (100%) rename src/{tests_unit => unit_tests}/truth_table.rs (100%) rename src/{tests_unit => unit_tests}/types.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/common.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/copyline.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/gadgets.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/gadgets_ground_truth.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/julia_comparison.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/map_graph.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/mapping_result.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/mod.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/triangular.rs (100%) rename src/{tests_unit => unit_tests}/unitdiskmapping_algorithms/weighted.rs (100%) rename src/{tests_unit => unit_tests}/variant.rs (100%) diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index 71c27ea..e0d979f 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -26,7 +26,7 @@ make test clippy export-graph # Must pass before PR - `src/traits.rs` - `Problem`, `ConstraintSatisfactionProblem` traits - `src/rules/traits.rs` - `ReduceTo`, `ReductionResult` traits - `src/registry/` - Compile-time reduction metadata collection -- `src/tests_unit/` - Unit test files (extracted from inline `mod tests` blocks via `#[path]`) +- `src/unit_tests/` - Unit test files (extracted from inline `mod tests` blocks via `#[path]`) - `tests/main.rs` - User-facing integration tests only (modules in `tests/suites/`) ### Trait Hierarchy diff --git a/.claude/rules/testing.md b/.claude/rules/testing.md index ba3d52d..64bacc3 100644 --- a/.claude/rules/testing.md +++ b/.claude/rules/testing.md @@ -50,12 +50,12 @@ make coverage # >95% for new code ## Test File Organization -Unit tests live in `src/tests_unit/`, mirroring `src/` structure. Source files reference them via `#[path]`: +Unit tests live in `src/unit_tests/`, mirroring `src/` structure. 
Source files reference them via `#[path]`: ```rust // In src/rules/foo_bar.rs: #[cfg(test)] -#[path = "../tests_unit/rules/foo_bar.rs"] +#[path = "../unit_tests/rules/foo_bar.rs"] mod tests; ``` @@ -67,4 +67,4 @@ Integration tests are consolidated into a single binary at `tests/main.rs`, with - Don't skip closed-loop tests for reductions - Don't test only happy paths - include edge cases - Don't ignore clippy warnings -- Don't add inline `mod tests` blocks in `src/` — use `src/tests_unit/` with `#[path]` +- Don't add inline `mod tests` blocks in `src/` — use `src/unit_tests/` with `#[path]` diff --git a/src/config.rs b/src/config.rs index 6f97df0..fb2614c 100644 --- a/src/config.rs +++ b/src/config.rs @@ -108,5 +108,5 @@ pub fn bits_to_config(bits: &[bool]) -> Vec { } #[cfg(test)] -#[path = "tests_unit/config.rs"] +#[path = "unit_tests/config.rs"] mod tests; diff --git a/src/graph_types.rs b/src/graph_types.rs index 1198232..4802ed8 100644 --- a/src/graph_types.rs +++ b/src/graph_types.rs @@ -67,5 +67,5 @@ declare_graph_subtype!(PlanarGraph => SimpleGraph); declare_graph_subtype!(BipartiteGraph => SimpleGraph); #[cfg(test)] -#[path = "tests_unit/graph_types.rs"] +#[path = "unit_tests/graph_types.rs"] mod tests; diff --git a/src/io.rs b/src/io.rs index 4f69391..1442713 100644 --- a/src/io.rs +++ b/src/io.rs @@ -129,5 +129,5 @@ pub fn write_file>(path: P, contents: &str) -> Result<()> { } #[cfg(test)] -#[path = "tests_unit/io.rs"] +#[path = "unit_tests/io.rs"] mod tests; diff --git a/src/lib.rs b/src/lib.rs index e30e7a7..8b80dff 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -115,17 +115,17 @@ pub use types::{EnergyMode, LocalConstraint, LocalSolutionSize, ProblemSize, Sol pub use problemreductions_macros::reduction; #[cfg(test)] -#[path = "tests_unit/graph_models.rs"] +#[path = "unit_tests/graph_models.rs"] mod test_graph_models; #[cfg(test)] -#[path = "tests_unit/property.rs"] +#[path = "unit_tests/property.rs"] mod test_property; #[cfg(test)] -#[path = "tests_unit/reduction_graph.rs"] +#[path = "unit_tests/reduction_graph.rs"] mod test_reduction_graph; #[cfg(test)] -#[path = "tests_unit/trait_consistency.rs"] +#[path = "unit_tests/trait_consistency.rs"] mod test_trait_consistency; #[cfg(test)] -#[path = "tests_unit/unitdiskmapping_algorithms/mod.rs"] +#[path = "unit_tests/unitdiskmapping_algorithms/mod.rs"] mod test_unitdiskmapping_algorithms; diff --git a/src/models/graph/clique.rs b/src/models/graph/clique.rs index edf1251..b18b13a 100644 --- a/src/models/graph/clique.rs +++ b/src/models/graph/clique.rs @@ -297,5 +297,5 @@ pub fn is_clique(num_vertices: usize, edges: &[(usize, usize)], selected: &[bool } #[cfg(test)] -#[path = "../../tests_unit/models/graph/clique.rs"] +#[path = "../../unit_tests/models/graph/clique.rs"] mod tests; diff --git a/src/models/graph/dominating_set.rs b/src/models/graph/dominating_set.rs index 5c5e5de..24c7c36 100644 --- a/src/models/graph/dominating_set.rs +++ b/src/models/graph/dominating_set.rs @@ -275,5 +275,5 @@ pub fn is_dominating_set(num_vertices: usize, edges: &[(usize, usize)], selected } #[cfg(test)] -#[path = "../../tests_unit/models/graph/dominating_set.rs"] +#[path = "../../unit_tests/models/graph/dominating_set.rs"] mod tests; diff --git a/src/models/graph/independent_set.rs b/src/models/graph/independent_set.rs index 7bf6e97..9c097e7 100644 --- a/src/models/graph/independent_set.rs +++ b/src/models/graph/independent_set.rs @@ -265,5 +265,5 @@ pub fn is_independent_set( } #[cfg(test)] -#[path = "../../tests_unit/models/graph/independent_set.rs"] 
+#[path = "../../unit_tests/models/graph/independent_set.rs"] mod tests; diff --git a/src/models/graph/kcoloring.rs b/src/models/graph/kcoloring.rs index 2350ee1..92d6c66 100644 --- a/src/models/graph/kcoloring.rs +++ b/src/models/graph/kcoloring.rs @@ -228,5 +228,5 @@ pub fn is_valid_coloring( } #[cfg(test)] -#[path = "../../tests_unit/models/graph/kcoloring.rs"] +#[path = "../../unit_tests/models/graph/kcoloring.rs"] mod tests; diff --git a/src/models/graph/matching.rs b/src/models/graph/matching.rs index 4d65463..3a7b322 100644 --- a/src/models/graph/matching.rs +++ b/src/models/graph/matching.rs @@ -309,5 +309,5 @@ pub fn is_matching(num_vertices: usize, edges: &[(usize, usize)], selected: &[bo } #[cfg(test)] -#[path = "../../tests_unit/models/graph/matching.rs"] +#[path = "../../unit_tests/models/graph/matching.rs"] mod tests; diff --git a/src/models/graph/max_cut.rs b/src/models/graph/max_cut.rs index 32a25fc..84bcd63 100644 --- a/src/models/graph/max_cut.rs +++ b/src/models/graph/max_cut.rs @@ -258,5 +258,5 @@ where } #[cfg(test)] -#[path = "../../tests_unit/models/graph/max_cut.rs"] +#[path = "../../unit_tests/models/graph/max_cut.rs"] mod tests; diff --git a/src/models/graph/maximal_is.rs b/src/models/graph/maximal_is.rs index c94015f..76549db 100644 --- a/src/models/graph/maximal_is.rs +++ b/src/models/graph/maximal_is.rs @@ -316,5 +316,5 @@ pub fn is_maximal_independent_set( } #[cfg(test)] -#[path = "../../tests_unit/models/graph/maximal_is.rs"] +#[path = "../../unit_tests/models/graph/maximal_is.rs"] mod tests; diff --git a/src/models/graph/vertex_covering.rs b/src/models/graph/vertex_covering.rs index 171ca50..33133b5 100644 --- a/src/models/graph/vertex_covering.rs +++ b/src/models/graph/vertex_covering.rs @@ -242,5 +242,5 @@ pub fn is_vertex_cover(num_vertices: usize, edges: &[(usize, usize)], selected: } #[cfg(test)] -#[path = "../../tests_unit/models/graph/vertex_covering.rs"] +#[path = "../../unit_tests/models/graph/vertex_covering.rs"] mod tests; diff --git a/src/models/optimization/ilp.rs b/src/models/optimization/ilp.rs index 7ef3303..6c3f524 100644 --- a/src/models/optimization/ilp.rs +++ b/src/models/optimization/ilp.rs @@ -381,5 +381,5 @@ impl Problem for ILP { } #[cfg(test)] -#[path = "../../tests_unit/models/optimization/ilp.rs"] +#[path = "../../unit_tests/models/optimization/ilp.rs"] mod tests; diff --git a/src/models/optimization/qubo.rs b/src/models/optimization/qubo.rs index 03e474b..771104c 100644 --- a/src/models/optimization/qubo.rs +++ b/src/models/optimization/qubo.rs @@ -173,5 +173,5 @@ where } #[cfg(test)] -#[path = "../../tests_unit/models/optimization/qubo.rs"] +#[path = "../../unit_tests/models/optimization/qubo.rs"] mod tests; diff --git a/src/models/optimization/spin_glass.rs b/src/models/optimization/spin_glass.rs index e98feb3..373da69 100644 --- a/src/models/optimization/spin_glass.rs +++ b/src/models/optimization/spin_glass.rs @@ -230,5 +230,5 @@ where } #[cfg(test)] -#[path = "../../tests_unit/models/optimization/spin_glass.rs"] +#[path = "../../unit_tests/models/optimization/spin_glass.rs"] mod tests; diff --git a/src/models/satisfiability/ksat.rs b/src/models/satisfiability/ksat.rs index 50dfcd4..304059a 100644 --- a/src/models/satisfiability/ksat.rs +++ b/src/models/satisfiability/ksat.rs @@ -308,5 +308,5 @@ where } #[cfg(test)] -#[path = "../../tests_unit/models/satisfiability/ksat.rs"] +#[path = "../../unit_tests/models/satisfiability/ksat.rs"] mod tests; diff --git a/src/models/satisfiability/sat.rs b/src/models/satisfiability/sat.rs 
index a983002..489054b 100644 --- a/src/models/satisfiability/sat.rs +++ b/src/models/satisfiability/sat.rs @@ -345,5 +345,5 @@ pub fn is_satisfying_assignment( } #[cfg(test)] -#[path = "../../tests_unit/models/satisfiability/sat.rs"] +#[path = "../../unit_tests/models/satisfiability/sat.rs"] mod tests; diff --git a/src/models/set/set_covering.rs b/src/models/set/set_covering.rs index 26fab1c..7eac815 100644 --- a/src/models/set/set_covering.rs +++ b/src/models/set/set_covering.rs @@ -241,5 +241,5 @@ pub fn is_set_cover(universe_size: usize, sets: &[Vec], selected: &[bool] } #[cfg(test)] -#[path = "../../tests_unit/models/set/set_covering.rs"] +#[path = "../../unit_tests/models/set/set_covering.rs"] mod tests; diff --git a/src/models/set/set_packing.rs b/src/models/set/set_packing.rs index 999e4b2..5d8bc4e 100644 --- a/src/models/set/set_packing.rs +++ b/src/models/set/set_packing.rs @@ -234,5 +234,5 @@ pub fn is_set_packing(sets: &[Vec], selected: &[bool]) -> bool { } #[cfg(test)] -#[path = "../../tests_unit/models/set/set_packing.rs"] +#[path = "../../unit_tests/models/set/set_packing.rs"] mod tests; diff --git a/src/models/specialized/biclique_cover.rs b/src/models/specialized/biclique_cover.rs index 86512fa..d40b536 100644 --- a/src/models/specialized/biclique_cover.rs +++ b/src/models/specialized/biclique_cover.rs @@ -233,5 +233,5 @@ pub fn is_biclique_cover( } #[cfg(test)] -#[path = "../../tests_unit/models/specialized/biclique_cover.rs"] +#[path = "../../unit_tests/models/specialized/biclique_cover.rs"] mod tests; diff --git a/src/models/specialized/bmf.rs b/src/models/specialized/bmf.rs index 182fb2b..8addf66 100644 --- a/src/models/specialized/bmf.rs +++ b/src/models/specialized/bmf.rs @@ -209,5 +209,5 @@ pub fn matrix_hamming_distance(a: &[Vec], b: &[Vec]) -> usize { } #[cfg(test)] -#[path = "../../tests_unit/models/specialized/bmf.rs"] +#[path = "../../unit_tests/models/specialized/bmf.rs"] mod tests; diff --git a/src/models/specialized/circuit.rs b/src/models/specialized/circuit.rs index 9309981..c324a54 100644 --- a/src/models/specialized/circuit.rs +++ b/src/models/specialized/circuit.rs @@ -329,5 +329,5 @@ pub fn is_circuit_satisfying(circuit: &Circuit, assignments: &HashMap bool { } #[cfg(test)] -#[path = "../../tests_unit/models/specialized/factoring.rs"] +#[path = "../../unit_tests/models/specialized/factoring.rs"] mod tests; diff --git a/src/models/specialized/paintshop.rs b/src/models/specialized/paintshop.rs index 6b8e88a..f268e8f 100644 --- a/src/models/specialized/paintshop.rs +++ b/src/models/specialized/paintshop.rs @@ -181,5 +181,5 @@ pub fn count_paint_switches(coloring: &[usize]) -> usize { } #[cfg(test)] -#[path = "../../tests_unit/models/specialized/paintshop.rs"] +#[path = "../../unit_tests/models/specialized/paintshop.rs"] mod tests; diff --git a/src/polynomial.rs b/src/polynomial.rs index e16aedb..a41d283 100644 --- a/src/polynomial.rs +++ b/src/polynomial.rs @@ -126,5 +126,5 @@ macro_rules! 
poly { } #[cfg(test)] -#[path = "tests_unit/polynomial.rs"] +#[path = "unit_tests/polynomial.rs"] mod tests; diff --git a/src/registry/category.rs b/src/registry/category.rs index 4e6285f..affea79 100644 --- a/src/registry/category.rs +++ b/src/registry/category.rs @@ -322,5 +322,5 @@ impl SpecializedSubcategory { } #[cfg(test)] -#[path = "../tests_unit/registry/category.rs"] +#[path = "../unit_tests/registry/category.rs"] mod tests; diff --git a/src/registry/info.rs b/src/registry/info.rs index b3332b8..6564ced 100644 --- a/src/registry/info.rs +++ b/src/registry/info.rs @@ -267,5 +267,5 @@ pub trait ProblemMetadata { } #[cfg(test)] -#[path = "../tests_unit/registry/info.rs"] +#[path = "../unit_tests/registry/info.rs"] mod tests; diff --git a/src/rules/circuit_spinglass.rs b/src/rules/circuit_spinglass.rs index d3a979c..ddc91cc 100644 --- a/src/rules/circuit_spinglass.rs +++ b/src/rules/circuit_spinglass.rs @@ -459,5 +459,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/circuit_spinglass.rs"] +#[path = "../unit_tests/rules/circuit_spinglass.rs"] mod tests; diff --git a/src/rules/clique_ilp.rs b/src/rules/clique_ilp.rs index 4b66719..ad6505f 100644 --- a/src/rules/clique_ilp.rs +++ b/src/rules/clique_ilp.rs @@ -95,5 +95,5 @@ impl ReduceTo for Clique { } #[cfg(test)] -#[path = "../tests_unit/rules/clique_ilp.rs"] +#[path = "../unit_tests/rules/clique_ilp.rs"] mod tests; diff --git a/src/rules/coloring_ilp.rs b/src/rules/coloring_ilp.rs index 33e0d5b..96c5047 100644 --- a/src/rules/coloring_ilp.rs +++ b/src/rules/coloring_ilp.rs @@ -166,5 +166,5 @@ where pub type ReductionColoringToILP = ReductionKColoringToILP<3, SimpleGraph, i32>; #[cfg(test)] -#[path = "../tests_unit/rules/coloring_ilp.rs"] +#[path = "../unit_tests/rules/coloring_ilp.rs"] mod tests; diff --git a/src/rules/cost.rs b/src/rules/cost.rs index 9a198b4..6658887 100644 --- a/src/rules/cost.rs +++ b/src/rules/cost.rs @@ -79,5 +79,5 @@ impl f64> PathCostFn for CustomCost for DominatingSet { } #[cfg(test)] -#[path = "../tests_unit/rules/dominatingset_ilp.rs"] +#[path = "../unit_tests/rules/dominatingset_ilp.rs"] mod tests; diff --git a/src/rules/factoring_circuit.rs b/src/rules/factoring_circuit.rs index b86dfff..4d71cb5 100644 --- a/src/rules/factoring_circuit.rs +++ b/src/rules/factoring_circuit.rs @@ -284,5 +284,5 @@ impl ReduceTo> for Factoring { } #[cfg(test)] -#[path = "../tests_unit/rules/factoring_circuit.rs"] +#[path = "../unit_tests/rules/factoring_circuit.rs"] mod tests; diff --git a/src/rules/factoring_ilp.rs b/src/rules/factoring_ilp.rs index 8d6d971..235a647 100644 --- a/src/rules/factoring_ilp.rs +++ b/src/rules/factoring_ilp.rs @@ -275,5 +275,5 @@ impl ReduceTo for Factoring { } #[cfg(test)] -#[path = "../tests_unit/rules/factoring_ilp.rs"] +#[path = "../unit_tests/rules/factoring_ilp.rs"] mod tests; diff --git a/src/rules/graph.rs b/src/rules/graph.rs index 9800df1..e9c9cc5 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -687,5 +687,5 @@ impl ReductionGraph { } #[cfg(test)] -#[path = "../tests_unit/rules/graph.rs"] +#[path = "../unit_tests/rules/graph.rs"] mod tests; diff --git a/src/rules/independentset_ilp.rs b/src/rules/independentset_ilp.rs index 488a497..137f209 100644 --- a/src/rules/independentset_ilp.rs +++ b/src/rules/independentset_ilp.rs @@ -90,5 +90,5 @@ impl ReduceTo for IndependentSet { } #[cfg(test)] -#[path = "../tests_unit/rules/independentset_ilp.rs"] +#[path = "../unit_tests/rules/independentset_ilp.rs"] mod tests; diff --git a/src/rules/independentset_setpacking.rs 
b/src/rules/independentset_setpacking.rs index 15554b7..0544da7 100644 --- a/src/rules/independentset_setpacking.rs +++ b/src/rules/independentset_setpacking.rs @@ -156,5 +156,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/independentset_setpacking.rs"] +#[path = "../unit_tests/rules/independentset_setpacking.rs"] mod tests; diff --git a/src/rules/matching_ilp.rs b/src/rules/matching_ilp.rs index acf2281..fdcab58 100644 --- a/src/rules/matching_ilp.rs +++ b/src/rules/matching_ilp.rs @@ -95,5 +95,5 @@ impl ReduceTo for Matching { } #[cfg(test)] -#[path = "../tests_unit/rules/matching_ilp.rs"] +#[path = "../unit_tests/rules/matching_ilp.rs"] mod tests; diff --git a/src/rules/matching_setpacking.rs b/src/rules/matching_setpacking.rs index fd9148a..8d7cbd0 100644 --- a/src/rules/matching_setpacking.rs +++ b/src/rules/matching_setpacking.rs @@ -85,5 +85,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/matching_setpacking.rs"] +#[path = "../unit_tests/rules/matching_setpacking.rs"] mod tests; diff --git a/src/rules/registry.rs b/src/rules/registry.rs index b5d01e2..8e10deb 100644 --- a/src/rules/registry.rs +++ b/src/rules/registry.rs @@ -87,5 +87,5 @@ impl std::fmt::Debug for ReductionEntry { inventory::collect!(ReductionEntry); #[cfg(test)] -#[path = "../tests_unit/rules/registry.rs"] +#[path = "../unit_tests/rules/registry.rs"] mod tests; diff --git a/src/rules/sat_coloring.rs b/src/rules/sat_coloring.rs index 78be439..c402325 100644 --- a/src/rules/sat_coloring.rs +++ b/src/rules/sat_coloring.rs @@ -353,5 +353,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/sat_coloring.rs"] +#[path = "../unit_tests/rules/sat_coloring.rs"] mod tests; diff --git a/src/rules/sat_dominatingset.rs b/src/rules/sat_dominatingset.rs index 0ef6339..4f3dc7e 100644 --- a/src/rules/sat_dominatingset.rs +++ b/src/rules/sat_dominatingset.rs @@ -199,5 +199,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/sat_dominatingset.rs"] +#[path = "../unit_tests/rules/sat_dominatingset.rs"] mod tests; diff --git a/src/rules/sat_independentset.rs b/src/rules/sat_independentset.rs index 0b06a4c..f71e026 100644 --- a/src/rules/sat_independentset.rs +++ b/src/rules/sat_independentset.rs @@ -189,5 +189,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/sat_independentset.rs"] +#[path = "../unit_tests/rules/sat_independentset.rs"] mod tests; diff --git a/src/rules/sat_ksat.rs b/src/rules/sat_ksat.rs index 967be35..f8b2227 100644 --- a/src/rules/sat_ksat.rs +++ b/src/rules/sat_ksat.rs @@ -224,7 +224,7 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/sat_ksat.rs"] +#[path = "../unit_tests/rules/sat_ksat.rs"] mod tests; // Register SAT -> KSAT reduction manually (generated by macro, can't use #[reduction]) diff --git a/src/rules/setcovering_ilp.rs b/src/rules/setcovering_ilp.rs index 08702fb..1e76cbc 100644 --- a/src/rules/setcovering_ilp.rs +++ b/src/rules/setcovering_ilp.rs @@ -98,5 +98,5 @@ impl ReduceTo for SetCovering { } #[cfg(test)] -#[path = "../tests_unit/rules/setcovering_ilp.rs"] +#[path = "../unit_tests/rules/setcovering_ilp.rs"] mod tests; diff --git a/src/rules/setpacking_ilp.rs b/src/rules/setpacking_ilp.rs index 000a2eb..450ce61 100644 --- a/src/rules/setpacking_ilp.rs +++ b/src/rules/setpacking_ilp.rs @@ -89,5 +89,5 @@ impl ReduceTo for SetPacking { } #[cfg(test)] -#[path = "../tests_unit/rules/setpacking_ilp.rs"] +#[path = "../unit_tests/rules/setpacking_ilp.rs"] mod tests; diff --git a/src/rules/spinglass_maxcut.rs b/src/rules/spinglass_maxcut.rs index b0460eb..ea77e82 100644 
--- a/src/rules/spinglass_maxcut.rs +++ b/src/rules/spinglass_maxcut.rs @@ -204,5 +204,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/spinglass_maxcut.rs"] +#[path = "../unit_tests/rules/spinglass_maxcut.rs"] mod tests; diff --git a/src/rules/spinglass_qubo.rs b/src/rules/spinglass_qubo.rs index 543397c..8233c58 100644 --- a/src/rules/spinglass_qubo.rs +++ b/src/rules/spinglass_qubo.rs @@ -180,5 +180,5 @@ impl ReduceTo> for SpinGlass { } #[cfg(test)] -#[path = "../tests_unit/rules/spinglass_qubo.rs"] +#[path = "../unit_tests/rules/spinglass_qubo.rs"] mod tests; diff --git a/src/rules/traits.rs b/src/rules/traits.rs index d832826..e2ddc13 100644 --- a/src/rules/traits.rs +++ b/src/rules/traits.rs @@ -68,5 +68,5 @@ pub trait ReduceTo: Problem { } #[cfg(test)] -#[path = "../tests_unit/rules/traits.rs"] +#[path = "../unit_tests/rules/traits.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/alpha_tensor.rs b/src/rules/unitdiskmapping/alpha_tensor.rs index 35e701a..81e7cc0 100644 --- a/src/rules/unitdiskmapping/alpha_tensor.rs +++ b/src/rules/unitdiskmapping/alpha_tensor.rs @@ -365,5 +365,5 @@ pub fn verify_triangular_gadget( } #[cfg(test)] -#[path = "../../tests_unit/rules/unitdiskmapping/alpha_tensor.rs"] +#[path = "../../unit_tests/rules/unitdiskmapping/alpha_tensor.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/copyline.rs b/src/rules/unitdiskmapping/copyline.rs index 7d60378..d74d7ac 100644 --- a/src/rules/unitdiskmapping/copyline.rs +++ b/src/rules/unitdiskmapping/copyline.rs @@ -525,5 +525,5 @@ pub fn mis_overhead_copyline_triangular(line: &CopyLine, spacing: usize) -> i32 } #[cfg(test)] -#[path = "../../tests_unit/rules/unitdiskmapping/copyline.rs"] +#[path = "../../unit_tests/rules/unitdiskmapping/copyline.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/grid.rs b/src/rules/unitdiskmapping/grid.rs index 63f6848..27ae7aa 100644 --- a/src/rules/unitdiskmapping/grid.rs +++ b/src/rules/unitdiskmapping/grid.rs @@ -314,5 +314,5 @@ impl fmt::Display for MappingGrid { } #[cfg(test)] -#[path = "../../tests_unit/rules/unitdiskmapping/grid.rs"] +#[path = "../../unit_tests/rules/unitdiskmapping/grid.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs b/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs index 0b64a9f..78d0f60 100644 --- a/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs +++ b/src/rules/unitdiskmapping/ksg/gadgets_weighted.rs @@ -1368,5 +1368,5 @@ pub fn map_config_back_pattern( } #[cfg(test)] -#[path = "../../../tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs"] +#[path = "../../../unit_tests/rules/unitdiskmapping/ksg/gadgets_weighted.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/ksg/mapping.rs b/src/rules/unitdiskmapping/ksg/mapping.rs index d7b3990..3ab4d6e 100644 --- a/src/rules/unitdiskmapping/ksg/mapping.rs +++ b/src/rules/unitdiskmapping/ksg/mapping.rs @@ -624,5 +624,5 @@ pub fn map_weighted_with_order( } #[cfg(test)] -#[path = "../../../tests_unit/rules/unitdiskmapping/ksg/mapping.rs"] +#[path = "../../../unit_tests/rules/unitdiskmapping/ksg/mapping.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/pathdecomposition.rs b/src/rules/unitdiskmapping/pathdecomposition.rs index ae19558..11afda7 100644 --- a/src/rules/unitdiskmapping/pathdecomposition.rs +++ b/src/rules/unitdiskmapping/pathdecomposition.rs @@ -465,5 +465,5 @@ pub fn vertex_order_from_layout(layout: &Layout) -> Vec { } #[cfg(test)] -#[path = "../../tests_unit/rules/unitdiskmapping/pathdecomposition.rs"] +#[path = 
"../../unit_tests/rules/unitdiskmapping/pathdecomposition.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/triangular/mapping.rs b/src/rules/unitdiskmapping/triangular/mapping.rs index 9221143..a705ad8 100644 --- a/src/rules/unitdiskmapping/triangular/mapping.rs +++ b/src/rules/unitdiskmapping/triangular/mapping.rs @@ -292,5 +292,5 @@ pub fn map_weights(result: &MappingResult, source_weights: &[f64]) -> Vec { } #[cfg(test)] -#[path = "../../../tests_unit/rules/unitdiskmapping/triangular/mapping.rs"] +#[path = "../../../unit_tests/rules/unitdiskmapping/triangular/mapping.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/triangular/mod.rs b/src/rules/unitdiskmapping/triangular/mod.rs index d20eff2..db5d4ef 100644 --- a/src/rules/unitdiskmapping/triangular/mod.rs +++ b/src/rules/unitdiskmapping/triangular/mod.rs @@ -1627,5 +1627,5 @@ pub fn map_graph_triangular_with_order( } #[cfg(test)] -#[path = "../../../tests_unit/rules/unitdiskmapping/triangular/mod.rs"] +#[path = "../../../unit_tests/rules/unitdiskmapping/triangular/mod.rs"] mod tests; diff --git a/src/rules/unitdiskmapping/weighted.rs b/src/rules/unitdiskmapping/weighted.rs index 3d8ee3f..8683c13 100644 --- a/src/rules/unitdiskmapping/weighted.rs +++ b/src/rules/unitdiskmapping/weighted.rs @@ -485,5 +485,5 @@ pub fn map_weights(result: &MappingResult, source_weights: &[f64]) -> Vec { } #[cfg(test)] -#[path = "../../tests_unit/rules/unitdiskmapping/weighted.rs"] +#[path = "../../unit_tests/rules/unitdiskmapping/weighted.rs"] mod tests; diff --git a/src/rules/vertexcovering_ilp.rs b/src/rules/vertexcovering_ilp.rs index 68eab15..733e789 100644 --- a/src/rules/vertexcovering_ilp.rs +++ b/src/rules/vertexcovering_ilp.rs @@ -90,5 +90,5 @@ impl ReduceTo for VertexCovering { } #[cfg(test)] -#[path = "../tests_unit/rules/vertexcovering_ilp.rs"] +#[path = "../unit_tests/rules/vertexcovering_ilp.rs"] mod tests; diff --git a/src/rules/vertexcovering_independentset.rs b/src/rules/vertexcovering_independentset.rs index 2b97c75..496aac7 100644 --- a/src/rules/vertexcovering_independentset.rs +++ b/src/rules/vertexcovering_independentset.rs @@ -137,5 +137,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/vertexcovering_independentset.rs"] +#[path = "../unit_tests/rules/vertexcovering_independentset.rs"] mod tests; diff --git a/src/rules/vertexcovering_setcovering.rs b/src/rules/vertexcovering_setcovering.rs index 9a0cd6b..b581d54 100644 --- a/src/rules/vertexcovering_setcovering.rs +++ b/src/rules/vertexcovering_setcovering.rs @@ -91,5 +91,5 @@ where } #[cfg(test)] -#[path = "../tests_unit/rules/vertexcovering_setcovering.rs"] +#[path = "../unit_tests/rules/vertexcovering_setcovering.rs"] mod tests; diff --git a/src/solvers/brute_force.rs b/src/solvers/brute_force.rs index 7aa8689..868e6f0 100644 --- a/src/solvers/brute_force.rs +++ b/src/solvers/brute_force.rs @@ -176,5 +176,5 @@ impl BruteForceFloat for BruteForce { } #[cfg(test)] -#[path = "../tests_unit/solvers/brute_force.rs"] +#[path = "../unit_tests/solvers/brute_force.rs"] mod tests; diff --git a/src/solvers/ilp/solver.rs b/src/solvers/ilp/solver.rs index 4ef1313..1076990 100644 --- a/src/solvers/ilp/solver.rs +++ b/src/solvers/ilp/solver.rs @@ -167,5 +167,5 @@ impl ILPSolver { } #[cfg(test)] -#[path = "../../tests_unit/solvers/ilp/solver.rs"] +#[path = "../../unit_tests/solvers/ilp/solver.rs"] mod tests; diff --git a/src/testing/macros.rs b/src/testing/macros.rs index cea4e68..7a75266 100644 --- a/src/testing/macros.rs +++ b/src/testing/macros.rs @@ -243,5 +243,5 @@ 
macro_rules! quick_problem_test {
 }

 #[cfg(test)]
-#[path = "../tests_unit/testing/macros.rs"]
+#[path = "../unit_tests/testing/macros.rs"]
 mod tests;
diff --git a/src/testing/mod.rs b/src/testing/mod.rs
index 0559901..43f8ea5 100644
--- a/src/testing/mod.rs
+++ b/src/testing/mod.rs
@@ -181,5 +181,5 @@ impl SatTestCase {
 }

 #[cfg(test)]
-#[path = "../tests_unit/testing/mod.rs"]
+#[path = "../unit_tests/testing/mod.rs"]
 mod tests;
diff --git a/src/topology/graph.rs b/src/topology/graph.rs
index f548790..04d8d41 100644
--- a/src/topology/graph.rs
+++ b/src/topology/graph.rs
@@ -261,5 +261,5 @@ impl PartialEq for SimpleGraph {
 impl Eq for SimpleGraph {}

 #[cfg(test)]
-#[path = "../tests_unit/topology/graph.rs"]
+#[path = "../unit_tests/topology/graph.rs"]
 mod tests;
diff --git a/src/topology/grid_graph.rs b/src/topology/grid_graph.rs
index a30d4cc..4c7b466 100644
--- a/src/topology/grid_graph.rs
+++ b/src/topology/grid_graph.rs
@@ -314,5 +314,5 @@ impl fmt::Display for GridGraph {
 }

 #[cfg(test)]
-#[path = "../tests_unit/topology/grid_graph.rs"]
+#[path = "../unit_tests/topology/grid_graph.rs"]
 mod tests;
diff --git a/src/topology/hypergraph.rs b/src/topology/hypergraph.rs
index 399afa2..88018a6 100644
--- a/src/topology/hypergraph.rs
+++ b/src/topology/hypergraph.rs
@@ -147,5 +147,5 @@ impl HyperGraph {
 }

 #[cfg(test)]
-#[path = "../tests_unit/topology/hypergraph.rs"]
+#[path = "../unit_tests/topology/hypergraph.rs"]
 mod tests;
diff --git a/src/topology/small_graphs.rs b/src/topology/small_graphs.rs
index c0179d4..2fb9360 100644
--- a/src/topology/small_graphs.rs
+++ b/src/topology/small_graphs.rs
@@ -766,5 +766,5 @@ pub fn available_graphs() -> Vec<&'static str> {
 }

 #[cfg(test)]
-#[path = "../tests_unit/topology/small_graphs.rs"]
+#[path = "../unit_tests/topology/small_graphs.rs"]
 mod tests;
diff --git a/src/topology/unit_disk_graph.rs b/src/topology/unit_disk_graph.rs
index 921ce0a..82ceaf2 100644
--- a/src/topology/unit_disk_graph.rs
+++ b/src/topology/unit_disk_graph.rs
@@ -229,5 +229,5 @@ impl Graph for UnitDiskGraph {
 }

 #[cfg(test)]
-#[path = "../tests_unit/topology/unit_disk_graph.rs"]
+#[path = "../unit_tests/topology/unit_disk_graph.rs"]
 mod tests;
diff --git a/src/traits.rs b/src/traits.rs
index 8716bb2..f275354 100644
--- a/src/traits.rs
+++ b/src/traits.rs
@@ -114,5 +114,5 @@ pub fn csp_solution_size(
 }

 #[cfg(test)]
-#[path = "tests_unit/traits.rs"]
+#[path = "unit_tests/traits.rs"]
 mod tests;
diff --git a/src/truth_table.rs b/src/truth_table.rs
index cf71b75..139373b 100644
--- a/src/truth_table.rs
+++ b/src/truth_table.rs
@@ -281,5 +281,5 @@ impl TruthTable {
 }

 #[cfg(test)]
-#[path = "tests_unit/truth_table.rs"]
+#[path = "unit_tests/truth_table.rs"]
 mod tests;
diff --git a/src/types.rs b/src/types.rs
index 1cb8f29..daeb003 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -294,5 +294,5 @@ impl LocalSolutionSize {
 }

 #[cfg(test)]
-#[path = "tests_unit/types.rs"]
+#[path = "unit_tests/types.rs"]
 mod tests;
diff --git a/src/tests_unit/config.rs b/src/unit_tests/config.rs
similarity index 100%
rename from src/tests_unit/config.rs
rename to src/unit_tests/config.rs
diff --git a/src/tests_unit/graph_models.rs b/src/unit_tests/graph_models.rs
similarity index 100%
rename from src/tests_unit/graph_models.rs
rename to src/unit_tests/graph_models.rs
diff --git a/src/tests_unit/graph_types.rs b/src/unit_tests/graph_types.rs
similarity index 100%
rename from src/tests_unit/graph_types.rs
rename to src/unit_tests/graph_types.rs
diff --git a/src/tests_unit/io.rs b/src/unit_tests/io.rs
similarity index 100%
rename from src/tests_unit/io.rs
rename to src/unit_tests/io.rs
diff --git a/src/tests_unit/models/graph/clique.rs b/src/unit_tests/models/graph/clique.rs
similarity index 100%
rename from src/tests_unit/models/graph/clique.rs
rename to src/unit_tests/models/graph/clique.rs
diff --git a/src/tests_unit/models/graph/dominating_set.rs b/src/unit_tests/models/graph/dominating_set.rs
similarity index 100%
rename from src/tests_unit/models/graph/dominating_set.rs
rename to src/unit_tests/models/graph/dominating_set.rs
diff --git a/src/tests_unit/models/graph/independent_set.rs b/src/unit_tests/models/graph/independent_set.rs
similarity index 100%
rename from src/tests_unit/models/graph/independent_set.rs
rename to src/unit_tests/models/graph/independent_set.rs
diff --git a/src/tests_unit/models/graph/kcoloring.rs b/src/unit_tests/models/graph/kcoloring.rs
similarity index 100%
rename from src/tests_unit/models/graph/kcoloring.rs
rename to src/unit_tests/models/graph/kcoloring.rs
diff --git a/src/tests_unit/models/graph/matching.rs b/src/unit_tests/models/graph/matching.rs
similarity index 100%
rename from src/tests_unit/models/graph/matching.rs
rename to src/unit_tests/models/graph/matching.rs
diff --git a/src/tests_unit/models/graph/max_cut.rs b/src/unit_tests/models/graph/max_cut.rs
similarity index 100%
rename from src/tests_unit/models/graph/max_cut.rs
rename to src/unit_tests/models/graph/max_cut.rs
diff --git a/src/tests_unit/models/graph/maximal_is.rs b/src/unit_tests/models/graph/maximal_is.rs
similarity index 100%
rename from src/tests_unit/models/graph/maximal_is.rs
rename to src/unit_tests/models/graph/maximal_is.rs
diff --git a/src/tests_unit/models/graph/vertex_covering.rs b/src/unit_tests/models/graph/vertex_covering.rs
similarity index 100%
rename from src/tests_unit/models/graph/vertex_covering.rs
rename to src/unit_tests/models/graph/vertex_covering.rs
diff --git a/src/tests_unit/models/optimization/ilp.rs b/src/unit_tests/models/optimization/ilp.rs
similarity index 100%
rename from src/tests_unit/models/optimization/ilp.rs
rename to src/unit_tests/models/optimization/ilp.rs
diff --git a/src/tests_unit/models/optimization/qubo.rs b/src/unit_tests/models/optimization/qubo.rs
similarity index 100%
rename from src/tests_unit/models/optimization/qubo.rs
rename to src/unit_tests/models/optimization/qubo.rs
diff --git a/src/tests_unit/models/optimization/spin_glass.rs b/src/unit_tests/models/optimization/spin_glass.rs
similarity index 100%
rename from src/tests_unit/models/optimization/spin_glass.rs
rename to src/unit_tests/models/optimization/spin_glass.rs
diff --git a/src/tests_unit/models/satisfiability/ksat.rs b/src/unit_tests/models/satisfiability/ksat.rs
similarity index 100%
rename from src/tests_unit/models/satisfiability/ksat.rs
rename to src/unit_tests/models/satisfiability/ksat.rs
diff --git a/src/tests_unit/models/satisfiability/sat.rs b/src/unit_tests/models/satisfiability/sat.rs
similarity index 97%
rename from src/tests_unit/models/satisfiability/sat.rs
rename to src/unit_tests/models/satisfiability/sat.rs
index aa094f7..79fbe28 100644
--- a/src/tests_unit/models/satisfiability/sat.rs
+++ b/src/unit_tests/models/satisfiability/sat.rs
@@ -108,11 +108,12 @@ fn test_brute_force_satisfiable() {
             CNFClause::new(vec![-1, -2]),
         ],
     );
-    let solver = BruteForce::new();
+    let solver = BruteForce::new().valid_only(false);
     let solutions = solver.find_best(&problem);

-    // This is unsatisfiable, so no valid solutions
-    // BruteForce will return configs with max satisfied clauses
+    // This is unsatisfiable, so no valid solutions exist
+    // BruteForce with valid_only=false returns configs with max satisfied clauses
+    assert!(!solutions.is_empty());
     for sol in &solutions {
         // Best we can do is satisfy 2 out of 3 clauses
         assert!(!problem.solution_size(sol).is_valid);
diff --git a/src/tests_unit/models/set/set_covering.rs b/src/unit_tests/models/set/set_covering.rs
similarity index 100%
rename from src/tests_unit/models/set/set_covering.rs
rename to src/unit_tests/models/set/set_covering.rs
diff --git a/src/tests_unit/models/set/set_packing.rs b/src/unit_tests/models/set/set_packing.rs
similarity index 100%
rename from src/tests_unit/models/set/set_packing.rs
rename to src/unit_tests/models/set/set_packing.rs
diff --git a/src/tests_unit/models/specialized/biclique_cover.rs b/src/unit_tests/models/specialized/biclique_cover.rs
similarity index 100%
rename from src/tests_unit/models/specialized/biclique_cover.rs
rename to src/unit_tests/models/specialized/biclique_cover.rs
diff --git a/src/tests_unit/models/specialized/bmf.rs b/src/unit_tests/models/specialized/bmf.rs
similarity index 100%
rename from src/tests_unit/models/specialized/bmf.rs
rename to src/unit_tests/models/specialized/bmf.rs
diff --git a/src/tests_unit/models/specialized/circuit.rs b/src/unit_tests/models/specialized/circuit.rs
similarity index 100%
rename from src/tests_unit/models/specialized/circuit.rs
rename to src/unit_tests/models/specialized/circuit.rs
diff --git a/src/tests_unit/models/specialized/factoring.rs b/src/unit_tests/models/specialized/factoring.rs
similarity index 100%
rename from src/tests_unit/models/specialized/factoring.rs
rename to src/unit_tests/models/specialized/factoring.rs
diff --git a/src/tests_unit/models/specialized/paintshop.rs b/src/unit_tests/models/specialized/paintshop.rs
similarity index 100%
rename from src/tests_unit/models/specialized/paintshop.rs
rename to src/unit_tests/models/specialized/paintshop.rs
diff --git a/src/tests_unit/polynomial.rs b/src/unit_tests/polynomial.rs
similarity index 100%
rename from src/tests_unit/polynomial.rs
rename to src/unit_tests/polynomial.rs
diff --git a/src/tests_unit/property.rs b/src/unit_tests/property.rs
similarity index 100%
rename from src/tests_unit/property.rs
rename to src/unit_tests/property.rs
diff --git a/src/tests_unit/reduction_graph.rs b/src/unit_tests/reduction_graph.rs
similarity index 100%
rename from src/tests_unit/reduction_graph.rs
rename to src/unit_tests/reduction_graph.rs
diff --git a/src/tests_unit/registry/category.rs b/src/unit_tests/registry/category.rs
similarity index 100%
rename from src/tests_unit/registry/category.rs
rename to src/unit_tests/registry/category.rs
diff --git a/src/tests_unit/registry/info.rs b/src/unit_tests/registry/info.rs
similarity index 100%
rename from src/tests_unit/registry/info.rs
rename to src/unit_tests/registry/info.rs
diff --git a/src/tests_unit/rules/circuit_spinglass.rs b/src/unit_tests/rules/circuit_spinglass.rs
similarity index 100%
rename from src/tests_unit/rules/circuit_spinglass.rs
rename to src/unit_tests/rules/circuit_spinglass.rs
diff --git a/src/tests_unit/rules/clique_ilp.rs b/src/unit_tests/rules/clique_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/clique_ilp.rs
rename to src/unit_tests/rules/clique_ilp.rs
diff --git a/src/tests_unit/rules/coloring_ilp.rs b/src/unit_tests/rules/coloring_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/coloring_ilp.rs
rename to src/unit_tests/rules/coloring_ilp.rs
diff --git a/src/tests_unit/rules/cost.rs b/src/unit_tests/rules/cost.rs
similarity index 100%
rename from src/tests_unit/rules/cost.rs
rename to src/unit_tests/rules/cost.rs
diff --git a/src/tests_unit/rules/dominatingset_ilp.rs b/src/unit_tests/rules/dominatingset_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/dominatingset_ilp.rs
rename to src/unit_tests/rules/dominatingset_ilp.rs
diff --git a/src/tests_unit/rules/factoring_circuit.rs b/src/unit_tests/rules/factoring_circuit.rs
similarity index 100%
rename from src/tests_unit/rules/factoring_circuit.rs
rename to src/unit_tests/rules/factoring_circuit.rs
diff --git a/src/tests_unit/rules/factoring_ilp.rs b/src/unit_tests/rules/factoring_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/factoring_ilp.rs
rename to src/unit_tests/rules/factoring_ilp.rs
diff --git a/src/tests_unit/rules/graph.rs b/src/unit_tests/rules/graph.rs
similarity index 100%
rename from src/tests_unit/rules/graph.rs
rename to src/unit_tests/rules/graph.rs
diff --git a/src/tests_unit/rules/independentset_ilp.rs b/src/unit_tests/rules/independentset_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/independentset_ilp.rs
rename to src/unit_tests/rules/independentset_ilp.rs
diff --git a/src/tests_unit/rules/independentset_setpacking.rs b/src/unit_tests/rules/independentset_setpacking.rs
similarity index 100%
rename from src/tests_unit/rules/independentset_setpacking.rs
rename to src/unit_tests/rules/independentset_setpacking.rs
diff --git a/src/tests_unit/rules/matching_ilp.rs b/src/unit_tests/rules/matching_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/matching_ilp.rs
rename to src/unit_tests/rules/matching_ilp.rs
diff --git a/src/tests_unit/rules/matching_setpacking.rs b/src/unit_tests/rules/matching_setpacking.rs
similarity index 100%
rename from src/tests_unit/rules/matching_setpacking.rs
rename to src/unit_tests/rules/matching_setpacking.rs
diff --git a/src/tests_unit/rules/registry.rs b/src/unit_tests/rules/registry.rs
similarity index 100%
rename from src/tests_unit/rules/registry.rs
rename to src/unit_tests/rules/registry.rs
diff --git a/src/tests_unit/rules/sat_coloring.rs b/src/unit_tests/rules/sat_coloring.rs
similarity index 100%
rename from src/tests_unit/rules/sat_coloring.rs
rename to src/unit_tests/rules/sat_coloring.rs
diff --git a/src/tests_unit/rules/sat_dominatingset.rs b/src/unit_tests/rules/sat_dominatingset.rs
similarity index 100%
rename from src/tests_unit/rules/sat_dominatingset.rs
rename to src/unit_tests/rules/sat_dominatingset.rs
diff --git a/src/tests_unit/rules/sat_independentset.rs b/src/unit_tests/rules/sat_independentset.rs
similarity index 100%
rename from src/tests_unit/rules/sat_independentset.rs
rename to src/unit_tests/rules/sat_independentset.rs
diff --git a/src/tests_unit/rules/sat_ksat.rs b/src/unit_tests/rules/sat_ksat.rs
similarity index 100%
rename from src/tests_unit/rules/sat_ksat.rs
rename to src/unit_tests/rules/sat_ksat.rs
diff --git a/src/tests_unit/rules/setcovering_ilp.rs b/src/unit_tests/rules/setcovering_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/setcovering_ilp.rs
rename to src/unit_tests/rules/setcovering_ilp.rs
diff --git a/src/tests_unit/rules/setpacking_ilp.rs b/src/unit_tests/rules/setpacking_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/setpacking_ilp.rs
rename to src/unit_tests/rules/setpacking_ilp.rs
diff --git a/src/tests_unit/rules/spinglass_maxcut.rs b/src/unit_tests/rules/spinglass_maxcut.rs
similarity index 100%
rename from src/tests_unit/rules/spinglass_maxcut.rs
rename to src/unit_tests/rules/spinglass_maxcut.rs
diff --git a/src/tests_unit/rules/spinglass_qubo.rs b/src/unit_tests/rules/spinglass_qubo.rs
similarity index 100%
rename from src/tests_unit/rules/spinglass_qubo.rs
rename to src/unit_tests/rules/spinglass_qubo.rs
diff --git a/src/tests_unit/rules/traits.rs b/src/unit_tests/rules/traits.rs
similarity index 100%
rename from src/tests_unit/rules/traits.rs
rename to src/unit_tests/rules/traits.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/alpha_tensor.rs b/src/unit_tests/rules/unitdiskmapping/alpha_tensor.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/alpha_tensor.rs
rename to src/unit_tests/rules/unitdiskmapping/alpha_tensor.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/copyline.rs b/src/unit_tests/rules/unitdiskmapping/copyline.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/copyline.rs
rename to src/unit_tests/rules/unitdiskmapping/copyline.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/grid.rs b/src/unit_tests/rules/unitdiskmapping/grid.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/grid.rs
rename to src/unit_tests/rules/unitdiskmapping/grid.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs b/src/unit_tests/rules/unitdiskmapping/ksg/gadgets_weighted.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/ksg/gadgets_weighted.rs
rename to src/unit_tests/rules/unitdiskmapping/ksg/gadgets_weighted.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/ksg/mapping.rs b/src/unit_tests/rules/unitdiskmapping/ksg/mapping.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/ksg/mapping.rs
rename to src/unit_tests/rules/unitdiskmapping/ksg/mapping.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/pathdecomposition.rs b/src/unit_tests/rules/unitdiskmapping/pathdecomposition.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/pathdecomposition.rs
rename to src/unit_tests/rules/unitdiskmapping/pathdecomposition.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/triangular/mapping.rs b/src/unit_tests/rules/unitdiskmapping/triangular/mapping.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/triangular/mapping.rs
rename to src/unit_tests/rules/unitdiskmapping/triangular/mapping.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/triangular/mod.rs b/src/unit_tests/rules/unitdiskmapping/triangular/mod.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/triangular/mod.rs
rename to src/unit_tests/rules/unitdiskmapping/triangular/mod.rs
diff --git a/src/tests_unit/rules/unitdiskmapping/weighted.rs b/src/unit_tests/rules/unitdiskmapping/weighted.rs
similarity index 100%
rename from src/tests_unit/rules/unitdiskmapping/weighted.rs
rename to src/unit_tests/rules/unitdiskmapping/weighted.rs
diff --git a/src/tests_unit/rules/vertexcovering_ilp.rs b/src/unit_tests/rules/vertexcovering_ilp.rs
similarity index 100%
rename from src/tests_unit/rules/vertexcovering_ilp.rs
rename to src/unit_tests/rules/vertexcovering_ilp.rs
diff --git a/src/tests_unit/rules/vertexcovering_independentset.rs b/src/unit_tests/rules/vertexcovering_independentset.rs
similarity index 100%
rename from src/tests_unit/rules/vertexcovering_independentset.rs
rename to src/unit_tests/rules/vertexcovering_independentset.rs
diff --git a/src/tests_unit/rules/vertexcovering_setcovering.rs b/src/unit_tests/rules/vertexcovering_setcovering.rs
similarity index 100%
rename from src/tests_unit/rules/vertexcovering_setcovering.rs
rename to src/unit_tests/rules/vertexcovering_setcovering.rs
diff --git a/src/tests_unit/solvers/brute_force.rs b/src/unit_tests/solvers/brute_force.rs
similarity index 100%
rename from src/tests_unit/solvers/brute_force.rs
rename to src/unit_tests/solvers/brute_force.rs
diff --git a/src/tests_unit/solvers/ilp/solver.rs b/src/unit_tests/solvers/ilp/solver.rs
similarity index 100%
rename from src/tests_unit/solvers/ilp/solver.rs
rename to src/unit_tests/solvers/ilp/solver.rs
diff --git a/src/tests_unit/testing/macros.rs b/src/unit_tests/testing/macros.rs
similarity index 100%
rename from src/tests_unit/testing/macros.rs
rename to src/unit_tests/testing/macros.rs
diff --git a/src/tests_unit/testing/mod.rs b/src/unit_tests/testing/mod.rs
similarity index 100%
rename from src/tests_unit/testing/mod.rs
rename to src/unit_tests/testing/mod.rs
diff --git a/src/tests_unit/topology/graph.rs b/src/unit_tests/topology/graph.rs
similarity index 100%
rename from src/tests_unit/topology/graph.rs
rename to src/unit_tests/topology/graph.rs
diff --git a/src/tests_unit/topology/grid_graph.rs b/src/unit_tests/topology/grid_graph.rs
similarity index 100%
rename from src/tests_unit/topology/grid_graph.rs
rename to src/unit_tests/topology/grid_graph.rs
diff --git a/src/tests_unit/topology/hypergraph.rs b/src/unit_tests/topology/hypergraph.rs
similarity index 100%
rename from src/tests_unit/topology/hypergraph.rs
rename to src/unit_tests/topology/hypergraph.rs
diff --git a/src/tests_unit/topology/small_graphs.rs b/src/unit_tests/topology/small_graphs.rs
similarity index 100%
rename from src/tests_unit/topology/small_graphs.rs
rename to src/unit_tests/topology/small_graphs.rs
diff --git a/src/tests_unit/topology/unit_disk_graph.rs b/src/unit_tests/topology/unit_disk_graph.rs
similarity index 100%
rename from src/tests_unit/topology/unit_disk_graph.rs
rename to src/unit_tests/topology/unit_disk_graph.rs
diff --git a/src/tests_unit/trait_consistency.rs b/src/unit_tests/trait_consistency.rs
similarity index 100%
rename from src/tests_unit/trait_consistency.rs
rename to src/unit_tests/trait_consistency.rs
diff --git a/src/tests_unit/traits.rs b/src/unit_tests/traits.rs
similarity index 100%
rename from src/tests_unit/traits.rs
rename to src/unit_tests/traits.rs
diff --git a/src/tests_unit/truth_table.rs b/src/unit_tests/truth_table.rs
similarity index 100%
rename from src/tests_unit/truth_table.rs
rename to src/unit_tests/truth_table.rs
diff --git a/src/tests_unit/types.rs b/src/unit_tests/types.rs
similarity index 100%
rename from src/tests_unit/types.rs
rename to src/unit_tests/types.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/common.rs b/src/unit_tests/unitdiskmapping_algorithms/common.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/common.rs
rename to src/unit_tests/unitdiskmapping_algorithms/common.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/copyline.rs b/src/unit_tests/unitdiskmapping_algorithms/copyline.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/copyline.rs
rename to src/unit_tests/unitdiskmapping_algorithms/copyline.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/gadgets.rs b/src/unit_tests/unitdiskmapping_algorithms/gadgets.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/gadgets.rs
rename to src/unit_tests/unitdiskmapping_algorithms/gadgets.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/gadgets_ground_truth.rs b/src/unit_tests/unitdiskmapping_algorithms/gadgets_ground_truth.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/gadgets_ground_truth.rs
rename to src/unit_tests/unitdiskmapping_algorithms/gadgets_ground_truth.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/julia_comparison.rs b/src/unit_tests/unitdiskmapping_algorithms/julia_comparison.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/julia_comparison.rs
rename to src/unit_tests/unitdiskmapping_algorithms/julia_comparison.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/map_graph.rs b/src/unit_tests/unitdiskmapping_algorithms/map_graph.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/map_graph.rs
rename to src/unit_tests/unitdiskmapping_algorithms/map_graph.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/mapping_result.rs b/src/unit_tests/unitdiskmapping_algorithms/mapping_result.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/mapping_result.rs
rename to src/unit_tests/unitdiskmapping_algorithms/mapping_result.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/mod.rs b/src/unit_tests/unitdiskmapping_algorithms/mod.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/mod.rs
rename to src/unit_tests/unitdiskmapping_algorithms/mod.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/triangular.rs b/src/unit_tests/unitdiskmapping_algorithms/triangular.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/triangular.rs
rename to src/unit_tests/unitdiskmapping_algorithms/triangular.rs
diff --git a/src/tests_unit/unitdiskmapping_algorithms/weighted.rs b/src/unit_tests/unitdiskmapping_algorithms/weighted.rs
similarity index 100%
rename from src/tests_unit/unitdiskmapping_algorithms/weighted.rs
rename to src/unit_tests/unitdiskmapping_algorithms/weighted.rs
diff --git a/src/tests_unit/variant.rs b/src/unit_tests/variant.rs
similarity index 100%
rename from src/tests_unit/variant.rs
rename to src/unit_tests/variant.rs
diff --git a/src/variant.rs b/src/variant.rs
index a6ed3de..0481b4d 100644
--- a/src/variant.rs
+++ b/src/variant.rs
@@ -40,5 +40,5 @@ pub fn short_type_name() -> &'static str {
 }

 #[cfg(test)]
-#[path = "tests_unit/variant.rs"]
+#[path = "unit_tests/variant.rs"]
 mod tests;

From ef21f92d0971c41d61dc7d8b98efb020665b8898 Mon Sep 17 00:00:00 2001
From: GiggleLiu
Date: Sun, 8 Feb 2026 08:50:37 +0800
Subject: [PATCH 3/3] Fix review feedback: portable temp paths, consistent ConfigIterator, accurate comment

- Replace hardcoded /tmp/ paths in IO tests with std::env::temp_dir() + unique timestamps
- Fix ConfigIterator::total() to return 0 when num_variables == 0, matching iteration behavior
- Fix factoring test comment to accurately describe the assertion (at least one, not both)

Co-Authored-By: Claude Opus 4.6
---
 src/config.rs                                  | 8 ++++++--
 src/unit_tests/config.rs                       | 2 +-
 src/unit_tests/io.rs                           | 9 +++++++--
 src/unit_tests/models/specialized/factoring.rs | 2 +-
 4 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/src/config.rs b/src/config.rs
index fb2614c..6965712 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -15,8 +15,12 @@ pub struct ConfigIterator {
 impl ConfigIterator {
     /// Create a new configuration iterator.
     pub fn new(num_variables: usize, num_flavors: usize) -> Self {
-        let total_configs = num_flavors.pow(num_variables as u32);
-        let current = if num_variables == 0 || num_flavors == 0 {
+        let total_configs = if num_variables == 0 || num_flavors == 0 {
+            0
+        } else {
+            num_flavors.pow(num_variables as u32)
+        };
+        let current = if total_configs == 0 {
             None
         } else {
             Some(vec![0; num_variables])
diff --git a/src/unit_tests/config.rs b/src/unit_tests/config.rs
index 2620c74..dfe54d9 100644
--- a/src/unit_tests/config.rs
+++ b/src/unit_tests/config.rs
@@ -34,7 +34,7 @@ fn test_config_iterator_ternary() {
 #[test]
 fn test_config_iterator_empty() {
     let iter = ConfigIterator::new(0, 2);
-    assert_eq!(iter.total(), 1);
+    assert_eq!(iter.total(), 0);
     let configs: Vec<_> = iter.collect();
     assert_eq!(configs.len(), 0); // Empty because num_variables is 0
 }
diff --git a/src/unit_tests/io.rs b/src/unit_tests/io.rs
index c59398e..64b155b 100644
--- a/src/unit_tests/io.rs
+++ b/src/unit_tests/io.rs
@@ -2,6 +2,7 @@ use super::*;
 use crate::models::graph::IndependentSet;
 use crate::topology::SimpleGraph;
 use std::fs;
+use std::time::{SystemTime, UNIX_EPOCH};

 #[test]
 fn test_to_json() {
@@ -33,7 +34,9 @@ fn test_json_compact() {
 #[test]
 fn test_file_roundtrip() {
     let problem = IndependentSet::<SimpleGraph>::new(4, vec![(0, 1), (1, 2), (2, 3)]);
-    let path = "/tmp/test_problem.json";
+    let ts = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos();
+    let path = std::env::temp_dir().join(format!("test_problem_{ts}.json"));
+    let path = path.to_str().unwrap();

     // Write
     write_problem(&problem, path, FileFormat::Json).unwrap();
@@ -63,7 +66,9 @@ fn test_file_format_from_extension() {

 #[test]
 fn test_read_write_file() {
-    let path = "/tmp/test_io.txt";
+    let ts = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos();
+    let path = std::env::temp_dir().join(format!("test_io_{ts}.txt"));
+    let path = path.to_str().unwrap();
     let contents = "Hello, World!";

     write_file(path, contents).unwrap();
diff --git a/src/unit_tests/models/specialized/factoring.rs b/src/unit_tests/models/specialized/factoring.rs
index a4a766b..42eb65b 100644
--- a/src/unit_tests/models/specialized/factoring.rs
+++ b/src/unit_tests/models/specialized/factoring.rs
@@ -104,7 +104,7 @@ fn test_brute_force_prime() {
     let solutions = solver.find_best(&problem);
     let factor_pairs: Vec<_> = solutions.iter().map(|s| problem.read_factors(s)).collect();

-    // Should find (1,7) and (7,1)
+    // Should find at least one of (1,7) or (7,1)
     assert!(factor_pairs.contains(&(1, 7)) || factor_pairs.contains(&(7, 1)));
 }
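
A minimal sketch of the edge-case contract the ConfigIterator change above establishes (not part of the applied patch; it assumes the iterator is reachable as crate::config::ConfigIterator per src/config.rs, and the test module name is illustrative): with zero variables or zero flavors, total() and iteration now agree that there is nothing to enumerate.

#[cfg(test)]
mod config_iterator_edge_cases {
    use crate::config::ConfigIterator;

    #[test]
    fn empty_domains_yield_no_configs() {
        // Zero variables: total() now reports 0 rather than num_flavors^0 == 1,
        // matching the fact that iteration yields no configurations.
        let iter = ConfigIterator::new(0, 2);
        assert_eq!(iter.total(), 0);
        assert_eq!(iter.count(), 0);

        // Zero flavors: no assignment exists, so the same contract applies.
        let iter = ConfigIterator::new(3, 0);
        assert_eq!(iter.total(), 0);
        assert_eq!(iter.count(), 0);
    }
}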