From 514d4e960716d4047b259d41997551ef3845f6fb Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Sun, 8 Feb 2026 13:58:20 +0800 Subject: [PATCH 1/3] Add QUBO test data generation script and ground truth datasets - Add scripts/generate_qubo_tests.py using qubogen to generate QUBO ground truth for 7 problem types: MaxCut, VertexCovering, IndependentSet, Coloring, SetPacking, KSatisfiability (2-SAT), and ILP - Add scripts/ Python project managed by uv (pyproject.toml, uv.lock) - Add tests/data/qubo/*.json with brute-force optimal solutions - Add `make qubo-testdata` target to regenerate test data - Update .gitignore for .venv/ Ref #18 Co-Authored-By: Claude Opus 4.6 --- .gitignore | 3 +- Makefile | 7 +- scripts/.python-version | 1 + scripts/generate_qubo_tests.py | 263 +++++++++++++++++++ scripts/pyproject.toml | 9 + scripts/uv.lock | 55 ++++ tests/data/qubo/coloring_to_qubo.json | 1 + tests/data/qubo/ilp_to_qubo.json | 1 + tests/data/qubo/independentset_to_qubo.json | 1 + tests/data/qubo/ksatisfiability_to_qubo.json | 1 + tests/data/qubo/maxcut_to_qubo.json | 1 + tests/data/qubo/setpacking_to_qubo.json | 1 + tests/data/qubo/vertexcovering_to_qubo.json | 1 + 13 files changed, 343 insertions(+), 2 deletions(-) create mode 100644 scripts/.python-version create mode 100644 scripts/generate_qubo_tests.py create mode 100644 scripts/pyproject.toml create mode 100644 scripts/uv.lock create mode 100644 tests/data/qubo/coloring_to_qubo.json create mode 100644 tests/data/qubo/ilp_to_qubo.json create mode 100644 tests/data/qubo/independentset_to_qubo.json create mode 100644 tests/data/qubo/ksatisfiability_to_qubo.json create mode 100644 tests/data/qubo/maxcut_to_qubo.json create mode 100644 tests/data/qubo/setpacking_to_qubo.json create mode 100644 tests/data/qubo/vertexcovering_to_qubo.json diff --git a/.gitignore b/.gitignore index 64f2d8c..42aaf3f 100644 --- a/.gitignore +++ b/.gitignore @@ -38,11 +38,12 @@ quickcheck-tests.json /dist/ /build/ -# Python (for pre-commit) +# Python 
__pycache__/ *.py[cod] *$py.class .Python +.venv/ venv/ env/ ENV/ diff --git a/Makefile b/Makefile index bce574f..47fee9f 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Makefile for problemreductions -.PHONY: help build test fmt clippy doc mdbook paper clean coverage rust-export compare +.PHONY: help build test fmt clippy doc mdbook paper clean coverage rust-export compare qubo-testdata # Default target help: @@ -18,6 +18,7 @@ help: @echo " check - Quick check (fmt + clippy + test)" @echo " rust-export - Generate Rust mapping JSON exports" @echo " compare - Generate and compare Rust mapping exports" + @echo " qubo-testdata - Regenerate QUBO test data (requires uv)" # Build the project build: @@ -65,6 +66,10 @@ clean: check: fmt-check clippy test @echo "✅ All checks passed!" +# Regenerate QUBO test data from Python (requires uv) +qubo-testdata: + cd scripts && uv run python generate_qubo_tests.py + # Generate Rust mapping JSON exports for all graphs and modes GRAPHS := diamond bull house petersen MODES := unweighted weighted triangular diff --git a/scripts/.python-version b/scripts/.python-version new file mode 100644 index 0000000..e4fba21 --- /dev/null +++ b/scripts/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/scripts/generate_qubo_tests.py b/scripts/generate_qubo_tests.py new file mode 100644 index 0000000..d806e0c --- /dev/null +++ b/scripts/generate_qubo_tests.py @@ -0,0 +1,263 @@ +"""Generate QUBO test datasets using qubogen. + +For each supported problem type, creates a small instance, reduces it to QUBO +via qubogen, brute-force solves both sides, and exports JSON ground truth +to tests/data/qubo/. 
+ +Usage: + uv run python scripts/generate_qubo_tests.py +""" + +import json +import os +from itertools import product +from pathlib import Path + +import numpy as np + +# Monkey-patch for qubogen compatibility with numpy >= 1.24 +np.float = np.float64 +np.int = np.int_ +np.bool = np.bool_ + +import qubogen + + +def brute_force_qubo(Q: np.ndarray) -> dict: + """Brute-force solve a QUBO: minimize x^T Q x over binary x.""" + n = Q.shape[0] + best_val = float("inf") + best_configs = [] + for bits in product(range(2), repeat=n): + x = np.array(bits, dtype=float) + val = float(x @ Q @ x) + if val < best_val - 1e-9: + best_val = val + best_configs = [list(bits)] + elif abs(val - best_val) < 1e-9: + best_configs.append(list(bits)) + return {"value": best_val, "configs": best_configs} + + +def save_test(name: str, data: dict, outdir: Path): + """Save test data as compact JSON.""" + path = outdir / f"{name}.json" + with open(path, "w") as f: + json.dump(data, f, separators=(",", ":")) + print(f" wrote {path} ({path.stat().st_size} bytes)") + + +def generate_maxcut(outdir: Path): + """MaxCut on a small graph (4 nodes, 4 edges).""" + edges = [(0, 1), (1, 2), (2, 3), (0, 3)] + n_nodes = 4 + g = qubogen.Graph(edges=np.array(edges), n_nodes=n_nodes) + Q = qubogen.qubo_max_cut(g) + + qubo_result = brute_force_qubo(Q) + + # MaxCut maximizes cut edges; QUBO minimizes, so optimal QUBO value + # corresponds to maximum cut (negated). 
+ save_test("maxcut_to_qubo", { + "problem": "MaxCut", + "source": {"num_vertices": n_nodes, "edges": edges}, + "qubo_matrix": Q.tolist(), + "qubo_num_vars": int(Q.shape[0]), + "qubo_optimal": qubo_result, + }, outdir) + + +def generate_vertex_covering(outdir: Path): + """Minimum Vertex Cover on a small graph (4 nodes, 5 edges).""" + edges = [(0, 1), (1, 2), (2, 3), (0, 3), (0, 2)] + n_nodes = 4 + penalty = 8.0 + g = qubogen.Graph(edges=np.array(edges), n_nodes=n_nodes) + Q = qubogen.qubo_mvc(g, penalty=penalty) + + qubo_result = brute_force_qubo(Q) + + save_test("vertexcovering_to_qubo", { + "problem": "VertexCovering", + "source": {"num_vertices": n_nodes, "edges": edges, "penalty": penalty}, + "qubo_matrix": Q.tolist(), + "qubo_num_vars": int(Q.shape[0]), + "qubo_optimal": qubo_result, + }, outdir) + + +def generate_independent_set(outdir: Path): + """Independent Set on a small graph. + + IndependentSet is the complement of VertexCover: maximize |S| s.t. no + two adjacent vertices are in S. We formulate as QUBO by negating the + linear terms of MVC (minimize -|S| + penalty * constraint violations). + """ + edges = [(0, 1), (1, 2), (2, 3), (0, 3)] + n_nodes = 4 + penalty = 8.0 + g = qubogen.Graph(edges=np.array(edges), n_nodes=n_nodes) + + # Independent set QUBO: maximize sum(x_i) s.t. 
x_i*x_j = 0 for edges + # = minimize -sum(x_i) + P * sum_{(i,j)} x_i*x_j + Q = np.zeros((n_nodes, n_nodes)) + for i in range(n_nodes): + Q[i][i] = -1.0 + for i, j in edges: + Q[i][j] += penalty + + qubo_result = brute_force_qubo(Q) + + save_test("independentset_to_qubo", { + "problem": "IndependentSet", + "source": {"num_vertices": n_nodes, "edges": edges, "penalty": penalty}, + "qubo_matrix": Q.tolist(), + "qubo_num_vars": int(Q.shape[0]), + "qubo_optimal": qubo_result, + }, outdir) + + +def generate_graph_coloring(outdir: Path): + """Graph Coloring on a small graph (3 nodes triangle, 3 colors).""" + edges = [(0, 1), (1, 2), (0, 2)] + n_nodes = 3 + n_color = 3 + penalty = 10.0 + g = qubogen.Graph(edges=np.array(edges), n_nodes=n_nodes) + Q = qubogen.qubo_graph_coloring(g, n_color=n_color, penalty=penalty) + + qubo_result = brute_force_qubo(Q) + + # QUBO variables: n_nodes * n_color (one-hot encoding) + save_test("coloring_to_qubo", { + "problem": "Coloring", + "source": { + "num_vertices": n_nodes, + "edges": edges, + "num_colors": n_color, + "penalty": penalty, + }, + "qubo_matrix": Q.tolist(), + "qubo_num_vars": int(Q.shape[0]), + "qubo_optimal": qubo_result, + }, outdir) + + +def generate_set_packing(outdir: Path): + """Set Packing: select maximum-weight non-overlapping sets.""" + # 3 sets over 4 elements + # set 0: {0, 2} + # set 1: {1, 2} + # set 2: {0, 3} + sets = [[0, 2], [1, 2], [0, 3]] + n_elements = 4 + n_sets = len(sets) + weights = [1.0, 2.0, 1.5] + penalty = 8.0 + + # Build incidence matrix (elements x sets) + a = np.zeros((n_elements, n_sets)) + for j, s in enumerate(sets): + for i in s: + a[i][j] = 1 + + Q = qubogen.qubo_set_pack(a, np.array(weights), penalty=penalty) + + qubo_result = brute_force_qubo(Q) + + save_test("setpacking_to_qubo", { + "problem": "SetPacking", + "source": { + "sets": sets, + "num_elements": n_elements, + "weights": weights, + "penalty": penalty, + }, + "qubo_matrix": Q.tolist(), + "qubo_num_vars": int(Q.shape[0]), + 
"qubo_optimal": qubo_result, + }, outdir) + + +def generate_max2sat(outdir: Path): + """Max 2-SAT: maximize satisfied clauses.""" + # 3 variables, 4 clauses: + # (x0 OR x1), (NOT x0 OR x2), (x1 OR NOT x2), (NOT x1 OR NOT x2) + literals = np.array([[0, 1], [0, 2], [1, 2], [1, 2]]) + signs = np.array( + [[True, True], [False, True], [True, False], [False, False]] + ) + + c = qubogen.Clauses(literals=literals, signs=signs) + Q = qubogen.qubo_max2sat(c) + + qubo_result = brute_force_qubo(Q) + + # Convert to list-of-clauses format matching our KSatisfiability model + clauses = [] + for i in range(len(literals)): + clause = [] + for j in range(2): + var = int(literals[i][j]) + negated = not bool(signs[i][j]) + clause.append({"variable": var, "negated": negated}) + clauses.append(clause) + + save_test("ksatisfiability_to_qubo", { + "problem": "KSatisfiability", + "source": {"num_variables": 3, "clauses": clauses}, + "qubo_matrix": Q.tolist(), + "qubo_num_vars": int(Q.shape[0]), + "qubo_optimal": qubo_result, + }, outdir) + + +def generate_ilp(outdir: Path): + """Binary ILP (General 0/1 Programming): min c^T x, s.t. 
Ax <= b.""" + # 3 variables + # minimize: x0 + 2*x1 + 3*x2 + # s.t.: x0 + x1 <= 1 + # x1 + x2 <= 1 + cost = np.array([1.0, 2.0, 3.0]) + A = np.array([[1.0, 1.0, 0.0], [0.0, 1.0, 1.0]]) + b = np.array([1.0, 1.0]) + sign = np.array([-1, -1]) # -1 means <= + penalty = 10.0 + + Q = qubogen.qubo_general01(cost, A, b, sign, penalty=penalty) + + qubo_result = brute_force_qubo(Q) + + save_test("ilp_to_qubo", { + "problem": "ILP", + "source": { + "num_variables": 3, + "objective": cost.tolist(), + "constraints_lhs": A.tolist(), + "constraints_rhs": b.tolist(), + "constraint_signs": sign.tolist(), + "penalty": penalty, + }, + "qubo_matrix": Q.tolist(), + "qubo_num_vars": int(Q.shape[0]), + "qubo_optimal": qubo_result, + }, outdir) + + +def main(): + outdir = Path(__file__).resolve().parent.parent / "tests" / "data" / "qubo" + outdir.mkdir(parents=True, exist_ok=True) + + print("Generating QUBO test datasets...") + generate_maxcut(outdir) + generate_vertex_covering(outdir) + generate_independent_set(outdir) + generate_graph_coloring(outdir) + generate_set_packing(outdir) + generate_max2sat(outdir) + generate_ilp(outdir) + print("Done.") + + +if __name__ == "__main__": + main() diff --git a/scripts/pyproject.toml b/scripts/pyproject.toml new file mode 100644 index 0000000..a61d0e9 --- /dev/null +++ b/scripts/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "scripts" +version = "0.1.0" +description = "Test data generation scripts for problem-reductions" +requires-python = ">=3.12" +dependencies = [ + "numpy>=1.26,<2", + "qubogen>=0.1.1", +] diff --git a/scripts/uv.lock b/scripts/uv.lock new file mode 100644 index 0000000..58b3004 --- /dev/null +++ b/scripts/uv.lock @@ -0,0 +1,55 @@ +version = 1 +revision = 2 +requires-python = ">=3.12" + +[[package]] +name = "networkx" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/6a/51/63fe664f3908c97be9d2e4f1158eb633317598cfa6e1fc14af5383f17512/networkx-3.6.1.tar.gz", hash = "sha256:26b7c357accc0c8cde558ad486283728b65b6a95d85ee1cd66bafab4c8168509", size = 2517025, upload-time = "2025-12-08T17:02:39.908Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" }, +] + +[[package]] +name = "numpy" +version = "1.26.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/6e/09db70a523a96d25e115e71cc56a6f9031e7b8cd166c1ac8438307c14058/numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010", size = 15786129, upload-time = "2024-02-06T00:26:44.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/12/8f2020a8e8b8383ac0177dc9570aad031a3beb12e38847f7129bacd96228/numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218", size = 20335901, upload-time = "2024-02-05T23:55:32.801Z" }, + { url = "https://files.pythonhosted.org/packages/75/5b/ca6c8bd14007e5ca171c7c03102d17b4f4e0ceb53957e8c44343a9546dcc/numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b", size = 13685868, upload-time = "2024-02-05T23:55:56.28Z" }, + { url = "https://files.pythonhosted.org/packages/79/f8/97f10e6755e2a7d027ca783f63044d5b1bc1ae7acb12afe6a9b4286eac17/numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b", size = 13925109, upload-time = "2024-02-05T23:56:20.368Z" }, + { url = 
"https://files.pythonhosted.org/packages/0f/50/de23fde84e45f5c4fda2488c759b69990fd4512387a8632860f3ac9cd225/numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed", size = 17950613, upload-time = "2024-02-05T23:56:56.054Z" }, + { url = "https://files.pythonhosted.org/packages/4c/0c/9c603826b6465e82591e05ca230dfc13376da512b25ccd0894709b054ed0/numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a", size = 13572172, upload-time = "2024-02-05T23:57:21.56Z" }, + { url = "https://files.pythonhosted.org/packages/76/8c/2ba3902e1a0fc1c74962ea9bb33a534bb05984ad7ff9515bf8d07527cadd/numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0", size = 17786643, upload-time = "2024-02-05T23:57:56.585Z" }, + { url = "https://files.pythonhosted.org/packages/28/4a/46d9e65106879492374999e76eb85f87b15328e06bd1550668f79f7b18c6/numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110", size = 5677803, upload-time = "2024-02-05T23:58:08.963Z" }, + { url = "https://files.pythonhosted.org/packages/16/2e/86f24451c2d530c88daf997cb8d6ac622c1d40d19f5a031ed68a4b73a374/numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", size = 15517754, upload-time = "2024-02-05T23:58:36.364Z" }, +] + +[[package]] +name = "qubogen" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "networkx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/c2/f8fad31da8e0029e867522ca94adb174ee4614e5c287a693b725961e4eff/qubogen-0.1.1.tar.gz", hash = "sha256:e54f4b454ded6deb292a3168ce0790bdabecc2fc464e7a225a585504fd87aa75", size = 4377, upload-time = "2019-02-27T14:14:47.914Z" } +wheels = [ 
+ { url = "https://files.pythonhosted.org/packages/e1/d1/3b7177184c953a6a4a173d7838cc4291cd43a5b35dab27ca17277d8576f6/qubogen-0.1.1-py3-none-any.whl", hash = "sha256:f5a7ce03374c7e75977cf88c5e7bc6f0da4a4998ed7989893869728ec0e413fe", size = 5847, upload-time = "2019-02-27T14:14:46.446Z" }, +] + +[[package]] +name = "scripts" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "numpy" }, + { name = "qubogen" }, +] + +[package.metadata] +requires-dist = [ + { name = "numpy", specifier = ">=1.26,<2" }, + { name = "qubogen", specifier = ">=0.1.1" }, +] diff --git a/tests/data/qubo/coloring_to_qubo.json b/tests/data/qubo/coloring_to_qubo.json new file mode 100644 index 0000000..f10b02e --- /dev/null +++ b/tests/data/qubo/coloring_to_qubo.json @@ -0,0 +1 @@ +{"problem":"Coloring","source":{"num_vertices":3,"edges":[[0,1],[1,2],[0,2]],"num_colors":3,"penalty":10.0},"qubo_matrix":[[-10.0,10.0,10.0,5.0,0.0,0.0,5.0,0.0,0.0],[10.0,-10.0,10.0,0.0,5.0,0.0,0.0,5.0,0.0],[10.0,10.0,-10.0,0.0,0.0,5.0,0.0,0.0,5.0],[5.0,0.0,0.0,-10.0,10.0,10.0,5.0,0.0,0.0],[0.0,5.0,0.0,10.0,-10.0,10.0,0.0,5.0,0.0],[0.0,0.0,5.0,10.0,10.0,-10.0,0.0,0.0,5.0],[5.0,0.0,0.0,5.0,0.0,0.0,-10.0,10.0,10.0],[0.0,5.0,0.0,0.0,5.0,0.0,10.0,-10.0,10.0],[0.0,0.0,5.0,0.0,0.0,5.0,10.0,10.0,-10.0]],"qubo_num_vars":9,"qubo_optimal":{"value":-30.0,"configs":[[0,0,1,0,1,0,1,0,0],[0,0,1,1,0,0,0,1,0],[0,1,0,0,0,1,1,0,0],[0,1,0,1,0,0,0,0,1],[1,0,0,0,0,1,0,1,0],[1,0,0,0,1,0,0,0,1]]}} \ No newline at end of file diff --git a/tests/data/qubo/ilp_to_qubo.json b/tests/data/qubo/ilp_to_qubo.json new file mode 100644 index 0000000..a740860 --- /dev/null +++ b/tests/data/qubo/ilp_to_qubo.json @@ -0,0 +1 @@ 
+{"problem":"ILP","source":{"num_variables":3,"objective":[1.0,2.0,3.0],"constraints_lhs":[[1.0,1.0,0.0],[0.0,1.0,1.0]],"constraints_rhs":[1.0,1.0],"constraint_signs":[-1,-1],"penalty":10.0},"qubo_matrix":[[-11.0,10.0,0.0],[10.0,-22.0,10.0],[0.0,10.0,-13.0]],"qubo_num_vars":3,"qubo_optimal":{"value":-24.0,"configs":[[1,0,1]]}} \ No newline at end of file diff --git a/tests/data/qubo/independentset_to_qubo.json b/tests/data/qubo/independentset_to_qubo.json new file mode 100644 index 0000000..571dba1 --- /dev/null +++ b/tests/data/qubo/independentset_to_qubo.json @@ -0,0 +1 @@ +{"problem":"IndependentSet","source":{"num_vertices":4,"edges":[[0,1],[1,2],[2,3],[0,3]],"penalty":8.0},"qubo_matrix":[[-1.0,8.0,0.0,8.0],[0.0,-1.0,8.0,0.0],[0.0,0.0,-1.0,8.0],[0.0,0.0,0.0,-1.0]],"qubo_num_vars":4,"qubo_optimal":{"value":-2.0,"configs":[[0,1,0,1],[1,0,1,0]]}} \ No newline at end of file diff --git a/tests/data/qubo/ksatisfiability_to_qubo.json b/tests/data/qubo/ksatisfiability_to_qubo.json new file mode 100644 index 0000000..585a784 --- /dev/null +++ b/tests/data/qubo/ksatisfiability_to_qubo.json @@ -0,0 +1 @@ +{"problem":"KSatisfiability","source":{"num_variables":3,"clauses":[[{"variable":0,"negated":false},{"variable":1,"negated":false}],[{"variable":0,"negated":true},{"variable":2,"negated":false}],[{"variable":1,"negated":false},{"variable":2,"negated":true}],[{"variable":1,"negated":true},{"variable":2,"negated":true}]]},"qubo_matrix":[[0.0,0.5,-0.5],[0.5,-1.0,0.0],[-0.5,0.0,1.0]],"qubo_num_vars":3,"qubo_optimal":{"value":-1.0,"configs":[[0,1,0]]}} \ No newline at end of file diff --git a/tests/data/qubo/maxcut_to_qubo.json b/tests/data/qubo/maxcut_to_qubo.json new file mode 100644 index 0000000..f272db9 --- /dev/null +++ b/tests/data/qubo/maxcut_to_qubo.json @@ -0,0 +1 @@ 
+{"problem":"MaxCut","source":{"num_vertices":4,"edges":[[0,1],[1,2],[2,3],[0,3]]},"qubo_matrix":[[-2.0,1.0,0.0,1.0],[1.0,-2.0,1.0,0.0],[0.0,1.0,-2.0,1.0],[1.0,0.0,1.0,-2.0]],"qubo_num_vars":4,"qubo_optimal":{"value":-4.0,"configs":[[0,1,0,1],[1,0,1,0]]}} \ No newline at end of file diff --git a/tests/data/qubo/setpacking_to_qubo.json b/tests/data/qubo/setpacking_to_qubo.json new file mode 100644 index 0000000..b9b86e2 --- /dev/null +++ b/tests/data/qubo/setpacking_to_qubo.json @@ -0,0 +1 @@ +{"problem":"SetPacking","source":{"sets":[[0,2],[1,2],[0,3]],"num_elements":4,"weights":[1.0,2.0,1.5],"penalty":8.0},"qubo_matrix":[[-1.0,4.0,4.0],[4.0,-2.0,0.0],[4.0,0.0,-1.5]],"qubo_num_vars":3,"qubo_optimal":{"value":-3.5,"configs":[[0,1,1]]}} \ No newline at end of file diff --git a/tests/data/qubo/vertexcovering_to_qubo.json b/tests/data/qubo/vertexcovering_to_qubo.json new file mode 100644 index 0000000..06479d1 --- /dev/null +++ b/tests/data/qubo/vertexcovering_to_qubo.json @@ -0,0 +1 @@ +{"problem":"VertexCovering","source":{"num_vertices":4,"edges":[[0,1],[1,2],[2,3],[0,3],[0,2]],"penalty":8.0},"qubo_matrix":[[-23.0,4.0,4.0,4.0],[4.0,-15.0,4.0,0.0],[4.0,4.0,-23.0,4.0],[4.0,0.0,4.0,-15.0]],"qubo_num_vars":4,"qubo_optimal":{"value":-38.0,"configs":[[1,0,1,0]]}} \ No newline at end of file From d595a53b133a9be44c82346dedd7d760aac58423 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Sun, 8 Feb 2026 14:09:51 +0800 Subject: [PATCH 2/3] Add related projects section to README Co-Authored-By: Claude Opus 4.6 --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 5b803a1..aa1512e 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,13 @@ make check # Quick check before commit (fmt + clippy + test) ``` +## Related Projects + +- **[Karp](https://github.com/REA1/karp)** — A DSL (built on Racket) for writing and testing Karp reductions between NP-complete problems ([PLDI 2022 paper](https://dl.acm.org/doi/abs/10.1145/3519939.3523732)). 
Focused on education and proof verification rather than a solver pipeline. +- **[Complexity Zoo](https://complexityzoo.net/)** — Comprehensive catalog of 550+ computational complexity classes (Scott Aaronson). +- **[A Compendium of NP Optimization Problems](https://www.csc.kth.se/tcs/compendium/)** — Online catalog of NP optimization problems with approximability results (Crescenzi & Kann). +- **Computers and Intractability** (Garey & Johnson, 1979) — The classic reference cataloging 300+ NP-complete problems with reductions. The most cited book in computer science. + ## License MIT License - see [LICENSE](LICENSE) for details. From 229469446dfe7ce332aca69f8a67476cb4649649 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Mon, 9 Feb 2026 15:07:30 +0800 Subject: [PATCH 3/3] update --- .claude/CLAUDE.md | 4 + .claude/rules/adding-reductions.md | 70 ++- .claude/skills/issue-to-pr.md | 52 +-- docs/plans/2026-02-08-qubo-reductions-plan.md | 414 ++++++++++++++++++ 4 files changed, 498 insertions(+), 42 deletions(-) create mode 100644 docs/plans/2026-02-08-qubo-reductions-plan.md diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index 289937f..72a8fee 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -10,6 +10,7 @@ make clippy # Lint make export-graph # Regenerate reduction graph make paper # Build Typst paper make coverage # Generate coverage report (>95% required) +make qubo-testdata # Regenerate QUBO ground truth JSON ``` ## Verify Changes @@ -28,6 +29,9 @@ make test clippy export-graph # Must pass before PR - `src/registry/` - Compile-time reduction metadata collection - `src/unit_tests/` - Unit test files (mirroring `src/` structure, referenced via `#[path]`) - `tests/main.rs` - Integration tests (modules in `tests/suites/`) +- `tests/data/` - Ground truth JSON for integration tests +- `scripts/` - Python test data generation scripts (managed with `uv`) +- `docs/plans/` - Implementation plans ### Trait Hierarchy diff --git a/.claude/rules/adding-reductions.md 
b/.claude/rules/adding-reductions.md index d9c01cb..20411ac 100644 --- a/.claude/rules/adding-reductions.md +++ b/.claude/rules/adding-reductions.md @@ -5,19 +5,59 @@ paths: # Adding a Reduction Rule (A → B) +## 0. Brainstorm & Generate Test Data First + +Before writing any Rust code, follow this workflow: + +1. **Brainstorm the reduction** — use `superpowers:brainstorming` to discuss with the user: + - Research the mathematical formulation (paper, textbook, or derive it) + - Understand the variable mapping and constraint encoding + - Discuss implementation approach: penalty values, matrix construction, solution extraction + - Read reference implementations in the codebase (e.g., `src/rules/spinglass_qubo.rs`) to understand conventions + - Agree on scope (weighted vs unweighted, specific graph types, const generics) +2. **Generate ground truth test data** — use an existing library (e.g., Python with qubogen, qubovert, or networkx) to create small instances, reduce them, brute-force solve both sides, and export as JSON to `tests/data//`. It is recommended to download the relevant package and check the existing tests to understand how to construct tests. To generate the test data, you can use the following command: + ```bash + # Example: generate QUBO test data + cd scripts && uv run python generate_qubo_tests.py + ``` +3. **Create a practical example** — design a small, explainable instance for `examples/` (e.g., "wireless tower placement" for IndependentSet, "map coloring" for Coloring). This example will also appear in the `docs/paper/reductions.typ`. +4. **Write the implementation plan** — save to `docs/plans/` using `superpowers:writing-plans`. The plan must include implementation details from the brainstorming session (formulas, penalty terms, matrix construction, variable indexing). + ## 1. 
Implementation -Create `src/rules/_.rs`: + +Create `src/rules/_.rs` following the pattern in `src/rules/spinglass_qubo.rs`: ```rust -use problemreductions::reduction; +use crate::reduction; + +#[derive(Debug, Clone)] +pub struct ReductionSourceToTarget { + target: TargetProblem<...>, + source_size: ProblemSize, + // + any metadata needed for extract_solution +} + +impl ReductionResult for ReductionSourceToTarget { + type Source = SourceProblem<...>; + type Target = TargetProblem<...>; + + fn target_problem(&self) -> &Self::Target { &self.target } + fn extract_solution(&self, target_solution: &[usize]) -> Vec { ... } + fn source_size(&self) -> ProblemSize { self.source_size.clone() } + fn target_size(&self) -> ProblemSize { self.target.problem_size() } +} #[reduction( overhead = { ReductionOverhead::new(vec![...]) } )] -impl ReduceTo> for SourceProblem { +impl ReduceTo> for SourceProblem<...> { type Result = ReductionSourceToTarget; fn reduce_to(&self) -> Self::Result { ... } } + +#[cfg(test)] +#[path = "../unit_tests/rules/_.rs"] +mod tests; ``` The `#[reduction]` macro auto-generates the `inventory::submit!` call. Optional attributes: `source_graph`, `target_graph`, `source_weighted`, `target_weighted`. @@ -28,25 +68,39 @@ mod source_target; pub use source_target::ReductionSourceToTarget; ``` -## 2. Closed-Loop Test (Required) +## 2. Tests (Required) + +- **Unit tests** in `src/unit_tests/rules/_.rs` — closed-loop + edge cases. See `rules/testing.md`. +- **Integration tests** in `tests/suites/reductions.rs` — compare against JSON ground truth from step 0. +- Test name: `test__to__closed_loop` -See `rules/testing.md` for the full pattern. Test name: `test__to__closed_loop`. +## 3. Example Program + +Add a round-trip demo to `examples/` showing a practical, explainable instance: +1. Create source problem with a real-world story +2. Reduce to target, solve, extract solution +3. Print human-readable explanation + +## 4. Documentation -## 3. 
Documentation Update `docs/paper/reductions.typ` (see `rules/documentation.md` for the pattern): - Add theorem + proof sketch -- Add code example +- Add Rust code example from the example program - Add to summary table with overhead and citation +The goal is to 1. prove the correctness of the reduction to human beings. 2. provide a minimal working example to the readers. + Citations must be verifiable. Use `[Folklore]` or `—` for trivial reductions. -## 4. Regenerate Reduction Graph +## 5. Regenerate Reduction Graph ```bash make export-graph ``` ## Anti-patterns +- Don't write Rust code before understanding the math and having test data - Don't create reductions without closed-loop tests - Don't forget `inventory::submit!` registration (reduction graph won't update) - Don't hardcode weights - use generic `W` parameter - Don't skip overhead polynomial specification +- Don't skip the example program — every reduction needs an explainable demo diff --git a/.claude/skills/issue-to-pr.md b/.claude/skills/issue-to-pr.md index f494605..3e53217 100644 --- a/.claude/skills/issue-to-pr.md +++ b/.claude/skills/issue-to-pr.md @@ -19,13 +19,15 @@ Convert a GitHub issue into an actionable PR with a plan that auto-triggers Clau digraph issue_to_pr { "Receive issue number" [shape=box]; "Fetch issue with gh" [shape=box]; + "Check the rules to follow" [shape=box]; "Brainstorm with user" [shape=box]; "Write plan file" [shape=box]; "Create branch and PR" [shape=box]; "PR triggers [action]" [shape=doublecircle]; "Receive issue number" -> "Fetch issue with gh"; - "Fetch issue with gh" -> "Brainstorm with user"; + "Fetch issue with gh" -> "Check the rules to follow"; + "Check the rules to follow" -> "Brainstorm with user"; "Brainstorm with user" -> "Write plan file"; "Write plan file" -> "Create branch and PR"; "Create branch and PR" -> "PR triggers [action]"; @@ -53,36 +55,18 @@ Present issue summary to user. 
**REQUIRED:** Invoke `superpowers:brainstorming` skill with the issue context (if superpowers plugin is available). Otherwise, conduct a manual brainstorming discussion with the user. -This ensures: -- User intent is clarified -- Multiple approaches are explored -- Requirements are understood before planning +Brainstorming must cover: +- **User intent** — clarify what the issue is asking for +- **Multiple approaches** — explore 2-3 different implementation strategies +- **Implementation details** — discuss the mathematical formulation, data structures, variable mappings, constraint encodings, and any non-obvious design choices +- **Existing patterns** — read reference implementations in the codebase (e.g., `spinglass_qubo.rs` for reductions) to understand the conventions +- **Scope** — agree on which variants to implement (e.g., unweighted only, specific K values) Do NOT skip brainstorming. Do NOT write a plan without user discussion. ### 4. Write Plan -After brainstorming concludes, write plan to `issue--.md` in the repo root: - -```markdown -# - -Issue: #<number> - -## Context -<Brief problem statement> - -## Approach -<Chosen approach from brainstorming> - -## Tasks -1. <Specific implementation task> -2. <Another task> -... - -## Acceptance Criteria -- <Criteria from issue/brainstorming> -``` +After brainstorming concludes, write plan to `docs/plans/YYYY-MM-DD-<slug>.md` using `superpowers:writing-plans`: ### 5. Create PR @@ -90,8 +74,8 @@ Issue: #<number> # Create branch git checkout -b issue-<number>-<slug> -# Stage only the plan file -git add issue-<number>-<slug>.md +# Stage the plan file +git add docs/plans/<plan-file>.md # Commit git commit -m "Add plan for #<number>: <title>" @@ -117,19 +101,19 @@ User: /issue-to-pr 42 Claude: Let me fetch issue #42... -[Fetches issue: "Add dark mode support"] +[Fetches issue: "Add IndependentSet → QUBO reduction"] -I'll use superpowers:brainstorming to explore this with you. 
+I'll read the rules to follow in .claude/rules/adding-reductions.md and use superpowers:brainstorming to explore this with you. [Invokes brainstorming - discusses approaches, user preferences, scope] -Based on our discussion, I'll create the plan... +Based on our discussion, I'll create the plan with superpowers:writing-plans... -[Writes docs/plans/issue-42-dark-mode.md] +[Writes docs/plans/2026-02-09-independentset-to-qubo.md] [Creates branch, commits, pushes] [Creates PR with body starting with "[action]"] -Created PR #45: Fix #42: Add dark mode support +Created PR #45: Fix #42: Add IndependentSet → QUBO reduction The [action] trigger will automatically execute the plan. ``` @@ -139,5 +123,5 @@ The [action] trigger will automatically execute the plan. |---------|-----| | Skipping brainstorming | Always use superpowers:brainstorming (or manual discussion) first | | `[action]` not at start | PR body must BEGIN with `[action]` | -| Including code in PR | Only commit the plan file | +| Including implementation code in initial PR | First PR: plan only | | Generic plan | Use specifics from brainstorming | diff --git a/docs/plans/2026-02-08-qubo-reductions-plan.md b/docs/plans/2026-02-08-qubo-reductions-plan.md new file mode 100644 index 0000000..7c1a53c --- /dev/null +++ b/docs/plans/2026-02-08-qubo-reductions-plan.md @@ -0,0 +1,414 @@ +# Problem-to-QUBO Reductions Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Implement 7 reductions from NP-hard problems to QUBO, with tests, examples, and paper documentation (Issue #18). + +**Architecture:** Each reduction creates a `QUBO<f64>` matrix encoding the source problem's objective + constraints as penalty terms. 
All reductions follow the existing pattern in `src/rules/spinglass_qubo.rs`: a result struct implementing `ReductionResult`, a `ReduceTo` impl with `#[reduction]` macro, unit tests via `#[path]`, and integration tests in `tests/suites/reductions.rs`. + +**Tech Stack:** Rust, `#[reduction]` proc macro, `inventory` for registration, `BruteForce` solver for tests. Ground truth JSON in `tests/data/qubo/` (already generated via PR #29). + +**Branch:** `issue-18-qubo-reductions` (already exists, PR #29) + +--- + +### Task 1: IndependentSet → QUBO + +Maximize weighted IS = minimize `-Σ w_i·x_i + P·Σ_{(i,j)∈E} x_i·x_j` where `P > Σ w_i`. + +**Files:** +- Create: `src/rules/independentset_qubo.rs` +- Create: `src/unit_tests/rules/independentset_qubo.rs` +- Modify: `src/rules/mod.rs` — add `mod independentset_qubo;` + `pub use` + +**Step 1: Write unit test** + +File: `src/unit_tests/rules/independentset_qubo.rs` + +```rust +use super::*; +use crate::solvers::{BruteForce, Solver}; + +#[test] +fn test_independentset_to_qubo_closed_loop() { + // Path graph: 0-1-2-3 (4 vertices, 3 edges) + // Maximum IS = {0, 2} or {1, 3} (size 2) + let is = IndependentSet::<SimpleGraph, Unweighted>::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let reduction = ReduceTo::<QUBO<f64>>::reduce_to(&is); + let qubo = reduction.target_problem(); + + let solver = BruteForce::new(); + let qubo_solutions = solver.find_best(qubo); + + for sol in &qubo_solutions { + let extracted = reduction.extract_solution(sol); + assert!(is.solution_size(&extracted).is_valid); + // IS of size 2 + assert_eq!(extracted.iter().filter(|&&x| x == 1).count(), 2); + } +} + +#[test] +fn test_independentset_to_qubo_triangle() { + // Triangle: 0-1-2 (complete graph K3) + // Maximum IS = any single vertex (size 1) + let is = IndependentSet::<SimpleGraph, Unweighted>::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let reduction = ReduceTo::<QUBO<f64>>::reduce_to(&is); + let qubo = reduction.target_problem(); + + let solver = BruteForce::new(); 
+ let qubo_solutions = solver.find_best(qubo); + + for sol in &qubo_solutions { + let extracted = reduction.extract_solution(sol); + assert!(is.solution_size(&extracted).is_valid); + assert_eq!(extracted.iter().filter(|&&x| x == 1).count(), 1); + } +} +``` + +**Step 2: Run test, verify it fails** + +Run: `cargo test --all-features test_independentset_to_qubo` +Expected: compilation error (module not found) + +**Step 3: Write reduction implementation** + +File: `src/rules/independentset_qubo.rs` + +```rust +//! Reduction from IndependentSet to QUBO. +//! +//! Maximize Σ w_i·x_i s.t. x_i·x_j = 0 for (i,j) ∈ E +//! = Minimize -Σ w_i·x_i + P·Σ_{(i,j)∈E} x_i·x_j +//! +//! Q[i][i] = -w_i, Q[i][j] = P for edges. P = 1 + Σ w_i. + +use crate::models::graph::IndependentSet; +use crate::models::optimization::QUBO; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::{ProblemSize, Unweighted}; + +#[derive(Debug, Clone)] +pub struct ReductionISToQUBO { + target: QUBO<f64>, + source_size: ProblemSize, +} + +impl ReductionResult for ReductionISToQUBO { + type Source = IndependentSet<SimpleGraph, Unweighted>; + type Target = QUBO<f64>; + + fn target_problem(&self) -> &Self::Target { &self.target } + fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> { + target_solution.to_vec() + } + fn source_size(&self) -> ProblemSize { self.source_size.clone() } + fn target_size(&self) -> ProblemSize { self.target.problem_size() } +} + +#[reduction( + source_graph = "SimpleGraph", + overhead = { ReductionOverhead::new(vec![("num_vars", poly!(num_vertices))]) } +)] +impl ReduceTo<QUBO<f64>> for IndependentSet<SimpleGraph, Unweighted> { + type Result = ReductionISToQUBO; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let edges = self.edges(); + let penalty = 1.0 + n as f64; // P > 
sum of unit weights + + let mut matrix = vec![vec![0.0; n]; n]; + for i in 0..n { + matrix[i][i] = -1.0; // -w_i (unit weight) + } + for (u, v) in &edges { + let (i, j) = if u < v { (*u, *v) } else { (*v, *u) }; + matrix[i][j] += penalty; + } + + ReductionISToQUBO { + target: QUBO::from_matrix(matrix), + source_size: self.problem_size(), + } + } +} + +#[cfg(test)] +#[path = "../unit_tests/rules/independentset_qubo.rs"] +mod tests; +``` + +**Step 4: Register in `src/rules/mod.rs`** + +Add after `mod spinglass_qubo;`: +```rust +mod independentset_qubo; +``` + +Add after `pub use spinglass_qubo::...`: +```rust +pub use independentset_qubo::ReductionISToQUBO; +``` + +**Step 5: Run tests** + +Run: `cargo test --all-features test_independentset_to_qubo` +Expected: PASS + +**Step 6: Run clippy + full test suite** + +Run: `make test clippy` +Expected: all pass, no warnings + +**Step 7: Commit** + +```bash +git add src/rules/independentset_qubo.rs src/unit_tests/rules/independentset_qubo.rs src/rules/mod.rs +git commit -m "feat: add IndependentSet → QUBO reduction" +``` + +--- + +### Task 2: VertexCovering → QUBO + +Minimize `Σ w_i·x_i + P·Σ_{(i,j)∈E} (1-x_i)(1-x_j)`. Expanding: `Q[i][i] = w_i - P·deg(i)`, `Q[i][j] = P`. + +**Files:** +- Create: `src/rules/vertexcovering_qubo.rs` +- Create: `src/unit_tests/rules/vertexcovering_qubo.rs` +- Modify: `src/rules/mod.rs` + +Same pattern as Task 1. 
Key differences: +- VC minimizes (same as QUBO), so no sign flip on objective +- Penalty enforces: every edge has at least one endpoint selected +- `Q[i][i] = 1.0 - penalty * degree(i)`, `Q[i][j] = penalty` for edges +- Penalty `P = 1 + n` (unit weights) +- Test: cycle graph C4 (4 vertices, 4 edges) → min VC = 2 vertices + +**Step 1: Write test** (same structure as Task 1) +**Step 2: Verify fails** +**Step 3: Implement** — struct `ReductionVCToQUBO`, same boilerplate +**Step 4: Register in mod.rs** +**Step 5-6: Test + clippy** +**Step 7: Commit** `"feat: add VertexCovering → QUBO reduction"` + +--- + +### Task 3: MaxCut → QUBO + +Maximize cut = Σ_{(i,j)∈E} w_ij·(x_i⊕x_j). Minimize negative: `Q[i][i] = -Σ_j w_ij`, `Q[i][j] = 2·w_ij` (upper triangular). + +Note: MaxCut edges carry weights. Use `self.edges()` which returns `Vec<(usize, usize, W)>`. + +**Files:** +- Create: `src/rules/maxcut_qubo.rs` +- Create: `src/unit_tests/rules/maxcut_qubo.rs` +- Modify: `src/rules/mod.rs` + +Key: MaxCut is `MaxCut<SimpleGraph, W>` with edge weights. For unweighted, use `MaxCut::unweighted(n, edges)`. + +- `Q[i][j] = 2·w_ij` for i < j (upper triangular; the `w_ij(x_i + x_j - 2x_ix_j)` formula) +- `Q[i][i] = -Σ_{j:(i,j)∈E} w_ij` +- Test: cycle C4 → max cut = 4 (all edges cut by bipartition) +- No penalty needed — MaxCut is unconstrained + +**Step 1-7:** Same flow. Commit: `"feat: add MaxCut → QUBO reduction"` + +--- + +### Task 4: Coloring (KColoring) → QUBO + +One-hot encoding: `x_{v,c} = 1` iff vertex v gets color c. QUBO index: `v*K + c`. + +- One-hot penalty: `P₁·Σ_v (1 - Σ_c x_{v,c})²` +- Edge penalty: `P₂·Σ_{(u,v)∈E} Σ_c x_{u,c}·x_{v,c}` +- QUBO has `n·K` variables + +**Special:** `KColoring<const K: usize, G, W>` uses const generic. For the reduction, we implement for a specific K (e.g., `K=3`). Or better: implement for generic K using the existing pattern. 
+ +Actually, looking at `coloring_ilp.rs`, there are two reductions: +- `ReductionColoringToILP` for `Coloring<SimpleGraph, W>` (deprecated Coloring type?) +- `ReductionKColoringToILP<const K: usize, W>` for `KColoring<K, SimpleGraph, W>` + +We should implement for `KColoring<K, SimpleGraph, Unweighted>`. The `extract_solution` decodes one-hot: for each vertex, find which color bit is 1. + +The struct needs to store `num_vertices` and `K` for extraction. + +**Files:** +- Create: `src/rules/coloring_qubo.rs` +- Create: `src/unit_tests/rules/coloring_qubo.rs` +- Modify: `src/rules/mod.rs` + +**Test:** Triangle K3, 3 colors → exactly 6 valid colorings (3! permutations). + +**Step 1-7:** Same flow. Commit: `"feat: add KColoring → QUBO reduction"` + +--- + +### Task 5: SetPacking → QUBO + +Same structure as IS on intersection graph: `Q[i][i] = -w_i`, `Q[i][j] = P` for overlapping pairs. + +Use `self.overlapping_pairs()` to get conflicting set pairs. + +**Files:** +- Create: `src/rules/setpacking_qubo.rs` +- Create: `src/unit_tests/rules/setpacking_qubo.rs` +- Modify: `src/rules/mod.rs` + +**Test:** 3 sets with some overlaps → verify max packing found. + +**Step 1-7:** Same flow. Commit: `"feat: add SetPacking → QUBO reduction"` + +--- + +### Task 6: KSatisfiability (K=2) → QUBO + +Max-2-SAT penalty formulation. Each clause contributes to Q based on literal signs. + +For clause `(l₁ ∨ l₂)` where `l = x` or `l = ¬x`: +- `(x_i ∨ x_j)`: penalty `(1-x_i)(1-x_j)` = `1 - x_i - x_j + x_ix_j` +- `(¬x_i ∨ x_j)`: penalty `x_i(1-x_j)` = `x_i - x_ix_j` +- `(x_i ∨ ¬x_j)`: penalty `(1-x_i)x_j` = `x_j - x_ix_j` +- `(¬x_i ∨ ¬x_j)`: penalty `x_ix_j` + +CNFClause uses 1-indexed signed integers: positive = variable, negative = negated. E.g., `[1, -2]` = `(x₁ ∨ ¬x₂)`. 
+
+**Files:**
+- Create: `src/rules/ksatisfiability_qubo.rs`
+- Create: `src/unit_tests/rules/ksatisfiability_qubo.rs`
+- Modify: `src/rules/mod.rs`
+
+**Test:** 3 vars, 4 clauses → verify all clauses satisfied by extracted solution.
+
+**Step 1-7:** Same flow. Commit: `"feat: add KSatisfiability(K=2) → QUBO reduction"`
+
+---
+
+### Task 7: ILP (binary) → QUBO
+
+Binary ILP: `min c^T x s.t. Ax ≤ b`. Feature-gated behind `ilp`.
+
+Formulation: `Q[i][i] += c_i` (objective) + `P·Σ_k (Σ_j a_{kj}·x_j - b_k)²` (constraint penalties). Note: the squared penalty enforces the *equality* `Σ_j a_{kj}·x_j = b_k`; for a `≤` row, first add binary slack variables to convert it into an equality before penalizing.
+
+Expanding the quadratic penalty for constraint k (using `x_i² = x_i` for binary variables):
+- `Q[i][j] += 2·P·a_{ki}·a_{kj}` for i < j (cross terms of the square)
+- `Q[i][i] += P·a_{ki}·(a_{ki} - 2·b_k)` (diagonal adjustment)
+
+ILP fields are public: `self.constraints`, `self.objective`, `self.sense`, `self.bounds`, `self.num_vars`.
+
+Only valid for binary ILP (all bounds = [0,1]). Should assert this.
+
+For Maximize objectives, negate the objective coefficients (QUBO minimizes).
+
+**Files:**
+- Create: `src/rules/ilp_qubo.rs` (with `#[cfg(feature = "ilp")]`)
+- Create: `src/unit_tests/rules/ilp_qubo.rs`
+- Modify: `src/rules/mod.rs`
+
+**Test:** Binary ILP with 3 vars, 2 constraints → verify feasible optimal found.
+
+**Step 1-7:** Same flow. Commit: `"feat: add ILP (binary) → QUBO reduction"`
+
+---
+
+### Task 8: Integration Tests
+
+Add integration tests in `tests/suites/reductions.rs` that load JSON ground truth from `tests/data/qubo/` and compare against Rust reductions.
+
+**Files:**
+- Modify: `tests/suites/reductions.rs`
+
+For each reduction, add a module like:
+```rust
+mod is_qubo_reductions {
+    use super::*;
+
+    #[test]
+    fn test_is_to_qubo_ground_truth() {
+        // Load JSON, create source problem, reduce, verify QUBO matrix and optimal match
+    }
+}
+```
+
+**Commit:** `"test: add integration tests for QUBO reductions against ground truth"`
+
+---
+
+### Task 9: Example Program
+
+Create `examples/qubo_reductions.rs` demonstrating all 7 reductions with practical stories.
+ +**File:** Create `examples/qubo_reductions.rs` + +Each demo: +1. Create a small practical instance (e.g., "Find the largest non-conflicting set of wireless towers") +2. Reduce to QUBO +3. Solve with BruteForce +4. Extract and explain the solution + +Run: `cargo run --example qubo_reductions --features ilp` + +**Commit:** `"docs: add QUBO reductions example program"` + +--- + +### Task 10: Paper Documentation + +Update `docs/paper/reductions.typ` with 7 new theorems. + +**File:** Modify `docs/paper/reductions.typ` + +For each reduction: +1. Add theorem in Section 3.1 (trivial reductions — these are standard penalty formulations) +2. Add proof with the QUBO formulation +3. Add Rust code example (from `examples/qubo_reductions.rs`) +4. Update summary table with overhead and reference + +Also update `@def:qubo` to list new "Reduces from" links. + +Run: `make export-graph && make paper` + +**Commit:** `"docs: add QUBO reduction theorems and examples to paper"` + +--- + +### Task 11: Final Verification + +```bash +make test # All tests pass +make clippy # No warnings +make export-graph # Reduction graph updated +make paper # Paper compiles +make coverage # >95% for new code +``` + +**Commit:** any final fixups + +--- + +## Key Reference Files + +| Purpose | Path | +|---------|------| +| Model pattern | `src/rules/spinglass_qubo.rs` | +| Test pattern | `src/unit_tests/rules/spinglass_qubo.rs` | +| Module registry | `src/rules/mod.rs` | +| Integration tests | `tests/suites/reductions.rs` | +| ILP feature gate | `src/rules/mod.rs:28-45` (example) | +| Ground truth JSON | `tests/data/qubo/*.json` | +| Paper | `docs/paper/reductions.typ` | +| IS model | `src/models/graph/independent_set.rs` | +| VC model | `src/models/graph/vertex_covering.rs` | +| MaxCut model | `src/models/graph/max_cut.rs` | +| KColoring model | `src/models/graph/kcoloring.rs` | +| SetPacking model | `src/models/set/set_packing.rs` | +| KSat model | `src/models/satisfiability/ksat.rs` | +| ILP model | 
`src/models/optimization/ilp.rs` |