diff --git a/Cargo.lock b/Cargo.lock index 804f7400..8a1ebb42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -65,15 +65,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "aligned-vec" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" -dependencies = [ - "equator", -] - [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -301,8 +292,6 @@ dependencies = [ [[package]] name = "alloy-evm" version = "0.23.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527b47dc39850c6168002ddc1f7a2063e15d26137c1bb5330f6065a7524c1aa9" dependencies = [ "alloy-consensus", "alloy-eips", @@ -431,8 +420,6 @@ dependencies = [ [[package]] name = "alloy-op-evm" version = "0.23.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eea81517a852d9e3b03979c10febe00aacc3d50fbd34c5c30281051773285f7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2113,147 +2100,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "boa_ast" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc119a5ad34c3f459062a96907f53358989b173d104258891bb74f95d93747e8" -dependencies = [ - "bitflags 2.10.0", - "boa_interner", - "boa_macros", - "boa_string", - "indexmap 2.12.1", - "num-bigint", - "rustc-hash 2.1.1", -] - -[[package]] -name = "boa_engine" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e637ec52ea66d76b0ca86180c259d6c7bb6e6a6e14b2f36b85099306d8b00cc3" -dependencies = [ - "aligned-vec", - "arrayvec", - "bitflags 2.10.0", - "boa_ast", - "boa_gc", - "boa_interner", - "boa_macros", - "boa_parser", - "boa_string", - "bytemuck", - "cfg-if", - "cow-utils", - "dashmap 6.1.0", - "dynify", - "fast-float2", - "float16", - "futures-channel", - "futures-concurrency", - "futures-lite", - "hashbrown 0.16.1", - "icu_normalizer", - "indexmap 2.12.1", 
- "intrusive-collections", - "itertools 0.14.0", - "num-bigint", - "num-integer", - "num-traits", - "num_enum", - "paste", - "portable-atomic", - "rand 0.9.2", - "regress", - "rustc-hash 2.1.1", - "ryu-js", - "serde", - "serde_json", - "small_btree", - "static_assertions", - "tag_ptr", - "tap", - "thin-vec", - "thiserror 2.0.17", - "time", - "xsum", -] - -[[package]] -name = "boa_gc" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1179f690cbfcbe5364cceee5f1cb577265bb6f07b0be6f210aabe270adcf9da" -dependencies = [ - "boa_macros", - "boa_string", - "hashbrown 0.16.1", - "thin-vec", -] - -[[package]] -name = "boa_interner" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9626505d33dc63d349662437297df1d3afd9d5fc4a2b3ad34e5e1ce879a78848" -dependencies = [ - "boa_gc", - "boa_macros", - "hashbrown 0.16.1", - "indexmap 2.12.1", - "once_cell", - "phf", - "rustc-hash 2.1.1", - "static_assertions", -] - -[[package]] -name = "boa_macros" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f36418a46544b152632c141b0a0b7a453cd69ca150caeef83aee9e2f4b48b7d" -dependencies = [ - "cfg-if", - "cow-utils", - "proc-macro2", - "quote", - "syn 2.0.111", - "synstructure 0.13.2", -] - -[[package]] -name = "boa_parser" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f99bf5b684f0de946378fcfe5f38c3a0fbd51cbf83a0f39ff773a0e218541f" -dependencies = [ - "bitflags 2.10.0", - "boa_ast", - "boa_interner", - "boa_macros", - "fast-float2", - "icu_properties", - "num-bigint", - "num-traits", - "regress", - "rustc-hash 2.1.1", -] - -[[package]] -name = "boa_string" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ce9d7aa5563a2e14eab111e2ae1a06a69a812f6c0c3d843196c9d03fbef440" -dependencies = [ - "fast-float2", - "itoa", - "paste", - "rustc-hash 2.1.1", - 
"ryu-js", - "static_assertions", -] - [[package]] name = "bollard" version = "0.18.1" @@ -2390,20 +2236,6 @@ name = "bytemuck" version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" -dependencies = [ - "bytemuck_derive", -] - -[[package]] -name = "bytemuck_derive" -version = "1.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] [[package]] name = "byteorder" @@ -2873,16 +2705,6 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "cordyceps" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" -dependencies = [ - "loom", - "tracing", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -2918,12 +2740,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "cow-utils" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "417bef24afe1460300965a25ff4a24b8b45ad011948302ec221e8a0a81eb2c79" - [[package]] name = "cpufeatures" version = "0.2.17" @@ -3410,12 +3226,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "diatomic-waker" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" - [[package]] name = "diff" version = "0.1.13" @@ -3594,26 +3404,6 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" -[[package]] -name = "dynify" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81acb15628a3e22358bf73de5e7e62360b8a777dbcb5fc9ac7dfa9ae73723747" -dependencies 
= [ - "dynify-macros", -] - -[[package]] -name = "dynify-macros" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "ecdsa" version = "0.16.9" @@ -3783,26 +3573,6 @@ dependencies = [ "syn 2.0.111", ] -[[package]] -name = "equator" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" -dependencies = [ - "equator-macro", -] - -[[package]] -name = "equator-macro" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "equivalent" version = "1.0.2" @@ -3921,12 +3691,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "fast-float2" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8eb564c5c7423d25c886fb561d1e4ee69f72354d16918afa32c08811f6b6a55" - [[package]] name = "fastrand" version = "2.3.0" @@ -4005,12 +3769,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "fixedbitset" -version = "0.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" - [[package]] name = "flate2" version = "1.1.5" @@ -4021,16 +3779,6 @@ dependencies = [ "miniz_oxide", ] -[[package]] -name = "float16" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bffafbd079d520191c7c2779ae9cf757601266cf4167d3f659ff09617ff8483" -dependencies = [ - "cfg-if", - "rustc_version 0.2.3", -] - [[package]] name = "fnv" version = "1.0.7" @@ -4119,19 +3867,6 @@ dependencies = [ "futures-util", ] -[[package]] 
-name = "futures-buffered" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" -dependencies = [ - "cordyceps", - "diatomic-waker", - "futures-core", - "pin-project-lite", - "spin", -] - [[package]] name = "futures-channel" version = "0.3.31" @@ -4142,21 +3877,6 @@ dependencies = [ "futures-sink", ] -[[package]] -name = "futures-concurrency" -version = "7.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eb68017df91f2e477ed4bea586c59eaecaa47ed885a770d0444e21e62572cd2" -dependencies = [ - "fixedbitset", - "futures-buffered", - "futures-core", - "futures-lite", - "pin-project", - "slab", - "smallvec", -] - [[package]] name = "futures-core" version = "0.3.31" @@ -4187,10 +3907,7 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ - "fastrand", "futures-core", - "futures-io", - "parking", "pin-project-lite", ] @@ -4262,20 +3979,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" -[[package]] -name = "generator" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" -dependencies = [ - "cc", - "cfg-if", - "libc", - "log", - "rustversion", - "windows 0.61.3", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -4821,7 +4524,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core 0.61.2", ] [[package]] @@ -4872,8 +4575,6 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "write16", "zerovec", ] @@ -5158,15 +4859,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = 
"intrusive-collections" -version = "0.9.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "189d0897e4cbe8c75efedf3502c18c887b05046e59d28404d4d8e46cbc4d1e86" -dependencies = [ - "memoffset", -] - [[package]] name = "iocuddle" version = "0.1.1" @@ -6168,19 +5860,6 @@ version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" -[[package]] -name = "loom" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" -dependencies = [ - "cfg-if", - "generator", - "scoped-tls", - "tracing", - "tracing-subscriber 0.3.20", -] - [[package]] name = "lru" version = "0.12.5" @@ -6326,15 +6005,6 @@ dependencies = [ "libc", ] -[[package]] -name = "memoffset" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" -dependencies = [ - "autocfg", -] - [[package]] name = "metrics" version = "0.24.2" @@ -6803,7 +6473,6 @@ checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", - "serde", ] [[package]] @@ -7221,8 +6890,7 @@ dependencies = [ [[package]] name = "op-revm" version = "12.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e31622d03b29c826e48800f4c8f389c8a9c440eb796a3e35203561a288f12985" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "auto_impl", "revm", @@ -8473,16 +8141,6 @@ version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" -[[package]] -name = "regress" -version = "0.10.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2057b2325e68a893284d1538021ab90279adac1139957ca2a74426c6f118fb48" -dependencies = [ - "hashbrown 0.16.1", - "memchr", -] - [[package]] name = "reqwest" version = "0.12.24" @@ -8541,7 +8199,6 @@ checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -8587,7 +8244,6 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8611,7 +8267,6 @@ dependencies = [ [[package]] name = "reth-chain-state" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8642,7 +8297,6 @@ dependencies = [ [[package]] name = "reth-chainspec" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8662,7 +8316,6 @@ dependencies = [ [[package]] name = "reth-cli" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-genesis", "clap", @@ -8676,7 +8329,6 @@ dependencies = [ [[package]] name = "reth-cli-commands" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8751,7 +8403,6 @@ dependencies = [ [[package]] name = "reth-cli-runner" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ 
"reth-tasks", "tokio", @@ -8761,7 +8412,6 @@ dependencies = [ [[package]] name = "reth-cli-util" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -8779,7 +8429,6 @@ dependencies = [ [[package]] name = "reth-codecs" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8799,7 +8448,6 @@ dependencies = [ [[package]] name = "reth-codecs-derive" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "proc-macro2", "quote", @@ -8809,7 +8457,6 @@ dependencies = [ [[package]] name = "reth-config" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "eyre", "humantime-serde", @@ -8824,7 +8471,6 @@ dependencies = [ [[package]] name = "reth-consensus" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -8837,7 +8483,6 @@ dependencies = [ [[package]] name = "reth-consensus-common" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8849,7 +8494,6 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8875,7 +8519,6 @@ dependencies = [ [[package]] name = "reth-db" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 
1.4.1", "derive_more", @@ -8901,7 +8544,6 @@ dependencies = [ [[package]] name = "reth-db-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8929,7 +8571,6 @@ dependencies = [ [[package]] name = "reth-db-common" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8959,7 +8600,6 @@ dependencies = [ [[package]] name = "reth-db-models" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -8974,7 +8614,6 @@ dependencies = [ [[package]] name = "reth-discv4" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "alloy-rlp", @@ -8999,7 +8638,6 @@ dependencies = [ [[package]] name = "reth-discv5" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "alloy-rlp", @@ -9023,7 +8661,6 @@ dependencies = [ [[package]] name = "reth-dns-discovery" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "data-encoding", @@ -9047,7 +8684,6 @@ dependencies = [ [[package]] name = "reth-downloaders" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9082,7 +8718,6 @@ dependencies = [ [[package]] name = "reth-ecies" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ 
"aes", "alloy-primitives 1.4.1", @@ -9113,7 +8748,6 @@ dependencies = [ [[package]] name = "reth-engine-local" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -9137,7 +8771,6 @@ dependencies = [ [[package]] name = "reth-engine-primitives" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9162,7 +8795,6 @@ dependencies = [ [[package]] name = "reth-engine-service" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "futures", "pin-project", @@ -9184,7 +8816,6 @@ dependencies = [ [[package]] name = "reth-engine-tree" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9239,7 +8870,6 @@ dependencies = [ [[package]] name = "reth-engine-util" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9267,7 +8897,6 @@ dependencies = [ [[package]] name = "reth-era" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9283,7 +8912,6 @@ dependencies = [ [[package]] name = "reth-era-downloader" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "bytes", @@ -9298,7 +8926,6 @@ dependencies = [ [[package]] name = "reth-era-utils" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" 
dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -9320,7 +8947,6 @@ dependencies = [ [[package]] name = "reth-errors" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -9331,7 +8957,6 @@ dependencies = [ [[package]] name = "reth-eth-wire" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-chains", "alloy-primitives 1.4.1", @@ -9359,7 +8984,6 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9380,7 +9004,6 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "clap", "eyre", @@ -9404,7 +9027,6 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9420,7 +9042,6 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -9438,7 +9059,6 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -9451,7 +9071,6 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" version = "1.9.3" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9480,7 +9099,6 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9500,7 +9118,6 @@ dependencies = [ [[package]] name = "reth-etl" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "rayon", "reth-db-api", @@ -9510,7 +9127,6 @@ dependencies = [ [[package]] name = "reth-evm" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9533,7 +9149,6 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9553,7 +9168,6 @@ dependencies = [ [[package]] name = "reth-execution-errors" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-evm", "alloy-primitives 1.4.1", @@ -9566,7 +9180,6 @@ dependencies = [ [[package]] name = "reth-execution-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9584,7 +9197,6 @@ dependencies = [ [[package]] name = "reth-exex" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9622,7 +9234,6 @@ dependencies = [ [[package]] name = "reth-exex-types" version = "1.9.3" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -9636,7 +9247,6 @@ dependencies = [ [[package]] name = "reth-fs-util" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "serde", "serde_json", @@ -9646,7 +9256,6 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -9674,7 +9283,6 @@ dependencies = [ [[package]] name = "reth-ipc" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "bytes", "futures", @@ -9694,7 +9302,6 @@ dependencies = [ [[package]] name = "reth-libmdbx" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -9710,7 +9317,6 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "bindgen 0.71.1", "cc", @@ -9719,7 +9325,6 @@ dependencies = [ [[package]] name = "reth-metrics" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "futures", "metrics", @@ -9731,7 +9336,6 @@ dependencies = [ [[package]] name = "reth-net-banlist" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", ] @@ -9739,7 +9343,6 @@ dependencies = [ [[package]] name = "reth-net-nat" version = "1.9.3" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "futures-util", "if-addrs 0.14.0", @@ -9753,7 +9356,6 @@ dependencies = [ [[package]] name = "reth-network" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9808,7 +9410,6 @@ dependencies = [ [[package]] name = "reth-network-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -9833,7 +9434,6 @@ dependencies = [ [[package]] name = "reth-network-p2p" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9856,7 +9456,6 @@ dependencies = [ [[package]] name = "reth-network-peers" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "alloy-rlp", @@ -9871,7 +9470,6 @@ dependencies = [ [[package]] name = "reth-network-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -9885,7 +9483,6 @@ dependencies = [ [[package]] name = "reth-nippy-jar" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "anyhow", "bincode", @@ -9902,7 +9499,6 @@ dependencies = [ [[package]] name = "reth-node-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -9926,7 +9522,6 @@ dependencies = [ [[package]] name = "reth-node-builder" version = "1.9.3" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9994,7 +9589,6 @@ dependencies = [ [[package]] name = "reth-node-core" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10047,7 +9641,6 @@ dependencies = [ [[package]] name = "reth-node-ethereum" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-network", @@ -10085,7 +9678,6 @@ dependencies = [ [[package]] name = "reth-node-ethstats" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -10109,7 +9701,6 @@ dependencies = [ [[package]] name = "reth-node-events" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10133,7 +9724,6 @@ dependencies = [ [[package]] name = "reth-node-metrics" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "eyre", "http", @@ -10155,7 +9745,6 @@ dependencies = [ [[package]] name = "reth-node-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "reth-chainspec", "reth-db-api", @@ -10167,7 +9756,6 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10195,7 +9783,6 @@ dependencies = [ [[package]] name = "reth-optimism-cli" version = "1.9.3" 
-source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10245,7 +9832,6 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10270,7 +9856,6 @@ dependencies = [ [[package]] name = "reth-optimism-evm" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10298,7 +9883,6 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10337,7 +9921,6 @@ dependencies = [ [[package]] name = "reth-optimism-forks" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-op-hardforks", "alloy-primitives 1.4.1", @@ -10348,7 +9931,6 @@ dependencies = [ [[package]] name = "reth-optimism-node" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -10394,7 +9976,6 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10434,7 +10015,6 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10454,7 +10034,6 @@ 
dependencies = [ [[package]] name = "reth-optimism-rpc" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10515,7 +10094,6 @@ dependencies = [ [[package]] name = "reth-optimism-storage" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "reth-optimism-primitives", @@ -10525,7 +10103,6 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10561,7 +10138,6 @@ dependencies = [ [[package]] name = "reth-payload-builder" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -10582,7 +10158,6 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "pin-project", "reth-payload-primitives", @@ -10594,7 +10169,6 @@ dependencies = [ [[package]] name = "reth-payload-primitives" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -10614,7 +10188,6 @@ dependencies = [ [[package]] name = "reth-payload-util" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -10624,7 +10197,6 @@ dependencies = [ [[package]] name = "reth-payload-validator" version = "1.9.3" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -10634,7 +10206,6 @@ dependencies = [ [[package]] name = "reth-primitives" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "c-kzg", @@ -10648,7 +10219,6 @@ dependencies = [ [[package]] name = "reth-primitives-traits" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10681,7 +10251,6 @@ dependencies = [ [[package]] name = "reth-provider" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10725,7 +10294,6 @@ dependencies = [ [[package]] name = "reth-prune" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10752,7 +10320,6 @@ dependencies = [ [[package]] name = "reth-prune-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "arbitrary", @@ -10767,7 +10334,6 @@ dependencies = [ [[package]] name = "reth-ress-protocol" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -10786,7 +10352,6 @@ dependencies = [ [[package]] name = "reth-ress-provider" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -10813,7 +10378,6 @@ dependencies = [ [[package]] name = 
"reth-revm" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "reth-primitives-traits", @@ -10826,7 +10390,6 @@ dependencies = [ [[package]] name = "reth-rpc" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10889,7 +10452,6 @@ dependencies = [ "reth-transaction-pool", "reth-trie-common", "revm", - "revm-inspectors", "revm-primitives", "serde", "serde_json", @@ -10905,7 +10467,6 @@ dependencies = [ [[package]] name = "reth-rpc-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-genesis", @@ -10933,7 +10494,6 @@ dependencies = [ [[package]] name = "reth-rpc-builder" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-network", "alloy-provider", @@ -10972,7 +10532,6 @@ dependencies = [ [[package]] name = "reth-rpc-convert" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -10999,7 +10558,6 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -11029,7 +10587,6 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -11065,7 +10622,6 @@ dependencies = [ "reth-transaction-pool", "reth-trie-common", "revm", - "revm-inspectors", "tokio", 
"tracing", ] @@ -11073,7 +10629,6 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11108,7 +10663,6 @@ dependencies = [ "reth-transaction-pool", "reth-trie", "revm", - "revm-inspectors", "schnellru", "serde", "thiserror 2.0.17", @@ -11120,7 +10674,6 @@ dependencies = [ [[package]] name = "reth-rpc-layer" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-rpc-types-engine", "http", @@ -11134,7 +10687,6 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -11150,7 +10702,6 @@ dependencies = [ [[package]] name = "reth-stages" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11198,7 +10749,6 @@ dependencies = [ [[package]] name = "reth-stages-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -11225,7 +10775,6 @@ dependencies = [ [[package]] name = "reth-stages-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "arbitrary", @@ -11239,7 +10788,6 @@ dependencies = [ [[package]] name = "reth-static-file" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "parking_lot", @@ -11259,7 +10807,6 @@ dependencies = [ 
[[package]] name = "reth-static-file-types" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "clap", @@ -11271,7 +10818,6 @@ dependencies = [ [[package]] name = "reth-storage-api" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11294,7 +10840,6 @@ dependencies = [ [[package]] name = "reth-storage-errors" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-eips", "alloy-primitives 1.4.1", @@ -11310,7 +10855,6 @@ dependencies = [ [[package]] name = "reth-tasks" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "auto_impl", "dyn-clone", @@ -11328,7 +10872,6 @@ dependencies = [ [[package]] name = "reth-testing-utils" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11344,7 +10887,6 @@ dependencies = [ [[package]] name = "reth-tokio-util" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "tokio", "tokio-stream", @@ -11354,7 +10896,6 @@ dependencies = [ [[package]] name = "reth-tracing" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "clap", "eyre", @@ -11371,7 +10912,6 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "clap", "eyre", @@ -11388,7 +10928,6 @@ dependencies = [ [[package]] name = 
"reth-transaction-pool" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11429,7 +10968,6 @@ dependencies = [ [[package]] name = "reth-trie" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11454,7 +10992,6 @@ dependencies = [ [[package]] name = "reth-trie-common" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-consensus", "alloy-primitives 1.4.1", @@ -11481,7 +11018,6 @@ dependencies = [ [[package]] name = "reth-trie-db" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "reth-db-api", @@ -11494,7 +11030,6 @@ dependencies = [ [[package]] name = "reth-trie-parallel" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "alloy-rlp", @@ -11519,7 +11054,6 @@ dependencies = [ [[package]] name = "reth-trie-sparse" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "alloy-rlp", @@ -11538,7 +11072,6 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "alloy-primitives 1.4.1", "alloy-rlp", @@ -11556,7 +11089,6 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" version = "1.9.3" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.9.3#27a8c0f5a6dfb27dea84c5751776ecabdd069646" dependencies = [ "zstd", ] @@ -11564,8 +11096,7 @@ 
dependencies = [ [[package]] name = "revm" version = "31.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb67a5223602113cae59a305acde2d9936bc18f2478dda879a6124b267cebfb6" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "revm-bytecode", "revm-context", @@ -11583,8 +11114,7 @@ dependencies = [ [[package]] name = "revm-bytecode" version = "7.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2c6b5e6e8dd1e28a4a60e5f46615d4ef0809111c9e63208e55b5c7058200fb0" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "bitvec", "phf", @@ -11595,8 +11125,7 @@ dependencies = [ [[package]] name = "revm-context" version = "11.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92850e150f4f99d46c05a20ad0cd09286a7ad4ee21866fffb87101de6e602231" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "bitvec", "cfg-if", @@ -11607,13 +11136,13 @@ dependencies = [ "revm-primitives", "revm-state", "serde", + "tracing", ] [[package]] name = "revm-context-interface" version = "12.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d701e2c2347d65216b066489ab22a0a8e1f7b2568256110d73a7d5eff3385c" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11623,13 +11152,13 @@ dependencies = [ "revm-primitives", "revm-state", "serde", + "tracing", ] [[package]] name = "revm-database" -version = "9.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "980d8d6bba78c5dd35b83abbb6585b0b902eb25ea4448ed7bfba6283b0337191" +version = 
"9.0.5" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11642,8 +11171,7 @@ dependencies = [ [[package]] name = "revm-database-interface" version = "8.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cce03e3780287b07abe58faf4a7f5d8be7e81321f93ccf3343c8f7755602bae" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "auto_impl", "either", @@ -11655,8 +11183,7 @@ dependencies = [ [[package]] name = "revm-handler" version = "12.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45418ed95cfdf0cb19effdbb7633cf2144cab7fb0e6ffd6b0eb9117a50adff6" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "auto_impl", "derive-where", @@ -11674,8 +11201,7 @@ dependencies = [ [[package]] name = "revm-inspector" version = "12.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c99801eac7da06cc112df2244bd5a64024f4ef21240e923b26e73c4b4a0e5da6" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "auto_impl", "either", @@ -11689,31 +11215,10 @@ dependencies = [ "serde_json", ] -[[package]] -name = "revm-inspectors" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21caa99f22184a6818946362778cccd3ff02f743c1e085bee87700671570ecb7" -dependencies = [ - "alloy-primitives 1.4.1", - "alloy-rpc-types-eth", - "alloy-rpc-types-trace", - "alloy-sol-types 1.4.1", - "anstyle", - "boa_engine", - "boa_gc", - "colorchoice", - "revm", - "serde", - "serde_json", - "thiserror 2.0.17", -] - [[package]] name = "revm-interpreter" version = "29.0.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22789ce92c5808c70185e3bc49732f987dc6fd907f77828c8d3470b2299c9c65" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11725,8 +11230,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "29.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968b124028960201abf6d6bf8e223f15fadebb4307df6b7dc9244a0aab5d2d05" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11750,8 +11254,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "21.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29e161db429d465c09ba9cbff0df49e31049fe6b549e28eb0b7bd642fcbd4412" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "alloy-primitives 1.4.1", "num_enum", @@ -11762,8 +11265,7 @@ dependencies = [ [[package]] name = "revm-state" version = "8.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d8be953b7e374dbdea0773cf360debed8df394ea8d82a8b240a6b5da37592fc" +source = "git+https://github.com/meyer9/revm?branch=meyer9%2Flazy-load-accounts-31.0.2#96abf4000f745f677fdce49ecda9434401b8a60a" dependencies = [ "bitflags 2.10.0", "revm-bytecode", @@ -12016,15 +11518,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" 
-dependencies = [ - "semver 0.9.0", -] - [[package]] name = "rustc_version" version = "0.3.3" @@ -12199,12 +11692,6 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" -[[package]] -name = "ryu-js" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd29631678d6fb0903b69223673e122c32e9ae559d0960a38d574695ebc0ea15" - [[package]] name = "same-file" version = "1.0.6" @@ -12258,12 +11745,6 @@ dependencies = [ "hashbrown 0.13.2", ] -[[package]] -name = "scoped-tls" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" - [[package]] name = "scopeguard" version = "1.2.0" @@ -12362,22 +11843,13 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser 0.7.0", -] - [[package]] name = "semver" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser 0.10.3", + "semver-parser", ] [[package]] @@ -12390,12 +11862,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "semver-parser" version = "0.10.3" @@ -12749,15 +12215,6 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" -[[package]] -name = "small_btree" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ba60d2df92ba73864714808ca68c059734853e6ab722b40e1cf543ebb3a057a" -dependencies = [ - "arrayvec", -] - [[package]] name = "smallvec" version = "1.15.1" @@ -12827,12 +12284,6 @@ dependencies = [ "sha1", ] -[[package]] -name = "spin" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" - [[package]] name = "spki" version = "0.7.3" @@ -13059,12 +12510,6 @@ dependencies = [ "libc", ] -[[package]] -name = "tag_ptr" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0e973b34477b7823833469eb0f5a3a60370fef7a453e02d751b59180d0a5a05" - [[package]] name = "tagptr" version = "0.2.0" @@ -13190,12 +12635,6 @@ dependencies = [ "url", ] -[[package]] -name = "thin-vec" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "144f754d318415ac792f9d69fc87abbbfc043ce2ef041c60f16ad828f638717d" - [[package]] name = "thiserror" version = "1.0.69" @@ -13293,7 +12732,6 @@ checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", - "js-sys", "libc", "num-conv", "num_threads", @@ -14147,12 +13585,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -15086,12 +14518,6 @@ version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" -[[package]] -name = "write16" -version = "1.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" version = "0.6.2" @@ -15197,12 +14623,6 @@ dependencies = [ "xml-rs", ] -[[package]] -name = "xsum" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0637d3a5566a82fa5214bae89087bc8c9fb94cd8e8a3c07feb691bb8d9c632db" - [[package]] name = "yamux" version = "0.12.1" diff --git a/Cargo.toml b/Cargo.toml index a338c9b0..72dcca72 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,7 @@ repository = "https://github.com/flashbots/op-rbuilder" exclude = [".github/"] [workspace] -members = [ "crates/op-rbuilder", "crates/p2p", "crates/tdx-quote-provider"] +members = ["crates/op-rbuilder", "crates/p2p", "crates/tdx-quote-provider"] default-members = ["crates/op-rbuilder"] resolver = "2" @@ -132,7 +132,7 @@ alloy-primitives = { version = "1.4.1", default-features = false, features = [ alloy-rlp = "0.3.10" alloy-chains = "0.2.5" alloy-contract = { version = "1.0.41" } -alloy-evm = { version = "0.23.0", default-features = false } +alloy-evm = { path = "../evm/crates/evm" } alloy-provider = { version = "1.0.41", features = [ "ipc", "pubsub", @@ -165,7 +165,7 @@ alloy-hardforks = "0.4.4" rollup-boost = { git = "https://github.com/flashbots/rollup-boost", tag = "v0.7.11" } # optimism -alloy-op-evm = { version = "0.23.0", default-features = false } +alloy-op-evm = { path = "../evm/crates/op-evm" } alloy-op-hardforks = "0.4.4" op-alloy-rpc-types = { version = "0.22.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false } @@ -216,4 +216,72 @@ opentelemetry = { version = "0.31", features = ["trace"] } # Base Path concurrent-queue = "2.5.0" -tips-core = { git = "https://github.com/base/tips", rev = "c08eaa4fe10c26de8911609b41ddab4918698325", default-features = false } \ No newline at end of file +tips-core = { git = 
"https://github.com/base/tips", rev = "c08eaa4fe10c26de8911609b41ddab4918698325", default-features = false } + +[patch."https://github.com/paradigmxyz/reth"] +reth = { path = "../reth/bin/reth" } +reth-chain-state = { path = "../reth/crates/chain-state" } +reth-cli = { path = "../reth/crates/cli/cli" } +reth-cli-commands = { path = "../reth/crates/cli/commands" } +reth-cli-util = { path = "../reth/crates/cli/util" } +reth-db = { path = "../reth/crates/storage/db" } +reth-db-common = { path = "../reth/crates/storage/db-common" } +reth-errors = { path = "../reth/crates/errors" } +reth-payload-builder = { path = "../reth/crates/payload/builder" } +reth-node-api = { path = "../reth/crates/node/api" } +reth-rpc-engine-api = { path = "../reth/crates/rpc/rpc-engine-api" } +reth-node-ethereum = { path = "../reth/crates/ethereum/node" } +reth-trie = { path = "../reth/crates/trie/trie" } +reth-trie-parallel = { path = "../reth/crates/trie/parallel" } +reth-basic-payload-builder = { path = "../reth/crates/payload/basic" } +reth-node-core = { path = "../reth/crates/node/core" } +reth-primitives = { path = "../reth/crates/primitives" } +reth-primitives-traits = { path = "../reth/crates/primitives-traits" } +reth-provider = { path = "../reth/crates/storage/provider" } +reth-chainspec = { path = "../reth/crates/chainspec" } +reth-storage-api = { path = "../reth/crates/storage/storage-api" } +reth-rpc-api = { path = "../reth/crates/rpc/rpc-api" } +reth-evm = { path = "../reth/crates/evm/evm" } +reth-evm-ethereum = { path = "../reth/crates/ethereum/evm" } +reth-execution-errors = { path = "../reth/crates/evm/execution-errors" } +reth-exex = { path = "../reth/crates/exex/exex" } +reth-tasks = { path = "../reth/crates/tasks" } +reth-metrics = { path = "../reth/crates/metrics" } +reth-trie-db = { path = "../reth/crates/trie/db" } +reth-payload-primitives = { path = "../reth/crates/payload/primitives" } +reth-transaction-pool = { path = "../reth/crates/transaction-pool" } 
+reth-execution-types = { path = "../reth/crates/evm/execution-types" } +reth-revm = { path = "../reth/crates/revm" } +reth-payload-builder-primitives = { path = "../reth/crates/payload/builder-primitives" } +reth-payload-util = { path = "../reth/crates/payload/util" } +reth-rpc-layer = { path = "../reth/crates/rpc/rpc-layer" } +reth-network-peers = { path = "../reth/crates/net/peers" } +reth-testing-utils = { path = "../reth/testing/testing-utils" } +reth-node-builder = { path = "../reth/crates/node/builder" } +reth-rpc-eth-types = { path = "../reth/crates/rpc/rpc-eth-types" } +reth-tracing-otlp = { path = "../reth/crates/tracing-otlp" } +reth-ipc = { path = "../reth/crates/rpc/ipc" } +reth-optimism-primitives = { path = "../reth/crates/optimism/primitives" } +reth-optimism-consensus = { path = "../reth/crates/optimism/consensus" } +reth-optimism-cli = { path = "../reth/crates/optimism/cli" } +reth-optimism-forks = { path = "../reth/crates/optimism/hardforks" } +reth-optimism-evm = { path = "../reth/crates/optimism/evm" } +reth-optimism-node = { path = "../reth/crates/optimism/node" } +reth-optimism-payload-builder = { path = "../reth/crates/optimism/payload" } +reth-optimism-chainspec = { path = "../reth/crates/optimism/chainspec" } +reth-optimism-txpool = { path = "../reth/crates/optimism/txpool" } +reth-optimism-rpc = { path = "../reth/crates/optimism/rpc" } + +[patch.crates-io] +revm = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-bytecode = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-database = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-state = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-primitives = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-interpreter = { git = "https://github.com/meyer9/revm", branch = 
"meyer9/lazy-load-accounts-31.0.2" } +revm-inspector = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-context = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-context-interface = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +revm-database-interface = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +op-revm = { git = "https://github.com/meyer9/revm", branch = "meyer9/lazy-load-accounts-31.0.2" } +alloy-evm = { path = "../evm/crates/evm" } diff --git a/crates/op-rbuilder/src/args/op.rs b/crates/op-rbuilder/src/args/op.rs index 4dc0663c..1f3eee67 100644 --- a/crates/op-rbuilder/src/args/op.rs +++ b/crates/op-rbuilder/src/args/op.rs @@ -58,6 +58,11 @@ pub struct OpRbuilderArgs { )] pub resource_metering_buffer_size: usize, + /// Number of parallel threads for transaction execution. + /// Defaults to the number of available CPU cores. + #[arg(long = "builder.parallel-threads", env = "BUILDER_PARALLEL_THREADS")] + pub parallel_threads: Option, + /// Path to builder playgorund to automatically start up the node connected to it #[arg( long = "builder.playground", diff --git a/crates/op-rbuilder/src/block_stm/README.md b/crates/op-rbuilder/src/block_stm/README.md new file mode 100644 index 00000000..a8ff9b87 --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/README.md @@ -0,0 +1,339 @@ +# Block-STM Parallel Transaction Execution + +This module implements Block-STM style parallel transaction execution for the OP Stack payload builder. It enables speculative parallel execution of transactions with automatic conflict detection and resolution. + +## Overview + +Block-STM (Software Transactional Memory) is a parallel execution engine that: +1. **Speculatively executes** all transactions in parallel +2. **Tracks read/write sets** during execution for conflict detection +3. 
**Detects conflicts** via validation of read sets +4. **Re-executes** conflicting transactions with updated state +5. **Commits in order** to maintain sequential semantics + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────────┐ +│ Payload Builder │ +│ │ +│ ┌────────────────────────────────────────────────────────────────┐ │ +│ │ execute_best_transactions_parallel │ │ +│ └────────────────────────────────────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌────────────────────────────────────────────────────────────────┐ │ +│ │ Scheduler │ │ +│ │ - Dispatches tasks to worker threads │ │ +│ │ - Manages abort/re-execution on conflicts │ │ +│ │ - Ensures in-order commits │ │ +│ └────────────────────────────────────────────────────────────────┘ │ +│ │ │ +│ ┌────────────────────────┼────────────────────────┐ │ +│ ▼ ▼ ▼ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ Worker 0 │ │ Worker 1 │ │ Worker N │ │ +│ │ │ │ │ │ │ │ +│ │ ┌─────────┐ │ │ ┌─────────┐ │ │ ┌─────────┐ │ │ +│ │ │Versioned│ │ │ │Versioned│ │ │ │Versioned│ │ │ +│ │ │Database │ │ │ │Database │ │ │ │Database │ │ │ +│ │ └────┬────┘ │ │ └────┬────┘ │ │ └────┬────┘ │ │ +│ └──────┼──────┘ └──────┼──────┘ └──────┼──────┘ │ +│ │ │ │ │ +│ └────────────────────────┼────────────────────────┘ │ +│ ▼ │ +│ ┌────────────────────────────────────────────────────────────────┐ │ +│ │ MVHashMap │ │ +│ │ - Multi-version data structure for all state keys │ │ +│ │ - Tracks writes per (txn_idx, incarnation) │ │ +│ │ - Enables reads of earlier transactions' writes │ │ +│ └────────────────────────────────────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌────────────────────────────────────────────────────────────────┐ │ +│ │ Base State (Read-Only) │ │ +│ │ - Shared reference to State │ │ +│ │ - Fallback for keys not in MVHashMap │ │ +│ └────────────────────────────────────────────────────────────────┘ │ 
+└─────────────────────────────────────────────────────────────────────┘ +``` + +## Components + +### Types (`types.rs`) + +Core type definitions: + +| Type | Description | +|------|-------------| +| `TxnIndex` | Transaction index in the block (u32) | +| `Incarnation` | Execution attempt number (u32) | +| `Version` | Tuple of (TxnIndex, Incarnation) identifying a specific execution | +| `ExecutionStatus` | State machine: PendingScheduling → Executing → Executed → Committed | +| `Task` | Work unit: Execute, Validate, NoTask, Done | +| `EvmStateKey` | EVM state identifier (Balance, Nonce, Code, Storage) | +| `EvmStateValue` | Corresponding state values | +| `ReadResult` | Result of reading from MVHashMap (Value, NotFound, Aborted) | + +### MVHashMap (`mv_hashmap.rs`) + +Multi-version data structure that stores versioned writes: + +```rust +// Structure (conceptual) +HashMap> + +struct VersionedEntry { + incarnation: Incarnation, + value: EvmStateValue, + dependents: HashSet, // For push-based invalidation +} +``` + +**Key Operations:** +- `read(txn_idx, key)` → Returns the latest write from txn < txn_idx +- `apply_writes(txn_idx, incarnation, writes)` → Records transaction's writes +- `delete_writes(txn_idx)` → Removes writes on abort +- `mark_aborted(txn_idx)` → Returns dependent transactions to abort + +### CapturedReads (`captured_reads.rs`) + +Tracks what each transaction read during execution: + +```rust +struct CapturedReads { + reads: HashMap, +} + +struct CapturedRead { + version: Option, // None = read from base state + value: EvmStateValue, +} +``` + +Used during validation to check if reads are still valid (no conflicting writes occurred). + +### VersionedDatabase (`db_adapter.rs`) + +Implements `revm::Database` for use with the EVM: + +```rust +struct VersionedDatabase<'a, BaseDB> { + txn_idx: TxnIndex, + incarnation: Incarnation, + mv_hashmap: &'a MVHashMap, + base_db: &'a BaseDB, + captured_reads: Mutex, + aborted: Mutex>, +} +``` + +**Read Flow:** +1. 
Check MVHashMap for writes from earlier transactions +2. If found and not aborted → return value, record read +3. If aborted → mark self as aborted (will re-execute) +4. If not found → read from base_db, record read + +### Scheduler (`scheduler.rs`) + +Coordinates parallel execution: + +```rust +struct Scheduler { + num_txns: usize, + txn_states: Vec>, + execution_queue: Mutex>, + commit_idx: AtomicUsize, // Next transaction to commit + // ... +} +``` + +**Task Flow:** +1. Workers call `next_task()` to get work +2. Execute transaction with `VersionedDatabase` +3. Call `finish_execution()` with read/write sets +4. Scheduler validates and commits in order +5. On conflict → abort and re-schedule + +### WriteSet (`view.rs`) + +Collects writes during transaction execution: + +```rust +struct WriteSet { + writes: HashMap, +} +``` + +## Execution Flow + +### 1. Initialization + +```rust +let scheduler = Scheduler::new(num_candidates); +let mv_hashmap = MVHashMap::new(num_candidates); +let execution_results = vec![None; num_candidates]; +``` + +### 2. 
Parallel Execution Phase + +Each worker thread: + +```rust +loop { + match scheduler.next_task() { + Task::Execute { txn_idx, incarnation } => { + // Create versioned database for this transaction + let versioned_db = VersionedDatabase::new( + txn_idx, incarnation, &mv_hashmap, &base_db + ); + + // Wrap in State for EVM + let mut tx_state = State::builder() + .with_database(versioned_db) + .build(); + + // Execute transaction + let result = evm.transact(&tx); + + // Check for abort condition + if tx_state.database.was_aborted() { + // Will be re-scheduled + scheduler.finish_execution(..., success=false); + continue; + } + + // Build write set from state changes + let write_set = build_write_set(&state); + let captured_reads = tx_state.database.take_captured_reads(); + + // Report to scheduler (may trigger commit) + scheduler.finish_execution( + txn_idx, incarnation, + captured_reads, write_set, + gas_used, success, &mv_hashmap + ); + } + Task::Done => break, + // ... + } +} +``` + +### 3. Validation & Commit + +The scheduler's `try_commit()` validates transactions in order: + +```rust +fn try_commit(&self, mv_hashmap: &MVHashMap) { + loop { + let commit_idx = self.commit_idx.load(); + let state = self.txn_states[commit_idx].read(); + + match state.status { + ExecutionStatus::Executed(incarnation) => { + // Validate read set + if self.validate_transaction(commit_idx, &state, mv_hashmap) { + // Commit! + state.status = ExecutionStatus::Committed; + self.commit_idx.fetch_add(1); + } else { + // Conflict detected, abort and re-execute + self.abort(commit_idx, mv_hashmap); + return; + } + } + _ => return, // Not ready yet + } + } +} +``` + +### 4. 
Sequential Commit Phase + +After all workers complete, process results in order: + +```rust +for (txn_idx, result) in execution_results.iter().enumerate() { + if let Some(tx_result) = result { + // Update cumulative gas + info.cumulative_gas_used += tx_result.gas_used; + + // Build receipt with correct cumulative gas + let receipt = build_receipt(tx_result, info.cumulative_gas_used); + info.receipts.push(receipt); + + // Load accounts into cache and commit state + for address in tx_result.state.keys() { + db.load_cache_account(*address); + } + db.commit(tx_result.state); + } +} +``` + +## Conflict Detection + +A conflict occurs when: +1. Transaction A reads key K at version V +2. Transaction B (where B < A) writes to key K at version V' > V +3. Transaction A's read is now stale + +**Detection via Read Set Validation:** +```rust +fn validate_transaction(&self, txn_idx: TxnIndex, state: &TxnState) -> bool { + for (key, captured_read) in state.reads.iter() { + let current = mv_hashmap.read(txn_idx, key); + + // Check if read version matches current version + if captured_read.version != current.version { + return false; // Conflict! + } + } + true +} +``` + +## EVM State Mapping + +| EVM State | EvmStateKey | EvmStateValue | +|-----------|-------------|---------------| +| Account balance | `Balance(Address)` | `Balance(U256)` | +| Account nonce | `Nonce(Address)` | `Nonce(u64)` | +| Contract code | `Code(Address)` | `Code(Bytes)` | +| Code hash | `CodeHash(Address)` | `CodeHash(B256)` | +| Storage slot | `Storage(Address, U256)` | `Storage(U256)` | + +Note: Block hashes and code (by hash) are not tracked as dependencies since they are immutable within a block. + +## Performance Considerations + +1. **Thread Count**: Configurable via `--builder.parallel-threads` CLI flag or `BUILDER_PARALLEL_THREADS` env var. Defaults to the number of available CPU cores. 
+ - **`parallel_threads == 1`**: Disables parallel execution, uses sequential execution with `CachedReads` for better repeated-read performance + - **`parallel_threads > 1`**: Enables Block-STM parallel execution + - More threads help with many independent transactions + - Diminishing returns with high-conflict workloads + - Memory overhead per thread (each maintains local state) + +2. **Conflict Rate**: High conflict rates reduce parallelism benefit + - Common patterns: DEX swaps to same pool, token transfers + - Low-conflict blocks benefit most + +3. **Overhead**: Parallel execution adds overhead from: + - MVHashMap lookups + - Read set tracking + - Validation and potential re-execution + +4. **Optimal Scenarios**: + - Many independent transactions + - Low state contention + - Complex transactions (amortizes overhead) + +## Future Improvements + +- [x] Configurable thread count +- [ ] Metrics for conflict rate and re-execution count +- [ ] Adaptive parallelism based on conflict patterns +- [ ] Pre-execution dependency analysis +- [ ] Resource group optimization (batch related storage slots) + diff --git a/crates/op-rbuilder/src/block_stm/captured_reads.rs b/crates/op-rbuilder/src/block_stm/captured_reads.rs new file mode 100644 index 00000000..20b5e5d7 --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/captured_reads.rs @@ -0,0 +1,332 @@ +//! Captured Reads - Read Set Tracking for Block-STM +//! +//! During transaction execution, we need to track all state reads to: +//! 1. Detect conflicts when validating +//! 2. Enable push-based invalidation when a dependency is aborted +//! +//! The `CapturedReads` struct records all reads performed during execution, +//! including the version (which transaction wrote the value) for validation. + +use crate::block_stm::types::{EvmStateKey, EvmStateValue, ResolvedBalance, Version}; +use alloy_primitives::{Address, U256}; +use std::collections::HashMap; + +/// A single captured read operation. 
+#[derive(Debug, Clone)] +pub struct CapturedRead { + /// The version from which the value was read. + /// None means the value was read from base state (not from any transaction). + pub version: Option, + /// The value that was observed. + pub value: EvmStateValue, +} + +impl CapturedRead { + /// Create a new captured read from a transaction's write. + pub fn from_version(version: Version, value: EvmStateValue) -> Self { + Self { + version: Some(version), + value, + } + } + + /// Create a new captured read from base state. + pub fn from_base_state(value: EvmStateValue) -> Self { + Self { + version: None, + value, + } + } +} + +/// A captured resolved balance read (with deltas applied). +#[derive(Debug, Clone)] +pub struct CapturedResolvedBalance { + /// The address whose balance was resolved + pub address: Address, + /// The base value before deltas + pub base_value: U256, + /// The version of the base value (None if from storage) + pub base_version: Option, + /// The total delta that was applied + pub total_delta: U256, + /// The final resolved value + pub resolved_value: U256, + /// All versions that contributed deltas + pub contributors: Vec, +} + +impl CapturedResolvedBalance { + /// Create from a ResolvedBalance. + pub fn from_resolved(address: Address, resolved: ResolvedBalance) -> Self { + Self { + address, + base_value: resolved.base_value, + base_version: resolved.base_version, + total_delta: resolved.total_delta, + resolved_value: resolved.resolved_value, + contributors: resolved.contributors, + } + } +} + +/// Tracks all reads performed during a transaction's execution. +/// +/// Used for: +/// - Validation: checking if any reads have become stale +/// - Dependency tracking: knowing which transactions this one depends on +#[derive(Debug, Default)] +pub struct CapturedReads { + /// Map from state key to the read that was performed. + reads: HashMap, + /// Resolved balance reads (balance reads that included deltas). 
+ /// These are tracked separately because they depend on multiple transactions. + resolved_balances: HashMap, +} + +impl CapturedReads { + /// Create a new empty CapturedReads. + pub fn new() -> Self { + Self { + reads: HashMap::new(), + resolved_balances: HashMap::new(), + } + } + + /// Record a read from a transaction's write. + pub fn capture_read(&mut self, key: EvmStateKey, version: Version, value: EvmStateValue) { + self.reads + .insert(key, CapturedRead::from_version(version, value)); + } + + /// Record a read from base state. + pub fn capture_base_read(&mut self, key: EvmStateKey, value: EvmStateValue) { + self.reads.insert(key, CapturedRead::from_base_state(value)); + } + + /// Record a resolved balance read (balance with deltas applied). + pub fn capture_resolved_balance(&mut self, address: Address, resolved: ResolvedBalance) { + self.resolved_balances + .insert(address, CapturedResolvedBalance::from_resolved(address, resolved)); + } + + /// Get all captured reads. + pub fn reads(&self) -> &HashMap { + &self.reads + } + + /// Get all captured resolved balances. + pub fn resolved_balances(&self) -> &HashMap { + &self.resolved_balances + } + + /// Get the set of transaction indices that this transaction depends on. + /// Includes dependencies from both regular reads and resolved balance reads. + pub fn dependencies(&self) -> impl Iterator + '_ { + // Dependencies from regular reads + let read_deps = self + .reads + .values() + .filter_map(|read| read.version.map(|v| v.txn_idx)); + + // Dependencies from resolved balances (all contributors) + let balance_deps = self + .resolved_balances + .values() + .flat_map(|rb| { + rb.base_version + .iter() + .map(|v| v.txn_idx) + .chain(rb.contributors.iter().map(|v| v.txn_idx)) + }); + + read_deps.chain(balance_deps) + } + + /// Check if any read depends on the given transaction index. 
+ pub fn depends_on(&self, txn_idx: u32) -> bool { + // Check regular reads + let has_read_dep = self + .reads + .values() + .any(|read| read.version.map(|v| v.txn_idx) == Some(txn_idx)); + + if has_read_dep { + return true; + } + + // Check resolved balances (base version + contributors) + self.resolved_balances.values().any(|rb| { + rb.base_version.map(|v| v.txn_idx) == Some(txn_idx) + || rb.contributors.iter().any(|v| v.txn_idx == txn_idx) + }) + } + + /// Clear all captured reads (for re-execution). + pub fn clear(&mut self) { + self.reads.clear(); + self.resolved_balances.clear(); + } + + /// Get the number of reads captured (regular reads + resolved balances). + pub fn len(&self) -> usize { + self.reads.len() + self.resolved_balances.len() + } + + /// Check if no reads have been captured. + pub fn is_empty(&self) -> bool { + self.reads.is_empty() && self.resolved_balances.is_empty() + } + + /// Get the original balance for an address (if it was read). + /// Returns None if the balance was never read. + pub fn get_balance(&self, address: Address) -> Option { + let key = EvmStateKey::Balance(address); + self.reads.get(&key).and_then(|read| { + if let EvmStateValue::Balance(balance) = read.value { + Some(balance) + } else { + None + } + }) + } + + /// Get the original nonce for an address (if it was read). + /// Returns None if the nonce was never read. + pub fn get_nonce(&self, address: Address) -> Option { + let key = EvmStateKey::Nonce(address); + self.reads.get(&key).and_then(|read| { + if let EvmStateValue::Nonce(nonce) = read.value { + Some(nonce) + } else { + None + } + }) + } + + /// Get the original code hash for an address (if it was read). + /// Returns None if the code hash was never read. 
+ pub fn get_code_hash(&self, address: Address) -> Option { + let key = EvmStateKey::CodeHash(address); + self.reads.get(&key).and_then(|read| { + if let EvmStateValue::CodeHash(hash) = read.value { + Some(hash) + } else { + None + } + }) + } +} + +/// Result of validating a transaction's read set. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ValidationResult { + /// All reads are still valid. + Valid, + /// A read has become invalid due to a conflicting write. + Invalid { + /// The key that has a conflict. + key: EvmStateKey, + /// The version we originally read from. + original_version: Option, + /// The new version that invalidates our read. + new_version: Option, + }, + /// A read from an aborted transaction was detected. + ReadFromAborted { + /// The key that was read from an aborted transaction. + key: EvmStateKey, + /// The aborted transaction index. + aborted_txn_idx: u32, + }, +} + +impl ValidationResult { + /// Returns true if the validation passed. + pub fn is_valid(&self) -> bool { + matches!(self, ValidationResult::Valid) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{Address, U256}; + + fn test_key(slot: u64) -> EvmStateKey { + EvmStateKey::Storage(Address::ZERO, U256::from(slot)) + } + + fn test_value(v: u64) -> EvmStateValue { + EvmStateValue::Storage(U256::from(v)) + } + + #[test] + fn test_capture_read() { + let mut reads = CapturedReads::new(); + let key = test_key(1); + let version = Version::new(0, 0); + let value = test_value(42); + + reads.capture_read(key.clone(), version, value.clone()); + + assert_eq!(reads.len(), 1); + let captured = reads.reads().get(&key).unwrap(); + assert_eq!(captured.version, Some(version)); + assert_eq!(captured.value, value); + } + + #[test] + fn test_capture_base_read() { + let mut reads = CapturedReads::new(); + let key = test_key(1); + let value = test_value(42); + + reads.capture_base_read(key.clone(), value.clone()); + + let captured = reads.reads().get(&key).unwrap(); + 
assert_eq!(captured.version, None); + assert_eq!(captured.value, value); + } + + #[test] + fn test_dependencies() { + let mut reads = CapturedReads::new(); + + // Read from tx0 + reads.capture_read(test_key(1), Version::new(0, 0), test_value(100)); + // Read from tx2 + reads.capture_read(test_key(2), Version::new(2, 0), test_value(200)); + // Read from base state + reads.capture_base_read(test_key(3), test_value(300)); + + let deps: Vec<_> = reads.dependencies().collect(); + assert_eq!(deps.len(), 2); + assert!(deps.contains(&0)); + assert!(deps.contains(&2)); + } + + #[test] + fn test_depends_on() { + let mut reads = CapturedReads::new(); + reads.capture_read(test_key(1), Version::new(0, 0), test_value(100)); + + assert!(reads.depends_on(0)); + assert!(!reads.depends_on(1)); + assert!(!reads.depends_on(2)); + } + + #[test] + fn test_clear() { + let mut reads = CapturedReads::new(); + reads.capture_read(test_key(1), Version::new(0, 0), test_value(100)); + reads.capture_base_read(test_key(2), test_value(200)); + + assert_eq!(reads.len(), 2); + + reads.clear(); + + assert!(reads.is_empty()); + } +} + diff --git a/crates/op-rbuilder/src/block_stm/db_adapter.rs b/crates/op-rbuilder/src/block_stm/db_adapter.rs new file mode 100644 index 00000000..41a5cf2b --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/db_adapter.rs @@ -0,0 +1,548 @@ +//! Database Adapter for Block-STM Parallel Execution +//! +//! This module provides a `VersionedDatabase` that implements the revm `Database` trait +//! while routing reads through the MVHashMap for versioned state access. +//! +//! # How It Works +//! +//! 1. For each read operation, first check MVHashMap for writes from earlier transactions +//! 2. If found, return the versioned value and track the dependency +//! 3. If not found, read from base state and track as a base dependency +//! 4. 
All writes go to a local WriteSet (committed to MVHashMap after execution) + +use crate::block_stm::{ + captured_reads::CapturedReads, + mv_hashmap::MVHashMap, + types::{EvmStateKey, EvmStateValue, Incarnation, ReadResult, TxnIndex, Version}, + view::WriteSet, +}; +use alloy_primitives::{Address, B256, U256}; +use derive_more::Debug; +use revm::{Database, DatabaseRef, bytecode::Bytecode}; +use revm::database_interface::DBErrorMarker; +use revm::state::AccountInfo; +use std::{collections::HashMap}; +use std::sync::Mutex; +use tracing::trace; + +/// Error type for versioned database operations. +#[derive(Debug, Clone)] +pub enum VersionedDbError { + /// Read encountered an aborted transaction - need to abort and retry + ReadAborted { aborted_txn_idx: TxnIndex }, + /// Base database error + BaseDbError(String), +} + +impl std::fmt::Display for VersionedDbError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + VersionedDbError::ReadAborted { aborted_txn_idx } => { + write!(f, "Read from aborted transaction {}", aborted_txn_idx) + } + VersionedDbError::BaseDbError(e) => write!(f, "Base DB error: {}", e), + } + } +} + +impl std::error::Error for VersionedDbError {} + +impl DBErrorMarker for VersionedDbError {} + +/// Account state cached during execution. +#[derive(Debug, Clone, Default)] +struct CachedAccount { + /// Account info (balance, nonce, code_hash) + info: Option, +} + +/// A versioned database that routes reads through MVHashMap. +/// +/// This implements the revm `Database` trait, allowing it to be used +/// directly with the EVM for parallel execution. 
+#[derive(Debug)] +pub struct VersionedDatabase<'a, BaseDB> { + /// Transaction index this database is for + txn_idx: TxnIndex, + /// Incarnation of this execution + incarnation: Incarnation, + /// The multi-version hash map + mv_hashmap: &'a MVHashMap, + /// The base database for reads not in MVHashMap + base_db: &'a BaseDB, + /// Captured reads for dependency tracking + captured_reads: Mutex, + /// Local write buffer (applied to MVHashMap after execution) + writes: Mutex, + /// Cached account data to avoid re-reading + account_cache: Mutex>, + /// Whether an abort condition was encountered + aborted: Mutex>, +} + +impl<'a, BaseDB> VersionedDatabase<'a, BaseDB> { + /// Create a new versioned database for a transaction. + pub fn new( + txn_idx: TxnIndex, + incarnation: Incarnation, + mv_hashmap: &'a MVHashMap, + base_db: &'a BaseDB, + ) -> Self { + Self { + txn_idx, + incarnation, + mv_hashmap, + base_db, + captured_reads: Mutex::new(CapturedReads::new()), + writes: Mutex::new(WriteSet::new()), + account_cache: Mutex::new(HashMap::new()), + aborted: Mutex::new(None), + } + } + + /// Get the transaction index. + pub fn txn_idx(&self) -> TxnIndex { + self.txn_idx + } + + /// Get the incarnation. + pub fn incarnation(&self) -> Incarnation { + self.incarnation + } + + /// Check if execution was aborted due to reading from an aborted transaction. + pub fn was_aborted(&self) -> Option { + *self.aborted.lock().unwrap() + } + + /// Take the captured reads (consumes internal state). + pub fn take_captured_reads(&self) -> CapturedReads { + std::mem::take(&mut *self.captured_reads.lock().unwrap()) + } + + /// Take the write set (consumes internal state). + pub fn take_writes(&self) -> WriteSet { + std::mem::take(&mut *self.writes.lock().unwrap()) + } + + /// Record a read from MVHashMap. 
+ fn record_versioned_read(&self, key: EvmStateKey, version: Version, value: EvmStateValue) { + self.captured_reads.lock().unwrap().capture_read(key, version, value); + } + + /// Record a read from base state. + fn record_base_read(&self, key: EvmStateKey, value: EvmStateValue) { + self.captured_reads.lock().unwrap().capture_base_read(key, value); + } + + /// Record a resolved balance read (balance with deltas applied). + fn record_resolved_balance(&self, address: Address, resolved: crate::block_stm::types::ResolvedBalance) { + self.captured_reads + .lock() + .unwrap() + .capture_resolved_balance(address, resolved); + } + + /// Mark execution as aborted. + fn mark_aborted(&self, aborted_txn_idx: TxnIndex) { + *self.aborted.lock().unwrap() = Some(aborted_txn_idx); + } + + /// Resolve a balance including any pending deltas. + /// + /// This handles the case where earlier transactions have written balance deltas + /// (e.g., fee increments) that need to be applied to the balance. + fn resolve_balance_with_deltas( + &self, + address: Address, + base_value: U256, + base_version: Option, + ) -> Result { + // Check if there are pending deltas for this address + if !self.mv_hashmap.has_pending_deltas(&address, self.txn_idx) { + // No deltas, just return the base value (already recorded) + return Ok(base_value); + } + + // Resolve deltas + match self.mv_hashmap.resolve_balance(address, self.txn_idx, base_value, base_version) { + Ok(resolved) => { + let final_value = resolved.resolved_value; + + trace!( + txn_idx = self.txn_idx, + address = %address, + base_value = %base_value, + total_delta = %resolved.total_delta, + resolved_value = %final_value, + num_contributors = resolved.contributors.len(), + "Resolved balance with deltas" + ); + + // Record the resolved balance read (tracks all contributors) + self.record_resolved_balance(address, resolved); + + Ok(final_value) + } + Err(aborted_txn_idx) => { + trace!( + txn_idx = self.txn_idx, + address = %address, + aborted_txn = 
aborted_txn_idx, + "Read delta from aborted transaction" + ); + self.mark_aborted(aborted_txn_idx); + Err(VersionedDbError::ReadAborted { aborted_txn_idx }) + } + } + } +} + +impl<'a, BaseDB> Database for VersionedDatabase<'a, BaseDB> +where + BaseDB: revm::DatabaseRef, + ::Error: std::fmt::Display, +{ + type Error = VersionedDbError; + + fn basic(&mut self, address: Address) -> Result, VersionedDbError> { + self.basic_ref(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + self.code_by_hash_ref(code_hash) + } + + fn storage(&mut self, address: Address, slot: U256) -> Result { + self.storage_ref(address, slot) + } + + fn block_hash(&mut self, number: u64) -> Result { + self.block_hash_ref(number) + } +} + +/// Implementation for base databases that implement DatabaseRef. +impl<'a, BaseDB> DatabaseRef for VersionedDatabase<'a, BaseDB> +where + BaseDB: revm::DatabaseRef, + ::Error: std::fmt::Display, +{ + type Error = VersionedDbError; + + /// Read account info (balance, nonce, code_hash). 
+ fn basic_ref(&self, address: Address) -> Result, VersionedDbError> { + // Check cache first + { + let cache = self.account_cache.lock().unwrap(); + if let Some(cached) = cache.get(&address) { + if cached.info.is_some() { + return Ok(cached.info.clone()); + } + } + } + + // Check MVHashMap for balance + let balance_key = EvmStateKey::Balance(address); + let balance_result = self.mv_hashmap.read(self.txn_idx, &balance_key); + + // Check MVHashMap for nonce + let nonce_key = EvmStateKey::Nonce(address); + let nonce_result = self.mv_hashmap.read(self.txn_idx, &nonce_key); + + // Check MVHashMap for code hash + let code_hash_key = EvmStateKey::CodeHash(address); + let code_hash_result = self.mv_hashmap.read(self.txn_idx, &code_hash_key); + + // Check for aborts + if let ReadResult::Aborted { txn_idx } = &balance_result { + self.mark_aborted(*txn_idx); + return Err(VersionedDbError::ReadAborted { aborted_txn_idx: *txn_idx }); + } + if let ReadResult::Aborted { txn_idx } = &nonce_result { + self.mark_aborted(*txn_idx); + return Err(VersionedDbError::ReadAborted { aborted_txn_idx: *txn_idx }); + } + if let ReadResult::Aborted { txn_idx } = &code_hash_result { + self.mark_aborted(*txn_idx); + return Err(VersionedDbError::ReadAborted { aborted_txn_idx: *txn_idx }); + } + + // Read base account if needed + let base_account = self.base_db.basic_ref(address) + .map_err(|e| VersionedDbError::BaseDbError(e.to_string()))?; + + let base_info = base_account.unwrap_or_default(); + + // Merge MVHashMap values with base state + // For balance, we also need to consider pending deltas (fee increments) + let balance = match &balance_result { + ReadResult::Value { value: EvmStateValue::Balance(b), version } => { + // Got a direct write from MVHashMap - now resolve any pending deltas + self.record_versioned_read(balance_key.clone(), *version, EvmStateValue::Balance(*b)); + // Resolve with deltas (if any) + self.resolve_balance_with_deltas(address, *b, Some(*version))? 
+ } + _ => { + // No direct write, use base state - but still check for deltas + let base_balance = base_info.balance; + + // Check if there are pending deltas + if self.mv_hashmap.has_pending_deltas(&address, self.txn_idx) { + // Resolve with deltas - this will record the resolved balance read + self.resolve_balance_with_deltas(address, base_balance, None)? + } else { + // No deltas, just record as normal base read + self.record_base_read(balance_key, EvmStateValue::Balance(base_balance)); + base_balance + } + } + }; + + let nonce = match &nonce_result { + ReadResult::Value { value: EvmStateValue::Nonce(n), version } => { + self.record_versioned_read(nonce_key, *version, EvmStateValue::Nonce(*n)); + *n + } + _ => { + self.record_base_read(nonce_key, EvmStateValue::Nonce(base_info.nonce)); + base_info.nonce + } + }; + + let code_hash = match &code_hash_result { + ReadResult::Value { value: EvmStateValue::CodeHash(h), version } => { + self.record_versioned_read(code_hash_key, *version, EvmStateValue::CodeHash(*h)); + *h + } + _ => { + self.record_base_read(code_hash_key, EvmStateValue::CodeHash(base_info.code_hash)); + base_info.code_hash + } + }; + + let account_info = AccountInfo { + balance, + nonce, + code_hash, + code: base_info.code.clone(), + }; + + trace!( + txn_idx = self.txn_idx, + address = %address, + balance = %balance, + nonce = nonce, + "Read account info" + ); + + // Cache the result + { + let mut cache = self.account_cache.lock().unwrap(); + let entry = cache.entry(address).or_default(); + entry.info = Some(account_info.clone()); + } + + Ok(Some(account_info)) + } + + /// Read a storage slot. 
+ fn storage_ref(&self, address: Address, slot: U256) -> Result { + let key = EvmStateKey::Storage(address, slot); + + match self.mv_hashmap.read(self.txn_idx, &key) { + ReadResult::Value { value: EvmStateValue::Storage(v), version } => { + trace!( + txn_idx = self.txn_idx, + address = %address, + slot = %slot, + value = %v, + source_txn = version.txn_idx, + "Read storage from MVHashMap" + ); + self.record_versioned_read(key, version, EvmStateValue::Storage(v)); + Ok(v) + } + ReadResult::Value { .. } => { + // Unexpected value type + Ok(U256::ZERO) + } + ReadResult::NotFound => { + // Read from base state + let value = self.base_db.storage_ref(address, slot) + .map_err(|e| VersionedDbError::BaseDbError(e.to_string()))?; + trace!( + txn_idx = self.txn_idx, + address = %address, + slot = %slot, + value = %value, + "Read storage from base state" + ); + self.record_base_read(key, EvmStateValue::Storage(value)); + Ok(value) + } + ReadResult::Aborted { txn_idx: aborted_txn_idx } => { + trace!( + txn_idx = self.txn_idx, + address = %address, + slot = %slot, + aborted_txn = aborted_txn_idx, + "Read storage from aborted transaction" + ); + self.mark_aborted(aborted_txn_idx); + Err(VersionedDbError::ReadAborted { aborted_txn_idx }) + } + } + } + + /// Read a block hash. + /// Block hashes are immutable within a block, so we don't track them as dependencies. + fn block_hash_ref(&self, number: u64) -> Result { + self.base_db.block_hash_ref(number) + .map_err(|e| VersionedDbError::BaseDbError(e.to_string())) + } + + /// Read contract code by hash. + fn code_by_hash_ref(&self, code_hash: B256) -> Result { + // Code is usually immutable, read directly from base + self.base_db.code_by_hash_ref(code_hash) + .map_err(|e| VersionedDbError::BaseDbError(e.to_string())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::block_stm::mv_hashmap::MVHashMap; + + /// Mock base database for testing. 
+ struct MockBaseDb { + accounts: HashMap, + storage: HashMap<(Address, U256), U256>, + } + + impl MockBaseDb { + fn new() -> Self { + Self { + accounts: HashMap::new(), + storage: HashMap::new(), + } + } + + fn with_account(mut self, address: Address, balance: U256, nonce: u64) -> Self { + self.accounts.insert(address, AccountInfo { + balance, + nonce, + code_hash: B256::ZERO, + code: None, + }); + self + } + + fn with_storage(mut self, address: Address, slot: U256, value: U256) -> Self { + self.storage.insert((address, slot), value); + self + } + } + + impl revm::DatabaseRef for MockBaseDb { + type Error = std::convert::Infallible; + + fn basic_ref(&self, address: Address) -> Result, Self::Error> { + Ok(self.accounts.get(&address).cloned()) + } + + fn code_by_hash_ref(&self, _code_hash: B256) -> Result { + Ok(Bytecode::default()) + } + + fn storage_ref(&self, address: Address, index: U256) -> Result { + Ok(self.storage.get(&(address, index)).copied().unwrap_or(U256::ZERO)) + } + + fn block_hash_ref(&self, _number: u64) -> Result { + Ok(B256::ZERO) + } + } + + #[test] + fn test_read_from_base_state() { + let mv = MVHashMap::new(10); + let addr = Address::from([1u8; 20]); + let mut base = MockBaseDb::new() + .with_account(addr, U256::from(1000), 5); + + let db = VersionedDatabase::new(0, 0, &mv, &mut base); + + let info = db.basic_ref(addr).unwrap().unwrap(); + assert_eq!(info.balance, U256::from(1000)); + assert_eq!(info.nonce, 5); + + // Check that reads were captured + let reads = db.take_captured_reads(); + assert!(!reads.is_empty()); + } + + #[test] + fn test_read_from_mvhashmap() { + let mv = MVHashMap::new(10); + let addr = Address::from([1u8; 20]); + + // Tx0 writes a balance + mv.write(0, 0, EvmStateKey::Balance(addr), EvmStateValue::Balance(U256::from(2000))); + mv.write(0, 0, EvmStateKey::Nonce(addr), EvmStateValue::Nonce(10)); + + let mut base = MockBaseDb::new() + .with_account(addr, U256::from(1000), 5); + + // Tx1 reads - should see tx0's writes + 
let db = VersionedDatabase::new(1, 0, &mv, &mut base); + + let info = db.basic_ref(addr).unwrap().unwrap(); + assert_eq!(info.balance, U256::from(2000)); // From MVHashMap + assert_eq!(info.nonce, 10); // From MVHashMap + } + + #[test] + fn test_storage_read() { + let mv = MVHashMap::new(10); + let addr = Address::from([1u8; 20]); + let slot = U256::from(42); + + // Tx0 writes to storage + mv.write(0, 0, EvmStateKey::Storage(addr, slot), EvmStateValue::Storage(U256::from(999))); + + let mut base = MockBaseDb::new() + .with_storage(addr, slot, U256::from(100)); + + // Tx1 should see tx0's write + let db = VersionedDatabase::new(1, 0, &mv, &mut base); + let value = db.storage_ref(addr, slot).unwrap(); + assert_eq!(value, U256::from(999)); + + // Tx0 should see base state (can't see own writes) + let db0 = VersionedDatabase::new(0, 0, &mv, &mut base); + let value0 = db0.storage_ref(addr, slot).unwrap(); + assert_eq!(value0, U256::from(100)); + } + + #[test] + fn test_aborted_read_detection() { + let mv = MVHashMap::new(10); + let addr = Address::from([1u8; 20]); + + // Tx0 writes and is marked as aborted + mv.write(0, 0, EvmStateKey::Balance(addr), EvmStateValue::Balance(U256::from(2000))); + mv.mark_aborted(0); + + let mut base = MockBaseDb::new(); + + // Tx1 tries to read - should get abort error + let db = VersionedDatabase::new(1, 0, &mv, &mut base); + let result = db.basic_ref(addr); + + assert!(result.is_err()); + assert!(db.was_aborted().is_some()); + } +} + diff --git a/crates/op-rbuilder/src/block_stm/executor.rs b/crates/op-rbuilder/src/block_stm/executor.rs new file mode 100644 index 00000000..ea5298c7 --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/executor.rs @@ -0,0 +1,455 @@ +//! Block-STM Executor +//! +//! The main executor that orchestrates parallel transaction execution using Block-STM. +//! +//! # Execution Flow +//! +//! 1. Collect all transactions to execute +//! 2. Initialize MVHashMap and Scheduler +//! 3. Spawn worker threads +//! 4. 
Workers execute transactions speculatively +//! 5. Handle conflicts via abort/re-execute +//! 6. Commit in order +//! 7. Collect results + +use crate::block_stm::{ + captured_reads::CapturedReads, + mv_hashmap::MVHashMap, + scheduler::{Scheduler, SchedulerStats}, + types::{Task, TxnIndex}, + view::{LatestView, WriteSet}, +}; +use std::sync::Arc; +use std::thread; +use tracing::{info, trace}; + +/// Configuration for the Block-STM executor. +#[derive(Debug, Clone)] +pub struct BlockStmConfig { + /// Number of worker threads + pub num_threads: usize, +} + +impl Default for BlockStmConfig { + fn default() -> Self { + Self { + num_threads: std::thread::available_parallelism() + .map(|p| p.get()) + .unwrap_or(4), + } + } +} + +impl BlockStmConfig { + /// Create a config with a specific number of threads. + pub fn with_threads(num_threads: usize) -> Self { + Self { num_threads } + } +} + +/// Result of executing a single transaction. +#[derive(Debug, Clone)] +pub struct TxnExecutionResult { + /// Transaction index + pub txn_idx: TxnIndex, + /// Gas used + pub gas_used: u64, + /// Whether the transaction succeeded + pub success: bool, +} + +/// Result of parallel execution. +#[derive(Debug)] +pub struct ParallelExecutionResult { + /// Results for each transaction, in order + pub results: Vec, + /// Execution statistics + pub stats: SchedulerStats, +} + +/// The Block-STM parallel executor. +pub struct BlockStmExecutor { + config: BlockStmConfig, +} + +impl BlockStmExecutor { + /// Create a new executor with the given configuration. + pub fn new(config: BlockStmConfig) -> Self { + Self { config } + } + + /// Create a new executor with default configuration. + pub fn default_executor() -> Self { + Self::new(BlockStmConfig::default()) + } + + /// Execute transactions in parallel. 
+ /// + /// # Type Parameters + /// - `Tx`: Transaction type + /// - `BaseDB`: Base database type for state reads + /// - `ExecFn`: Function to execute a single transaction + /// + /// # Arguments + /// - `transactions`: The transactions to execute + /// - `base_db`: The base database for reading initial state + /// - `exec_fn`: Function that executes a single transaction given a view + /// + /// The `exec_fn` receives: + /// - Transaction index + /// - Reference to the transaction + /// - A `LatestView` for reading state (which tracks dependencies) + /// + /// It should return: + /// - `CapturedReads`: The reads performed (from view.take_captured_reads()) + /// - `WriteSet`: The writes to apply + /// - `u64`: Gas used + /// - `bool`: Whether the transaction succeeded + pub fn execute( + &self, + transactions: &[Tx], + base_db: &BaseDB, + exec_fn: ExecFn, + ) -> ParallelExecutionResult + where + Tx: Sync, + BaseDB: Sync, + ExecFn: Fn(TxnIndex, &Tx, &LatestView<'_, BaseDB>) -> (CapturedReads, WriteSet, u64, bool) + + Sync + + Send, + { + let num_txns = transactions.len(); + if num_txns == 0 { + return ParallelExecutionResult { + results: vec![], + stats: SchedulerStats::default(), + }; + } + + info!( + num_txns = num_txns, + num_threads = self.config.num_threads, + "Starting Block-STM parallel execution" + ); + + // Initialize shared state + let mv_hashmap = Arc::new(MVHashMap::new(num_txns)); + let scheduler = Arc::new(Scheduler::new(num_txns)); + + // Use scoped threads so we can borrow transactions and base_db + // Reference to exec_fn for sharing across threads + let exec_fn_ref = &exec_fn; + + thread::scope(|s| { + // Spawn worker threads + let num_threads = self.config.num_threads.min(num_txns); + + for thread_id in 0..num_threads { + let mv_hashmap = Arc::clone(&mv_hashmap); + let scheduler = Arc::clone(&scheduler); + + s.spawn(move || { + scheduler.worker_start(); + + trace!( + thread_id = thread_id, + "Block-STM worker started" + ); + + loop { + let task = 
scheduler.next_task(); + + match task { + Task::Execute { txn_idx, incarnation } => { + trace!( + thread_id = thread_id, + txn_idx = txn_idx, + incarnation = incarnation, + "Worker executing transaction" + ); + + scheduler.start_execution(txn_idx, incarnation); + + // Create the view for this transaction + let view = LatestView::new( + txn_idx, + incarnation, + &mv_hashmap, + base_db, + ); + + // Execute the transaction + let tx = &transactions[txn_idx as usize]; + let (reads, writes, gas_used, success) = exec_fn_ref(txn_idx, tx, &view); + + trace!( + thread_id = thread_id, + txn_idx = txn_idx, + incarnation = incarnation, + gas_used = gas_used, + success = success, + num_reads = reads.len(), + num_writes = writes.len(), + "Worker finished executing transaction" + ); + + // Notify scheduler of completion + scheduler.finish_execution( + txn_idx, + incarnation, + reads, + writes, + gas_used, + success, + &mv_hashmap, + ); + } + Task::Validate { txn_idx } => { + trace!( + thread_id = thread_id, + txn_idx = txn_idx, + "Worker validating transaction" + ); + // Validation is handled in try_commit for now + } + Task::NoTask => { + // No work available, check if we should wait or exit + if scheduler.is_done() { + break; + } + scheduler.wait_for_work(); + } + Task::Done => { + break; + } + } + } + + scheduler.worker_done(); + + trace!( + thread_id = thread_id, + "Block-STM worker finished" + ); + }); + } + }); + + // Collect results + let stats = scheduler.get_stats(); + let results: Vec<_> = (0..num_txns as TxnIndex) + .map(|txn_idx| TxnExecutionResult { + txn_idx, + gas_used: scheduler.get_gas_used(txn_idx), + success: scheduler.was_successful(txn_idx), + }) + .collect(); + + info!( + num_txns = num_txns, + total_executions = stats.total_executions, + total_aborts = stats.total_aborts, + "Block-STM parallel execution complete" + ); + + ParallelExecutionResult { results, stats } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use 
crate::block_stm::types::{EvmStateKey, EvmStateValue}; + use alloy_primitives::{Address, U256}; + + /// A simple mock transaction for testing. + #[derive(Debug, Clone)] + struct MockTransaction { + /// Keys this transaction reads + pub reads: Vec, + /// Keys and values this transaction writes + pub writes: Vec<(EvmStateKey, EvmStateValue)>, + /// Gas to use + pub gas: u64, + } + + struct MockDb; + + fn test_key(slot: u64) -> EvmStateKey { + EvmStateKey::Storage(Address::ZERO, U256::from(slot)) + } + + fn test_value(v: u64) -> EvmStateValue { + EvmStateValue::Storage(U256::from(v)) + } + + #[test] + fn test_executor_empty() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + let transactions: Vec = vec![]; + let db = MockDb; + + let result = executor.execute(&transactions, &db, |_, _, _| { + (CapturedReads::new(), WriteSet::new(), 21000, true) + }); + + assert_eq!(result.results.len(), 0); + } + + #[test] + fn test_executor_single_transaction() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(1)); + let transactions = vec![MockTransaction { + reads: vec![], + writes: vec![(test_key(1), test_value(100))], + gas: 21000, + }]; + let db = MockDb; + + let result = executor.execute(&transactions, &db, |_, tx, view| { + // Simulate reads + for key in &tx.reads { + let _ = view.read_from_mvhashmap(key); + } + + // Build write set + let mut writes = WriteSet::new(); + for (key, value) in &tx.writes { + writes.write(key.clone(), value.clone()); + } + + (view.take_captured_reads(), writes, tx.gas, true) + }); + + assert_eq!(result.results.len(), 1); + assert_eq!(result.results[0].gas_used, 21000); + assert!(result.results[0].success); + assert_eq!(result.stats.total_commits, 1); + } + + #[test] + fn test_executor_multiple_independent_transactions() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(4)); + + // 10 transactions that don't conflict (each writes to different key) + let transactions: Vec = (0..10) + 
.map(|i| MockTransaction { + reads: vec![], + writes: vec![(test_key(i), test_value(i * 100))], + gas: 21000, + }) + .collect(); + + let db = MockDb; + + let result = executor.execute(&transactions, &db, |_, tx, view| { + let mut writes = WriteSet::new(); + for (key, value) in &tx.writes { + writes.write(key.clone(), value.clone()); + } + (view.take_captured_reads(), writes, tx.gas, true) + }); + + assert_eq!(result.results.len(), 10); + assert_eq!(result.stats.total_commits, 10); + // No conflicts, so no aborts + assert_eq!(result.stats.total_aborts, 0); + + // All should succeed + for r in &result.results { + assert!(r.success); + assert_eq!(r.gas_used, 21000); + } + } + + #[test] + fn test_executor_dependent_transactions() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + + // tx0 writes to key 1 + // tx1 reads key 1, writes to key 2 + // This creates a dependency: tx1 depends on tx0 + let key1 = test_key(1); + let key2 = test_key(2); + + let transactions = vec![ + MockTransaction { + reads: vec![], + writes: vec![(key1.clone(), test_value(100))], + gas: 21000, + }, + MockTransaction { + reads: vec![key1.clone()], + writes: vec![(key2.clone(), test_value(200))], + gas: 21000, + }, + ]; + + let db = MockDb; + + let result = executor.execute(&transactions, &db, |txn_idx, tx, view| { + // Simulate reads + for key in &tx.reads { + match view.read_from_mvhashmap(key) { + Ok(Some((_value, version))) => { + trace!( + txn_idx = txn_idx, + key = %key, + source_txn = version.txn_idx, + "Read value from MVHashMap" + ); + } + Ok(None) => { + // Would read from base, record it + view.record_base_read(key.clone(), EvmStateValue::NotFound); + } + Err(_) => { + // Read from aborted transaction - in real impl would need to handle + } + } + } + + let mut writes = WriteSet::new(); + for (key, value) in &tx.writes { + writes.write(key.clone(), value.clone()); + } + + (view.take_captured_reads(), writes, tx.gas, true) + }); + + 
assert_eq!(result.results.len(), 2); + assert_eq!(result.stats.total_commits, 2); + + // Both should succeed + assert!(result.results[0].success); + assert!(result.results[1].success); + } + + #[test] + fn test_executor_with_many_threads() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(8)); + + // 100 independent transactions + let transactions: Vec = (0..100) + .map(|i| MockTransaction { + reads: vec![], + writes: vec![(test_key(i), test_value(i * 100))], + gas: 21000, + }) + .collect(); + + let db = MockDb; + + let result = executor.execute(&transactions, &db, |_, tx, view| { + let mut writes = WriteSet::new(); + for (key, value) in &tx.writes { + writes.write(key.clone(), value.clone()); + } + (view.take_captured_reads(), writes, tx.gas, true) + }); + + assert_eq!(result.results.len(), 100); + assert_eq!(result.stats.total_commits, 100); + } +} + diff --git a/crates/op-rbuilder/src/block_stm/mod.rs b/crates/op-rbuilder/src/block_stm/mod.rs new file mode 100644 index 00000000..c4ccd0ca --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/mod.rs @@ -0,0 +1,41 @@ +//! Block-STM: Parallel Transaction Execution Engine for EVM +//! +//! This module implements a parallel execution engine for EVM transactions based on +//! the Block-STM algorithm. Block-STM enables speculative parallel execution by: +//! +//! 1. Executing all transactions in parallel speculatively +//! 2. Tracking read/write sets during execution +//! 3. Detecting conflicts via push-based invalidation +//! 4. Re-executing conflicting transactions +//! 5. Committing results in transaction order +//! +//! # Architecture +//! +//! - [`types`]: Core types (TxnIndex, Version, EvmStateKey) +//! - [`mv_hashmap`]: Multi-version data structure for concurrent state access +//! - [`captured_reads`]: Read set tracking during execution +//! - [`view`]: Versioned state view implementing revm's Database trait +//! - [`scheduler`]: Transaction scheduling and abort management +//! 
- [`executor`]: Main parallel execution orchestrator + +pub mod types; +pub mod mv_hashmap; +pub mod captured_reads; +pub mod view; +pub mod scheduler; +pub mod executor; +pub mod db_adapter; + +#[cfg(test)] +mod tests; + +// Re-export commonly used types +pub use types::{ + EvmStateKey, EvmStateValue, ExecutionStatus, Incarnation, ReadResult, Task, TxnIndex, Version, +}; +pub use mv_hashmap::MVHashMap; +pub use captured_reads::CapturedReads; +pub use scheduler::Scheduler; +pub use executor::BlockStmExecutor; +pub use db_adapter::{VersionedDatabase, VersionedDbError}; + diff --git a/crates/op-rbuilder/src/block_stm/mv_hashmap.rs b/crates/op-rbuilder/src/block_stm/mv_hashmap.rs new file mode 100644 index 00000000..bb41549b --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/mv_hashmap.rs @@ -0,0 +1,869 @@ +//! Multi-Version Hash Map for Block-STM +//! +//! The MVHashMap is the central data structure for parallel execution. It stores +//! versioned writes from transactions, allowing concurrent reads while tracking +//! dependencies for conflict detection. +//! +//! # Key Features +//! +//! - **Versioned Storage**: Each key can have multiple versions (one per transaction) +//! - **Dependency Tracking**: Readers register dependencies on writers for push-based invalidation +//! - **Concurrent Access**: Uses fine-grained locking for parallel read/write + +use crate::block_stm::types::{ + EvmStateKey, EvmStateValue, Incarnation, ReadResult, ResolvedBalance, TxnIndex, Version, +}; +use alloy_primitives::{Address, U256}; +use parking_lot::RwLock; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::sync::atomic::{AtomicBool, Ordering}; +use tracing::trace; + +/// Entry for a single transaction's write to a key. 
+#[derive(Debug)] +struct WriteEntry { + /// The incarnation that wrote this value + incarnation: Incarnation, + /// The written value + value: EvmStateValue, + /// Whether this entry has been marked as aborted + aborted: AtomicBool, +} + +impl WriteEntry { + fn new(incarnation: Incarnation, value: EvmStateValue) -> Self { + Self { + incarnation, + value, + aborted: AtomicBool::new(false), + } + } + + fn is_aborted(&self) -> bool { + self.aborted.load(Ordering::Acquire) + } + + fn mark_aborted(&self) { + self.aborted.store(true, Ordering::Release); + } +} + +/// Versioned data for a single key. +/// Maps transaction index to the write entry. +#[derive(Debug, Default)] +struct VersionedValue { + /// Map from txn_idx to write entry. + /// BTreeMap keeps entries sorted by txn_idx for efficient "latest before" queries. + writes: BTreeMap, + /// Set of transactions that have read this key (for dependency tracking). + /// Maps reader txn_idx to the version they observed. + readers: HashMap>, +} + +impl VersionedValue { + /// Write a value at the given version. + fn write(&mut self, txn_idx: TxnIndex, incarnation: Incarnation, value: EvmStateValue) { + self.writes.insert(txn_idx, WriteEntry::new(incarnation, value)); + } + + /// Read the latest value written by a transaction with index < reader_txn_idx. + /// Returns the value and version, or NotFound if no such write exists. 
+ fn read(&mut self, reader_txn_idx: TxnIndex) -> ReadResult { + // Find the latest write with txn_idx < reader_txn_idx + let maybe_entry = self + .writes + .range(..reader_txn_idx) + .next_back(); + + match maybe_entry { + Some((&writer_txn_idx, entry)) => { + if entry.is_aborted() { + // Track that we tried to read from an aborted transaction + self.readers.insert(reader_txn_idx, None); + ReadResult::Aborted { txn_idx: writer_txn_idx } + } else { + let version = Version::new(writer_txn_idx, entry.incarnation); + // Track the dependency + self.readers.insert(reader_txn_idx, Some(version)); + ReadResult::Value { + value: entry.value.clone(), + version, + } + } + } + None => { + // No write found, reader depends on base state + self.readers.insert(reader_txn_idx, None); + ReadResult::NotFound + } + } + } + + /// Mark a transaction's write as aborted. + /// Returns the set of reader transactions that need to be invalidated. + fn mark_aborted(&mut self, txn_idx: TxnIndex) -> HashSet { + if let Some(entry) = self.writes.get(&txn_idx) { + entry.mark_aborted(); + } + + // Find all readers that read from this transaction or later + // (they may have been affected by this write) + let version = self.writes.get(&txn_idx).map(|e| Version::new(txn_idx, e.incarnation)); + + self.readers + .iter() + .filter_map(|(&reader_idx, &observed_version)| { + // Reader is affected if: + // 1. They read from the aborted transaction + // 2. They read from base state but the aborted tx is before them + // (they should have seen the write) + if reader_idx > txn_idx { + if observed_version == version || observed_version.is_none() { + Some(reader_idx) + } else { + None + } + } else { + None + } + }) + .collect() + } + + /// Clear the read tracking for a transaction (when it's re-executed). + fn clear_reader(&mut self, txn_idx: TxnIndex) { + self.readers.remove(&txn_idx); + } + + /// Delete a transaction's write (when the transaction is re-executed with new incarnation). 
+ fn delete_write(&mut self, txn_idx: TxnIndex) { + self.writes.remove(&txn_idx); + } +} + + +/// Entry for a balance delta with abort tracking. +#[derive(Debug)] +struct DeltaEntry { + /// The incarnation that wrote this delta + incarnation: Incarnation, + /// The delta amount + delta: U256, + /// Whether this entry has been marked as aborted + aborted: AtomicBool, +} + +impl DeltaEntry { + fn new(incarnation: Incarnation, delta: U256) -> Self { + Self { + incarnation, + delta, + aborted: AtomicBool::new(false), + } + } + + fn is_aborted(&self) -> bool { + self.aborted.load(Ordering::Acquire) + } + + fn mark_aborted(&self) { + self.aborted.store(true, Ordering::Release); + } +} + +/// Versioned deltas for a single address. +#[derive(Debug, Default)] +struct VersionedDeltas { + /// Map from txn_idx to delta entry. + deltas: BTreeMap, + /// Readers that have resolved deltas for this address. + /// Maps reader txn_idx to the list of contributor versions they observed. + readers: HashMap>, +} + +impl VersionedDeltas { + /// Write a delta at the given version. + fn write(&mut self, txn_idx: TxnIndex, incarnation: Incarnation, delta: U256) { + self.deltas.insert(txn_idx, DeltaEntry::new(incarnation, delta)); + } + + /// Resolve all deltas from transactions before reader_txn_idx. + /// Returns the total delta and the list of contributor versions. + fn resolve(&mut self, reader_txn_idx: TxnIndex) -> Result<(U256, Vec), TxnIndex> { + let mut total = U256::ZERO; + let mut contributors = Vec::new(); + + for (&txn_idx, entry) in self.deltas.range(..reader_txn_idx) { + if entry.is_aborted() { + return Err(txn_idx); + } + total = total.saturating_add(entry.delta); + contributors.push(Version::new(txn_idx, entry.incarnation)); + } + + // Track that this reader resolved these deltas + self.readers.insert(reader_txn_idx, contributors.clone()); + + Ok((total, contributors)) + } + + /// Mark a transaction's delta as aborted. 
+ /// Returns the set of reader transactions that need to be invalidated. + fn mark_aborted(&mut self, txn_idx: TxnIndex) -> HashSet { + if let Some(entry) = self.deltas.get(&txn_idx) { + entry.mark_aborted(); + } + + // Find all readers that included this transaction's delta + self.readers + .iter() + .filter_map(|(&reader_idx, contributors)| { + if contributors.iter().any(|v| v.txn_idx == txn_idx) { + Some(reader_idx) + } else { + None + } + }) + .collect() + } + + /// Clear the read tracking for a transaction (when it's re-executed). + fn clear_reader(&mut self, txn_idx: TxnIndex) { + self.readers.remove(&txn_idx); + } + + /// Delete a transaction's delta (when the transaction is re-executed). + fn delete_delta(&mut self, txn_idx: TxnIndex) { + self.deltas.remove(&txn_idx); + } +} + +/// Multi-Version Hash Map for Block-STM parallel execution. +/// +/// Stores versioned writes per key and tracks read dependencies for push-based invalidation. +/// Also stores balance deltas separately for commutative fee accumulation. +#[derive(Debug)] +pub struct MVHashMap { + /// Map from state key to versioned values. + data: RwLock>>, + /// Balance deltas indexed by address. + /// These are commutative increments that don't conflict with each other. + balance_deltas: RwLock>>, + /// Number of transactions in the block (reserved for future use). + #[allow(dead_code)] + num_txns: usize, +} + +impl MVHashMap { + /// Create a new MVHashMap for a block with the given number of transactions. + pub fn new(num_txns: usize) -> Self { + Self { + data: RwLock::new(HashMap::new()), + balance_deltas: RwLock::new(HashMap::new()), + num_txns, + } + } + + /// Write a value at the given version. 
+ pub fn write(&self, txn_idx: TxnIndex, incarnation: Incarnation, key: EvmStateKey, value: EvmStateValue) { + trace!( + txn_idx = txn_idx, + incarnation = incarnation, + key = %key, + "MVHashMap write" + ); + + // Get or create the versioned value entry for this key + { + let data = self.data.read(); + if let Some(versioned) = data.get(&key) { + versioned.write().write(txn_idx, incarnation, value); + return; + } + } + + // Key doesn't exist, need to create it + let mut data = self.data.write(); + let versioned = data.entry(key.clone()).or_insert_with(|| RwLock::new(VersionedValue::default())); + versioned.write().write(txn_idx, incarnation, value); + } + + /// Read the latest value for a key that was written by a transaction before reader_txn_idx. + pub fn read(&self, reader_txn_idx: TxnIndex, key: &EvmStateKey) -> ReadResult { + let data = self.data.read(); + + match data.get(key) { + Some(versioned) => { + let result = versioned.write().read(reader_txn_idx); + trace!( + reader_txn_idx = reader_txn_idx, + key = %key, + result = ?result, + "MVHashMap read" + ); + result + } + None => { + trace!( + reader_txn_idx = reader_txn_idx, + key = %key, + "MVHashMap read - key not in map" + ); + ReadResult::NotFound + } + } + } + + /// Mark a transaction as aborted and return the set of dependent transactions + /// that need to be invalidated. 
+ pub fn mark_aborted(&self, txn_idx: TxnIndex) -> HashSet { + trace!(txn_idx = txn_idx, "MVHashMap marking transaction as aborted"); + + let mut dependents = HashSet::new(); + + // Mark regular writes as aborted + let data = self.data.read(); + for versioned in data.values() { + let affected = versioned.write().mark_aborted(txn_idx); + dependents.extend(affected); + } + drop(data); + + // Mark deltas as aborted + let delta_dependents = self.mark_delta_aborted(txn_idx); + dependents.extend(delta_dependents); + + trace!( + txn_idx = txn_idx, + num_dependents = dependents.len(), + "MVHashMap found dependent transactions" + ); + + dependents + } + + /// Clear all read tracking for a transaction (called before re-execution). + pub fn clear_reads(&self, txn_idx: TxnIndex) { + // Clear regular reads + let data = self.data.read(); + for versioned in data.values() { + versioned.write().clear_reader(txn_idx); + } + drop(data); + + // Clear delta reads + self.clear_delta_reads(txn_idx); + } + + /// Delete all writes from a transaction (called before re-execution with new incarnation). + pub fn delete_writes(&self, txn_idx: TxnIndex) { + // Delete regular writes + let data = self.data.read(); + for versioned in data.values() { + versioned.write().delete_write(txn_idx); + } + drop(data); + + // Delete deltas + self.delete_deltas(txn_idx); + } + + /// Apply multiple writes from a transaction. + pub fn apply_writes(&self, txn_idx: TxnIndex, incarnation: Incarnation, writes: Vec<(EvmStateKey, EvmStateValue)>) { + for (key, value) in writes { + self.write(txn_idx, incarnation, key, value); + } + } + + /// Get the final committed value for a key. + /// Should only be called after all transactions are committed. 
+ pub fn get_committed_value(&self, key: &EvmStateKey) -> Option { + let data = self.data.read(); + data.get(key).and_then(|versioned| { + let v = versioned.read(); + v.writes + .iter() + .next_back() + .map(|(_, entry)| entry.value.clone()) + }) + } + + /// Get all keys that have been written to. + pub fn get_written_keys(&self) -> Vec { + self.data.read().keys().cloned().collect() + } + + // ========== Balance Delta Methods (for commutative fee accumulation) ========== + + /// Write a balance delta (fee increment) at the given version. + /// + /// Balance deltas are commutative - multiple transactions can write deltas + /// to the same address without conflicting. Conflicts only occur when + /// a transaction reads the resolved balance. + pub fn write_balance_delta( + &self, + address: Address, + txn_idx: TxnIndex, + incarnation: Incarnation, + delta: U256, + ) { + trace!( + txn_idx = txn_idx, + incarnation = incarnation, + address = %address, + delta = %delta, + "MVHashMap write_balance_delta" + ); + + // Get or create the versioned deltas entry for this address + { + let data = self.balance_deltas.read(); + if let Some(versioned) = data.get(&address) { + versioned.write().write(txn_idx, incarnation, delta); + return; + } + } + + // Address doesn't exist, need to create it + let mut data = self.balance_deltas.write(); + let versioned = data + .entry(address) + .or_insert_with(|| RwLock::new(VersionedDeltas::default())); + versioned.write().write(txn_idx, incarnation, delta); + } + + /// Resolve balance including all deltas from transactions before reader_txn_idx. + /// + /// This combines: + /// 1. A base value (from storage or an earlier write to Balance(address)) + /// 2. All deltas written by transactions with index < reader_txn_idx + /// + /// Returns a ResolvedBalance with the final value and all contributing versions. + /// Returns Err(aborted_txn_idx) if a delta from an aborted transaction was encountered. 
+ pub fn resolve_balance( + &self, + address: Address, + reader_txn_idx: TxnIndex, + base_value: U256, + base_version: Option, + ) -> Result { + let data = self.balance_deltas.read(); + + let (total_delta, contributors) = match data.get(&address) { + Some(versioned) => versioned.write().resolve(reader_txn_idx)?, + None => (U256::ZERO, Vec::new()), + }; + + let resolved_value = base_value.saturating_add(total_delta); + + trace!( + reader_txn_idx = reader_txn_idx, + address = %address, + base_value = %base_value, + total_delta = %total_delta, + resolved_value = %resolved_value, + num_contributors = contributors.len(), + "MVHashMap resolve_balance" + ); + + Ok(ResolvedBalance { + base_value, + base_version, + total_delta, + resolved_value, + contributors, + }) + } + + /// Check if there are any pending deltas for an address from transactions before reader_txn_idx. + pub fn has_pending_deltas(&self, address: &Address, reader_txn_idx: TxnIndex) -> bool { + let data = self.balance_deltas.read(); + match data.get(address) { + Some(versioned) => { + let v = versioned.read(); + v.deltas.range(..reader_txn_idx).next().is_some() + } + None => false, + } + } + + /// Mark a transaction's delta as aborted and return dependent readers. + pub fn mark_delta_aborted(&self, txn_idx: TxnIndex) -> HashSet { + trace!(txn_idx = txn_idx, "MVHashMap marking delta as aborted"); + + let mut dependents = HashSet::new(); + let data = self.balance_deltas.read(); + + for versioned in data.values() { + let affected = versioned.write().mark_aborted(txn_idx); + dependents.extend(affected); + } + + dependents + } + + /// Clear delta read tracking for a transaction (called before re-execution). + pub fn clear_delta_reads(&self, txn_idx: TxnIndex) { + let data = self.balance_deltas.read(); + for versioned in data.values() { + versioned.write().clear_reader(txn_idx); + } + } + + /// Delete a transaction's deltas (called before re-execution with new incarnation). 
+ pub fn delete_deltas(&self, txn_idx: TxnIndex) { + let data = self.balance_deltas.read(); + for versioned in data.values() { + versioned.write().delete_delta(txn_idx); + } + } + + /// Apply multiple balance deltas from a transaction. + pub fn apply_balance_deltas( + &self, + txn_idx: TxnIndex, + incarnation: Incarnation, + deltas: Vec<(Address, U256)>, + ) { + for (address, delta) in deltas { + self.write_balance_delta(address, txn_idx, incarnation, delta); + } + } + + /// Get the final committed delta sum for an address. + /// Should only be called after all transactions are committed. + pub fn get_committed_delta_sum(&self, address: &Address) -> U256 { + let data = self.balance_deltas.read(); + match data.get(address) { + Some(versioned) => { + let v = versioned.read(); + v.deltas.values().map(|e| e.delta).fold(U256::ZERO, |acc, d| acc.saturating_add(d)) + } + None => U256::ZERO, + } + } + + /// Get all addresses that have balance deltas. + pub fn get_delta_addresses(&self) -> Vec
{ + self.balance_deltas.read().keys().cloned().collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{Address, U256}; + + fn test_key(slot: u64) -> EvmStateKey { + EvmStateKey::Storage(Address::ZERO, U256::from(slot)) + } + + fn test_value(v: u64) -> EvmStateValue { + EvmStateValue::Storage(U256::from(v)) + } + + #[test] + fn test_simple_write_read() { + let mv = MVHashMap::new(10); + let key = test_key(1); + let value = test_value(42); + + // Transaction 0 writes + mv.write(0, 0, key.clone(), value.clone()); + + // Transaction 1 reads -> should see tx0's write + match mv.read(1, &key) { + ReadResult::Value { value: v, version } => { + assert_eq!(v, value); + assert_eq!(version.txn_idx, 0); + assert_eq!(version.incarnation, 0); + } + _ => panic!("Expected Value result"), + } + + // Transaction 0 reads -> should not see its own write + match mv.read(0, &key) { + ReadResult::NotFound => {} + _ => panic!("Expected NotFound result"), + } + } + + #[test] + fn test_read_latest_before() { + let mv = MVHashMap::new(10); + let key = test_key(1); + + // tx0 writes 100 + mv.write(0, 0, key.clone(), test_value(100)); + // tx2 writes 200 + mv.write(2, 0, key.clone(), test_value(200)); + // tx5 writes 500 + mv.write(5, 0, key.clone(), test_value(500)); + + // tx1 should see tx0's write (100) + match mv.read(1, &key) { + ReadResult::Value { value, version } => { + assert_eq!(value, test_value(100)); + assert_eq!(version.txn_idx, 0); + } + _ => panic!("Expected Value"), + } + + // tx3 should see tx2's write (200) + match mv.read(3, &key) { + ReadResult::Value { value, version } => { + assert_eq!(value, test_value(200)); + assert_eq!(version.txn_idx, 2); + } + _ => panic!("Expected Value"), + } + + // tx6 should see tx5's write (500) + match mv.read(6, &key) { + ReadResult::Value { value, version } => { + assert_eq!(value, test_value(500)); + assert_eq!(version.txn_idx, 5); + } + _ => panic!("Expected Value"), + } + } + + #[test] + fn 
test_incarnation_overwrite() { + let mv = MVHashMap::new(10); + let key = test_key(1); + + // tx0 incarnation 0 writes 100 + mv.write(0, 0, key.clone(), test_value(100)); + + // tx1 reads, should see 100 + match mv.read(1, &key) { + ReadResult::Value { value, version } => { + assert_eq!(value, test_value(100)); + assert_eq!(version.incarnation, 0); + } + _ => panic!("Expected Value"), + } + + // tx0 gets re-executed (incarnation 1), writes 200 + mv.delete_writes(0); + mv.write(0, 1, key.clone(), test_value(200)); + + // tx1 reads again, should see 200 with incarnation 1 + match mv.read(1, &key) { + ReadResult::Value { value, version } => { + assert_eq!(value, test_value(200)); + assert_eq!(version.incarnation, 1); + } + _ => panic!("Expected Value"), + } + } + + #[test] + fn test_abort_tracking() { + let mv = MVHashMap::new(10); + let key = test_key(1); + + // tx0 writes + mv.write(0, 0, key.clone(), test_value(100)); + + // tx1, tx2, tx3 all read from tx0 + let _ = mv.read(1, &key); + let _ = mv.read(2, &key); + let _ = mv.read(3, &key); + + // Mark tx0 as aborted + let dependents = mv.mark_aborted(0); + + // All readers should be in the dependent set + assert!(dependents.contains(&1)); + assert!(dependents.contains(&2)); + assert!(dependents.contains(&3)); + } + + #[test] + fn test_multiple_keys() { + let mv = MVHashMap::new(10); + let key1 = test_key(1); + let key2 = test_key(2); + + // tx0 writes to key1 + mv.write(0, 0, key1.clone(), test_value(100)); + // tx1 writes to key2 + mv.write(1, 0, key2.clone(), test_value(200)); + + // tx2 reads both + match mv.read(2, &key1) { + ReadResult::Value { value, version } => { + assert_eq!(value, test_value(100)); + assert_eq!(version.txn_idx, 0); + } + _ => panic!("Expected Value for key1"), + } + + match mv.read(2, &key2) { + ReadResult::Value { value, version } => { + assert_eq!(value, test_value(200)); + assert_eq!(version.txn_idx, 1); + } + _ => panic!("Expected Value for key2"), + } + } + + #[test] + fn 
test_not_found() { + let mv = MVHashMap::new(10); + let key = test_key(1); + + // No writes yet + match mv.read(0, &key) { + ReadResult::NotFound => {} + _ => panic!("Expected NotFound"), + } + + // tx5 writes + mv.write(5, 0, key.clone(), test_value(500)); + + // tx3 still shouldn't see it (tx5 > tx3) + match mv.read(3, &key) { + ReadResult::NotFound => {} + _ => panic!("Expected NotFound"), + } + } + + // ========== Balance Delta Tests ========== + + #[test] + fn test_balance_delta_simple() { + let mv = MVHashMap::new(10); + let coinbase = Address::from([1u8; 20]); + + // Tx0 adds delta +100 + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + + // Tx1 adds delta +50 + mv.write_balance_delta(coinbase, 1, 0, U256::from(50)); + + // Tx2 resolves balance with base=1000 + let result = mv.resolve_balance(coinbase, 2, U256::from(1000), None).unwrap(); + + assert_eq!(result.base_value, U256::from(1000)); + assert_eq!(result.total_delta, U256::from(150)); // 100 + 50 + assert_eq!(result.resolved_value, U256::from(1150)); // 1000 + 150 + assert_eq!(result.contributors.len(), 2); + } + + #[test] + fn test_balance_delta_no_conflict_between_delta_writes() { + let mv = MVHashMap::new(10); + let coinbase = Address::from([1u8; 20]); + + // Multiple transactions add deltas - this should NOT cause conflicts + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + mv.write_balance_delta(coinbase, 1, 0, U256::from(200)); + mv.write_balance_delta(coinbase, 2, 0, U256::from(300)); + + // Get the total committed sum + let total = mv.get_committed_delta_sum(&coinbase); + assert_eq!(total, U256::from(600)); // 100 + 200 + 300 + } + + #[test] + fn test_balance_delta_resolution_only_sees_earlier_txns() { + let mv = MVHashMap::new(10); + let coinbase = Address::from([1u8; 20]); + + // Tx0, Tx2, Tx5 add deltas + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + mv.write_balance_delta(coinbase, 2, 0, U256::from(200)); + mv.write_balance_delta(coinbase, 5, 0, 
U256::from(500)); + + // Tx3 resolves - should see Tx0 and Tx2, but NOT Tx5 + let result = mv.resolve_balance(coinbase, 3, U256::ZERO, None).unwrap(); + assert_eq!(result.total_delta, U256::from(300)); // 100 + 200 + assert_eq!(result.contributors.len(), 2); + + // Tx6 resolves - should see all three + let result2 = mv.resolve_balance(coinbase, 6, U256::ZERO, None).unwrap(); + assert_eq!(result2.total_delta, U256::from(800)); // 100 + 200 + 500 + assert_eq!(result2.contributors.len(), 3); + } + + #[test] + fn test_balance_delta_abort_invalidates_readers() { + let mv = MVHashMap::new(10); + let coinbase = Address::from([1u8; 20]); + + // Tx0 and Tx1 add deltas + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + mv.write_balance_delta(coinbase, 1, 0, U256::from(50)); + + // Tx2 resolves (reads from both Tx0 and Tx1) + let _ = mv.resolve_balance(coinbase, 2, U256::ZERO, None).unwrap(); + + // Mark Tx0 as aborted + let dependents = mv.mark_aborted(0); + + // Tx2 should be in the dependents set + assert!(dependents.contains(&2)); + } + + #[test] + fn test_balance_delta_no_deltas() { + let mv = MVHashMap::new(10); + let coinbase = Address::from([1u8; 20]); + + // No deltas written - should return base value unchanged + let result = mv.resolve_balance(coinbase, 5, U256::from(1000), None).unwrap(); + + assert_eq!(result.resolved_value, U256::from(1000)); + assert_eq!(result.total_delta, U256::ZERO); + assert!(result.contributors.is_empty()); + } + + #[test] + fn test_balance_delta_reexecution() { + let mv = MVHashMap::new(10); + let coinbase = Address::from([1u8; 20]); + + // Tx0 first execution adds +100 + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + + // Tx1 resolves + let result1 = mv.resolve_balance(coinbase, 1, U256::ZERO, None).unwrap(); + assert_eq!(result1.total_delta, U256::from(100)); + + // Tx0 re-executes with different delta + mv.delete_deltas(0); + mv.write_balance_delta(coinbase, 0, 1, U256::from(200)); // incarnation 1 + + // Tx1 
resolves again - should see new value + let result2 = mv.resolve_balance(coinbase, 1, U256::ZERO, None).unwrap(); + assert_eq!(result2.total_delta, U256::from(200)); + assert_eq!(result2.contributors[0].incarnation, 1); + } + + #[test] + fn test_has_pending_deltas() { + let mv = MVHashMap::new(10); + let coinbase = Address::from([1u8; 20]); + let other = Address::from([2u8; 20]); + + // No deltas yet + assert!(!mv.has_pending_deltas(&coinbase, 5)); + + // Add delta for coinbase + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + + // Tx1+ should see pending delta for coinbase + assert!(mv.has_pending_deltas(&coinbase, 1)); + assert!(mv.has_pending_deltas(&coinbase, 5)); + + // Tx0 should NOT see its own delta + assert!(!mv.has_pending_deltas(&coinbase, 0)); + + // Other address has no deltas + assert!(!mv.has_pending_deltas(&other, 5)); + } +} + diff --git a/crates/op-rbuilder/src/block_stm/scheduler.rs b/crates/op-rbuilder/src/block_stm/scheduler.rs new file mode 100644 index 00000000..759fa476 --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/scheduler.rs @@ -0,0 +1,583 @@ +//! Scheduler for Block-STM Parallel Execution +//! +//! The Scheduler coordinates parallel transaction execution, handling: +//! - Task distribution to worker threads +//! - Abort management and push-based invalidation +//! - In-order commit sequencing +//! +//! # Execution Flow +//! +//! 1. All transactions start in `PendingScheduling` state +//! 2. Workers request tasks and execute transactions speculatively +//! 3. When conflicts are detected, dependent transactions are aborted +//! 4. 
Transactions are committed in order (tx0 must commit before tx1)
+
+use crate::block_stm::{
+    captured_reads::CapturedReads,
+    mv_hashmap::MVHashMap,
+    types::{ExecutionStatus, Incarnation, Task, TxnIndex},
+    view::WriteSet,
+};
+use parking_lot::{Condvar, Mutex, RwLock};
+use std::collections::{HashSet, VecDeque};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use tracing::debug;
+
+/// Per-transaction execution state.
+#[derive(Debug)]
+struct TxnState {
+    /// Current execution status
+    status: ExecutionStatus,
+    /// Number of times this transaction has been executed
+    incarnation: Incarnation,
+    /// The read set from the latest execution (for validation)
+    reads: Option<CapturedReads>,
+    /// The write set from the latest execution
+    writes: Option<WriteSet>,
+    /// Gas used in the latest execution
+    gas_used: u64,
+    /// Whether the latest execution was successful
+    success: bool,
+}
+
+impl TxnState {
+    /// Fresh state for a transaction that has never been executed:
+    /// pending scheduling, incarnation 0, no captured reads/writes.
+    fn new() -> Self {
+        Self {
+            status: ExecutionStatus::PendingScheduling,
+            incarnation: 0,
+            reads: None,
+            writes: None,
+            gas_used: 0,
+            success: false,
+        }
+    }
+}
+
+/// Statistics about the execution.
+#[derive(Debug, Default)]
+pub struct SchedulerStats {
+    /// Total number of executions (including re-executions)
+    pub total_executions: usize,
+    /// Number of aborts
+    pub total_aborts: usize,
+    /// Number of successful commits
+    pub total_commits: usize,
+}
+
+/// The Block-STM Scheduler.
+///
+/// Manages task distribution, abort handling, and commit ordering.
+pub struct Scheduler {
+    /// Number of transactions in the block
+    num_txns: usize,
+    /// Per-transaction state, indexed by transaction index
+    txn_states: Vec<RwLock<TxnState>>,
+    /// Queue of transactions ready for execution
+    execution_queue: Mutex<VecDeque<TxnIndex>>,
+    /// Set of transactions that need validation
+    validation_queue: Mutex<HashSet<TxnIndex>>,
+    /// Index of the next transaction to commit (commits must be in order)
+    commit_idx: AtomicUsize,
+    /// Condition variable for waking up workers
+    work_available: Condvar,
+    /// Lock for condition variable
+    work_lock: Mutex<()>,
+    /// Number of active workers
+    active_workers: AtomicUsize,
+    /// Whether execution is complete
+    done: RwLock<bool>,
+    /// Execution statistics
+    stats: Mutex<SchedulerStats>,
+}
+
+impl Scheduler {
+    /// Create a new scheduler for a block with the given number of transactions.
+    pub fn new(num_txns: usize) -> Self {
+        // Initialize all transactions as pending
+        let txn_states: Vec<_> = (0..num_txns)
+            .map(|_| RwLock::new(TxnState::new()))
+            .collect();
+
+        // Queue all transactions for initial execution
+        let execution_queue: VecDeque<_> = (0..num_txns as TxnIndex).collect();
+
+        Self {
+            num_txns,
+            txn_states,
+            execution_queue: Mutex::new(execution_queue),
+            validation_queue: Mutex::new(HashSet::new()),
+            commit_idx: AtomicUsize::new(0),
+            work_available: Condvar::new(),
+            work_lock: Mutex::new(()),
+            active_workers: AtomicUsize::new(0),
+            done: RwLock::new(false),
+            stats: Mutex::new(SchedulerStats::default()),
+        }
+    }
+
+    /// Get the index of the next transaction to commit.
+    pub fn get_commit_idx(&self) -> usize {
+        self.commit_idx.load(Ordering::SeqCst)
+    }
+
+    /// Get the number of transactions.
+    pub fn num_txns(&self) -> usize {
+        self.num_txns
+    }
+
+    /// Register a worker starting work.
+    pub fn worker_start(&self) {
+        self.active_workers.fetch_add(1, Ordering::SeqCst);
+    }
+
+    /// Register a worker finishing work.
+    pub fn worker_done(&self) {
+        let prev = self.active_workers.fetch_sub(1, Ordering::SeqCst);
+        if prev == 1 {
+            // Last worker, wake up anyone waiting
+            self.work_available.notify_all();
+        }
+    }
+
+    /// Check if all work is done.
+    pub fn is_done(&self) -> bool {
+        *self.done.read()
+    }
+
+    /// Get the next task for a worker.
+    ///
+    /// Priority order: `Done` if the block finished, then a pending
+    /// execution, then a pending validation, else `NoTask`.
+    pub fn next_task(&self) -> Task {
+        // Check if we're done
+        if *self.done.read() {
+            return Task::Done;
+        }
+
+        // Try to get a transaction to execute
+        if let Some(txn_idx) = self.execution_queue.lock().pop_front() {
+            let state = self.txn_states[txn_idx as usize].read();
+            let incarnation = state.incarnation;
+            return Task::Execute { txn_idx, incarnation };
+        }
+
+        // Try to get a transaction to validate. Hold one guard across the
+        // peek-and-remove: locking once for `iter().next()` and again for
+        // `remove()` (as this code previously did) lets two workers observe
+        // the same txn_idx between the two acquisitions and both return a
+        // Validate task for it.
+        {
+            let mut validation_queue = self.validation_queue.lock();
+            if let Some(&txn_idx) = validation_queue.iter().next() {
+                validation_queue.remove(&txn_idx);
+                return Task::Validate { txn_idx };
+            }
+        }
+
+        // No work available
+        Task::NoTask
+    }
+
+    /// Wait for work to become available.
+    pub fn wait_for_work(&self) {
+        let mut lock = self.work_lock.lock();
+        // Check one more time if there's work or we're done
+        if !self.execution_queue.lock().is_empty()
+            || !self.validation_queue.lock().is_empty()
+            || *self.done.read()
+        {
+            return;
+        }
+        // Wait with timeout to avoid deadlocks
+        self.work_available.wait_for(&mut lock, std::time::Duration::from_millis(10));
+    }
+
+    /// Notify that work is available.
+    fn notify_work(&self) {
+        self.work_available.notify_all();
+    }
+
+    /// Mark a transaction as starting execution.
+    pub fn start_execution(&self, txn_idx: TxnIndex, incarnation: Incarnation) {
+        let mut state = self.txn_states[txn_idx as usize].write();
+        state.status = ExecutionStatus::Executing(incarnation);
+        self.stats.lock().total_executions += 1;
+
+        debug!(
+            txn_idx = txn_idx,
+            incarnation = incarnation,
+            "Transaction starting execution"
+        );
+    }
+
+    /// Record execution completion for a transaction.
+ pub fn finish_execution( + &self, + txn_idx: TxnIndex, + incarnation: Incarnation, + reads: CapturedReads, + writes: WriteSet, + gas_used: u64, + success: bool, + mv_hashmap: &MVHashMap, + ) { + // Separate regular writes and balance deltas + let (regular_writes, balance_deltas) = writes.into_parts(); + + // Apply regular writes to MVHashMap + mv_hashmap.apply_writes(txn_idx, incarnation, regular_writes); + + // Apply balance deltas to MVHashMap + // These are tracked separately for commutative accumulation + mv_hashmap.apply_balance_deltas(txn_idx, incarnation, balance_deltas); + + // Update transaction state + { + let mut state = self.txn_states[txn_idx as usize].write(); + state.status = ExecutionStatus::Executed(incarnation); + state.reads = Some(reads); + state.writes = None; // Already applied to MVHashMap + state.gas_used = gas_used; + state.success = success; + } + + debug!( + txn_idx = txn_idx, + incarnation = incarnation, + gas_used = gas_used, + success = success, + "Transaction finished execution" + ); + + // Try to commit if this is the next transaction to commit + self.try_commit(mv_hashmap); + } + + /// Abort a transaction due to a conflict. + pub fn abort(&self, txn_idx: TxnIndex, mv_hashmap: &MVHashMap) { + let mut state = self.txn_states[txn_idx as usize].write(); + let old_incarnation = state.incarnation; + + // Increment incarnation for re-execution + state.incarnation += 1; + state.status = ExecutionStatus::Aborted(old_incarnation); + state.reads = None; + state.writes = None; + + self.stats.lock().total_aborts += 1; + + debug!( + txn_idx = txn_idx, + old_incarnation = old_incarnation, + new_incarnation = state.incarnation, + "Transaction aborted" + ); + + // Clear MVHashMap entries and get dependents to abort + mv_hashmap.delete_writes(txn_idx); + let dependents = mv_hashmap.mark_aborted(txn_idx); + + drop(state); + + // Schedule re-execution at the FRONT of the queue. + // Aborted transactions block commits, so they should be prioritized. 
+ self.execution_queue.lock().push_front(txn_idx); + + // Abort dependent transactions + for dep_idx in dependents { + if dep_idx > txn_idx { + self.abort(dep_idx, mv_hashmap); + } + } + + self.notify_work(); + } + + /// Try to commit transactions in order. + /// + /// Uses compare_exchange on commit_idx to ensure exactly one thread + /// commits each transaction, preventing race conditions in stats tracking. + fn try_commit(&self, mv_hashmap: &MVHashMap) { + loop { + let commit_idx = self.commit_idx.load(Ordering::SeqCst); + if commit_idx >= self.num_txns { + // All transactions committed + *self.done.write() = true; + self.notify_work(); + return; + } + + let state = self.txn_states[commit_idx].read(); + + // Check if the transaction at commit_idx is ready to commit + match state.status { + ExecutionStatus::Executed(incarnation) => { + // Validate the transaction + if self.validate_transaction(commit_idx as TxnIndex, &state, mv_hashmap) { + drop(state); + + // Atomically claim this commit slot using compare_exchange. + // Only the thread that successfully advances commit_idx is + // responsible for updating the status and incrementing stats. 
+ match self.commit_idx.compare_exchange( + commit_idx, + commit_idx + 1, + Ordering::SeqCst, + Ordering::SeqCst, + ) { + Ok(_) => { + // We successfully claimed this commit slot + { + let mut state = self.txn_states[commit_idx].write(); + state.status = ExecutionStatus::Committed; + } + + self.stats.lock().total_commits += 1; + + debug!( + txn_idx = commit_idx, + incarnation = incarnation, + "Transaction committed" + ); + + // Continue to try committing the next transaction + continue; + } + Err(_) => { + // Another thread already claimed this slot, + // loop to check the next transaction + continue; + } + } + } else { + // Validation failed, abort and re-execute + drop(state); + self.abort(commit_idx as TxnIndex, mv_hashmap); + return; + } + } + _ => { + // Transaction not ready yet + return; + } + } + } + } + + /// Validate a transaction's read set. + fn validate_transaction( + &self, + txn_idx: TxnIndex, + state: &TxnState, + mv_hashmap: &MVHashMap, + ) -> bool { + let reads = match &state.reads { + Some(r) => r, + None => return true, // No reads to validate + }; + + // Check each regular read to see if it's still valid + for (key, captured) in reads.reads() { + // Re-read from MVHashMap + let current = mv_hashmap.read(txn_idx, key); + + match (captured.version, ¤t) { + // Both read from same version - valid + (Some(v1), crate::block_stm::types::ReadResult::Value { version: v2, .. 
}) + if v1 == *v2 => continue, + // Both read from base state - valid + (None, crate::block_stm::types::ReadResult::NotFound) => continue, + // Mismatch - invalid + _ => { + debug!( + txn_idx = txn_idx, + key = %key, + original_version = ?captured.version, + "Validation failed - read version mismatch" + ); + return false; + } + } + } + + // Check resolved balance reads (balance reads with deltas applied) + for (address, resolved) in reads.resolved_balances() { + // Re-resolve the balance with current deltas + let current_result = mv_hashmap.resolve_balance( + *address, + txn_idx, + resolved.base_value, + resolved.base_version, + ); + + match current_result { + Ok(current) => { + // Check if the resolved value is the same + if current.resolved_value != resolved.resolved_value { + debug!( + txn_idx = txn_idx, + address = %address, + original_value = %resolved.resolved_value, + current_value = %current.resolved_value, + "Validation failed - resolved balance mismatch" + ); + return false; + } + + // Check if contributors are the same (versions match) + if current.contributors.len() != resolved.contributors.len() { + debug!( + txn_idx = txn_idx, + address = %address, + original_contributors = resolved.contributors.len(), + current_contributors = current.contributors.len(), + "Validation failed - contributor count mismatch" + ); + return false; + } + + // Verify each contributor version matches + for (orig, curr) in resolved.contributors.iter().zip(current.contributors.iter()) { + if orig != curr { + debug!( + txn_idx = txn_idx, + address = %address, + original_version = ?orig, + current_version = ?curr, + "Validation failed - contributor version mismatch" + ); + return false; + } + } + } + Err(aborted_txn_idx) => { + // One of the contributors was aborted + debug!( + txn_idx = txn_idx, + address = %address, + aborted_contributor = aborted_txn_idx, + "Validation failed - contributor was aborted" + ); + return false; + } + } + } + + true + } + + /// Get the current 
execution statistics. + pub fn get_stats(&self) -> SchedulerStats { + let stats = self.stats.lock(); + SchedulerStats { + total_executions: stats.total_executions, + total_aborts: stats.total_aborts, + total_commits: stats.total_commits, + } + } + + /// Get the status of a transaction. + pub fn get_status(&self, txn_idx: TxnIndex) -> ExecutionStatus { + self.txn_states[txn_idx as usize].read().status + } + + /// Get the gas used by a committed transaction. + pub fn get_gas_used(&self, txn_idx: TxnIndex) -> u64 { + self.txn_states[txn_idx as usize].read().gas_used + } + + /// Check if a transaction was successful. + pub fn was_successful(&self, txn_idx: TxnIndex) -> bool { + self.txn_states[txn_idx as usize].read().success + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_scheduler_initial_state() { + let scheduler = Scheduler::new(5); + + assert_eq!(scheduler.num_txns(), 5); + assert!(!scheduler.is_done()); + + // All transactions should be queued for execution + let task = scheduler.next_task(); + assert!(matches!(task, Task::Execute { txn_idx: 0, incarnation: 0 })); + } + + #[test] + fn test_scheduler_task_ordering() { + let scheduler = Scheduler::new(3); + + // Should get transactions in order + assert!(matches!(scheduler.next_task(), Task::Execute { txn_idx: 0, .. })); + assert!(matches!(scheduler.next_task(), Task::Execute { txn_idx: 1, .. })); + assert!(matches!(scheduler.next_task(), Task::Execute { txn_idx: 2, .. 
})); + + // No more tasks + assert!(matches!(scheduler.next_task(), Task::NoTask)); + } + + #[test] + fn test_scheduler_execution_flow() { + let scheduler = Scheduler::new(2); + let mv = MVHashMap::new(2); + + // Execute tx0 + let task = scheduler.next_task(); + assert!(matches!(task, Task::Execute { txn_idx: 0, incarnation: 0 })); + scheduler.start_execution(0, 0); + + let reads = CapturedReads::new(); + let writes = WriteSet::new(); + scheduler.finish_execution(0, 0, reads, writes, 21000, true, &mv); + + // tx0 should now be committed + assert!(matches!(scheduler.get_status(0), ExecutionStatus::Committed)); + + // Execute tx1 + let task = scheduler.next_task(); + assert!(matches!(task, Task::Execute { txn_idx: 1, incarnation: 0 })); + scheduler.start_execution(1, 0); + + let reads = CapturedReads::new(); + let writes = WriteSet::new(); + scheduler.finish_execution(1, 0, reads, writes, 21000, true, &mv); + + // tx1 should now be committed + assert!(matches!(scheduler.get_status(1), ExecutionStatus::Committed)); + + // Should be done + assert!(scheduler.is_done()); + } + + #[test] + fn test_scheduler_abort_reschedules() { + let scheduler = Scheduler::new(3); + let mv = MVHashMap::new(3); + + // Get all initial tasks + let _ = scheduler.next_task(); // tx0 + let _ = scheduler.next_task(); // tx1 + let _ = scheduler.next_task(); // tx2 + + // Abort tx1 + scheduler.start_execution(1, 0); + scheduler.abort(1, &mv); + + // tx1 should be re-queued with incremented incarnation + let task = scheduler.next_task(); + assert!(matches!(task, Task::Execute { txn_idx: 1, incarnation: 1 })); + } + + #[test] + fn test_scheduler_stats() { + let scheduler = Scheduler::new(2); + let mv = MVHashMap::new(2); + + // Execute both transactions + scheduler.start_execution(0, 0); + scheduler.finish_execution(0, 0, CapturedReads::new(), WriteSet::new(), 21000, true, &mv); + + scheduler.start_execution(1, 0); + scheduler.finish_execution(1, 0, CapturedReads::new(), WriteSet::new(), 21000, 
true, &mv); + + let stats = scheduler.get_stats(); + assert_eq!(stats.total_executions, 2); + assert_eq!(stats.total_commits, 2); + assert_eq!(stats.total_aborts, 0); + } +} + diff --git a/crates/op-rbuilder/src/block_stm/tests.rs b/crates/op-rbuilder/src/block_stm/tests.rs new file mode 100644 index 00000000..a9673729 --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/tests.rs @@ -0,0 +1,934 @@ +//! Integration and Consistency Tests for Block-STM +//! +//! These tests verify that: +//! 1. Parallel execution produces identical results to sequential execution +//! 2. Conflict detection and re-execution work correctly +//! 3. The system handles various edge cases + +use crate::block_stm::{ + executor::{BlockStmConfig, BlockStmExecutor, TxnExecutionResult}, + types::{EvmStateKey, EvmStateValue, TxnIndex}, + view::WriteSet, +}; +use alloy_primitives::{Address, U256}; +use std::collections::HashMap; +use std::sync::atomic::{AtomicU64, Ordering}; + +// ============================================================================= +// Test Helpers +// ============================================================================= + +/// A mock database that stores account balances. +#[derive(Debug, Default)] +struct MockDb { + balances: HashMap, +} + +impl MockDb { + fn with_balance(mut self, addr: Address, balance: U256) -> Self { + self.balances.insert(addr, balance); + self + } + + fn get_balance(&self, addr: &Address) -> U256 { + self.balances.get(addr).cloned().unwrap_or(U256::ZERO) + } +} + +/// A simple transaction for testing. +#[derive(Debug, Clone)] +struct TestTransaction { + /// Sender address + from: Address, + /// Recipient address (None for simple balance read) + to: Option
, + /// Amount to transfer + amount: U256, + /// Gas to use + gas: u64, +} + +impl TestTransaction { + fn transfer(from: Address, to: Address, amount: U256) -> Self { + Self { + from, + to: Some(to), + amount, + gas: 21000, + } + } + + fn read_balance(addr: Address) -> Self { + Self { + from: addr, + to: None, + amount: U256::ZERO, + gas: 21000, + } + } +} + +/// Execute transactions sequentially (baseline for comparison). +fn execute_sequential( + transactions: &[TestTransaction], + initial_balances: &HashMap, +) -> (Vec, HashMap) { + let mut balances = initial_balances.clone(); + let mut results = Vec::new(); + + for (idx, tx) in transactions.iter().enumerate() { + let from_balance = balances.get(&tx.from).cloned().unwrap_or(U256::ZERO); + + let success = if let Some(to) = tx.to { + if from_balance >= tx.amount { + // Perform transfer + *balances.entry(tx.from).or_insert(U256::ZERO) -= tx.amount; + *balances.entry(to).or_insert(U256::ZERO) += tx.amount; + true + } else { + // Insufficient balance + false + } + } else { + // Just a read operation + true + }; + + results.push(TxnExecutionResult { + txn_idx: idx as TxnIndex, + gas_used: tx.gas, + success, + }); + } + + (results, balances) +} + +/// Create test addresses. 
+fn test_addresses() -> (Address, Address, Address) { + let a = Address::from([1u8; 20]); + let b = Address::from([2u8; 20]); + let c = Address::from([3u8; 20]); + (a, b, c) +} + +fn balance_key(addr: Address) -> EvmStateKey { + EvmStateKey::Balance(addr) +} + +fn balance_value(v: U256) -> EvmStateValue { + EvmStateValue::Balance(v) +} + +// ============================================================================= +// Consistency Tests +// ============================================================================= + +#[test] +fn test_parallel_matches_sequential_independent_transfers() { + let (addr_a, addr_b, addr_c) = test_addresses(); + + // Initial state: A has 1000, B has 500, C has 0 + let mut initial_balances = HashMap::new(); + initial_balances.insert(addr_a, U256::from(1000)); + initial_balances.insert(addr_b, U256::from(500)); + initial_balances.insert(addr_c, U256::ZERO); + + // Independent transfers (no conflicts): + // A -> B: 100 + // B -> C: 50 + // These are independent because we're not implementing actual balance checks + // in the mock - we're testing the execution infrastructure + let transactions = vec![ + TestTransaction::transfer(addr_a, addr_b, U256::from(100)), + TestTransaction::transfer(addr_b, addr_c, U256::from(50)), + ]; + + // Execute sequentially + let (seq_results, _seq_balances) = execute_sequential(&transactions, &initial_balances); + + // Execute in parallel + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + let db = MockDb::default() + .with_balance(addr_a, U256::from(1000)) + .with_balance(addr_b, U256::from(500)); + + let par_result = executor.execute(&transactions, &db, |_txn_idx, tx, view| { + let mut writes = WriteSet::new(); + + // Read sender balance from MVHashMap or base + let from_key = balance_key(tx.from); + let from_balance = match view.read_from_mvhashmap(&from_key) { + Ok(Some((EvmStateValue::Balance(b), _))) => b, + Ok(None) => { + let b = db.get_balance(&tx.from); + 
view.record_base_read(from_key.clone(), balance_value(b)); + b + } + _ => U256::ZERO, + }; + + let success = if let Some(to) = tx.to { + if from_balance >= tx.amount { + // Write updated balances + writes.write_balance(tx.from, from_balance - tx.amount); + + // Read recipient balance + let to_key = balance_key(to); + let to_balance = match view.read_from_mvhashmap(&to_key) { + Ok(Some((EvmStateValue::Balance(b), _))) => b, + Ok(None) => { + let b = db.get_balance(&to); + view.record_base_read(to_key.clone(), balance_value(b)); + b + } + _ => U256::ZERO, + }; + writes.write_balance(to, to_balance + tx.amount); + true + } else { + false + } + } else { + true + }; + + (view.take_captured_reads(), writes, tx.gas, success) + }); + + // Compare results + assert_eq!(seq_results.len(), par_result.results.len()); + for (seq, par) in seq_results.iter().zip(par_result.results.iter()) { + assert_eq!(seq.txn_idx, par.txn_idx, "Transaction index mismatch"); + assert_eq!(seq.gas_used, par.gas_used, "Gas used mismatch at tx {}", seq.txn_idx); + assert_eq!(seq.success, par.success, "Success mismatch at tx {}", seq.txn_idx); + } +} + +#[test] +fn test_parallel_many_independent_transactions() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(4)); + + // Create 50 addresses, each making a transfer to a unique recipient + let transactions: Vec = (0..50) + .map(|i| { + let from = Address::from([i as u8; 20]); + let to = Address::from([(i + 100) as u8; 20]); + TestTransaction::transfer(from, to, U256::from(100)) + }) + .collect(); + + let db = MockDb::default(); + + let result = executor.execute(&transactions, &db, |_txn_idx, tx, view| { + let mut writes = WriteSet::new(); + + // Just write the transfers without complex balance logic + if let Some(to) = tx.to { + writes.write_balance(tx.from, U256::from(900)); // Assume 1000 - 100 + writes.write_balance(to, tx.amount); + } + + (view.take_captured_reads(), writes, tx.gas, true) + }); + + 
assert_eq!(result.results.len(), 50); + assert_eq!(result.stats.total_commits, 50); + // Independent transactions should have no aborts + assert_eq!(result.stats.total_aborts, 0); +} + +// ============================================================================= +// Conflict Detection Tests +// ============================================================================= + +#[test] +fn test_write_write_conflict_same_key() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + let addr = Address::from([1u8; 20]); + + // Two transactions both writing to the same address's balance + let transactions = vec![ + TestTransaction::transfer(addr, Address::from([2u8; 20]), U256::from(100)), + TestTransaction::transfer(addr, Address::from([3u8; 20]), U256::from(200)), + ]; + + let db = MockDb::default().with_balance(addr, U256::from(1000)); + let execution_count = AtomicU64::new(0); + + let result = executor.execute(&transactions, &db, |_txn_idx, tx, view| { + execution_count.fetch_add(1, Ordering::Relaxed); + + let mut writes = WriteSet::new(); + + // Read sender balance + let from_key = balance_key(tx.from); + let from_balance = match view.read_from_mvhashmap(&from_key) { + Ok(Some((EvmStateValue::Balance(b), _))) => b, + Ok(None) => { + let b = db.get_balance(&tx.from); + view.record_base_read(from_key.clone(), balance_value(b)); + b + } + _ => db.get_balance(&tx.from), + }; + + if let Some(to) = tx.to { + // tx1 depends on tx0's write to from_balance + writes.write_balance(tx.from, from_balance - tx.amount); + writes.write_balance(to, tx.amount); + } + + (view.take_captured_reads(), writes, tx.gas, true) + }); + + // Both should eventually commit + assert_eq!(result.results.len(), 2); + assert_eq!(result.stats.total_commits, 2); + + // Both should succeed + assert!(result.results[0].success); + assert!(result.results[1].success); +} + +#[test] +fn test_read_write_conflict() { + let executor = 
BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + let (addr_a, addr_b, _) = test_addresses(); + + // tx0 writes to A's balance + // tx1 reads A's balance + // tx1 depends on tx0 + let transactions = vec![ + TestTransaction::transfer(addr_a, addr_b, U256::from(100)), + TestTransaction::read_balance(addr_a), + ]; + + let db = MockDb::default().with_balance(addr_a, U256::from(1000)); + + let result = executor.execute(&transactions, &db, |_txn_idx, tx, view| { + let mut writes = WriteSet::new(); + + // Read sender balance + let from_key = balance_key(tx.from); + let _from_balance = match view.read_from_mvhashmap(&from_key) { + Ok(Some((EvmStateValue::Balance(b), _))) => b, + Ok(None) => { + let b = db.get_balance(&tx.from); + view.record_base_read(from_key.clone(), balance_value(b)); + b + } + _ => db.get_balance(&tx.from), + }; + + if let Some(to) = tx.to { + writes.write_balance(tx.from, U256::from(900)); + writes.write_balance(to, tx.amount); + } + + (view.take_captured_reads(), writes, tx.gas, true) + }); + + assert_eq!(result.results.len(), 2); + assert_eq!(result.stats.total_commits, 2); +} + +#[test] +fn test_chain_of_dependencies() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(4)); + + // Chain: A->B, B->C, C->D + // Each transfer depends on the previous one completing + let addr_a = Address::from([1u8; 20]); + let addr_b = Address::from([2u8; 20]); + let addr_c = Address::from([3u8; 20]); + let addr_d = Address::from([4u8; 20]); + + let transactions = vec![ + TestTransaction::transfer(addr_a, addr_b, U256::from(100)), + TestTransaction::transfer(addr_b, addr_c, U256::from(50)), + TestTransaction::transfer(addr_c, addr_d, U256::from(25)), + ]; + + let db = MockDb::default() + .with_balance(addr_a, U256::from(1000)) + .with_balance(addr_b, U256::from(500)) + .with_balance(addr_c, U256::from(200)); + + let result = executor.execute(&transactions, &db, |_txn_idx, tx, view| { + let mut writes = WriteSet::new(); + + // Read sender 
balance + let from_key = balance_key(tx.from); + let from_balance = match view.read_from_mvhashmap(&from_key) { + Ok(Some((EvmStateValue::Balance(b), _))) => b, + Ok(None) => { + let b = db.get_balance(&tx.from); + view.record_base_read(from_key.clone(), balance_value(b)); + b + } + _ => db.get_balance(&tx.from), + }; + + if let Some(to) = tx.to { + // Read recipient balance + let to_key = balance_key(to); + let to_balance = match view.read_from_mvhashmap(&to_key) { + Ok(Some((EvmStateValue::Balance(b), _))) => b, + Ok(None) => { + let b = db.get_balance(&to); + view.record_base_read(to_key.clone(), balance_value(b)); + b + } + _ => db.get_balance(&to), + }; + + writes.write_balance(tx.from, from_balance - tx.amount); + writes.write_balance(to, to_balance + tx.amount); + } + + (view.take_captured_reads(), writes, tx.gas, true) + }); + + // All should commit + assert_eq!(result.results.len(), 3); + assert_eq!(result.stats.total_commits, 3); + + // All should succeed + for r in &result.results { + assert!(r.success); + } +} + +// ============================================================================= +// Edge Case Tests +// ============================================================================= + +#[test] +fn test_single_transaction() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(4)); + let (addr_a, addr_b, _) = test_addresses(); + + let transactions = vec![ + TestTransaction::transfer(addr_a, addr_b, U256::from(100)), + ]; + + let db = MockDb::default().with_balance(addr_a, U256::from(1000)); + + let result = executor.execute(&transactions, &db, |_, tx, view| { + let mut writes = WriteSet::new(); + if let Some(to) = tx.to { + writes.write_balance(tx.from, U256::from(900)); + writes.write_balance(to, tx.amount); + } + (view.take_captured_reads(), writes, tx.gas, true) + }); + + assert_eq!(result.results.len(), 1); + assert_eq!(result.stats.total_commits, 1); + assert_eq!(result.stats.total_aborts, 0); +} + +#[test] +fn 
test_empty_transactions() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(4)); + let transactions: Vec = vec![]; + let db = MockDb::default(); + + let result = executor.execute(&transactions, &db, |_, _, view| { + (view.take_captured_reads(), WriteSet::new(), 21000, true) + }); + + assert_eq!(result.results.len(), 0); + assert_eq!(result.stats.total_commits, 0); +} + +#[test] +fn test_all_transactions_fail() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + + let transactions: Vec = (0..5) + .map(|i| { + let from = Address::from([i as u8; 20]); + let to = Address::from([(i + 100) as u8; 20]); + TestTransaction::transfer(from, to, U256::from(100)) + }) + .collect(); + + let db = MockDb::default(); // No balances, all transfers should "fail" + + let result = executor.execute(&transactions, &db, |_, _, view| { + // All transactions fail (insufficient balance) + (view.take_captured_reads(), WriteSet::new(), 21000, false) + }); + + assert_eq!(result.results.len(), 5); + assert_eq!(result.stats.total_commits, 5); + + // All should be marked as failed + for r in &result.results { + assert!(!r.success); + } +} + +#[test] +fn test_mixed_success_and_failure() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + + // Odd-indexed transactions succeed, even-indexed fail + let transactions: Vec = (0..10) + .map(|i| { + let from = Address::from([i as u8; 20]); + let to = Address::from([(i + 100) as u8; 20]); + TestTransaction::transfer(from, to, U256::from(100)) + }) + .collect(); + + let db = MockDb::default(); + + let result = executor.execute(&transactions, &db, |txn_idx, _, view| { + let success = txn_idx % 2 == 1; // Odd indices succeed + let mut writes = WriteSet::new(); + if success { + writes.write_balance(Address::from([txn_idx as u8; 20]), U256::from(900)); + } + (view.take_captured_reads(), writes, 21000, success) + }); + + assert_eq!(result.results.len(), 10); + + for (i, r) in 
result.results.iter().enumerate() { + assert_eq!(r.success, i % 2 == 1, "Mismatch at index {}", i); + } +} + +// ============================================================================= +// Storage Slot Tests +// ============================================================================= + +#[test] +fn test_storage_slot_conflicts() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(2)); + let contract = Address::from([42u8; 20]); + let slot = U256::from(1); + + // Two transactions writing to the same storage slot + #[derive(Debug, Clone)] + struct StorageWriteTx { + contract: Address, + slot: U256, + value: U256, + } + + let transactions = vec![ + StorageWriteTx { contract, slot, value: U256::from(100) }, + StorageWriteTx { contract, slot, value: U256::from(200) }, + ]; + + let db = MockDb::default(); + + let result = executor.execute(&transactions, &db, |_txn_idx, tx, view| { + let mut writes = WriteSet::new(); + + // Read current storage value + let key = EvmStateKey::Storage(tx.contract, tx.slot); + let _ = view.read_from_mvhashmap(&key); + + // Write new value + writes.write_storage(tx.contract, tx.slot, tx.value); + + (view.take_captured_reads(), writes, 21000, true) + }); + + assert_eq!(result.results.len(), 2); + assert_eq!(result.stats.total_commits, 2); +} + +#[test] +fn test_multiple_storage_slots_no_conflict() { + let executor = BlockStmExecutor::new(BlockStmConfig::with_threads(4)); + let contract = Address::from([42u8; 20]); + + // Each transaction writes to a different storage slot + #[derive(Debug, Clone)] + struct StorageWriteTx { + slot: U256, + value: U256, + } + + let transactions: Vec = (0..20) + .map(|i| StorageWriteTx { + slot: U256::from(i), + value: U256::from(i * 100), + }) + .collect(); + + let db = MockDb::default(); + + let result = executor.execute(&transactions, &db, |_txn_idx, tx, view| { + let mut writes = WriteSet::new(); + writes.write_storage(contract, tx.slot, tx.value); + (view.take_captured_reads(), 
writes, 21000, true) + }); + + assert_eq!(result.results.len(), 20); + assert_eq!(result.stats.total_commits, 20); + assert_eq!(result.stats.total_aborts, 0); // No conflicts +} + +// ============================================================================= +// Balance Delta Tests (Fee Accumulation) +// ============================================================================= + +/// Test that many transactions can add deltas to the same address without conflicts. +/// This simulates parallel fee accumulation to coinbase. +#[test] +fn test_balance_delta_parallel_accumulation_no_conflicts() { + use crate::block_stm::mv_hashmap::MVHashMap; + use crate::block_stm::scheduler::Scheduler; + use crate::block_stm::captured_reads::CapturedReads; + + let num_txns = 100; + let coinbase = Address::from([0xCB; 20]); + let mv = MVHashMap::new(num_txns); + let scheduler = Scheduler::new(num_txns); + + // Each transaction adds a fee delta - NO reads of coinbase balance + for i in 0..num_txns { + let txn_idx = i as TxnIndex; + scheduler.start_execution(txn_idx, 0); + + let mut writes = WriteSet::new(); + // Add a fee delta (commutative operation) + writes.add_balance_delta(coinbase, U256::from(100 + i)); + + // No reads captured - just delta writes + let reads = CapturedReads::new(); + + scheduler.finish_execution(txn_idx, 0, reads, writes, 21000, true, &mv); + } + + // All transactions should commit with NO aborts + let stats = scheduler.get_stats(); + assert_eq!(stats.total_commits, num_txns); + assert_eq!(stats.total_aborts, 0, "Balance deltas should NOT cause conflicts"); + + // Verify total delta sum + let total = mv.get_committed_delta_sum(&coinbase); + let expected: u64 = (0..num_txns).map(|i| 100 + i as u64).sum(); + assert_eq!(total, U256::from(expected)); +} + +/// Test that regular balance writes still cause conflicts (deltas are separate). 
#[test]
fn test_balance_regular_writes_still_conflict() {
    use crate::block_stm::mv_hashmap::MVHashMap;

    let coinbase = Address::from([0xCB; 20]);
    let mv = MVHashMap::new(10);

    // Tx0 performs a *regular* balance write (not a commutative delta),
    // so it goes through the ordinary multi-version write path.
    mv.write(0, 0, EvmStateKey::Balance(coinbase), EvmStateValue::Balance(U256::from(1000)));

    // Tx1's read must observe Tx0's write via the MVHashMap, i.e. regular
    // writes still create an ordinary read-write dependency on Tx0.
    let result = mv.read(1, &EvmStateKey::Balance(coinbase));

    match result {
        crate::block_stm::types::ReadResult::Value { version, .. } => {
            assert_eq!(version.txn_idx, 0);
        }
        _ => panic!("Expected to read from Tx0"),
    }
}

/// Test that delta resolution correctly sums all prior deltas.
#[test]
fn test_balance_delta_resolution_correctness() {
    use crate::block_stm::mv_hashmap::MVHashMap;

    let coinbase = Address::from([0xCB; 20]);
    let mv = MVHashMap::new(100);

    // Transaction i (incarnation 0) contributes a delta of (i + 1).
    let num_deltas = 50;
    for i in 0..num_deltas {
        mv.write_balance_delta(coinbase, i as TxnIndex, 0, U256::from(i + 1));
    }

    // A reader at index `reader_idx` sees only deltas from strictly earlier
    // transactions 0..reader_idx, i.e. the amounts 1..=reader_idx, whose sum
    // is reader_idx * (reader_idx + 1) / 2.
    for reader_idx in 1..=num_deltas {
        let result = mv.resolve_balance(coinbase, reader_idx as TxnIndex, U256::ZERO, None).unwrap();

        let expected: u64 = (1..reader_idx as u64 + 1).sum();
        assert_eq!(
            result.total_delta,
            U256::from(expected),
            "Reader {} should see delta sum {}",
            reader_idx,
            expected
        );
    }
}

/// Test mixed scenario: some txns write deltas, one reads balance.
/// Only the reader should have dependencies, delta writers should not conflict.
#[test]
fn test_balance_delta_mixed_read_and_delta_writes() {
    use crate::block_stm::mv_hashmap::MVHashMap;
    use crate::block_stm::scheduler::Scheduler;
    use crate::block_stm::captured_reads::CapturedReads;

    let coinbase = Address::from([0xCB; 20]);
    let mv = MVHashMap::new(10);
    let scheduler = Scheduler::new(10);

    // Tx0-4: delta-only writers — no reads captured, so nothing to invalidate.
    for i in 0..5 {
        scheduler.start_execution(i, 0);

        let mut writes = WriteSet::new();
        writes.add_balance_delta(coinbase, U256::from(100));

        let reads = CapturedReads::new();
        scheduler.finish_execution(i, 0, reads, writes, 21000, true, &mv);
    }

    // Tx5: reads the balance, which forces resolution of the pending deltas
    // and records the contributing versions as dependencies.
    scheduler.start_execution(5, 0);

    let base_value = U256::from(1000);
    let resolved = mv.resolve_balance(coinbase, 5, base_value, None).unwrap();

    let mut reads = CapturedReads::new();
    reads.capture_resolved_balance(coinbase, resolved.clone());

    let writes = WriteSet::new();
    scheduler.finish_execution(5, 0, reads, writes, 21000, true, &mv);

    // Tx6-9: more delta writers scheduled after the reader; their deltas are
    // not visible to Tx5 and must not invalidate it.
    for i in 6..10 {
        scheduler.start_execution(i, 0);

        let mut writes = WriteSet::new();
        writes.add_balance_delta(coinbase, U256::from(50));

        let reads = CapturedReads::new();
        scheduler.finish_execution(i, 0, reads, writes, 21000, true, &mv);
    }

    // All ten transactions commit without a single abort.
    let stats = scheduler.get_stats();
    assert_eq!(stats.total_commits, 10);
    assert_eq!(stats.total_aborts, 0);

    // The reader observed base (1000) plus exactly the five earlier +100
    // deltas, attributed to five distinct contributor versions.
    assert_eq!(resolved.resolved_value, U256::from(1500)); // 1000 + 5*100
    assert_eq!(resolved.contributors.len(), 5);
}

/// Stress test: Parallel execution with many delta writes and occasional reads.
#[test]
fn test_balance_delta_stress_parallel() {
    use std::sync::Arc;
    use std::thread;

    use crate::block_stm::mv_hashmap::MVHashMap;
    use crate::block_stm::scheduler::Scheduler;
    use crate::block_stm::captured_reads::CapturedReads;

    let num_txns = 100;
    let coinbase = Address::from([0xCB; 20]);
    let mv = Arc::new(MVHashMap::new(num_txns));
    let scheduler = Arc::new(Scheduler::new(num_txns));

    let num_threads = 4;
    let mut handles = Vec::new();

    // Spawn worker threads that pull tasks from the shared scheduler,
    // mirroring how the real executor drives Block-STM.
    for thread_id in 0..num_threads {
        let mv = Arc::clone(&mv);
        let scheduler = Arc::clone(&scheduler);

        handles.push(thread::spawn(move || {
            loop {
                let task = scheduler.next_task();
                match task {
                    crate::block_stm::types::Task::Execute { txn_idx, incarnation } => {
                        scheduler.start_execution(txn_idx, incarnation);

                        let mut writes = WriteSet::new();
                        let mut reads = CapturedReads::new();

                        // Every 10th transaction reads (and therefore resolves)
                        // the coinbase balance; all others only add deltas.
                        if txn_idx % 10 == 9 {
                            let base = U256::from(1000);
                            match mv.resolve_balance(coinbase, txn_idx, base, None) {
                                Ok(resolved) => {
                                    reads.capture_resolved_balance(coinbase, resolved);
                                }
                                Err(_aborted) => {
                                    // NOTE(review): on a failed resolve we skip
                                    // finish_execution and loop back to next_task —
                                    // assumes the scheduler re-issues this txn;
                                    // confirm this cannot livelock.
                                    continue;
                                }
                            }
                        } else {
                            // Delta-only writers: commutative, conflict-free.
                            writes.add_balance_delta(coinbase, U256::from(txn_idx as u64 + 1));
                        }

                        scheduler.finish_execution(txn_idx, incarnation, reads, writes, 21000, true, &mv);
                    }
                    crate::block_stm::types::Task::Done => break,
                    crate::block_stm::types::Task::NoTask => {
                        // Nothing runnable right now; back off and retry.
                        thread::yield_now();
                    }
                    _ => {}
                }
            }
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }

    let stats = scheduler.get_stats();
    assert_eq!(stats.total_commits, num_txns);

    // Delta writers (90 of them) should have no aborts; the 10 readers may
    // abort and re-execute, so only the commit count is asserted exactly —
    // abort/execution counts are printed for inspection.
    println!(
        "Stress test: {} commits, {} aborts, {} executions",
        stats.total_commits, stats.total_aborts, stats.total_executions
    );
}

/// Test that delta contributor re-execution properly invalidates readers.
#[test]
fn test_balance_delta_contributor_reexecution_invalidates_reader() {
    use crate::block_stm::mv_hashmap::MVHashMap;
    use crate::block_stm::scheduler::Scheduler;
    use crate::block_stm::captured_reads::CapturedReads;

    let coinbase = Address::from([0xCB; 20]);
    let mv = MVHashMap::new(10);
    let scheduler = Scheduler::new(10);

    // Tx0 (incarnation 0): contributes a +100 delta.
    scheduler.start_execution(0, 0);
    let mut writes0 = WriteSet::new();
    writes0.add_balance_delta(coinbase, U256::from(100));
    let reads0 = CapturedReads::new();
    scheduler.finish_execution(0, 0, reads0, writes0, 21000, true, &mv);

    // Tx1: resolves the balance (1000 base + 100 delta) and captures the
    // resolution, making Tx0@0 a recorded contributor dependency of Tx1.
    scheduler.start_execution(1, 0);
    let resolved = mv.resolve_balance(coinbase, 1, U256::from(1000), None).unwrap();
    assert_eq!(resolved.resolved_value, U256::from(1100));

    let mut reads1 = CapturedReads::new();
    reads1.capture_resolved_balance(coinbase, resolved);
    let writes1 = WriteSet::new();
    scheduler.finish_execution(1, 0, reads1, writes1, 21000, true, &mv);

    // Force Tx0 to abort, then re-execute it with a different delta.
    scheduler.abort(0, &mv);

    // Tx0 incarnation 1: writes delta +200 instead of +100.
    scheduler.start_execution(0, 1);
    let mut writes0_new = WriteSet::new();
    writes0_new.add_balance_delta(coinbase, U256::from(200));
    let reads0_new = CapturedReads::new();
    scheduler.finish_execution(0, 1, reads0_new, writes0_new, 21000, true, &mv);

    // Because Tx1 captured Tx0@0 as a contributor, Tx0's re-execution must
    // cascade: Tx1 is marked aborted and will be re-executed.
    let tx1_status = scheduler.get_status(1);
    assert!(
        matches!(tx1_status, crate::block_stm::types::ExecutionStatus::Aborted(_)),
        "Tx1 should be aborted because its contributor Tx0 re-executed"
    );
}

/// Test validation correctly detects when resolved balance changes.
+#[test] +fn test_balance_delta_validation_detects_changes() { + use crate::block_stm::mv_hashmap::MVHashMap; + + let coinbase = Address::from([0xCB; 20]); + let mv = MVHashMap::new(10); + + // Tx0 writes delta +100 + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + + // Tx1 resolves + let resolved_v1 = mv.resolve_balance(coinbase, 1, U256::from(1000), None).unwrap(); + assert_eq!(resolved_v1.resolved_value, U256::from(1100)); + + // Now Tx0 re-executes with different delta + mv.delete_deltas(0); + mv.write_balance_delta(coinbase, 0, 1, U256::from(200)); // incarnation 1 + + // Tx1 tries to validate - should detect the change + let resolved_v2 = mv.resolve_balance(coinbase, 1, U256::from(1000), None).unwrap(); + + // The new resolution is different + assert_ne!(resolved_v1.resolved_value, resolved_v2.resolved_value); + assert_eq!(resolved_v2.resolved_value, U256::from(1200)); // 1000 + 200 + + // The contributor version also changed + assert_ne!(resolved_v1.contributors[0].incarnation, resolved_v2.contributors[0].incarnation); +} + +/// Test multiple addresses receiving deltas independently. 
+#[test] +fn test_balance_delta_multiple_addresses() { + use crate::block_stm::mv_hashmap::MVHashMap; + + let addr1 = Address::from([1u8; 20]); + let addr2 = Address::from([2u8; 20]); + let addr3 = Address::from([3u8; 20]); + let mv = MVHashMap::new(30); + + // 10 txns add to addr1, 10 to addr2, 10 to addr3 + for i in 0..10 { + mv.write_balance_delta(addr1, i as TxnIndex, 0, U256::from(100)); + } + for i in 10..20 { + mv.write_balance_delta(addr2, i as TxnIndex, 0, U256::from(200)); + } + for i in 20..30 { + mv.write_balance_delta(addr3, i as TxnIndex, 0, U256::from(300)); + } + + // Verify sums are independent + assert_eq!(mv.get_committed_delta_sum(&addr1), U256::from(1000)); // 10 * 100 + assert_eq!(mv.get_committed_delta_sum(&addr2), U256::from(2000)); // 10 * 200 + assert_eq!(mv.get_committed_delta_sum(&addr3), U256::from(3000)); // 10 * 300 +} + +/// Test that aborted delta shows up as error during resolution. +#[test] +fn test_balance_delta_aborted_contributor_fails_resolution() { + use crate::block_stm::mv_hashmap::MVHashMap; + + let coinbase = Address::from([0xCB; 20]); + let mv = MVHashMap::new(10); + + // Tx0 writes delta + mv.write_balance_delta(coinbase, 0, 0, U256::from(100)); + + // Mark Tx0 as aborted + mv.mark_aborted(0); + + // Tx1 tries to resolve - should fail + let result = mv.resolve_balance(coinbase, 1, U256::from(1000), None); + + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), 0); // Aborted txn idx +} + diff --git a/crates/op-rbuilder/src/block_stm/types.rs b/crates/op-rbuilder/src/block_stm/types.rs new file mode 100644 index 00000000..0229e796 --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/types.rs @@ -0,0 +1,255 @@ +//! Core types for Block-STM parallel execution engine. +//! +//! This module defines the fundamental types used throughout the Block-STM implementation: +//! - Transaction indexing and versioning +//! - EVM state key abstraction +//! 
- Read/write tracking types + +use alloy_primitives::{Address, Bytes, B256, U256}; +use std::fmt; + +/// Index of a transaction within a block (0-based). +pub type TxnIndex = u32; + +/// Incarnation number - incremented each time a transaction is re-executed. +/// Starts at 0 for the first execution. +pub type Incarnation = u32; + +/// A version uniquely identifies a specific execution of a transaction. +/// Consists of (transaction index, incarnation number). +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct Version { + pub txn_idx: TxnIndex, + pub incarnation: Incarnation, +} + +impl Version { + pub fn new(txn_idx: TxnIndex, incarnation: Incarnation) -> Self { + Self { txn_idx, incarnation } + } +} + +impl fmt::Display for Version { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "(txn={}, inc={})", self.txn_idx, self.incarnation) + } +} + +/// Represents a key in the EVM state that can be read or written. +/// This abstracts over the different types of state in the EVM. +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum EvmStateKey { + /// Account balance: key is the address + Balance(Address), + /// Account nonce: key is the address + Nonce(Address), + /// Account code hash: key is the address + CodeHash(Address), + /// Account code: key is the address + Code(Address), + /// Storage slot: key is (address, slot) + Storage(Address, U256), +} + +impl fmt::Display for EvmStateKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + EvmStateKey::Balance(addr) => write!(f, "Balance({})", addr), + EvmStateKey::Nonce(addr) => write!(f, "Nonce({})", addr), + EvmStateKey::CodeHash(addr) => write!(f, "CodeHash({})", addr), + EvmStateKey::Code(addr) => write!(f, "Code({})", addr), + EvmStateKey::Storage(addr, slot) => write!(f, "Storage({}, {})", addr, slot), + } + } +} + +/// Represents a value in the EVM state. 
+/// Encapsulates all possible value types that can be stored. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum EvmStateValue { + /// Balance value (U256) + Balance(U256), + /// Nonce value (u64) + Nonce(u64), + /// Code hash + CodeHash(B256), + /// Contract bytecode + Code(Bytes), + /// Storage slot value + Storage(U256), + /// Account does not exist (for distinguishing "not found" from "zero") + NotFound, +} + +impl EvmStateValue { + /// Returns true if this represents a non-existent account/value + pub fn is_not_found(&self) -> bool { + matches!(self, EvmStateValue::NotFound) + } +} + +/// Result of reading from the MVHashMap. +#[derive(Debug, Clone)] +pub enum ReadResult { + /// Value was written by a previous transaction at this version + Value { + value: EvmStateValue, + version: Version, + }, + /// Value is not in MVHashMap, should read from base state. + /// The reader should register itself as dependent on this key. + NotFound, + /// A previous transaction wrote to this key but was aborted. + /// Reader should wait or abort. + Aborted { + /// The transaction that aborted + txn_idx: TxnIndex, + }, +} + +/// Represents a read operation recorded during transaction execution. +#[derive(Debug, Clone)] +pub struct RecordedRead { + /// The key that was read + pub key: EvmStateKey, + /// The version from which the value was read (None if from base state) + pub version: Option, + /// The value that was observed + pub value: EvmStateValue, +} + +/// A balance delta (fee increment) that can be accumulated without conflicts. +/// +/// Balance deltas are commutative operations - they can be applied in any order +/// with the same result. This enables parallel fee accumulation to coinbase +/// without creating read-write conflicts. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BalanceDelta { + /// The address receiving the balance increment + pub address: Address, + /// The amount to add to the balance + pub delta: U256, +} + +impl BalanceDelta { + /// Create a new balance delta. + pub fn new(address: Address, delta: U256) -> Self { + Self { address, delta } + } +} + +/// Entry for a balance delta with version tracking. +#[derive(Debug, Clone)] +pub struct VersionedDelta { + /// The version (txn_idx, incarnation) that wrote this delta + pub version: Version, + /// The delta amount + pub delta: U256, +} + +impl VersionedDelta { + /// Create a new versioned delta. + pub fn new(version: Version, delta: U256) -> Self { + Self { version, delta } + } +} + +/// Result of resolving a balance with pending deltas. +#[derive(Debug, Clone)] +pub struct ResolvedBalance { + /// The base value (from storage or earlier write) + pub base_value: U256, + /// The version of the base value (None if from storage) + pub base_version: Option, + /// Sum of all deltas applied + pub total_delta: U256, + /// The final resolved value (base + total_delta) + pub resolved_value: U256, + /// All versions that contributed deltas (for dependency tracking) + pub contributors: Vec, +} + +/// Represents a write operation to be committed. +#[derive(Debug, Clone)] +pub struct WriteOp { + /// The key being written + pub key: EvmStateKey, + /// The new value + pub value: EvmStateValue, +} + +/// Status of a transaction in the scheduler. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ExecutionStatus { + /// Transaction is waiting to be scheduled for execution + PendingScheduling, + /// Transaction is currently being executed + Executing(Incarnation), + /// Transaction finished execution successfully + Executed(Incarnation), + /// Transaction was aborted and needs re-execution + Aborted(Incarnation), + /// Transaction has been committed (finalized) + Committed, +} + +impl ExecutionStatus { + /// Returns the incarnation if the status has one + pub fn incarnation(&self) -> Option { + match self { + ExecutionStatus::Executing(inc) + | ExecutionStatus::Executed(inc) + | ExecutionStatus::Aborted(inc) => Some(*inc), + ExecutionStatus::PendingScheduling | ExecutionStatus::Committed => None, + } + } +} + +/// Task type for worker threads. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Task { + /// Execute transaction at given index with given incarnation + Execute { txn_idx: TxnIndex, incarnation: Incarnation }, + /// Validate transaction at given index + Validate { txn_idx: TxnIndex }, + /// No more tasks available (workers should check for completion) + NoTask, + /// All work is done + Done, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_version_ordering() { + let v1 = Version::new(0, 0); + let v2 = Version::new(0, 1); + let v3 = Version::new(1, 0); + + assert!(v1 < v2); + assert!(v2 < v3); + assert!(v1 < v3); + } + + #[test] + fn test_evm_state_key_display() { + let addr = Address::ZERO; + let key = EvmStateKey::Balance(addr); + assert!(key.to_string().contains("Balance")); + + let storage_key = EvmStateKey::Storage(addr, U256::from(42)); + assert!(storage_key.to_string().contains("Storage")); + } + + #[test] + fn test_execution_status_incarnation() { + assert_eq!(ExecutionStatus::PendingScheduling.incarnation(), None); + assert_eq!(ExecutionStatus::Executing(5).incarnation(), Some(5)); + assert_eq!(ExecutionStatus::Executed(3).incarnation(), Some(3)); + 
assert_eq!(ExecutionStatus::Aborted(2).incarnation(), Some(2)); + assert_eq!(ExecutionStatus::Committed.incarnation(), None); + } +} + diff --git a/crates/op-rbuilder/src/block_stm/view.rs b/crates/op-rbuilder/src/block_stm/view.rs new file mode 100644 index 00000000..8504b7a8 --- /dev/null +++ b/crates/op-rbuilder/src/block_stm/view.rs @@ -0,0 +1,349 @@ +//! Versioned State View for Block-STM +//! +//! The `LatestView` provides a versioned view of state for a specific transaction. +//! It wraps the MVHashMap and base database, routing reads through the multi-version +//! data structure while tracking dependencies. +//! +//! # Read Resolution +//! +//! When reading state: +//! 1. Check MVHashMap for writes from earlier transactions +//! 2. If found, record the dependency and return the value +//! 3. If not found, read from base state and record as base dependency + +use crate::block_stm::{ + captured_reads::CapturedReads, + mv_hashmap::MVHashMap, + types::{EvmStateKey, EvmStateValue, Incarnation, ReadResult, TxnIndex, Version}, +}; +use alloy_primitives::{Address, B256, Bytes, U256}; +use parking_lot::Mutex; +use tracing::trace; + +/// Error returned when a read encounters an aborted transaction. +#[derive(Debug, Clone)] +pub struct ReadAbortedError { + /// The transaction that was aborted + pub aborted_txn_idx: TxnIndex, +} + +/// Result type for view operations. +pub type ViewResult = Result; + +/// A versioned view of state for a specific transaction. +/// +/// Provides read access to state, checking the MVHashMap first for writes +/// from earlier transactions, then falling back to base state. 
+pub struct LatestView<'a, BaseDB> { + /// The transaction index this view is for + txn_idx: TxnIndex, + /// The incarnation of this execution + incarnation: Incarnation, + /// The multi-version hash map with concurrent writes + mv_hashmap: &'a MVHashMap, + /// The base database for reads not in MVHashMap + base_db: &'a BaseDB, + /// Captured reads for this transaction (interior mutability for tracking) + captured_reads: Mutex, +} + +impl<'a, BaseDB> LatestView<'a, BaseDB> { + /// Create a new view for a transaction. + pub fn new( + txn_idx: TxnIndex, + incarnation: Incarnation, + mv_hashmap: &'a MVHashMap, + base_db: &'a BaseDB, + ) -> Self { + Self { + txn_idx, + incarnation, + mv_hashmap, + base_db, + captured_reads: Mutex::new(CapturedReads::new()), + } + } + + /// Get the transaction index. + pub fn txn_idx(&self) -> TxnIndex { + self.txn_idx + } + + /// Get the incarnation. + pub fn incarnation(&self) -> Incarnation { + self.incarnation + } + + /// Take the captured reads (consumes the internal state). + pub fn take_captured_reads(&self) -> CapturedReads { + std::mem::take(&mut *self.captured_reads.lock()) + } + + /// Get a reference to the base database. + pub fn base_db(&self) -> &'a BaseDB { + self.base_db + } + + /// Read a value from the versioned state. + /// + /// Returns the value if found, or an error if an aborted transaction was encountered. + /// If the key is not in MVHashMap, the caller should read from base state + /// and call `record_base_read` with the result. 
+ pub fn read_from_mvhashmap(&self, key: &EvmStateKey) -> ViewResult> { + match self.mv_hashmap.read(self.txn_idx, key) { + ReadResult::Value { value, version } => { + trace!( + txn_idx = self.txn_idx, + incarnation = self.incarnation, + key = %key, + source_txn = version.txn_idx, + "View read from MVHashMap" + ); + + // Record the read + self.captured_reads + .lock() + .capture_read(key.clone(), version, value.clone()); + + Ok(Some((value, version))) + } + ReadResult::NotFound => { + trace!( + txn_idx = self.txn_idx, + incarnation = self.incarnation, + key = %key, + "View read - not in MVHashMap, will read from base" + ); + Ok(None) + } + ReadResult::Aborted { txn_idx: aborted_txn_idx } => { + trace!( + txn_idx = self.txn_idx, + incarnation = self.incarnation, + key = %key, + aborted_txn = aborted_txn_idx, + "View read - encountered aborted transaction" + ); + Err(ReadAbortedError { aborted_txn_idx }) + } + } + } + + /// Record a read from base state (when MVHashMap doesn't have the value). + pub fn record_base_read(&self, key: EvmStateKey, value: EvmStateValue) { + trace!( + txn_idx = self.txn_idx, + incarnation = self.incarnation, + key = %key, + "View recording base state read" + ); + self.captured_reads.lock().capture_base_read(key, value); + } +} + +/// Write set collected during transaction execution. +#[derive(Debug, Default)] +pub struct WriteSet { + /// The writes to be applied (regular state changes) + writes: Vec<(EvmStateKey, EvmStateValue)>, + /// Balance deltas (commutative fee increments) + /// These are handled separately to allow parallel accumulation + balance_deltas: Vec<(Address, U256)>, +} + +impl WriteSet { + /// Create a new empty write set. + pub fn new() -> Self { + Self { + writes: Vec::new(), + balance_deltas: Vec::new(), + } + } + + /// Add a write to the set. + pub fn write(&mut self, key: EvmStateKey, value: EvmStateValue) { + self.writes.push((key, value)); + } + + /// Record a balance write. 
+ pub fn write_balance(&mut self, address: Address, balance: U256) { + self.write(EvmStateKey::Balance(address), EvmStateValue::Balance(balance)); + } + + /// Record a nonce write. + pub fn write_nonce(&mut self, address: Address, nonce: u64) { + self.write(EvmStateKey::Nonce(address), EvmStateValue::Nonce(nonce)); + } + + /// Record a code write. + pub fn write_code(&mut self, address: Address, code: Bytes) { + self.write(EvmStateKey::Code(address), EvmStateValue::Code(code)); + } + + /// Record a code hash write. + pub fn write_code_hash(&mut self, address: Address, hash: B256) { + self.write(EvmStateKey::CodeHash(address), EvmStateValue::CodeHash(hash)); + } + + /// Record a storage write. + pub fn write_storage(&mut self, address: Address, slot: U256, value: U256) { + self.write( + EvmStateKey::Storage(address, slot), + EvmStateValue::Storage(value), + ); + } + + /// Add a balance delta (commutative fee increment). + /// + /// Balance deltas are different from regular writes - they can be + /// accumulated in parallel without conflicts. Only when the balance + /// is read do they need to be resolved. + pub fn add_balance_delta(&mut self, address: Address, delta: U256) { + self.balance_deltas.push((address, delta)); + } + + /// Consume the write set and return the regular writes. + pub fn into_writes(self) -> Vec<(EvmStateKey, EvmStateValue)> { + self.writes + } + + /// Consume the write set and return both regular writes and balance deltas. + pub fn into_parts(self) -> (Vec<(EvmStateKey, EvmStateValue)>, Vec<(Address, U256)>) { + (self.writes, self.balance_deltas) + } + + /// Get the balance deltas. + pub fn balance_deltas(&self) -> &[(Address, U256)] { + &self.balance_deltas + } + + /// Get the number of regular writes. + pub fn len(&self) -> usize { + self.writes.len() + } + + /// Get the number of balance deltas. + pub fn num_deltas(&self) -> usize { + self.balance_deltas.len() + } + + /// Check if empty (no writes or deltas). 
+ pub fn is_empty(&self) -> bool { + self.writes.is_empty() && self.balance_deltas.is_empty() + } +} + +/// Execution output from a single transaction. +#[derive(Debug)] +pub struct TxnOutput { + /// The read set (dependencies) + pub reads: CapturedReads, + /// The write set + pub writes: WriteSet, + /// Gas used + pub gas_used: u64, + /// Whether the transaction succeeded + pub success: bool, +} + +impl TxnOutput { + /// Create a new transaction output. + pub fn new(reads: CapturedReads, writes: WriteSet, gas_used: u64, success: bool) -> Self { + Self { + reads, + writes, + gas_used, + success, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::block_stm::mv_hashmap::MVHashMap; + + struct MockBaseDb; + + fn test_key(slot: u64) -> EvmStateKey { + EvmStateKey::Storage(Address::ZERO, U256::from(slot)) + } + + fn test_value(v: u64) -> EvmStateValue { + EvmStateValue::Storage(U256::from(v)) + } + + #[test] + fn test_view_read_from_mvhashmap() { + let mv = MVHashMap::new(10); + let base = MockBaseDb; + let key = test_key(1); + let value = test_value(42); + + // Transaction 0 writes + mv.write(0, 0, key.clone(), value.clone()); + + // Transaction 1's view reads + let view = LatestView::new(1, 0, &mv, &base); + let result = view.read_from_mvhashmap(&key).unwrap(); + + assert!(result.is_some()); + let (read_value, version) = result.unwrap(); + assert_eq!(read_value, value); + assert_eq!(version.txn_idx, 0); + } + + #[test] + fn test_view_read_not_found() { + let mv = MVHashMap::new(10); + let base = MockBaseDb; + let key = test_key(1); + + // No writes yet + let view = LatestView::new(1, 0, &mv, &base); + let result = view.read_from_mvhashmap(&key).unwrap(); + + assert!(result.is_none()); + } + + #[test] + fn test_view_captures_reads() { + let mv = MVHashMap::new(10); + let base = MockBaseDb; + let key1 = test_key(1); + let key2 = test_key(2); + let value1 = test_value(100); + + // tx0 writes to key1 + mv.write(0, 0, key1.clone(), value1.clone()); + + // 
tx2's view + let view = LatestView::new(2, 0, &mv, &base); + + // Read from MVHashMap + let _ = view.read_from_mvhashmap(&key1); + + // Record a base read + view.record_base_read(key2.clone(), test_value(200)); + + // Check captured reads + let reads = view.take_captured_reads(); + assert_eq!(reads.len(), 2); + assert!(reads.depends_on(0)); // Depends on tx0 for key1 + } + + #[test] + fn test_write_set() { + let mut ws = WriteSet::new(); + + ws.write_balance(Address::ZERO, U256::from(1000)); + ws.write_nonce(Address::ZERO, 5); + ws.write_storage(Address::ZERO, U256::from(1), U256::from(42)); + + assert_eq!(ws.len(), 3); + + let writes = ws.into_writes(); + assert_eq!(writes.len(), 3); + } +} + diff --git a/crates/op-rbuilder/src/builders/builder_tx.rs b/crates/op-rbuilder/src/builders/builder_tx.rs index 590ae1b2..79cb106c 100644 --- a/crates/op-rbuilder/src/builders/builder_tx.rs +++ b/crates/op-rbuilder/src/builders/builder_tx.rs @@ -24,8 +24,7 @@ use reth_rpc_api::eth::{EthTxEnvError, transaction::TryIntoTxEnv}; use revm::{ DatabaseCommit, DatabaseRef, context::{ - ContextTr, - result::{EVMError, ExecutionResult, ResultAndState}, + ContextTr, inner::LazyEvmStateHandle, result::{EVMError, ExecutionResult, ResultAndState} }, inspector::NoOpInspector, state::Account, @@ -225,6 +224,7 @@ pub trait BuilderTransactions { pub address_gas_limiter: AddressGasLimiter, /// Per transaction resource metering information pub resource_metering: ResourceMetering, + /// Number of parallel threads for transaction execution. 
+ pub parallel_threads: usize, } impl OpPayloadBuilderCtx { @@ -336,6 +343,7 @@ impl OpPayloadBuilderCtx { return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))); } }; + let state = LazyEvmStateHandle(state).resolve_full_state(evm.db_mut()).unwrap(); // add gas used by the transaction to cumulative gas used, before creating the receipt let gas_used = result.gas_used(); @@ -378,7 +386,8 @@ impl OpPayloadBuilderCtx { Ok(info) } - /// Executes the given best transactions and updates the execution info. + /// Executes the given best transactions sequentially and updates the execution info. + /// Used when `parallel_threads == 1`. /// /// Returns `Ok(Some(())` if the job was cancelled. pub(super) fn execute_best_transactions( @@ -522,6 +531,8 @@ impl OpPayloadBuilderCtx { return Err(PayloadBuilderError::evm(err)); } }; + let state = LazyEvmStateHandle(state).resolve_full_state(evm.db_mut()).unwrap(); + self.metrics .tx_simulation_duration @@ -623,4 +634,522 @@ impl OpPayloadBuilderCtx { ); Ok(None) } + + + /// Executes the given best transactions in parallel using Block-STM. + /// + /// This implementation uses Block-STM for true parallel execution: + /// - Each transaction gets its own `State` + /// - Reads route through MVHashMap to see earlier transactions' writes + /// - Conflicts are detected via read/write set tracking + /// - Commits happen in transaction order + /// + /// Returns `Ok(Some(())` if the job was cancelled. 
+ pub(super) fn execute_best_transactions_parallel( + &self, + info: &mut ExecutionInfo, + db: &mut State, + best_txs: &mut (impl PayloadTxsBounds + Send), + block_gas_limit: u64, + block_da_limit: Option, + _block_da_footprint_limit: Option, + ) -> Result, PayloadBuilderError> + where + ExtraCtx: Sync, + E: Debug + Default + Send, + DB: Database + DatabaseRef + Send + Sync, + { + let num_threads = self.parallel_threads; + + let execute_txs_start_time = Instant::now(); + let base_fee = self.base_fee(); + let tx_da_limit = self.da_config.max_da_tx_size(); + + let block_attr = BlockConditionalAttributes { + number: self.block_number(), + timestamp: self.attributes().timestamp(), + }; + + // Collect candidate transactions from the iterator. + let mut candidate_txs = Vec::new(); + while let Some(tx) = best_txs.next(()) { + candidate_txs.push(tx); + } + + let num_candidates = candidate_txs.len(); + if num_candidates == 0 { + return Ok(None); + } + + info!( + target: "payload_builder", + message = "Executing best transactions (Block-STM)", + block_da_limit = ?block_da_limit, + tx_da_limit = ?tx_da_limit, + block_gas_limit = ?block_gas_limit, + num_threads = num_threads, + num_candidates = num_candidates, + ); + + // Initialize Block-STM components + let scheduler = Arc::new(Scheduler::new(num_candidates)); + let mv_hashmap = Arc::new(MVHashMap::new(num_candidates)); + + // Store execution results per transaction (for deferred commit) + let execution_results: Arc>>> = + Arc::new(Mutex::new(vec![None; num_candidates])); + + // Shared state (info still needs mutex for cumulative values, best_txs for mark_invalid) + let shared_info = Arc::new(Mutex::new(std::mem::take(info))); + let shared_best_txs = Arc::new(Mutex::new(best_txs)); + let metrics = Arc::new(ParallelExecutionMetrics::new()); + + // Shared reference to database for reads (workers only need DatabaseRef) + let db_ref: &State = &*db; + + // Spawn worker threads using Block-STM scheduler + thread::scope(|s| { + let 
num_threads = num_threads.min(num_candidates); + + for worker_id in 0..num_threads { + let scheduler = Arc::clone(&scheduler); + let mv_hashmap = Arc::clone(&mv_hashmap); + let execution_results = Arc::clone(&execution_results); + let _shared_info = Arc::clone(&shared_info); + let shared_best_txs = Arc::clone(&shared_best_txs); + let metrics = Arc::clone(&metrics); + let candidate_txs = &candidate_txs; + let evm_config = &self.evm_config; + let address_gas_limiter = &self.address_gas_limiter; + let max_gas_per_txn = self.max_gas_per_txn; + let cancelled = &self.cancel; + let evm_env = &self.evm_env; + let base_db = db_ref; // Shared reference for reads + + s.spawn(move || { + scheduler.worker_start(); + trace!(worker_id = worker_id, "Block-STM worker started"); + + loop { + // Check for cancellation + if cancelled.is_cancelled() { + trace!(worker_id = worker_id, "Cancellation detected"); + break; + } + + // Get next task from Block-STM scheduler + let task = scheduler.next_task(); + + match task { + Task::Execute { txn_idx, incarnation } => { + trace!( + worker_id = worker_id, + txn_idx = txn_idx, + incarnation = incarnation, + "Executing transaction" + ); + + scheduler.start_execution(txn_idx, incarnation); + + metrics.num_txs_considered + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + + let pool_tx = &candidate_txs[txn_idx as usize]; + let tx_da_size = pool_tx.estimated_da_size(); + let reverted_hashes = pool_tx.reverted_hashes().clone(); + let conditional = pool_tx.conditional().cloned(); + let tx = pool_tx.clone().into_consensus(); + let tx_hash = tx.tx_hash(); + + let is_bundle_tx = reverted_hashes.is_some(); + let exclude_reverting_txs = + is_bundle_tx && !reverted_hashes.unwrap().contains(&tx_hash); + + // Pre-execution checks (no DB access) + let skip_tx = if let Some(conditional) = conditional { + !conditional.matches_block_attributes(&block_attr) + } else { + false + } || tx.is_eip4844() || tx.is_deposit(); + + if skip_tx { + 
shared_best_txs.lock().unwrap() + .mark_invalid(tx.signer(), tx.nonce()); + scheduler.finish_execution( + txn_idx, incarnation, + crate::block_stm::CapturedReads::new(), + WriteSet::new(), + 0, false, &mv_hashmap, + ); + continue; + } + + // Create versioned database for this transaction + // Routes reads through MVHashMap, falls back to base state + let versioned_db = VersionedDatabase::new(txn_idx, incarnation, &mv_hashmap, base_db); + + // Create State wrapper for EVM execution + let mut tx_state = State::builder() + .with_database(versioned_db) + .build(); + + // Execute transaction with versioned state + let exec_result = { + let mut evm = evm_config + .evm_with_env(&mut tx_state, evm_env.clone()); + evm.transact(&tx) + }; + + let exec_result = match exec_result { + Ok(res) => res, + Err(err) => { + if let Some(err) = err.as_invalid_tx_err() { + if !err.is_nonce_too_low() { + shared_best_txs.lock().unwrap() + .mark_invalid(tx.signer(), tx.nonce()); + } + } + // Get captured reads even on failure + let captured_reads = tx_state.database.take_captured_reads(); + scheduler.finish_execution( + txn_idx, incarnation, + captured_reads, + WriteSet::new(), + 0, false, &mv_hashmap, + ); + continue; + } + }; + + // Check if we read from an aborted transaction + if let Some(aborted_txn) = tx_state.database.was_aborted() { + trace!( + worker_id = worker_id, + txn_idx = txn_idx, + aborted_txn = aborted_txn, + "Read from aborted transaction, will re-execute" + ); + let captured_reads = tx_state.database.take_captured_reads(); + scheduler.finish_execution( + txn_idx, incarnation, + captured_reads, + WriteSet::new(), + 0, false, &mv_hashmap, + ); + continue; + } + + let ResultAndState { result, state } = exec_result; + let gas_used = result.gas_used(); + + // Post-execution checks + let should_skip = address_gas_limiter + .consume_gas(tx.signer(), gas_used).is_err() + || (!result.is_success() && exclude_reverting_txs) + || max_gas_per_txn.map(|max| gas_used > 
max).unwrap_or(false); + + if should_skip { + shared_best_txs.lock().unwrap() + .mark_invalid(tx.signer(), tx.nonce()); + let captured_reads = tx_state.database.take_captured_reads(); + scheduler.finish_execution( + txn_idx, incarnation, + captured_reads, + WriteSet::new(), + gas_used, false, &mv_hashmap, + ); + continue; + } + + // Build write set from state changes + let mut write_set: WriteSet = WriteSet::new(); +let captured_reads = tx_state.database.take_captured_reads(); + + // Add writes only for values that actually changed + for (addr, account) in state.loaded_state.iter() { + if account.is_touched() { +// Get original values from captured reads (if available) + let original_balance = captured_reads.get_balance(*addr); + let original_nonce = captured_reads.get_nonce(*addr); + let original_code_hash = captured_reads.get_code_hash(*addr); + + // Only write balance if it changed + if original_balance != Some(account.info.balance) { + write_set.write_balance(*addr, account.info.balance); +} + + // Only write nonce if it changed + if original_nonce != Some(account.info.nonce) { + write_set.write_nonce(*addr, account.info.nonce); +} + + // Only write code hash if it changed + if original_code_hash != Some(account.info.code_hash) { + write_set.write_code_hash(*addr, account.info.code_hash); + } + + // Storage slots already have is_changed() check + for (slot, value) in account.storage.iter() { + if value.is_changed() { + write_set.write_storage(*addr, *slot, value.present_value); + } + } + } + } + + // Add pending balance increments as deltas (commutative fee accumulation) + // These are tracked separately to allow parallel accumulation + for (addr, delta) in state.pending_balance_increments.iter() { + write_set.add_balance_delta(*addr, *delta); + } + + // Get captured reads for validation + + // Store execution result for commit phase + let miner_fee = tx.effective_tip_per_gas(base_fee) + .expect("fee is always valid"); + + // Extract success and logs from 
result + let success = result.is_success(); + let logs = result.into_logs(); + + { + let mut results = execution_results.lock().unwrap(); + results[txn_idx as usize] = Some(TxExecutionResult { + tx, + state, + success, + logs, + gas_used, + tx_da_size, + miner_fee, + }); + } + + // Update metrics + metrics.num_txs_simulated + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + + if success { + metrics.num_txs_simulated_success + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + } else { + metrics.num_txs_simulated_fail + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + metrics.reverted_gas_used + .fetch_add(gas_used as i32, std::sync::atomic::Ordering::Relaxed); + } + + // Report to scheduler + scheduler.finish_execution( + txn_idx, incarnation, + captured_reads, + write_set, + gas_used, success, &mv_hashmap, + ); + + trace!( + worker_id = worker_id, + txn_idx = txn_idx, + gas_used = gas_used, + success = success, + "Transaction execution complete" + ); + } + Task::Validate { txn_idx: _ } => { + // Validation handled in scheduler's try_commit + } + Task::NoTask => { + if scheduler.is_done() { + break; + } + scheduler.wait_for_work(); + } + Task::Done => { + break; + } + } + } + + scheduler.worker_done(); + trace!(worker_id = worker_id, "Block-STM worker finished"); + }); + } + }); + + // Commit phase: apply results in order + let results = Arc::try_unwrap(execution_results) + .map_err(|_| PayloadBuilderError::Other("Failed to unwrap execution results".into()))? 
+ .into_inner() + .unwrap(); + + let mut info_guard = shared_info.lock().unwrap(); + + // only save up to committed_idx + let committed_idx = scheduler.get_commit_idx(); + let results = results.into_iter().take(committed_idx).collect::>(); + + // Process committed transactions in order + for (txn_idx, result_opt) in results.into_iter().enumerate() { + if let Some(tx_result) = result_opt { + // Update cumulative gas before building receipt + info_guard.cumulative_gas_used += tx_result.gas_used; + info_guard.cumulative_da_bytes_used += tx_result.tx_da_size; + info_guard.total_fees += U256::from(tx_result.miner_fee) * U256::from(tx_result.gas_used); + + // Build receipt with correct cumulative gas + let receipt = alloy_consensus::Receipt { + status: Eip658Value::Eip658(tx_result.success), + cumulative_gas_used: info_guard.cumulative_gas_used, + logs: tx_result.logs, + }; + + // Build OpReceipt based on transaction type + let op_receipt = match tx_result.tx.tx_type() { + OpTxType::Legacy => OpReceipt::Legacy(receipt), + OpTxType::Eip2930 => OpReceipt::Eip2930(receipt), + OpTxType::Eip1559 => OpReceipt::Eip1559(receipt), + OpTxType::Eip7702 => OpReceipt::Eip7702(receipt), + OpTxType::Deposit => { + // Deposits shouldn't come from the pool, but handle gracefully + OpReceipt::Deposit(OpDepositReceipt { + inner: receipt, + deposit_nonce: None, + deposit_receipt_version: None, + }) + } + }; + info_guard.receipts.push(op_receipt); + + // Load accounts into cache before committing + // (State requires accounts to be in cache before applying changes) + // Note: LazyEvmState has both loaded_state and pending_balance_increments + for address in tx_result.state.loaded_state.keys() { + let _ = db.load_cache_account(*address); + } + for address in tx_result.state.pending_balance_increments.keys() { + let _ = db.load_cache_account(*address); + } + + // Resolve the lazy state to get the full EvmState + // This combines loaded_state with any remaining pending_balance_increments + let 
resolved_state = revm::context::inner::LazyEvmStateHandle(tx_result.state) + .resolve_full_state(db) + .map_err(|e| PayloadBuilderError::Other( + format!("Failed to resolve state: {:?}", e).into() + ))?; + + // Commit resolved state to actual DB + db.commit(resolved_state); + + // Record transaction + info_guard.executed_senders.push(tx_result.tx.signer()); + info_guard.executed_transactions.push(tx_result.tx.into_inner()); + + trace!( + txn_idx = txn_idx, + cumulative_gas = info_guard.cumulative_gas_used, + "Committed transaction" + ); + } + } + + // Restore info + *info = std::mem::take(&mut *info_guard); + drop(info_guard); + + // Get scheduler stats + let sched_stats = scheduler.get_stats(); + debug!( + target: "payload_builder", + total_executions = sched_stats.total_executions, + total_aborts = sched_stats.total_aborts, + total_commits = sched_stats.total_commits, + "Block-STM scheduler stats" + ); + + // Read metrics from atomics + let num_txs_considered = metrics.num_txs_considered + .load(std::sync::atomic::Ordering::Relaxed); + let num_txs_simulated = metrics.num_txs_simulated + .load(std::sync::atomic::Ordering::Relaxed); + let num_txs_simulated_success = metrics.num_txs_simulated_success + .load(std::sync::atomic::Ordering::Relaxed); + let num_txs_simulated_fail = metrics.num_txs_simulated_fail + .load(std::sync::atomic::Ordering::Relaxed); + let num_bundles_reverted = metrics.num_bundles_reverted + .load(std::sync::atomic::Ordering::Relaxed); + let reverted_gas_used = metrics.reverted_gas_used + .load(std::sync::atomic::Ordering::Relaxed); + + if self.cancel.is_cancelled() { + debug!("Cancellation detected, returning"); + return Ok(Some(())); + } + + let payload_transaction_simulation_time = execute_txs_start_time.elapsed(); + self.metrics.set_payload_builder_metrics( + payload_transaction_simulation_time, + num_txs_considered as i32, + num_txs_simulated as i32, + num_txs_simulated_success as i32, + num_txs_simulated_fail as i32, + num_bundles_reverted 
as i32, + reverted_gas_used, + ); + + debug!( + target: "payload_builder", + message = "Completed executing best transactions (Block-STM)", + txs_executed = num_txs_considered, + txs_applied = num_txs_simulated_success, + txs_rejected = num_txs_simulated_fail, + bundles_reverted = num_bundles_reverted, + ); + + Ok(None) + } +} + +/// Result of executing a single transaction in parallel. +/// Stored for deferred commit during the commit phase. +#[derive(Clone)] +struct TxExecutionResult { + /// The transaction that was executed + tx: Recovered, + /// State changes from execution (using alloy's HashMap for compatibility) + state: LazyEvmState, + /// Whether execution succeeded + success: bool, + /// Logs from execution (needed for receipt building) + logs: Vec, + /// Gas used + gas_used: u64, + /// DA size + tx_da_size: u64, + /// Miner fee per gas + miner_fee: u128, +} + +/// Atomic metrics counters for parallel execution. +struct ParallelExecutionMetrics { + num_txs_considered: std::sync::atomic::AtomicUsize, + num_txs_simulated: std::sync::atomic::AtomicUsize, + num_txs_simulated_success: std::sync::atomic::AtomicUsize, + num_txs_simulated_fail: std::sync::atomic::AtomicUsize, + num_bundles_reverted: std::sync::atomic::AtomicUsize, + reverted_gas_used: std::sync::atomic::AtomicI32, +} + +impl ParallelExecutionMetrics { + fn new() -> Self { + Self { + num_txs_considered: std::sync::atomic::AtomicUsize::new(0), + num_txs_simulated: std::sync::atomic::AtomicUsize::new(0), + num_txs_simulated_success: std::sync::atomic::AtomicUsize::new(0), + num_txs_simulated_fail: std::sync::atomic::AtomicUsize::new(0), + num_bundles_reverted: std::sync::atomic::AtomicUsize::new(0), + reverted_gas_used: std::sync::atomic::AtomicI32::new(0), + } + } } diff --git a/crates/op-rbuilder/src/builders/flashblocks/ctx.rs b/crates/op-rbuilder/src/builders/flashblocks/ctx.rs index 28cbae76..bc32621b 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/ctx.rs +++ 
b/crates/op-rbuilder/src/builders/flashblocks/ctx.rs @@ -32,6 +32,8 @@ pub(super) struct OpPayloadSyncerCtx { metrics: Arc, /// Resource metering tracking resource_metering: ResourceMetering, + /// Number of parallel threads for transaction execution. + parallel_threads: usize, } impl OpPayloadSyncerCtx { @@ -52,6 +54,7 @@ impl OpPayloadSyncerCtx { max_gas_per_txn: builder_config.max_gas_per_txn, metrics, resource_metering: builder_config.resource_metering, + parallel_threads: builder_config.parallel_threads, }) } @@ -85,6 +88,7 @@ impl OpPayloadSyncerCtx { max_gas_per_txn: self.max_gas_per_txn, address_gas_limiter: AddressGasLimiter::new(GasLimiterArgs::default()), resource_metering: self.resource_metering.clone(), + parallel_threads: self.parallel_threads, } } } diff --git a/crates/op-rbuilder/src/builders/flashblocks/payload.rs b/crates/op-rbuilder/src/builders/flashblocks/payload.rs index 35e16834..2833ad58 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/payload.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/payload.rs @@ -41,7 +41,7 @@ use reth_revm::{ }; use reth_transaction_pool::TransactionPool; use reth_trie::{HashedPostState, updates::TrieUpdates}; -use revm::Database; +use revm::{Database, DatabaseRef}; use rollup_boost::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashblocksPayloadV1, }; @@ -283,6 +283,7 @@ where max_gas_per_txn: self.config.max_gas_per_txn, address_gas_limiter: self.address_gas_limiter.clone(), resource_metering: self.config.resource_metering.clone(), + parallel_threads: self.config.parallel_threads, }) } @@ -301,7 +302,7 @@ where ) -> Result<(), PayloadBuilderError> { let block_build_start_time = Instant::now(); let BuildArguments { - mut cached_reads, + cached_reads: _, // Flashblocks always uses parallel execution which doesn't use cached_reads config, cancel: block_cancel, } = args; @@ -344,7 +345,7 @@ where // 1. 
execute the pre steps and seal an early block with that let sequencer_tx_start_time = Instant::now(); let mut state = State::builder() - .with_database(cached_reads.as_db_mut(db)) + .with_database(db) .with_bundle_update() .build(); @@ -596,7 +597,7 @@ where #[allow(clippy::too_many_arguments)] async fn build_next_flashblock< - DB: Database + std::fmt::Debug + AsRef

, + DB: Database + DatabaseRef + std::fmt::Debug + AsRef

+ Send + Sync, P: StateRootProvider + HashedPostStateProvider + StorageRootProvider, >( &self, @@ -681,7 +682,10 @@ where .set(transaction_pool_fetch_time); let tx_execution_start_time = Instant::now(); - ctx.execute_best_transactions( + + // Use parallel execution only when parallel_threads > 1 + if ctx.parallel_threads > 1 { + ctx.execute_best_transactions_parallel( info, state, best_txs, @@ -690,6 +694,18 @@ where target_da_footprint_for_batch, ) .wrap_err("failed to execute best transactions")?; + } else { + // Sequential execution for single-threaded mode + ctx.execute_best_transactions( + info, + state, + best_txs, + target_gas_for_batch.min(ctx.block_gas_limit()), + target_da_for_batch, + target_da_footprint_for_batch, + ) + .wrap_err("failed to execute best transactions")?; + } // Extract last transactions let new_transactions = info.executed_transactions[info.extra.last_flashblock_index..] .to_vec() @@ -874,6 +890,7 @@ where // FCU(a) could arrive with `block_time - fb_time < delay`. 
In this case we could only produce 1 flashblock // FCU(a) could arrive with `delay < fb_time` - in this case we will shrink first flashblock // FCU(a) could arrive with `fb_time < delay < block_time - fb_time` - in this case we will issue less flashblocks + let target_time = std::time::SystemTime::UNIX_EPOCH + Duration::from_secs(timestamp) - self.config.specific.leeway_time; let now = std::time::SystemTime::now(); diff --git a/crates/op-rbuilder/src/builders/flashblocks/payload_handler.rs b/crates/op-rbuilder/src/builders/flashblocks/payload_handler.rs index 96b6f683..914d0196 100644 --- a/crates/op-rbuilder/src/builders/flashblocks/payload_handler.rs +++ b/crates/op-rbuilder/src/builders/flashblocks/payload_handler.rs @@ -19,6 +19,7 @@ use reth_optimism_node::{OpEngineTypes, OpPayloadBuilderAttributes}; use reth_optimism_payload_builder::OpBuiltPayload; use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; use reth_payload_builder::EthPayloadBuilderAttributes; +use revm::context::inner::LazyEvmStateHandle; use rollup_boost::FlashblocksPayloadV1; use std::sync::Arc; use tokio::sync::mpsc; @@ -345,6 +346,7 @@ fn execute_transactions( return Err(err).wrap_err("failed to execute flashblock transaction"); } }; + let state = LazyEvmStateHandle(state).resolve_full_state(evm.db_mut()).unwrap(); if let Some(max_gas_per_txn) = max_gas_per_txn && result.gas_used() > max_gas_per_txn diff --git a/crates/op-rbuilder/src/builders/mod.rs b/crates/op-rbuilder/src/builders/mod.rs index 48ce625b..40cfed5f 100644 --- a/crates/op-rbuilder/src/builders/mod.rs +++ b/crates/op-rbuilder/src/builders/mod.rs @@ -130,6 +130,10 @@ pub struct BuilderConfig { /// Resource metering context pub resource_metering: ResourceMetering, + + /// Number of parallel threads for transaction execution. + /// Defaults to the number of available CPU cores. 
+ pub parallel_threads: usize, } impl core::fmt::Debug for BuilderConfig { @@ -152,6 +156,7 @@ impl core::fmt::Debug for BuilderConfig { .field("specific", &self.specific) .field("max_gas_per_txn", &self.max_gas_per_txn) .field("gas_limiter_config", &self.gas_limiter_config) + .field("parallel_threads", &self.parallel_threads) .finish() } } @@ -171,6 +176,9 @@ impl Default for BuilderConfig { max_gas_per_txn: None, gas_limiter_config: GasLimiterArgs::default(), resource_metering: ResourceMetering::default(), + parallel_threads: std::thread::available_parallelism() + .map(|p| p.get()) + .unwrap_or(4), } } } @@ -197,6 +205,11 @@ where args.enable_resource_metering, args.resource_metering_buffer_size, ), + parallel_threads: args.parallel_threads.unwrap_or_else(|| { + std::thread::available_parallelism() + .map(|p| p.get()) + .unwrap_or(4) + }), specific: S::try_from(args)?, }) } diff --git a/crates/op-rbuilder/src/builders/standard/payload.rs b/crates/op-rbuilder/src/builders/standard/payload.rs index d9a74add..2b35763b 100644 --- a/crates/op-rbuilder/src/builders/standard/payload.rs +++ b/crates/op-rbuilder/src/builders/standard/payload.rs @@ -32,6 +32,7 @@ use reth_revm::{ use reth_transaction_pool::{ BestTransactions, BestTransactionsAttributes, PoolTransaction, TransactionPool, }; +use revm::DatabaseRef; use std::{sync::Arc, time::Instant}; use tokio_util::sync::CancellationToken; use tracing::{error, info, warn}; @@ -91,7 +92,7 @@ pub(super) trait OpPayloadTransactions: &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions; + ) -> impl PayloadTransactions + Send; } impl OpPayloadTransactions for () { @@ -115,7 +116,7 @@ where Pool: PoolBounds, Client: ClientBounds, BuilderTx: BuilderTransactions + Clone + Send + Sync, - Txs: OpPayloadTransactions, + Txs: OpPayloadTransactions + Send, { type Attributes = OpPayloadBuilderAttributes; type BuiltPayload = OpBuiltPayload; @@ -187,7 +188,7 @@ where /// Given build arguments including an 
Optimism client, transaction pool, /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. - fn build_payload<'a, Txs: PayloadTxsBounds>( + fn build_payload<'a, Txs: PayloadTxsBounds + Send>( &self, args: BuildArguments, OpBuiltPayload>, best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a, @@ -252,6 +253,7 @@ where max_gas_per_txn: self.config.max_gas_per_txn, address_gas_limiter: self.address_gas_limiter.clone(), resource_metering: self.config.resource_metering.clone(), + parallel_threads: self.config.parallel_threads, }; let builder = OpBuilder::new(best); @@ -261,19 +263,29 @@ where let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; let db = StateProviderDatabase::new(&state_provider); let metrics = ctx.metrics.clone(); + let parallel_threads = ctx.parallel_threads; + if ctx.attributes().no_tx_pool { + // No mempool transactions - use db directly + let state = State::builder() + .with_database(db) + .with_bundle_update() + .build(); + builder.build(state, &state_provider, ctx, self.builder_tx.clone()) + } else if parallel_threads > 1 { + // Parallel execution - use db directly (needs DatabaseRef) let state = State::builder() .with_database(db) .with_bundle_update() .build(); builder.build(state, &state_provider, ctx, self.builder_tx.clone()) } else { - // sequencer mode we can reuse cachedreads from previous runs + // Sequential execution - use cached_reads for better performance let state = State::builder() .with_database(cached_reads.as_db_mut(db)) .with_bundle_update() .build(); - builder.build(state, &state_provider, ctx, self.builder_tx.clone()) + builder.build_sequential(state, &state_provider, ctx, self.builder_tx.clone()) } .map(|out| { let total_block_building_time = block_build_start_time.elapsed(); @@ -305,12 +317,12 @@ where /// And finally /// 5. 
build the block: compute all roots (txs, state) #[derive(derive_more::Debug)] -pub(super) struct OpBuilder<'a, Txs> { +pub(super) struct OpBuilder<'a, Txs: Send> { /// Yields the best transaction to include if transactions from the mempool are allowed. best: Box Txs + 'a>, } -impl<'a, Txs> OpBuilder<'a, Txs> { +impl<'a, Txs: Send> OpBuilder<'a, Txs> { fn new(best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a) -> Self { Self { best: Box::new(best), @@ -325,12 +337,13 @@ pub(super) struct ExecutedPayload { pub info: ExecutionInfo, } -impl OpBuilder<'_, Txs> { - /// Executes the payload and returns the outcome. - pub(crate) fn execute( +impl OpBuilder<'_, Txs> { + /// Executes the payload using parallel transaction execution (Block-STM). + /// Requires `DatabaseRef` bound for parallel reads. + pub(crate) fn execute( self, state_provider: impl StateProvider, - db: &mut State, + db: &mut State, ctx: &OpPayloadBuilderCtx, builder_tx: BuilderTx, ) -> Result, PayloadBuilderError> @@ -338,7 +351,136 @@ impl OpBuilder<'_, Txs> { BuilderTx: BuilderTransactions, { let Self { best } = self; - info!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); + info!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload (parallel)"); + + // 1. apply pre-execution changes + ctx.evm_config + .builder_for_next_block(db, ctx.parent(), ctx.block_env_attributes.clone()) + .map_err(PayloadBuilderError::other)? + .apply_pre_execution_changes()?; + + let sequencer_tx_start_time = Instant::now(); + + // 3. execute sequencer transactions + let mut info = ctx.execute_sequencer_transactions(db)?; + + let sequencer_tx_time = sequencer_tx_start_time.elapsed(); + ctx.metrics.sequencer_tx_duration.record(sequencer_tx_time); + ctx.metrics.sequencer_tx_gauge.set(sequencer_tx_time); + + // 4. 
if mem pool transactions are requested we execute them + + // gas reserved for builder tx + let builder_txs = + match builder_tx.add_builder_txs(&state_provider, &mut info, ctx, db, true) { + Ok(builder_txs) => builder_txs, + Err(e) => { + error!(target: "payload_builder", "Error adding builder txs to block: {}", e); + vec![] + } + }; + + let builder_tx_gas = builder_txs.iter().fold(0, |acc, tx| acc + tx.gas_used); + + let block_gas_limit = ctx.block_gas_limit().saturating_sub(builder_tx_gas); + if block_gas_limit == 0 { + error!( + "Builder tx gas subtraction resulted in block gas limit to be 0. No transactions would be included" + ); + } + // Save some space in the block_da_limit for builder tx + let builder_tx_da_size = builder_txs.iter().fold(0, |acc, tx| acc + tx.da_size); + let block_da_limit = ctx + .da_config + .max_da_block_size() + .map(|da_limit| { + let da_limit = da_limit.saturating_sub(builder_tx_da_size); + if da_limit == 0 { + error!("Builder tx da size subtraction caused max_da_block_size to be 0. No transaction would be included."); + } + da_limit + }); + let block_da_footprint = info.da_footprint_scalar + .map(|da_footprint_scalar| { + let da_footprint_limit = ctx.block_gas_limit().saturating_sub(builder_tx_da_size.saturating_mul(da_footprint_scalar as u64)); + if da_footprint_limit == 0 { + error!("Builder tx da size subtraction caused max_da_footprint to be 0. 
No transaction would be included."); + } + da_footprint_limit + }); + + if !ctx.attributes().no_tx_pool { + let best_txs_start_time = Instant::now(); + let mut best_txs = best(ctx.best_transaction_attributes()); + let transaction_pool_fetch_time = best_txs_start_time.elapsed(); + ctx.metrics + .transaction_pool_fetch_duration + .record(transaction_pool_fetch_time); + ctx.metrics + .transaction_pool_fetch_gauge + .set(transaction_pool_fetch_time); + + if ctx + .execute_best_transactions_parallel( + &mut info, + db, + &mut best_txs, + block_gas_limit, + block_da_limit, + block_da_footprint, + )? + .is_some() + { + return Ok(BuildOutcomeKind::Cancelled); + } + } + + // Add builder tx to the block + if let Err(e) = builder_tx.add_builder_txs(&state_provider, &mut info, ctx, db, false) { + error!(target: "payload_builder", "Error adding builder txs to fallback block: {}", e); + }; + + let state_merge_start_time = Instant::now(); + + // merge all transitions into bundle state, this would apply the withdrawal balance changes + // and 4788 contract call + db.merge_transitions(BundleRetention::Reverts); + + let state_transition_merge_time = state_merge_start_time.elapsed(); + ctx.metrics + .state_transition_merge_duration + .record(state_transition_merge_time); + ctx.metrics + .state_transition_merge_gauge + .set(state_transition_merge_time); + + ctx.metrics + .payload_num_tx + .record(info.executed_transactions.len() as f64); + ctx.metrics + .payload_num_tx_gauge + .set(info.executed_transactions.len() as f64); + + let payload = ExecutedPayload { info }; + + ctx.metrics.block_built_success.increment(1); + Ok(BuildOutcomeKind::Better { payload }) + } + + /// Executes the payload using sequential transaction execution. + /// Used when `parallel_threads == 1` for better cache utilization with CachedReads. 
+ pub(crate) fn execute_sequential( + self, + state_provider: impl StateProvider, + db: &mut State, + ctx: &OpPayloadBuilderCtx, + builder_tx: BuilderTx, + ) -> Result, PayloadBuilderError> + where + BuilderTx: BuilderTransactions, + { + let Self { best } = self; + info!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload (sequential)"); // 1. apply pre-execution changes ctx.evm_config @@ -457,7 +599,7 @@ impl OpBuilder<'_, Txs> { /// Builds the payload on top of the state. pub(super) fn build( self, - state: impl Database, + state: impl Database + DatabaseRef + Send + Sync, state_provider: impl StateProvider, ctx: OpPayloadBuilderCtx, builder_tx: BuilderTx, @@ -623,4 +765,175 @@ impl OpBuilder<'_, Txs> { Ok(BuildOutcomeKind::Better { payload }) } } + + /// Builds the payload sequentially (single-threaded). + /// Used when `parallel_threads == 1` for better cache utilization with CachedReads. + pub(super) fn build_sequential( + self, + state: impl Database, + state_provider: impl StateProvider, + ctx: OpPayloadBuilderCtx, + builder_tx: BuilderTx, + ) -> Result, PayloadBuilderError> + where + BuilderTx: BuilderTransactions, + { + let mut db = State::builder() + .with_database(state) + .with_bundle_update() + .build(); + let ExecutedPayload { info } = + match self.execute_sequential(&state_provider, &mut db, &ctx, builder_tx)? { + BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, + BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled), + BuildOutcomeKind::Aborted { fees } => { + return Ok(BuildOutcomeKind::Aborted { fees }); + } + }; + + let block_number = ctx.block_number(); + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. 
+ let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(&info); + + let execution_outcome = ExecutionOutcome::new( + db.take_bundle(), + vec![info.receipts], + block_number, + Vec::new(), + ); + let receipts_root = execution_outcome + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism( + receipts, + &ctx.chain_spec, + ctx.attributes().timestamp(), + ) + }) + .expect("Number is in range"); + let logs_bloom = execution_outcome + .block_logs_bloom(block_number) + .expect("Number is in range"); + + // calculate the state root + let state_root_start_time = Instant::now(); + + let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); + let (state_root, trie_output) = { + state_provider + .state_root_with_updates(hashed_state.clone()) + .inspect_err(|err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + })? + }; + + let state_root_calculation_time = state_root_start_time.elapsed(); + ctx.metrics + .state_root_calculation_duration + .record(state_root_calculation_time); + ctx.metrics + .state_root_calculation_gauge + .set(state_root_calculation_time); + + let (withdrawals_root, requests_hash) = if ctx.is_isthmus_active() { + // withdrawals root field in block header is used for storage root of L2 predeploy + // `l2tol1-message-passer` + ( + Some( + isthmus::withdrawals_root(execution_outcome.state(), state_provider) + .map_err(PayloadBuilderError::other)?, + ), + Some(EMPTY_REQUESTS_HASH), + ) + } else if ctx.is_canyon_active() { + (Some(EMPTY_WITHDRAWALS), None) + } else { + (None, None) + }; + + // create the block header + let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); + + let extra_data = ctx.extra_data()?; + + let header = Header { + parent_hash: ctx.parent().hash(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: ctx.evm_env.block_env.beneficiary, + state_root, + 
transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp: ctx.attributes().payload_attributes.timestamp, + mix_hash: ctx.attributes().payload_attributes.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(ctx.base_fee()), + number: ctx.parent().number + 1, + gas_limit: ctx.block_gas_limit(), + difficulty: U256::ZERO, + gas_used: info.cumulative_gas_used, + extra_data, + parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, + blob_gas_used, + excess_blob_gas, + requests_hash, + }; + + // seal the block + let block = alloy_consensus::Block::::new( + header, + BlockBody { + transactions: info.executed_transactions, + ommers: vec![], + withdrawals: ctx.withdrawals().cloned(), + }, + ); + + let sealed_block = Arc::new(block.seal_slow()); + info!(target: "payload_builder", id=%ctx.attributes().payload_id(), "sealed built block"); + + // create the executed block data + let executed = ExecutedBlock { + recovered_block: Arc::new( + RecoveredBlock::>::new_sealed( + sealed_block.as_ref().clone(), + info.executed_senders, + ), + ), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_output), + }; + + let no_tx_pool = ctx.attributes().no_tx_pool; + + let payload = OpBuiltPayload::new( + ctx.payload_id(), + sealed_block, + info.total_fees, + Some(executed), + ); + + ctx.metrics + .payload_byte_size + .record(InMemorySize::size(payload.block()) as f64); + ctx.metrics + .payload_byte_size_gauge + .set(InMemorySize::size(payload.block()) as f64); + + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included + // in the payload. In other words, the payload is deterministic and we can + // freeze it once we've successfully built it. 
+ Ok(BuildOutcomeKind::Freeze(payload)) + } else { + Ok(BuildOutcomeKind::Better { payload }) + } + } } diff --git a/crates/op-rbuilder/src/lib.rs b/crates/op-rbuilder/src/lib.rs index f61c39b0..0ca3e717 100644 --- a/crates/op-rbuilder/src/lib.rs +++ b/crates/op-rbuilder/src/lib.rs @@ -1,4 +1,5 @@ pub mod args; +pub mod block_stm; pub mod builders; pub mod flashtestations; pub mod gas_limiter;