diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go
index 0ae5a3b8f1f1..6893d64a1626 100644
--- a/beacon/engine/gen_ed.go
+++ b/beacon/engine/gen_ed.go
@@ -17,24 +17,23 @@ var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableData struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
- StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
- LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
- Random common.Hash `json:"prevRandao" gencodec:"required"`
- Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
- GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
- BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
- BlockHash common.Hash `json:"blockHash" gencodec:"required"`
- Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
- Withdrawals []*types.Withdrawal `json:"withdrawals"`
- BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
- ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
- ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
+ StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
+ ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
+ LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
+ Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
+ GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
+ BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
+ BlockHash common.Hash `json:"blockHash" gencodec:"required"`
+ Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var enc ExecutableData
enc.ParentHash = e.ParentHash
@@ -59,31 +58,29 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
- enc.ExecutionWitness = e.ExecutionWitness
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableData struct {
- ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
- FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
- StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
- LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
- Random *common.Hash `json:"prevRandao" gencodec:"required"`
- Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
- GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
- BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
- BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
- Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
- Withdrawals []*types.Withdrawal `json:"withdrawals"`
- BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
- ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
- ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
+ ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
+ FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
+ StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
+ ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
+ LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
+ Random *common.Hash `json:"prevRandao" gencodec:"required"`
+ Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
+ GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
+ BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
+ BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
+ Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil {
@@ -157,8 +154,5 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
- if dec.ExecutionWitness != nil {
- e.ExecutionWitness = dec.ExecutionWitness
- }
return nil
}
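gen_ed.go is gencodec output, so the hunk above is the result of re-running code generation after the ExecutionWitness field is removed from ExecutableData in beacon/engine/types.go (next file), not a hand edit. A minimal sketch of that regeneration step, assuming the standard gencodec directive geth keeps next to the struct definition:

```go
// Assumed directive in beacon/engine/types.go; after dropping the field,
// re-running `go generate ./beacon/engine` rewrites gen_ed.go to match.
//go:generate go run github.com/fjl/gencodec -type ExecutableData -field-override executableDataMarshaling -out gen_ed.go
```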
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index ddb276ab0914..da9b6568f296 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -73,24 +73,23 @@ type payloadAttributesMarshaling struct {
// ExecutableData is the data necessary to execute an EL payload.
type ExecutableData struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
- StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
- LogsBloom []byte `json:"logsBloom" gencodec:"required"`
- Random common.Hash `json:"prevRandao" gencodec:"required"`
- Number uint64 `json:"blockNumber" gencodec:"required"`
- GasLimit uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed uint64 `json:"gasUsed" gencodec:"required"`
- Timestamp uint64 `json:"timestamp" gencodec:"required"`
- ExtraData []byte `json:"extraData" gencodec:"required"`
- BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
- BlockHash common.Hash `json:"blockHash" gencodec:"required"`
- Transactions [][]byte `json:"transactions" gencodec:"required"`
- Withdrawals []*types.Withdrawal `json:"withdrawals"`
- BlobGasUsed *uint64 `json:"blobGasUsed"`
- ExcessBlobGas *uint64 `json:"excessBlobGas"`
- ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
+ StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
+ ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
+ LogsBloom []byte `json:"logsBloom" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
+ Number uint64 `json:"blockNumber" gencodec:"required"`
+ GasLimit uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed uint64 `json:"gasUsed" gencodec:"required"`
+ Timestamp uint64 `json:"timestamp" gencodec:"required"`
+ ExtraData []byte `json:"extraData" gencodec:"required"`
+ BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
+ BlockHash common.Hash `json:"blockHash" gencodec:"required"`
+ Transactions [][]byte `json:"transactions" gencodec:"required"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ BlobGasUsed *uint64 `json:"blobGasUsed"`
+ ExcessBlobGas *uint64 `json:"excessBlobGas"`
}
// JSON type overrides for executableData.
@@ -316,8 +315,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
RequestsHash: requestsHash,
}
return types.NewBlockWithHeader(header).
- WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
- WithWitness(data.ExecutionWitness),
+ WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}),
nil
}
@@ -325,24 +323,23 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
// fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
data := &ExecutableData{
- BlockHash: block.Hash(),
- ParentHash: block.ParentHash(),
- FeeRecipient: block.Coinbase(),
- StateRoot: block.Root(),
- Number: block.NumberU64(),
- GasLimit: block.GasLimit(),
- GasUsed: block.GasUsed(),
- BaseFeePerGas: block.BaseFee(),
- Timestamp: block.Time(),
- ReceiptsRoot: block.ReceiptHash(),
- LogsBloom: block.Bloom().Bytes(),
- Transactions: encodeTransactions(block.Transactions()),
- Random: block.MixDigest(),
- ExtraData: block.Extra(),
- Withdrawals: block.Withdrawals(),
- BlobGasUsed: block.BlobGasUsed(),
- ExcessBlobGas: block.ExcessBlobGas(),
- ExecutionWitness: block.ExecutionWitness(),
+ BlockHash: block.Hash(),
+ ParentHash: block.ParentHash(),
+ FeeRecipient: block.Coinbase(),
+ StateRoot: block.Root(),
+ Number: block.NumberU64(),
+ GasLimit: block.GasLimit(),
+ GasUsed: block.GasUsed(),
+ BaseFeePerGas: block.BaseFee(),
+ Timestamp: block.Time(),
+ ReceiptsRoot: block.ReceiptHash(),
+ LogsBloom: block.Bloom().Bytes(),
+ Transactions: encodeTransactions(block.Transactions()),
+ Random: block.MixDigest(),
+ ExtraData: block.Extra(),
+ Withdrawals: block.Withdrawals(),
+ BlobGasUsed: block.BlobGasUsed(),
+ ExcessBlobGas: block.ExcessBlobGas(),
}
// Add blobs.
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index b294ee593e2e..96f9f58dde34 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -251,8 +251,6 @@ func init() {
utils.ShowDeprecated,
// See snapshot.go
snapshotCommand,
- // See verkle.go
- verkleCommand,
}
if logTestCommand != nil {
app.Commands = append(app.Commands, logTestCommand)
diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go
deleted file mode 100644
index c064d70ababc..000000000000
--- a/cmd/geth/verkle.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-package main
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "os"
- "slices"
-
- "github.com/ethereum/go-ethereum/cmd/utils"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-verkle"
- "github.com/urfave/cli/v2"
-)
-
-var (
- zero [32]byte
-
- verkleCommand = &cli.Command{
- Name: "verkle",
- Usage: "A set of experimental verkle tree management commands",
- Description: "",
- Subcommands: []*cli.Command{
- {
- Name: "verify",
- Usage: "verify the conversion of a MPT into a verkle tree",
- ArgsUsage: "<root>",
- Action: verifyVerkle,
- Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
- Description: `
-geth verkle verify <state-root>
-This command takes a root commitment and attempts to rebuild the tree.
- `,
- },
- {
- Name: "dump",
- Usage: "Dump a verkle tree to a DOT file",
- ArgsUsage: "<root> <key1> [<key 2> ...]",
- Action: expandVerkle,
- Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
- Description: `
-geth verkle dump <state-root> <key1> [<key 2> ...]
-This command will produce a dot file representing the tree, rooted at <root>.
-in which key1, key2, ... are expanded.
- `,
- },
- },
- }
-)
-
-// recurse into each child to ensure they can be loaded from the db. The tree isn't rebuilt
-// (only its nodes are loaded) so there is no need to flush them, the garbage collector should
-// take care of that for us.
-func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error {
- switch node := root.(type) {
- case *verkle.InternalNode:
- for i, child := range node.Children() {
- childC := child.Commit().Bytes()
-
- if bytes.Equal(childC[:], zero[:]) {
- continue
- }
- childS, err := resolver(childC[:])
- if err != nil {
- return fmt.Errorf("could not find child %x in db: %w", childC, err)
- }
- // depth is set to 0, the tree isn't rebuilt so it's not a problem
- childN, err := verkle.ParseNode(childS, 0)
- if err != nil {
- return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err)
- }
- if err := checkChildren(childN, resolver); err != nil {
- return fmt.Errorf("%x%w", i, err) // write the path to the erroring node
- }
- }
- case *verkle.LeafNode:
- // sanity check: ensure at least one value is non-zero
-
- for i := 0; i < verkle.NodeWidth; i++ {
- if len(node.Value(i)) != 0 {
- return nil
- }
- }
- return errors.New("both balance and nonce are 0")
- case verkle.Empty:
- // nothing to do
- default:
- return fmt.Errorf("unsupported type encountered %v", root)
- }
-
- return nil
-}
-
-func verifyVerkle(ctx *cli.Context) error {
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
-
- chaindb := utils.MakeChainDatabase(ctx, stack, true)
- defer chaindb.Close()
- headBlock := rawdb.ReadHeadBlock(chaindb)
- if headBlock == nil {
- log.Error("Failed to load head block")
- return errors.New("no head block")
- }
- if ctx.NArg() > 1 {
- log.Error("Too many arguments given")
- return errors.New("too many arguments")
- }
- var (
- rootC common.Hash
- err error
- )
- if ctx.NArg() == 1 {
- rootC, err = parseRoot(ctx.Args().First())
- if err != nil {
- log.Error("Failed to resolve state root", "error", err)
- return err
- }
- log.Info("Rebuilding the tree", "root", rootC)
- } else {
- rootC = headBlock.Root()
- log.Info("Rebuilding the tree", "root", rootC, "number", headBlock.NumberU64())
- }
-
- serializedRoot, err := chaindb.Get(rootC[:])
- if err != nil {
- return err
- }
- root, err := verkle.ParseNode(serializedRoot, 0)
- if err != nil {
- return err
- }
-
- if err := checkChildren(root, chaindb.Get); err != nil {
- log.Error("Could not rebuild the tree from the database", "err", err)
- return err
- }
-
- log.Info("Tree was rebuilt from the database")
- return nil
-}
-
-func expandVerkle(ctx *cli.Context) error {
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
-
- chaindb := utils.MakeChainDatabase(ctx, stack, true)
- defer chaindb.Close()
- var (
- rootC common.Hash
- keylist [][]byte
- err error
- )
- if ctx.NArg() >= 2 {
- rootC, err = parseRoot(ctx.Args().First())
- if err != nil {
- log.Error("Failed to resolve state root", "error", err)
- return err
- }
- keylist = make([][]byte, 0, ctx.Args().Len()-1)
- args := ctx.Args().Slice()
- for i := range args[1:] {
- key, err := hex.DecodeString(args[i+1])
- log.Info("decoded key", "arg", args[i+1], "key", key)
- if err != nil {
- return fmt.Errorf("error decoding key #%d: %w", i+1, err)
- }
- keylist = append(keylist, key)
- }
- log.Info("Rebuilding the tree", "root", rootC)
- } else {
- return fmt.Errorf("usage: %s root key1 [key 2...]", ctx.App.Name)
- }
-
- serializedRoot, err := chaindb.Get(rootC[:])
- if err != nil {
- return err
- }
- root, err := verkle.ParseNode(serializedRoot, 0)
- if err != nil {
- return err
- }
-
- for i, key := range keylist {
- log.Info("Reading key", "index", i, "key", key)
- root.Get(key, chaindb.Get)
- }
-
- if err := os.WriteFile("dump.dot", []byte(verkle.ToDot(root)), 0600); err != nil {
- log.Error("Failed to dump file", "err", err)
- } else {
- log.Info("Tree was dumped to file", "file", "dump.dot")
- }
- return nil
-}
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index dbba73947f32..eed27407a5fc 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -365,46 +365,7 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
header.Root = state.IntermediateRoot(true)
// Assemble the final block.
- block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
-
- // Create the block witness and attach to block.
- // This step needs to happen as late as possible to catch all access events.
- if chain.Config().IsVerkle(header.Number, header.Time) {
- keys := state.AccessEvents().Keys()
-
- // Open the pre-tree to prove the pre-state against
- parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1)
- if parent == nil {
- return nil, fmt.Errorf("nil parent header for block %d", header.Number)
- }
- preTrie, err := state.Database().OpenTrie(parent.Root)
- if err != nil {
- return nil, fmt.Errorf("error opening pre-state tree root: %w", err)
- }
- postTrie := state.GetTrie()
- if postTrie == nil {
- return nil, errors.New("post-state tree is not available")
- }
- vktPreTrie, okpre := preTrie.(*trie.VerkleTrie)
- vktPostTrie, okpost := postTrie.(*trie.VerkleTrie)
-
- // The witness is only attached iff both parent and current block are
- // using verkle tree.
- if okpre && okpost {
- if len(keys) > 0 {
- verkleProof, stateDiff, err := vktPreTrie.Proof(vktPostTrie, keys)
- if err != nil {
- return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err)
- }
- block = block.WithWitness(&types.ExecutionWitness{
- StateDiff: stateDiff,
- VerkleProof: verkleProof,
- })
- }
- }
- }
-
- return block, nil
+ return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil
}
// Seal generates a new sealing request for the given input block and pushes
diff --git a/core/state/access_events.go b/core/state/access_events.go
index 0575c9898aef..86f44bd62350 100644
--- a/core/state/access_events.go
+++ b/core/state/access_events.go
@@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie/utils"
+ "github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/holiman/uint256"
)
@@ -45,15 +45,12 @@ var zeroTreeIndex uint256.Int
type AccessEvents struct {
branches map[branchAccessKey]mode
chunks map[chunkAccessKey]mode
-
- pointCache *utils.PointCache
}
-func NewAccessEvents(pointCache *utils.PointCache) *AccessEvents {
+func NewAccessEvents() *AccessEvents {
return &AccessEvents{
- branches: make(map[branchAccessKey]mode),
- chunks: make(map[chunkAccessKey]mode),
- pointCache: pointCache,
+ branches: make(map[branchAccessKey]mode),
+ chunks: make(map[chunkAccessKey]mode),
}
}
@@ -75,8 +72,11 @@ func (ae *AccessEvents) Keys() [][]byte {
// TODO: consider if parallelizing this is worth it, probably depending on len(ae.chunks).
keys := make([][]byte, 0, len(ae.chunks))
for chunk := range ae.chunks {
- basePoint := ae.pointCache.Get(chunk.addr[:])
- key := utils.GetTreeKeyWithEvaluatedAddress(basePoint, &chunk.treeIndex, chunk.leafKey)
+ var offset [32]byte
+ treeIndexBytes := chunk.treeIndex.Bytes32()
+ copy(offset[:31], treeIndexBytes[1:])
+ offset[31] = chunk.leafKey
+ key := bintrie.GetBinaryTreeKey(chunk.addr, offset[:])
keys = append(keys, key)
}
return keys
@@ -84,9 +84,8 @@ func (ae *AccessEvents) Keys() [][]byte {
func (ae *AccessEvents) Copy() *AccessEvents {
cpy := &AccessEvents{
- branches: maps.Clone(ae.branches),
- chunks: maps.Clone(ae.chunks),
- pointCache: ae.pointCache,
+ branches: maps.Clone(ae.branches),
+ chunks: maps.Clone(ae.chunks),
}
return cpy
}
@@ -95,12 +94,12 @@ func (ae *AccessEvents) Copy() *AccessEvents {
// member fields of an account.
func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool, availableGas uint64) uint64 {
var gas uint64 // accumulate the consumed gas
- consumed, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite, availableGas)
+ consumed, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, isWrite, availableGas)
if consumed < expected {
return expected
}
gas += consumed
- consumed, expected = ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite, availableGas-consumed)
+ consumed, expected = ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, isWrite, availableGas-consumed)
if consumed < expected {
return expected + gas
}
@@ -112,7 +111,7 @@ func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool, availableG
// cold member fields of an account, that need to be touched when making a message
// call to that account.
func (ae *AccessEvents) MessageCallGas(destination common.Address, availableGas uint64) uint64 {
- _, expected := ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.BasicDataLeafKey, false, availableGas)
+ _, expected := ae.touchAddressAndChargeGas(destination, zeroTreeIndex, bintrie.BasicDataLeafKey, false, availableGas)
if expected == 0 {
expected = params.WarmStorageReadCostEIP2929
}
@@ -122,11 +121,11 @@ func (ae *AccessEvents) MessageCallGas(destination common.Address, availableGas
// ValueTransferGas returns the gas to be charged for each of the currently
// cold balance member fields of the caller and the callee accounts.
func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address, availableGas uint64) uint64 {
- _, expected1 := ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas)
+ _, expected1 := ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas)
if expected1 > availableGas {
return expected1
}
- _, expected2 := ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas-expected1)
+ _, expected2 := ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas-expected1)
if expected1+expected2 == 0 {
return params.WarmStorageReadCostEIP2929
}
@@ -138,8 +137,8 @@ func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address,
// address collision is done before the transfer, and so no write
// are guaranteed to happen at this point.
func (ae *AccessEvents) ContractCreatePreCheckGas(addr common.Address, availableGas uint64) uint64 {
- consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, false, availableGas)
- _, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, false, availableGas-consumed)
+ consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, false, availableGas)
+ _, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, false, availableGas-consumed)
return expected1 + expected2
}
@@ -147,9 +146,9 @@ func (ae *AccessEvents) ContractCreatePreCheckGas(addr common.Address, available
// a contract creation.
func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, availableGas uint64) (uint64, uint64) {
var gas uint64
- consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas)
+ consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas)
gas += consumed
- consumed, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, true, availableGas-consumed)
+ consumed, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, true, availableGas-consumed)
gas += consumed
return gas, expected1 + expected2
}
@@ -157,20 +156,20 @@ func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, availableGas
// AddTxOrigin adds the member fields of the sender account to the access event list,
// so that cold accesses are not charged, since they are covered by the 21000 gas.
func (ae *AccessEvents) AddTxOrigin(originAddr common.Address) {
- ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, gomath.MaxUint64)
- ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeHashLeafKey, false, gomath.MaxUint64)
+ ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, gomath.MaxUint64)
+ ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, bintrie.CodeHashLeafKey, false, gomath.MaxUint64)
}
// AddTxDestination adds the member fields of the sender account to the access event list,
// so that cold accesses are not charged, since they are covered by the 21000 gas.
func (ae *AccessEvents) AddTxDestination(addr common.Address, sendsValue, doesntExist bool) {
- ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, sendsValue, gomath.MaxUint64)
- ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, doesntExist, gomath.MaxUint64)
+ ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, sendsValue, gomath.MaxUint64)
+ ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, doesntExist, gomath.MaxUint64)
}
// SlotGas returns the amount of gas to be charged for a cold storage access.
func (ae *AccessEvents) SlotGas(addr common.Address, slot common.Hash, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 {
- treeIndex, subIndex := utils.StorageIndex(slot.Bytes())
+ treeIndex, subIndex := bintrie.StorageIndex(slot.Bytes())
_, expected := ae.touchAddressAndChargeGas(addr, *treeIndex, subIndex, isWrite, availableGas)
if expected == 0 && chargeWarmCosts {
expected = params.WarmStorageReadCostEIP2929
@@ -313,7 +312,7 @@ func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC,
// Note that an access in write mode implies an access in read mode, whereas an
// access in read mode does not imply an access in write mode.
func (ae *AccessEvents) BasicDataGas(addr common.Address, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 {
- _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite, availableGas)
+ _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, isWrite, availableGas)
if expected == 0 && chargeWarmCosts {
if availableGas < params.WarmStorageReadCostEIP2929 {
return availableGas
@@ -329,7 +328,7 @@ func (ae *AccessEvents) BasicDataGas(addr common.Address, isWrite bool, availabl
// Note that an access in write mode implies an access in read mode, whereas an access in
// read mode does not imply an access in write mode.
func (ae *AccessEvents) CodeHashGas(addr common.Address, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 {
- _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite, availableGas)
+ _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, isWrite, availableGas)
if expected == 0 && chargeWarmCosts {
if availableGas < params.WarmStorageReadCostEIP2929 {
return availableGas
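With the point cache gone, AccessEvents.Keys() above derives tree keys purely from bytes: the top 31 bytes of the big-endian tree index plus a one-byte leaf key are packed into a 32-byte offset and handed to bintrie.GetBinaryTreeKey. A minimal sketch of that mapping, using only the helpers visible in this diff; illustrative, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/bintrie"
	"github.com/holiman/uint256"
)

// treeKeyFor mirrors the packing in AccessEvents.Keys(): the top 31 bytes of
// the big-endian tree index form the stem offset, the last byte selects the
// leaf within the group.
func treeKeyFor(addr common.Address, treeIndex *uint256.Int, leafKey byte) []byte {
	var offset [32]byte
	idx := treeIndex.Bytes32()
	copy(offset[:31], idx[1:])
	offset[31] = leafKey
	return bintrie.GetBinaryTreeKey(addr, offset[:])
}

func main() {
	addr := common.HexToAddress("0x0102030405060708090a0b0c0d0e0f1011121314")
	// Account-header leaves live at tree index zero.
	fmt.Printf("basic data key: %x\n", treeKeyFor(addr, uint256.NewInt(0), byte(bintrie.BasicDataLeafKey)))
	fmt.Printf("code hash key:  %x\n", treeKeyFor(addr, uint256.NewInt(0), byte(bintrie.CodeHashLeafKey)))
}
```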
diff --git a/core/state/access_events_test.go b/core/state/access_events_test.go
index e80859a0b428..0b39130e8db8 100644
--- a/core/state/access_events_test.go
+++ b/core/state/access_events_test.go
@@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie/utils"
)
var (
@@ -38,7 +37,7 @@ func init() {
}
func TestAccountHeaderGas(t *testing.T) {
- ae := NewAccessEvents(utils.NewPointCache(1024))
+ ae := NewAccessEvents()
// Check cold read cost
gas := ae.BasicDataGas(testAddr, false, math.MaxUint64, false)
@@ -93,7 +92,7 @@ func TestAccountHeaderGas(t *testing.T) {
// TestContractCreateInitGas checks that the gas cost of contract creation is correctly
// calculated.
func TestContractCreateInitGas(t *testing.T) {
- ae := NewAccessEvents(utils.NewPointCache(1024))
+ ae := NewAccessEvents()
var testAddr [20]byte
for i := byte(0); i < 20; i++ {
@@ -116,7 +115,7 @@ func TestContractCreateInitGas(t *testing.T) {
// TestMessageCallGas checks that the gas cost of message calls is correctly
// calculated.
func TestMessageCallGas(t *testing.T) {
- ae := NewAccessEvents(utils.NewPointCache(1024))
+ ae := NewAccessEvents()
// Check cold read cost, without a value
gas := ae.MessageCallGas(testAddr, math.MaxUint64)
diff --git a/core/state/database.go b/core/state/database.go
index ae177d964f9a..7f4b04d50931 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/trienode"
- "github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
)
@@ -57,9 +56,6 @@ type Database interface {
// OpenStorageTrie opens the storage trie of an account.
OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error)
- // PointCache returns the cache holding points used in verkle tree key computation
- PointCache() *utils.PointCache
-
// TrieDB returns the underlying trie database for managing trie nodes.
TrieDB() *triedb.Database
@@ -161,7 +157,6 @@ type CachingDB struct {
snap *snapshot.Tree
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
codeSizeCache *lru.Cache[common.Hash, int]
- pointCache *utils.PointCache
// Transition-specific fields
TransitionStatePerRoot *lru.Cache[common.Hash, *overlay.TransitionState]
@@ -175,7 +170,6 @@ func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB {
snap: snap,
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
- pointCache: utils.NewPointCache(pointCacheSize),
TransitionStatePerRoot: lru.NewCache[common.Hash, *overlay.TransitionState](1000),
}
}
@@ -211,7 +205,7 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
}
// Configure the trie reader, which is expected to be available as the
// gatekeeper unless the state is corrupted.
- tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
+ tr, err := newTrieReader(stateRoot, db.triedb)
if err != nil {
return nil, err
}
@@ -289,11 +283,6 @@ func (db *CachingDB) TrieDB() *triedb.Database {
return db.triedb
}
-// PointCache returns the cache of evaluated curve points.
-func (db *CachingDB) PointCache() *utils.PointCache {
- return db.pointCache
-}
-
// Snapshot returns the underlying state snapshot.
func (db *CachingDB) Snapshot() *snapshot.Tree {
return db.snap
@@ -304,8 +293,6 @@ func mustCopyTrie(t Trie) Trie {
switch t := t.(type) {
case *trie.StateTrie:
return t.Copy()
- case *trie.VerkleTrie:
- return t.Copy()
case *transitiontrie.TransitionTrie:
return t.Copy()
default:
diff --git a/core/state/database_history.go b/core/state/database_history.go
index 314c56c4708a..f9c4a69f2f3b 100644
--- a/core/state/database_history.go
+++ b/core/state/database_history.go
@@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
)
@@ -105,7 +104,6 @@ type HistoricDB struct {
triedb *triedb.Database
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
codeSizeCache *lru.Cache[common.Hash, int]
- pointCache *utils.PointCache
}
// NewHistoricDatabase creates a historic state database.
@@ -115,7 +113,6 @@ func NewHistoricDatabase(disk ethdb.KeyValueStore, triedb *triedb.Database) *His
triedb: triedb,
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
- pointCache: utils.NewPointCache(pointCacheSize),
}
}
@@ -139,11 +136,6 @@ func (db *HistoricDB) OpenStorageTrie(stateRoot common.Hash, address common.Addr
return nil, errors.New("not implemented")
}
-// PointCache returns the cache holding points used in verkle tree key computation
-func (db *HistoricDB) PointCache() *utils.PointCache {
- return db.pointCache
-}
-
// TrieDB returns the underlying trie database for managing trie nodes.
func (db *HistoricDB) TrieDB() *triedb.Database {
return db.triedb
diff --git a/core/state/reader.go b/core/state/reader.go
index c912ca28da8b..38228f845323 100644
--- a/core/state/reader.go
+++ b/core/state/reader.go
@@ -33,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
- "github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/database"
)
@@ -267,7 +266,7 @@ type trieReader struct {
// newTrieReader constructs a trie reader of the specific state. An error will be
// returned if the associated trie specified by root is not existent.
-func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCache) (*trieReader, error) {
+func newTrieReader(root common.Hash, db *triedb.Database) (*trieReader, error) {
var (
tr Trie
err error
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 8f2f323327dd..c0ccb60290d0 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
@@ -498,8 +499,8 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
}
switch s.trie.(type) {
- case *trie.VerkleTrie:
- // Verkle uses only one tree, and the copy has already been
+ case *bintrie.BinaryTrie:
+ // UBT uses only one tree, and the copy has already been
// made in mustCopyTrie.
obj.trie = db.trie
case *transitiontrie.TransitionTrie:
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 8d8ab00e483e..969f42e18939 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
- "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
)
@@ -186,7 +185,7 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
transientStorage: newTransientStorage(),
}
if db.TrieDB().IsVerkle() {
- sdb.accessEvents = NewAccessEvents(db.PointCache())
+ sdb.accessEvents = NewAccessEvents()
}
return sdb, nil
}
@@ -1493,11 +1492,6 @@ func (s *StateDB) markUpdate(addr common.Address) {
s.mutations[addr].typ = update
}
-// PointCache returns the point cache used by verkle tree.
-func (s *StateDB) PointCache() *utils.PointCache {
- return s.db.PointCache()
-}
-
// Witness retrieves the current state witness being collected.
func (s *StateDB) Witness() *stateless.Witness {
return s.witness
diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go
index 50acc03aa8be..33a2016784e3 100644
--- a/core/state/statedb_hooked.go
+++ b/core/state/statedb_hooked.go
@@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
@@ -133,10 +132,6 @@ func (s *hookedStateDB) AddSlotToAccessList(addr common.Address, slot common.Has
s.inner.AddSlotToAccessList(addr, slot)
}
-func (s *hookedStateDB) PointCache() *utils.PointCache {
- return s.inner.PointCache()
-}
-
func (s *hookedStateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) {
s.inner.Prepare(rules, sender, coinbase, dest, precompiles, txAccesses)
}
diff --git a/core/types/block.go b/core/types/block.go
index b5b6468a131c..c52c05a4c7c5 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-verkle"
)
// A BlockNonce is a 64-bit hash which proves (combined with the
@@ -61,13 +60,6 @@ func (n *BlockNonce) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
}
-// ExecutionWitness represents the witness + proof used in a verkle context,
-// to provide the ability to execute a block statelessly.
-type ExecutionWitness struct {
- StateDiff verkle.StateDiff `json:"stateDiff"`
- VerkleProof *verkle.VerkleProof `json:"verkleProof"`
-}
-
//go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
//go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go
@@ -209,11 +201,6 @@ type Block struct {
transactions Transactions
withdrawals Withdrawals
- // witness is not an encoded part of the block body.
- // It is held in Block in order for easy relaying to the places
- // that process it.
- witness *ExecutionWitness
-
// caches
hash atomic.Pointer[common.Hash]
size atomic.Uint64
@@ -429,9 +416,6 @@ func (b *Block) BlobGasUsed() *uint64 {
return blobGasUsed
}
-// ExecutionWitness returns the verkle execution witneess + proof for a block
-func (b *Block) ExecutionWitness() *ExecutionWitness { return b.witness }
-
// Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previously cached value.
func (b *Block) Size() uint64 {
@@ -494,7 +478,6 @@ func (b *Block) WithSeal(header *Header) *Block {
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
- witness: b.witness,
}
}
@@ -506,7 +489,6 @@ func (b *Block) WithBody(body Body) *Block {
transactions: slices.Clone(body.Transactions),
uncles: make([]*Header, len(body.Uncles)),
withdrawals: slices.Clone(body.Withdrawals),
- witness: b.witness,
}
for i := range body.Uncles {
block.uncles[i] = CopyHeader(body.Uncles[i])
@@ -514,16 +496,6 @@ func (b *Block) WithBody(body Body) *Block {
return block
}
-func (b *Block) WithWitness(witness *ExecutionWitness) *Block {
- return &Block{
- header: b.header,
- transactions: b.transactions,
- uncles: b.uncles,
- withdrawals: b.withdrawals,
- witness: witness,
- }
-}
-
// Hash returns the keccak256 hash of b's header.
// The hash is computed on the first call and cached thereafter.
func (b *Block) Hash() common.Hash {
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 8975c791c842..25a3318c0228 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -214,7 +214,7 @@ func (evm *EVM) SetJumpDestCache(jumpDests JumpDestCache) {
// This is not threadsafe and should only be done very cautiously.
func (evm *EVM) SetTxContext(txCtx TxContext) {
if evm.chainRules.IsEIP4762 {
- txCtx.AccessEvents = state.NewAccessEvents(evm.StateDB.PointCache())
+ txCtx.AccessEvents = state.NewAccessEvents()
}
evm.TxContext = txCtx
}
diff --git a/core/vm/interface.go b/core/vm/interface.go
index d7f4c10e1f5b..e2f6a65189c8 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
@@ -84,9 +83,6 @@ type StateDB interface {
// even if the feature/fork is not active yet
AddSlotToAccessList(addr common.Address, slot common.Hash)
- // PointCache returns the point cache used in computations
- PointCache() *utils.PointCache
-
Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList)
RevertToSnapshot(int)
diff --git a/go.mod b/go.mod
index aff1d53923cd..66f3a3ffa51b 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,6 @@ require (
github.com/cockroachdb/pebble v1.1.5
github.com/consensys/gnark-crypto v0.18.1
github.com/crate-crypto/go-eth-kzg v1.4.0
- github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a
github.com/davecgh/go-spew v1.1.1
github.com/dchest/siphash v1.2.3
github.com/deckarep/golang-set/v2 v2.6.0
@@ -24,7 +23,6 @@ require (
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
github.com/ethereum/c-kzg-4844/v2 v2.1.5
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab
- github.com/ethereum/go-verkle v0.2.2
github.com/fatih/color v1.16.0
github.com/ferranbt/fastssz v0.1.4
github.com/fsnotify/fsnotify v1.6.0
diff --git a/trie/bintrie/key_encoding.go b/trie/bintrie/key_encoding.go
index cda797521a66..5a93fcde9aba 100644
--- a/trie/bintrie/key_encoding.go
+++ b/trie/bintrie/key_encoding.go
@@ -33,8 +33,17 @@ const (
)
var (
- zeroHash = common.Hash{}
- codeOffset = uint256.NewInt(128)
+ zeroInt = uint256.NewInt(0)
+ zeroHash = common.Hash{}
+ verkleNodeWidthLog2 = 8
+ headerStorageOffset = uint256.NewInt(64)
+ codeOffset = uint256.NewInt(128)
+ codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
+ mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2))
+ CodeOffset = uint256.NewInt(128)
+ VerkleNodeWidth = uint256.NewInt(256)
+ HeaderStorageOffset = uint256.NewInt(64)
+ VerkleNodeWidthLog2 = 8
)
func GetBinaryTreeKey(addr common.Address, key []byte) []byte {
@@ -83,3 +92,38 @@ func GetBinaryTreeKeyCodeChunk(address common.Address, chunknr *uint256.Int) []b
chunkOffset := new(uint256.Int).Add(codeOffset, chunknr).Bytes()
return GetBinaryTreeKey(address, chunkOffset)
}
+
+func StorageIndex(storageKey []byte) (*uint256.Int, byte) {
+ // If the storage slot is in the header, we need to add the header offset.
+ var key uint256.Int
+ key.SetBytes(storageKey)
+ if key.Cmp(codeStorageDelta) < 0 {
+ // This addition is always safe; it can't ever overflow since pos < codeStorageDelta.
+ key.Add(headerStorageOffset, &key)
+
+ // In this branch, the tree-index is zero since we're in the account header,
+ // and the sub-index is the LSB of the modified storage key.
+ return zeroInt, byte(key[0] & 0xFF)
+ }
+ // We first divide by VerkleNodeWidth to create room to avoid an overflow next.
+ key.Rsh(&key, uint(verkleNodeWidthLog2))
+
+ // We add mainStorageOffset/VerkleNodeWidth which can't overflow.
+ key.Add(&key, mainStorageOffsetLshVerkleNodeWidth)
+
+ // The sub-index is the LSB of the original storage key, since mainStorageOffset
+ // doesn't affect this byte, so we can avoid masks or shifts.
+ return &key, storageKey[len(storageKey)-1]
+}
diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go
deleted file mode 100644
--- a/trie/utils/verkle.go
+++ /dev/null
-
-package utils
-
-import (
- "encoding/binary"
- "sync"
-
- "github.com/crate-crypto/go-ipa/bandersnatch/fr"
- "github.com/ethereum/go-ethereum/common/lru"
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-verkle"
- "github.com/holiman/uint256"
-)
-
-const (
- BasicDataLeafKey = 0
- CodeHashLeafKey = 1
-
- BasicDataVersionOffset = 0
- BasicDataCodeSizeOffset = 5
- BasicDataNonceOffset = 8
- BasicDataBalanceOffset = 16
-)
-
-var (
- zero = uint256.NewInt(0)
- verkleNodeWidthLog2 = 8
- headerStorageOffset = uint256.NewInt(64)
- codeOffset = uint256.NewInt(128)
- verkleNodeWidth = uint256.NewInt(256)
- codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
- mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2))
- CodeOffset = uint256.NewInt(128)
- VerkleNodeWidth = uint256.NewInt(256)
- HeaderStorageOffset = uint256.NewInt(64)
- VerkleNodeWidthLog2 = 8
-
- index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64]
-
- // cacheHitGauge is the metric to track how many cache hit occurred.
- cacheHitGauge = metrics.NewRegisteredGauge("trie/verkle/cache/hit", nil)
-
- // cacheMissGauge is the metric to track how many cache miss occurred.
- cacheMissGauge = metrics.NewRegisteredGauge("trie/verkle/cache/miss", nil)
-)
-
-func init() {
- // The byte array is the Marshalled output of the point computed as such:
- //
- // var (
- // config = verkle.GetConfig()
- // fr verkle.Fr
- // )
- // verkle.FromLEBytes(&fr, []byte{2, 64})
- // point := config.CommitToPoly([]verkle.Fr{fr}, 1)
- index0Point = new(verkle.Point)
- err := index0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191})
- if err != nil {
- panic(err)
- }
-}
-
-// PointCache is the LRU cache for storing evaluated address commitment.
-type PointCache struct {
- lru lru.BasicLRU[string, *verkle.Point]
- lock sync.RWMutex
-}
-
-// NewPointCache returns the cache with specified size.
-func NewPointCache(maxItems int) *PointCache {
- return &PointCache{
- lru: lru.NewBasicLRU[string, *verkle.Point](maxItems),
- }
-}
-
-// Get returns the cached commitment for the specified address, or computing
-// it on the flight.
-func (c *PointCache) Get(addr []byte) *verkle.Point {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- p, ok := c.lru.Get(string(addr))
- if ok {
- cacheHitGauge.Inc(1)
- return p
- }
- cacheMissGauge.Inc(1)
- p = evaluateAddressPoint(addr)
- c.lru.Add(string(addr), p)
- return p
-}
-
-// GetStem returns the first 31 bytes of the tree key as the tree stem. It only
-// works for the account metadata whose treeIndex is 0.
-func (c *PointCache) GetStem(addr []byte) []byte {
- p := c.Get(addr)
- return pointToHash(p, 0)[:31]
-}
-
-// GetTreeKey performs both the work of the spec's get_tree_key function, and that
-// of pedersen_hash: it builds the polynomial in pedersen_hash without having to
-// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte
-// array. Since at most the first 5 coefficients of the polynomial will be non-zero,
-// these 5 coefficients are created directly.
-func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte {
- if len(address) < 32 {
- var aligned [32]byte
- address = append(aligned[:32-len(address)], address...)
- }
- // poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high]
- var poly [5]fr.Element
-
- // 32-byte address, interpreted as two little endian
- // 16-byte numbers.
- verkle.FromLEBytes(&poly[1], address[:16])
- verkle.FromLEBytes(&poly[2], address[16:])
-
- // treeIndex must be interpreted as a 32-byte aligned little-endian integer.
- // e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00.
- // poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes).
- //
- // To avoid unnecessary endianness conversions for go-ipa, we do some trick:
- // - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of
- // 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})).
- // - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of
- // the 32-byte aligned big-endian representation (BE({00,00,...}).
- trieIndexBytes := treeIndex.Bytes32()
- verkle.FromBytes(&poly[3], trieIndexBytes[16:])
- verkle.FromBytes(&poly[4], trieIndexBytes[:16])
-
- cfg := verkle.GetConfig()
- ret := cfg.CommitToPoly(poly[:], 0)
-
- // add a constant point corresponding to poly[0]=[2+256*64].
- ret.Add(ret, index0Point)
-
- return pointToHash(ret, subIndex)
-}
-
-// GetTreeKeyWithEvaluatedAddress is basically identical to GetTreeKey, the only
-// difference is a part of polynomial is already evaluated.
-//
-// Specifically, poly = [2+256*64, address_le_low, address_le_high] is already
-// evaluated.
-func GetTreeKeyWithEvaluatedAddress(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte {
- var poly [5]fr.Element
-
- // little-endian, 32-byte aligned treeIndex
- var index [32]byte
- for i := 0; i < len(treeIndex); i++ {
- binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i])
- }
- verkle.FromLEBytes(&poly[3], index[:16])
- verkle.FromLEBytes(&poly[4], index[16:])
-
- cfg := verkle.GetConfig()
- ret := cfg.CommitToPoly(poly[:], 0)
-
- // add the pre-evaluated address
- ret.Add(ret, evaluated)
-
- return pointToHash(ret, subIndex)
-}
-
-// BasicDataKey returns the verkle tree key of the basic data field for
-// the specified account.
-func BasicDataKey(address []byte) []byte {
- return GetTreeKey(address, zero, BasicDataLeafKey)
-}
-
-// CodeHashKey returns the verkle tree key of the code hash field for
-// the specified account.
-func CodeHashKey(address []byte) []byte {
- return GetTreeKey(address, zero, CodeHashLeafKey)
-}
-
-func codeChunkIndex(chunk *uint256.Int) (*uint256.Int, byte) {
- var (
- chunkOffset = new(uint256.Int).Add(codeOffset, chunk)
- treeIndex, subIndexMod = new(uint256.Int).DivMod(chunkOffset, verkleNodeWidth, new(uint256.Int))
- )
- return treeIndex, byte(subIndexMod.Uint64())
-}
-
-// CodeChunkKey returns the verkle tree key of the code chunk for the
-// specified account.
-func CodeChunkKey(address []byte, chunk *uint256.Int) []byte {
- treeIndex, subIndex := codeChunkIndex(chunk)
- return GetTreeKey(address, treeIndex, subIndex)
-}
-
-func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) {
- chunkOffset := new(uint256.Int).Add(CodeOffset, chunk)
- treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth)
- subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth)
- var subIndex byte
- if len(subIndexMod) != 0 {
- subIndex = byte(subIndexMod[0])
- }
- return treeIndex, subIndex
-}
-
-func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte {
- treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk)
- return GetTreeKey(address, treeIndex, subIndex)
-}
-
-func StorageIndex(storageKey []byte) (*uint256.Int, byte) {
- // If the storage slot is in the header, we need to add the header offset.
- var key uint256.Int
- key.SetBytes(storageKey)
- if key.Cmp(codeStorageDelta) < 0 {
- // This addition is always safe; it can't ever overflow since pos < codeStorageDelta.
- key.Add(headerStorageOffset, &key)
-
- // In this branch, the tree-index is zero since we're in the account header,
- // and the sub-index is the LSB of the modified storage key.
- return zero, byte(key[0] & 0xFF)
- }
- // We first divide by VerkleNodeWidth to create room to avoid an overflow next.
- key.Rsh(&key, uint(verkleNodeWidthLog2))
-
- // We add mainStorageOffset/VerkleNodeWidth which can't overflow.
- key.Add(&key, mainStorageOffsetLshVerkleNodeWidth)
-
- // The sub-index is the LSB of the original storage key, since mainStorageOffset
- // doesn't affect this byte, so we can avoid masks or shifts.
- return &key, storageKey[len(storageKey)-1]
-}
diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go
deleted file mode 100644
--- a/trie/utils/verkle_test.go
+++ /dev/null
-
-package utils
-
-import (
- "bytes"
- "testing"
-
- "github.com/ethereum/go-verkle"
- "github.com/holiman/uint256"
-)
-
-func TestTreeKey(t *testing.T) {
- var (
- address = []byte{0x01}
- addressEval = evaluateAddressPoint(address)
- smallIndex = uint256.NewInt(1)
- largeIndex = uint256.NewInt(10000)
- smallStorage = []byte{0x1}
- largeStorage = bytes.Repeat([]byte{0xff}, 16)
- )
- if !bytes.Equal(BasicDataKey(address), BasicDataKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched basic data key")
- }
- if !bytes.Equal(CodeHashKey(address), CodeHashKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched code hash key")
- }
- if !bytes.Equal(CodeChunkKey(address, smallIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, smallIndex)) {
- t.Fatal("Unmatched code chunk key")
- }
- if !bytes.Equal(CodeChunkKey(address, largeIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, largeIndex)) {
- t.Fatal("Unmatched code chunk key")
- }
- if !bytes.Equal(StorageSlotKey(address, smallStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, smallStorage)) {
- t.Fatal("Unmatched storage slot key")
- }
- if !bytes.Equal(StorageSlotKey(address, largeStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, largeStorage)) {
- t.Fatal("Unmatched storage slot key")
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ethereum/go-ethereum/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkTreeKey
-// BenchmarkTreeKey-8 398731 2961 ns/op 32 B/op 1 allocs/op
-func BenchmarkTreeKey(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- BasicDataKey([]byte{0x01})
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ethereum/go-ethereum/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkTreeKeyWithEvaluation
-// BenchmarkTreeKeyWithEvaluation-8 513855 2324 ns/op 32 B/op 1 allocs/op
-func BenchmarkTreeKeyWithEvaluation(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- addr := []byte{0x01}
- eval := evaluateAddressPoint(addr)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- BasicDataKeyWithEvaluatedAddress(eval)
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ethereum/go-ethereum/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkStorageKey
-// BenchmarkStorageKey-8 230516 4584 ns/op 96 B/op 3 allocs/op
-func BenchmarkStorageKey(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- StorageSlotKey([]byte{0x01}, bytes.Repeat([]byte{0xff}, 32))
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ethereum/go-ethereum/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkStorageKeyWithEvaluation
-// BenchmarkStorageKeyWithEvaluation-8 320125 3753 ns/op 96 B/op 3 allocs/op
-func BenchmarkStorageKeyWithEvaluation(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- addr := []byte{0x01}
- eval := evaluateAddressPoint(addr)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- StorageSlotKeyWithEvaluatedAddress(eval, bytes.Repeat([]byte{0xff}, 32))
- }
-}
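The StorageIndex helper moved into trie/bintrie earlier in this diff keeps the EIP-4762 slot layout: slots below the header window are folded into the account header group at offset 64, everything else lands in the main storage area, with the low byte of the slot selecting the leaf. A small usage sketch under that assumption (expected values inferred from the constants declared next to the function); illustrative only:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/trie/bintrie"
	"github.com/holiman/uint256"
)

func main() {
	// Small slot: stays in the account header group, so the tree index is 0
	// and the sub-index is headerStorageOffset (64) + slot.
	small := uint256.NewInt(5).Bytes32()
	idx, sub := bintrie.StorageIndex(small[:])
	fmt.Println(idx.Uint64(), sub) // expected: 0 69

	// Large slot: shifted into the main storage area; the tree index becomes
	// slot/256 plus the main storage offset, the sub-index is the slot's low byte.
	large := new(uint256.Int).Lsh(uint256.NewInt(1), 200).Bytes32()
	idx, sub = bintrie.StorageIndex(large[:])
	fmt.Println(idx, sub) // expected sub-index: 0
}
```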
diff --git a/trie/verkle.go b/trie/verkle.go
deleted file mode 100644
index 70793330c534..000000000000
--- a/trie/verkle.go
+++ /dev/null
@@ -1,458 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/trie/trienode"
- "github.com/ethereum/go-ethereum/trie/utils"
- "github.com/ethereum/go-ethereum/triedb/database"
- "github.com/ethereum/go-verkle"
- "github.com/holiman/uint256"
-)
-
-var (
- errInvalidRootType = errors.New("invalid node type for root")
-)
-
-// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie
-// interface so that Verkle trees can be reused verbatim.
-type VerkleTrie struct {
- root verkle.VerkleNode
- cache *utils.PointCache
- reader *Reader
- tracer *PrevalueTracer
-}
-
-// NewVerkleTrie constructs a verkle tree based on the specified root hash.
-func NewVerkleTrie(root common.Hash, db database.NodeDatabase, cache *utils.PointCache) (*VerkleTrie, error) {
- reader, err := NewReader(root, common.Hash{}, db)
- if err != nil {
- return nil, err
- }
- t := &VerkleTrie{
- root: verkle.New(),
- cache: cache,
- reader: reader,
- tracer: NewPrevalueTracer(),
- }
- // Parse the root verkle node if it's not empty.
- if root != types.EmptyVerkleHash && root != types.EmptyRootHash {
- blob, err := t.nodeResolver(nil)
- if err != nil {
- return nil, err
- }
- node, err := verkle.ParseNode(blob, 0)
- if err != nil {
- return nil, err
- }
- t.root = node
- }
- return t, nil
-}
-
-// GetKey returns the sha3 preimage of a hashed key that was previously used
-// to store a value.
-func (t *VerkleTrie) GetKey(key []byte) []byte {
- return key
-}
-
-// GetAccount implements state.Trie, retrieving the account with the specified
-// account address. If the specified account is not in the verkle tree, nil will
-// be returned. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) {
- var (
- acc = &types.StateAccount{}
- values [][]byte
- err error
- )
- switch n := t.root.(type) {
- case *verkle.InternalNode:
- values, err = n.GetValuesAtStem(t.cache.GetStem(addr[:]), t.nodeResolver)
- if err != nil {
- return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err)
- }
- default:
- return nil, errInvalidRootType
- }
- if values == nil {
- return nil, nil
- }
- basicData := values[utils.BasicDataLeafKey]
- acc.Nonce = binary.BigEndian.Uint64(basicData[utils.BasicDataNonceOffset:])
- acc.Balance = new(uint256.Int).SetBytes(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16])
- acc.CodeHash = values[utils.CodeHashLeafKey]
-
- // TODO account.Root is left empty. How should we handle the legacy account?
- return acc, nil
-}
-
-// PrefetchAccount attempts to resolve specific accounts from the database
-// to accelerate subsequent trie operations.
-func (t *VerkleTrie) PrefetchAccount(addresses []common.Address) error {
- for _, addr := range addresses {
- if _, err := t.GetAccount(addr); err != nil {
- return err
- }
- }
- return nil
-}
-
-// GetStorage implements state.Trie, retrieving the storage slot with the specified
-// account address and storage key. If the specified slot is not in the verkle tree,
-// nil will be returned. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
- val, err := t.root.Get(k, t.nodeResolver)
- if err != nil {
- return nil, err
- }
- return common.TrimLeftZeroes(val), nil
-}
-
-// PrefetchStorage attempts to resolve specific storage slots from the database
-// to accelerate subsequent trie operations.
-func (t *VerkleTrie) PrefetchStorage(addr common.Address, keys [][]byte) error {
- for _, key := range keys {
- if _, err := t.GetStorage(addr, key); err != nil {
- return err
- }
- }
- return nil
-}
-
-// UpdateAccount implements state.Trie, writing the provided account into the tree.
-// If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
- var (
- err error
- basicData [32]byte
- values = make([][]byte, verkle.NodeWidth)
- stem = t.cache.GetStem(addr[:])
- )
-
- // Code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present
- // before the code size to support bigger integers in the future. PutUint32(...) requires
- // 4 bytes, so we need to shift the offset 1 byte to the left.
- binary.BigEndian.PutUint32(basicData[utils.BasicDataCodeSizeOffset-1:], uint32(codeLen))
- binary.BigEndian.PutUint64(basicData[utils.BasicDataNonceOffset:], acc.Nonce)
- if acc.Balance.ByteLen() > 16 {
- panic("balance too large")
- }
- acc.Balance.WriteToSlice(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16])
- values[utils.BasicDataLeafKey] = basicData[:]
- values[utils.CodeHashLeafKey] = acc.CodeHash[:]
-
- switch root := t.root.(type) {
- case *verkle.InternalNode:
- err = root.InsertValuesAtStem(stem, values, t.nodeResolver)
- default:
- return errInvalidRootType
- }
- if err != nil {
- return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err)
- }
-
- return nil
-}
-
-// UpdateStorage implements state.Trie, writing the provided storage slot into
-// the tree. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error {
- // Left padding the slot value to 32 bytes.
- var v [32]byte
- if len(value) >= 32 {
- copy(v[:], value[:32])
- } else {
- copy(v[32-len(value):], value[:])
- }
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(address.Bytes()), key)
- return t.root.Insert(k, v[:], t.nodeResolver)
-}
-
-// DeleteAccount leaves the account untouched, as no account deletion can happen
-// in verkle.
-// There is one special corner case: an account that is prefunded, CREATE2-d and
-// then SELFDESTRUCT-d should see its funds drained. EIP-161 says that such an
-// account should be removed, but this is verboten by the verkle spec. As a
-// workaround, the method checks for this corner case and, if so, overwrites the
-// balance with 0. This workaround will be removed once the spec has been clarified.
-func (t *VerkleTrie) DeleteAccount(addr common.Address) error {
- k := utils.BasicDataKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()))
- values, err := t.root.(*verkle.InternalNode).GetValuesAtStem(k, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("Error getting data at %x in delete: %w", k, err)
- }
- var prefunded bool
- for i, v := range values {
- switch i {
- case 0:
- prefunded = len(v) == 32
- case 1:
- prefunded = len(v) == 32 && bytes.Equal(v, types.EmptyCodeHash[:])
- default:
- prefunded = v == nil
- }
- if !prefunded {
- break
- }
- }
- if prefunded {
- t.root.Insert(k, common.Hash{}.Bytes(), t.nodeResolver)
- }
- return nil
-}
-
-// RollBackAccount removes the account info + code from the tree, unlike DeleteAccount
-// that will overwrite it with 0s. The first 64 storage slots are also removed.
-func (t *VerkleTrie) RollBackAccount(addr common.Address) error {
- var (
- evaluatedAddr = t.cache.Get(addr.Bytes())
- basicDataKey = utils.BasicDataKeyWithEvaluatedAddress(evaluatedAddr)
- )
- basicDataBytes, err := t.root.Get(basicDataKey, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("rollback: error finding code size: %w", err)
- }
- if len(basicDataBytes) == 0 {
- return errors.New("rollback: basic data is not existent")
- }
- // The code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present
- // before the code size to support bigger integers in the future.
- // BigEndian.Uint32(...) expects 4 bytes, so we need to shift the offset 1 byte to the left.
- codeSize := binary.BigEndian.Uint32(basicDataBytes[utils.BasicDataCodeSizeOffset-1:])
-
- // Delete the account header + first 64 slots + first 128 code chunks
- _, err = t.root.(*verkle.InternalNode).DeleteAtStem(basicDataKey[:31], t.nodeResolver)
- if err != nil {
- return fmt.Errorf("error rolling back account header: %w", err)
- }
-
- // Delete all further code
- for i, chunknr := uint64(31*128), uint64(128); i < uint64(codeSize); i, chunknr = i+31*256, chunknr+256 {
- // evaluate group key at the start of a new group
- offset := uint256.NewInt(chunknr)
- key := utils.CodeChunkKeyWithEvaluatedAddress(evaluatedAddr, offset)
-
- if _, err = t.root.(*verkle.InternalNode).DeleteAtStem(key[:], t.nodeResolver); err != nil {
- return fmt.Errorf("error deleting code chunk stem (addr=%x, offset=%d) error: %w", addr[:], offset, err)
- }
- }
- return nil
-}
-
-// DeleteStorage implements state.Trie, deleting the specified storage slot from
-// the trie. If the storage slot does not exist in the trie, no error will be
-// returned. If the trie is corrupted, an error will be returned.
-func (t *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error {
- var zero [32]byte
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
- return t.root.Insert(k, zero[:], t.nodeResolver)
-}
-
-// Hash returns the root hash of the tree. It does not write to the database and
-// can be used even if the tree doesn't have one.
-func (t *VerkleTrie) Hash() common.Hash {
- return t.root.Commit().Bytes()
-}
-
-// Commit writes all nodes to the tree's memory database.
-func (t *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) {
- root := t.root.(*verkle.InternalNode)
- nodes, err := root.BatchSerialize()
- if err != nil {
- // Error return from this function indicates error in the code logic
- // of BatchSerialize, and we fail catastrophically if this is the case.
- panic(fmt.Errorf("BatchSerialize failed: %v", err))
- }
- nodeset := trienode.NewNodeSet(common.Hash{})
- for _, node := range nodes {
- // Hash parameter is not used in pathdb
- nodeset.AddNode(node.Path, trienode.NewNodeWithPrev(common.Hash{}, node.SerializedBytes, t.tracer.Get(node.Path)))
- }
- // Return the serialized root commitment.
- return t.Hash(), nodeset
-}
-
-// NodeIterator implements state.Trie, returning an iterator that returns
-// nodes of the trie. Iteration starts at the key after the given start key.
-//
-// TODO(gballet, rjl493456442) implement it.
-func (t *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
- // TODO(@CPerezz): remove.
- return nil, errors.New("not implemented")
-}
-
-// Prove implements state.Trie, constructing a Merkle proof for key. The result
-// contains all encoded nodes on the path to the value at key. The value itself
-// is also included in the last node and can be retrieved by verifying the proof.
-//
-// If the trie does not contain a value for key, the returned proof contains all
-// nodes of the longest existing prefix of the key (at least the root), ending
-// with the node that proves the absence of the key.
-//
-// TODO(gballet, rjl493456442) implement it.
-func (t *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
- panic("not implemented")
-}
-
-// Copy returns a deep-copied verkle tree.
-func (t *VerkleTrie) Copy() *VerkleTrie {
- return &VerkleTrie{
- root: t.root.Copy(),
- cache: t.cache,
- reader: t.reader,
- tracer: t.tracer.Copy(),
- }
-}
-
-// IsVerkle indicates if the trie is a Verkle trie.
-func (t *VerkleTrie) IsVerkle() bool {
- return true
-}
-
-// Proof builds and returns the verkle multiproof for keys, built against
-// the pre tree. The post tree is passed in order to add the post values
-// to that proof.
-func (t *VerkleTrie) Proof(posttrie *VerkleTrie, keys [][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) {
- var postroot verkle.VerkleNode
- if posttrie != nil {
- postroot = posttrie.root
- }
- proof, _, _, _, err := verkle.MakeVerkleMultiProof(t.root, postroot, keys, t.nodeResolver)
- if err != nil {
- return nil, nil, err
- }
- p, kvps, err := verkle.SerializeProof(proof)
- if err != nil {
- return nil, nil, err
- }
- return p, kvps, nil
-}
-
-// ChunkedCode represents a sequence of 32-byte chunks of code (31 bytes of which
-// are actual code, and 1 byte is the pushdata offset).
-type ChunkedCode []byte
-
-// Copy the values here so as to avoid an import cycle
-const (
- PUSH1 = byte(0x60)
- PUSH32 = byte(0x7f)
-)
-
-// ChunkifyCode generates the chunked version of an array representing EVM bytecode
-func ChunkifyCode(code []byte) ChunkedCode {
- var (
- chunkOffset = 0 // offset in the chunk
- chunkCount = len(code) / 31
- codeOffset = 0 // offset in the code
- )
- if len(code)%31 != 0 {
- chunkCount++
- }
- chunks := make([]byte, chunkCount*32)
- for i := 0; i < chunkCount; i++ {
- // number of bytes to copy, 31 unless the end of the code has been reached.
- end := 31 * (i + 1)
- if len(code) < end {
- end = len(code)
- }
- copy(chunks[i*32+1:], code[31*i:end]) // copy the code itself
-
- // Chunk offset is carried over from the previous chunk.
- if chunkOffset > 31 {
- // skip offset calculation if push data covers the whole chunk
- chunks[i*32] = 31
- chunkOffset = 1
- continue
- }
- chunks[32*i] = byte(chunkOffset)
- chunkOffset = 0
-
- // Check each instruction and update the offset; it should be 0 unless
- // a PUSH-N's data overflows into the next chunk.
- for ; codeOffset < end; codeOffset++ {
- if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 {
- codeOffset += int(code[codeOffset] - PUSH1 + 1)
- if codeOffset+1 >= 31*(i+1) {
- codeOffset++
- chunkOffset = codeOffset - 31*(i+1)
- break
- }
- }
- }
- }
- return chunks
-}
-
-// UpdateContractCode implements state.Trie, writing the provided contract code
-// into the trie.
-// Note that the code size *must* already have been saved by a previous UpdateAccount call.
-func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
- var (
- chunks = ChunkifyCode(code)
- values [][]byte
- key []byte
- err error
- )
- for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 {
- groupOffset := (chunknr + 128) % 256
- if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ {
- values = make([][]byte, verkle.NodeWidth)
- key = utils.CodeChunkKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), uint256.NewInt(chunknr))
- }
- values[groupOffset] = chunks[i : i+32]
-
- if groupOffset == 255 || len(chunks)-i <= 32 {
- switch root := t.root.(type) {
- case *verkle.InternalNode:
- err = root.InsertValuesAtStem(key[:31], values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err)
- }
- default:
- return errInvalidRootType
- }
- }
- }
- return nil
-}
-
-func (t *VerkleTrie) ToDot() string {
- return verkle.ToDot(t.root)
-}
-
-func (t *VerkleTrie) nodeResolver(path []byte) ([]byte, error) {
- blob, err := t.reader.Node(path, common.Hash{})
- if err != nil {
- return nil, err
- }
- t.tracer.Put(path, blob)
- return blob, nil
-}
-
-// Witness returns a set containing all trie nodes that have been accessed.
-func (t *VerkleTrie) Witness() map[string][]byte {
- panic("not implemented")
-}
diff --git a/trie/verkle_test.go b/trie/verkle_test.go
deleted file mode 100644
index 1832e3db1338..000000000000
--- a/trie/verkle_test.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "bytes"
- "reflect"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/trie/utils"
- "github.com/holiman/uint256"
-)
-
-var (
- accounts = map[common.Address]*types.StateAccount{
- {1}: {
- Nonce: 100,
- Balance: uint256.NewInt(100),
- CodeHash: common.Hash{0x1}.Bytes(),
- },
- {2}: {
- Nonce: 200,
- Balance: uint256.NewInt(200),
- CodeHash: common.Hash{0x2}.Bytes(),
- },
- }
- storages = map[common.Address]map[common.Hash][]byte{
- {1}: {
- common.Hash{10}: []byte{10},
- common.Hash{11}: []byte{11},
- common.MaxHash: []byte{0xff},
- },
- {2}: {
- common.Hash{20}: []byte{20},
- common.Hash{21}: []byte{21},
- common.MaxHash: []byte{0xff},
- },
- }
-)
-
-func TestVerkleTreeReadWrite(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
- tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
-
- for addr, acct := range accounts {
- if err := tr.UpdateAccount(addr, acct, 0); err != nil {
- t.Fatalf("Failed to update account, %v", err)
- }
- for key, val := range storages[addr] {
- if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil {
- t.Fatalf("Failed to update storage, %v", err)
- }
- }
- }
-
- for addr, acct := range accounts {
- stored, err := tr.GetAccount(addr)
- if err != nil {
- t.Fatalf("Failed to get account, %v", err)
- }
- if !reflect.DeepEqual(stored, acct) {
- t.Fatal("account is not matched")
- }
- for key, val := range storages[addr] {
- stored, err := tr.GetStorage(addr, key.Bytes())
- if err != nil {
- t.Fatalf("Failed to get storage, %v", err)
- }
- if !bytes.Equal(stored, val) {
- t.Fatal("storage is not matched")
- }
- }
- }
-}
-
-func TestVerkleRollBack(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
- tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
-
- for addr, acct := range accounts {
- // create more than 128 chunks of code
- code := make([]byte, 129*32)
- for i := 0; i < len(code); i += 2 {
- code[i] = 0x60
- code[i+1] = byte(i % 256)
- }
- if err := tr.UpdateAccount(addr, acct, len(code)); err != nil {
- t.Fatalf("Failed to update account, %v", err)
- }
- for key, val := range storages[addr] {
- if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil {
- t.Fatalf("Failed to update storage, %v", err)
- }
- }
- hash := crypto.Keccak256Hash(code)
- if err := tr.UpdateContractCode(addr, hash, code); err != nil {
- t.Fatalf("Failed to update contract, %v", err)
- }
- }
-
- // Check that things were created
- for addr, acct := range accounts {
- stored, err := tr.GetAccount(addr)
- if err != nil {
- t.Fatalf("Failed to get account, %v", err)
- }
- if !reflect.DeepEqual(stored, acct) {
- t.Fatal("account is not matched")
- }
- for key, val := range storages[addr] {
- stored, err := tr.GetStorage(addr, key.Bytes())
- if err != nil {
- t.Fatalf("Failed to get storage, %v", err)
- }
- if !bytes.Equal(stored, val) {
- t.Fatal("storage is not matched")
- }
- }
- }
-
- // ensure there is some code in the 2nd group of the 1st account
- keyOf2ndGroup := utils.CodeChunkKeyWithEvaluatedAddress(tr.cache.Get(common.Address{1}.Bytes()), uint256.NewInt(128))
- chunk, err := tr.root.Get(keyOf2ndGroup, nil)
- if err != nil {
- t.Fatalf("Failed to get account, %v", err)
- }
- if len(chunk) == 0 {
- t.Fatal("account was not created ")
- }
-
- // Rollback first account and check that it is gone
- addr1 := common.Address{1}
- err = tr.RollBackAccount(addr1)
- if err != nil {
- t.Fatalf("error rolling back address 1: %v", err)
- }
-
- // ensure the account is gone
- stored, err := tr.GetAccount(addr1)
- if err != nil {
- t.Fatalf("Failed to get account, %v", err)
- }
- if stored != nil {
- t.Fatal("account was not deleted")
- }
-
- // ensure that the last code chunk is also gone from the tree
- chunk, err = tr.root.Get(keyOf2ndGroup, nil)
- if err != nil {
- t.Fatalf("Failed to get account, %v", err)
- }
- if len(chunk) != 0 {
- t.Fatal("account was not deleted")
- }
-}
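A quick sanity check of the constants used in the deleted TestVerkleRollBack (again a standalone sketch under the same 31-bytes-per-chunk assumption, not part of the patch): 129*32 bytes of synthetic code produces more than 128 chunks, so chunk index 128 falls into the second chunk group, which is exactly the stem the test probes via CodeChunkKeyWithEvaluatedAddress(..., uint256.NewInt(128)) before and after RollBackAccount.

package main

import "fmt"

func main() {
	codeLen := 129 * 32           // size of the test's synthetic code
	chunks := (codeLen + 30) / 31 // 31 code bytes per 32-byte chunk

	fmt.Println("code bytes:", codeLen) // 4128
	fmt.Println("chunks:", chunks)      // 134, i.e. > 128

	// Chunks 0..127 share the account header stem; chunk 128 opens the second
	// chunk group, so the key at chunk number 128 must exist after the writes
	// and must be gone after RollBackAccount.
}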